root/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c


DEFINITIONS

This source file includes the following definitions:
  1. mt76x02_mac_get_key_info
  2. mt76x02_mac_shared_key_setup
  3. mt76x02_mac_wcid_sync_pn
  4. mt76x02_mac_wcid_set_key
  5. mt76x02_mac_wcid_setup
  6. mt76x02_mac_wcid_set_drop
  7. mt76x02_mac_tx_rate_val
  8. mt76x02_mac_wcid_set_rate
  9. mt76x02_mac_set_short_preamble
  10. mt76x02_mac_load_tx_status
  11. mt76x02_mac_process_tx_rate
  12. mt76x02_mac_write_txwi
  13. mt76x02_tx_rate_fallback
  14. mt76x02_mac_fill_tx_status
  15. mt76x02_send_tx_status
  16. mt76x02_mac_process_rate
  17. mt76x02_mac_setaddr
  18. mt76x02_mac_get_rssi
  19. mt76x02_mac_process_rx
  20. mt76x02_mac_poll_tx_status
  21. mt76x02_tx_complete_skb
  22. mt76x02_mac_set_rts_thresh
  23. mt76x02_mac_set_tx_protection
  24. mt76x02_update_channel
  25. mt76x02_check_mac_err
  26. mt76x02_edcca_tx_enable
  27. mt76x02_edcca_init
  28. mt76x02_edcca_check
  29. mt76x02_mac_work
  30. mt76x02_mac_set_bssid

   1 // SPDX-License-Identifier: ISC
   2 /*
   3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
   4  * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
   5  */
   6 
   7 #include "mt76x02.h"
   8 #include "mt76x02_trace.h"
   9 
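      /* Map a mac80211 cipher suite to the on-chip cipher type and copy up
       * to 32 bytes of key material into key_data (zero-padded). Returns
       * MT_CIPHER_NONE for a NULL key or an unsupported cipher.
       */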
  10 static enum mt76x02_cipher_type
  11 mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
  12 {
  13         memset(key_data, 0, 32);
  14         if (!key)
  15                 return MT_CIPHER_NONE;
  16 
  17         if (key->keylen > 32)
  18                 return MT_CIPHER_NONE;
  19 
  20         memcpy(key_data, key->key, key->keylen);
  21 
  22         switch (key->cipher) {
  23         case WLAN_CIPHER_SUITE_WEP40:
  24                 return MT_CIPHER_WEP40;
  25         case WLAN_CIPHER_SUITE_WEP104:
  26                 return MT_CIPHER_WEP104;
  27         case WLAN_CIPHER_SUITE_TKIP:
  28                 return MT_CIPHER_TKIP;
  29         case WLAN_CIPHER_SUITE_CCMP:
  30                 return MT_CIPHER_AES_CCMP;
  31         default:
  32                 return MT_CIPHER_NONE;
  33         }
  34 }
  35 
  36 int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
  37                                  u8 key_idx, struct ieee80211_key_conf *key)
  38 {
  39         enum mt76x02_cipher_type cipher;
  40         u8 key_data[32];
  41         u32 val;
  42 
  43         cipher = mt76x02_mac_get_key_info(key, key_data);
  44         if (cipher == MT_CIPHER_NONE && key)
  45                 return -EOPNOTSUPP;
  46 
  47         val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
  48         val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
  49         val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
  50         mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
  51 
  52         mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
  53                      sizeof(key_data));
  54 
  55         return 0;
  56 }
  57 EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
  58 
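      /* Rebuild the current TX packet number from the per-WCID IV/EIV
       * registers and store it in key->tx_pn, so the software PN counter
       * stays in sync with hardware for TKIP/CCMP keys.
       */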
  59 void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
  60                               struct ieee80211_key_conf *key)
  61 {
  62         enum mt76x02_cipher_type cipher;
  63         u8 key_data[32];
  64         u32 iv, eiv;
  65         u64 pn;
  66 
  67         cipher = mt76x02_mac_get_key_info(key, key_data);
  68         iv = mt76_rr(dev, MT_WCID_IV(idx));
  69         eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
  70 
  71         pn = (u64)eiv << 16;
  72         if (cipher == MT_CIPHER_TKIP) {
  73                 pn |= (iv >> 16) & 0xff;
  74                 pn |= (iv & 0xff) << 8;
  75         } else if (cipher >= MT_CIPHER_AES_CCMP) {
  76                 pn |= iv & 0xffff;
  77         } else {
  78                 return;
  79         }
  80 
  81         atomic64_set(&key->tx_pn, pn);
  82 }
  83 
  84 int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
  85                              struct ieee80211_key_conf *key)
  86 {
  87         enum mt76x02_cipher_type cipher;
  88         u8 key_data[32];
  89         u8 iv_data[8];
  90         u64 pn;
  91 
  92         cipher = mt76x02_mac_get_key_info(key, key_data);
  93         if (cipher == MT_CIPHER_NONE && key)
  94                 return -EOPNOTSUPP;
  95 
  96         mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
  97         mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
  98 
  99         memset(iv_data, 0, sizeof(iv_data));
 100         if (key) {
 101                 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
 102                                !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
 103 
 104                 pn = atomic64_read(&key->tx_pn);
 105 
 106                 iv_data[3] = key->keyidx << 6;
 107                 if (cipher >= MT_CIPHER_TKIP) {
 108                         iv_data[3] |= 0x20;
 109                         put_unaligned_le32(pn >> 16, &iv_data[4]);
 110                 }
 111 
 112                 if (cipher == MT_CIPHER_TKIP) {
 113                         iv_data[0] = (pn >> 8) & 0xff;
 114                         iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
 115                         iv_data[2] = pn & 0xff;
 116                 } else if (cipher >= MT_CIPHER_AES_CCMP) {
 117                         put_unaligned_le16((pn & 0xffff), &iv_data[0]);
 118                 }
 119         }
 120 
 121         mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
 122 
 123         return 0;
 124 }
 125 
 126 void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
 127                             u8 vif_idx, u8 *mac)
 128 {
 129         struct mt76_wcid_addr addr = {};
 130         u32 attr;
 131 
 132         attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
 133                FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
 134 
 135         mt76_wr(dev, MT_WCID_ATTR(idx), attr);
 136 
 137         if (idx >= 128)
 138                 return;
 139 
 140         if (mac)
 141                 memcpy(addr.macaddr, mac, ETH_ALEN);
 142 
 143         mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
 144 }
 145 EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
 146 
 147 void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
 148 {
 149         u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
 150         u32 bit = MT_WCID_DROP_MASK(idx);
 151 
 152         /* prevent unnecessary writes */
 153         if ((val & bit) != (bit * drop))
 154                 mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
 155 }
 156 
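      /* Pack an ieee80211_tx_rate into the 16-bit hardware rate value
       * (rate index, PHY type, bandwidth, short GI) and report the number
       * of spatial streams through nss_val.
       */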
 157 static __le16
 158 mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
 159                         const struct ieee80211_tx_rate *rate, u8 *nss_val)
 160 {
 161         u8 phy, rate_idx, nss, bw = 0;
 162         u16 rateval;
 163 
 164         if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
 165                 rate_idx = rate->idx;
 166                 nss = 1 + (rate->idx >> 4);
 167                 phy = MT_PHY_TYPE_VHT;
 168                 if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
 169                         bw = 2;
 170                 else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 171                         bw = 1;
 172         } else if (rate->flags & IEEE80211_TX_RC_MCS) {
 173                 rate_idx = rate->idx;
 174                 nss = 1 + (rate->idx >> 3);
 175                 phy = MT_PHY_TYPE_HT;
 176                 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 177                         phy = MT_PHY_TYPE_HT_GF;
 178                 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 179                         bw = 1;
 180         } else {
 181                 const struct ieee80211_rate *r;
 182                 int band = dev->mt76.chandef.chan->band;
 183                 u16 val;
 184 
 185                 r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
 186                 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 187                         val = r->hw_value_short;
 188                 else
 189                         val = r->hw_value;
 190 
 191                 phy = val >> 8;
 192                 rate_idx = val & 0xff;
 193                 nss = 1;
 194         }
 195 
 196         rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
 197         rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
 198         rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
 199         if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
 200                 rateval |= MT_RXWI_RATE_SGI;
 201 
 202         *nss_val = nss;
 203         return cpu_to_le16(rateval);
 204 }
 205 
 206 void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
 207                                const struct ieee80211_tx_rate *rate)
 208 {
 209         s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
 210         __le16 rateval;
 211         u32 tx_info;
 212         s8 nss;
 213 
 214         rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
 215         tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
 216                   FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
 217                   FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
 218                   MT_WCID_TX_INFO_SET;
 219         wcid->tx_info = tx_info;
 220 }
 221 
 222 void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
 223 {
 224         if (enable)
 225                 mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
 226         else
 227                 mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
 228 }
 229 
 230 bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
 231                                 struct mt76x02_tx_status *stat)
 232 {
 233         u32 stat1, stat2;
 234 
 235         stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
 236         stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
 237 
 238         stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
 239         if (!stat->valid)
 240                 return false;
 241 
 242         stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
 243         stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
 244         stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
 245         stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
 246         stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
 247 
 248         stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
 249         stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
 250 
 251         trace_mac_txstat_fetch(dev, stat);
 252 
 253         return true;
 254 }
 255 
 256 static int
 257 mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
 258                             enum nl80211_band band)
 259 {
 260         u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
 261 
 262         txrate->idx = 0;
 263         txrate->flags = 0;
 264         txrate->count = 1;
 265 
 266         switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
 267         case MT_PHY_TYPE_OFDM:
 268                 if (band == NL80211_BAND_2GHZ)
 269                         idx += 4;
 270 
 271                 txrate->idx = idx;
 272                 return 0;
 273         case MT_PHY_TYPE_CCK:
 274                 if (idx >= 8)
 275                         idx -= 8;
 276 
 277                 txrate->idx = idx;
 278                 return 0;
 279         case MT_PHY_TYPE_HT_GF:
 280                 txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
 281                 /* fall through */
 282         case MT_PHY_TYPE_HT:
 283                 txrate->flags |= IEEE80211_TX_RC_MCS;
 284                 txrate->idx = idx;
 285                 break;
 286         case MT_PHY_TYPE_VHT:
 287                 txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
 288                 txrate->idx = idx;
 289                 break;
 290         default:
 291                 return -EINVAL;
 292         }
 293 
 294         switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
 295         case MT_PHY_BW_20:
 296                 break;
 297         case MT_PHY_BW_40:
 298                 txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 299                 break;
 300         case MT_PHY_BW_80:
 301                 txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
 302                 break;
 303         default:
 304                 return -EINVAL;
 305         }
 306 
 307         if (rate & MT_RXWI_RATE_SGI)
 308                 txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
 309 
 310         return 0;
 311 }
 312 
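      /* Fill the TX descriptor (TXWI): WCID index, software-generated CCMP
       * IV/EIV when sw_iv is set, rate and TX power adjustment, per-revision
       * txstream setting, LDPC/STBC/MMPS flags, ACK and BA-window control,
       * and timestamp insertion for beacons and probe responses.
       */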
 313 void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
 314                             struct sk_buff *skb, struct mt76_wcid *wcid,
 315                             struct ieee80211_sta *sta, int len)
 316 {
 317         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 318         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 319         struct ieee80211_tx_rate *rate = &info->control.rates[0];
 320         struct ieee80211_key_conf *key = info->control.hw_key;
 321         u32 wcid_tx_info;
 322         u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
 323         u16 txwi_flags = 0;
 324         u8 nss;
 325         s8 txpwr_adj, max_txpwr_adj;
 326         u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
 327 
 328         memset(txwi, 0, sizeof(*txwi));
 329 
 330         if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
 331             ieee80211_has_protected(hdr->frame_control)) {
 332                 wcid = NULL;
 333                 ieee80211_get_tx_rates(info->control.vif, sta, skb,
 334                                        info->control.rates, 1);
 335         }
 336 
 337         if (wcid)
 338                 txwi->wcid = wcid->idx;
 339         else
 340                 txwi->wcid = 0xff;
 341 
 342         if (wcid && wcid->sw_iv && key) {
 343                 u64 pn = atomic64_inc_return(&key->tx_pn);
 344 
 345                 ccmp_pn[0] = pn;
 346                 ccmp_pn[1] = pn >> 8;
 347                 ccmp_pn[2] = 0;
 348                 ccmp_pn[3] = 0x20 | (key->keyidx << 6);
 349                 ccmp_pn[4] = pn >> 16;
 350                 ccmp_pn[5] = pn >> 24;
 351                 ccmp_pn[6] = pn >> 32;
 352                 ccmp_pn[7] = pn >> 40;
 353                 txwi->iv = *((__le32 *)&ccmp_pn[0]);
 354                 txwi->eiv = *((__le32 *)&ccmp_pn[4]);
 355         }
 356 
 357         if (wcid && (rate->idx < 0 || !rate->count)) {
 358                 wcid_tx_info = wcid->tx_info;
 359                 txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
 360                 max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
 361                                           wcid_tx_info);
 362                 nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
 363         } else {
 364                 txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
 365                 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
 366         }
 367 
 368         txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
 369                                              max_txpwr_adj);
 370         txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
 371 
 372         if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
 373                 txwi->txstream = 0x13;
 374         else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
 375                  !(txwi->rate & cpu_to_le16(rate_ht_mask)))
 376                 txwi->txstream = 0x93;
 377 
 378         if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
 379                 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
 380         if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
 381                 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
 382         if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
 383                 txwi_flags |= MT_TXWI_FLAGS_MMPS;
 384         if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
 385                 txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
 386         if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
 387                 txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
 388         if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
 389                 u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
 390 
 391                 ba_size <<= sta->ht_cap.ampdu_factor;
 392                 ba_size = min_t(int, 63, ba_size - 1);
 393                 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
 394                         ba_size = 0;
 395                 txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
 396 
 397                 txwi_flags |= MT_TXWI_FLAGS_AMPDU |
 398                          FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
 399                                     sta->ht_cap.ampdu_density);
 400         }
 401 
 402         if (ieee80211_is_probe_resp(hdr->frame_control) ||
 403             ieee80211_is_beacon(hdr->frame_control))
 404                 txwi_flags |= MT_TXWI_FLAGS_TS;
 405 
 406         txwi->flags |= cpu_to_le16(txwi_flags);
 407         txwi->len_ctl = cpu_to_le16(len);
 408 }
 409 EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
 410 
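      /* Derive the fallback entry rates[idx] from rates[idx - 1]: VHT drops
       * the MCS first and then NSS, HT MCS 8 falls back to MCS 0, and
       * anything else simply steps down one rate index.
       */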
 411 static void
 412 mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
 413 {
 414         u8 mcs, nss;
 415 
 416         if (!idx)
 417                 return;
 418 
 419         rates += idx - 1;
 420         rates[1] = rates[0];
 421         switch (phy) {
 422         case MT_PHY_TYPE_VHT:
 423                 mcs = ieee80211_rate_get_vht_mcs(rates);
 424                 nss = ieee80211_rate_get_vht_nss(rates);
 425 
 426                 if (mcs == 0)
 427                         nss = max_t(int, nss - 1, 1);
 428                 else
 429                         mcs--;
 430 
 431                 ieee80211_rate_set_vht(rates + 1, mcs, nss);
 432                 break;
 433         case MT_PHY_TYPE_HT_GF:
 434         case MT_PHY_TYPE_HT:
 435                 /* MCS 8 falls back to MCS 0 */
 436                 if (rates[0].idx == 8) {
 437                         rates[1].idx = 0;
 438                         break;
 439                 }
 440                 /* fall through */
 441         default:
 442                 rates[1].idx = max_t(int, rates[0].idx - 1, 0);
 443                 break;
 444         }
 445 }
 446 
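      /* Convert a TX status FIFO entry into mac80211 TX info: reconstruct
       * the rate table (first rate from the packet id or the WCID default,
       * intermediate rates via the fallback order, last rate as reported by
       * hardware) and fill in A-MPDU length and ACK flags.
       */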
 447 static void
 448 mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
 449                            struct ieee80211_tx_info *info,
 450                            struct mt76x02_tx_status *st, int n_frames)
 451 {
 452         struct ieee80211_tx_rate *rate = info->status.rates;
 453         struct ieee80211_tx_rate last_rate;
 454         u16 first_rate;
 455         int retry = st->retry;
 456         int phy;
 457         int i;
 458 
 459         if (!n_frames)
 460                 return;
 461 
 462         phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
 463 
 464         if (st->pktid & MT_PACKET_ID_HAS_RATE) {
 465                 first_rate = st->rate & ~MT_RXWI_RATE_INDEX;
 466                 first_rate |= st->pktid & MT_RXWI_RATE_INDEX;
 467 
 468                 mt76x02_mac_process_tx_rate(&rate[0], first_rate,
 469                                             dev->mt76.chandef.chan->band);
 470         } else if (rate[0].idx < 0) {
 471                 if (!msta)
 472                         return;
 473 
 474                 mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
 475                                             dev->mt76.chandef.chan->band);
 476         }
 477 
 478         mt76x02_mac_process_tx_rate(&last_rate, st->rate,
 479                                     dev->mt76.chandef.chan->band);
 480 
 481         for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
 482                 retry--;
 483                 if (i + 1 == ARRAY_SIZE(info->status.rates)) {
 484                         info->status.rates[i] = last_rate;
 485                         info->status.rates[i].count = max_t(int, retry, 1);
 486                         break;
 487                 }
 488 
 489                 mt76x02_tx_rate_fallback(info->status.rates, i, phy);
 490                 if (info->status.rates[i].idx == last_rate.idx)
 491                         break;
 492         }
 493 
 494         if (i + 1 < ARRAY_SIZE(info->status.rates)) {
 495                 info->status.rates[i + 1].idx = -1;
 496                 info->status.rates[i + 1].count = 0;
 497         }
 498 
 499         info->status.ampdu_len = n_frames;
 500         info->status.ampdu_ack_len = st->success ? n_frames : 0;
 501 
 502         if (st->aggr)
 503                 info->flags |= IEEE80211_TX_CTL_AMPDU |
 504                                IEEE80211_TX_STAT_AMPDU;
 505 
 506         if (!st->ack_req)
 507                 info->flags |= IEEE80211_TX_CTL_NO_ACK;
 508         else if (st->success)
 509                 info->flags |= IEEE80211_TX_STAT_ACK;
 510 }
 511 
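      /* Report one TX status FIFO entry to mac80211. Consecutive entries of
       * the same aggregate with identical rate/retry values are only counted
       * in msta->n_frames and reported in one batch once the rate changes or
       * the batch limit (32 frames) is reached.
       */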
 512 void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 513                             struct mt76x02_tx_status *stat, u8 *update)
 514 {
 515         struct ieee80211_tx_info info = {};
 516         struct ieee80211_tx_status status = {
 517                 .info = &info
 518         };
 519         struct mt76_wcid *wcid = NULL;
 520         struct mt76x02_sta *msta = NULL;
 521         struct mt76_dev *mdev = &dev->mt76;
 522         struct sk_buff_head list;
 523 
 524         if (stat->pktid == MT_PACKET_ID_NO_ACK)
 525                 return;
 526 
 527         rcu_read_lock();
 528 
 529         if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
 530                 wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
 531 
 532         if (wcid && wcid->sta) {
 533                 void *priv;
 534 
 535                 priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
 536                 status.sta = container_of(priv, struct ieee80211_sta,
 537                                           drv_priv);
 538         }
 539 
 540         mt76_tx_status_lock(mdev, &list);
 541 
 542         if (wcid) {
 543                 if (mt76_is_skb_pktid(stat->pktid))
 544                         status.skb = mt76_tx_status_skb_get(mdev, wcid,
 545                                                             stat->pktid, &list);
 546                 if (status.skb)
 547                         status.info = IEEE80211_SKB_CB(status.skb);
 548         }
 549 
 550         if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
 551                 mt76_tx_status_unlock(mdev, &list);
 552                 rcu_read_unlock();
 553                 return;
 554         }
 555 
 556         if (msta && stat->aggr && !status.skb) {
 557                 u32 stat_val, stat_cache;
 558 
 559                 stat_val = stat->rate;
 560                 stat_val |= ((u32)stat->retry) << 16;
 561                 stat_cache = msta->status.rate;
 562                 stat_cache |= ((u32)msta->status.retry) << 16;
 563 
 564                 if (*update == 0 && stat_val == stat_cache &&
 565                     stat->wcid == msta->status.wcid && msta->n_frames < 32) {
 566                         msta->n_frames++;
 567                         mt76_tx_status_unlock(mdev, &list);
 568                         rcu_read_unlock();
 569                         return;
 570                 }
 571 
 572                 mt76x02_mac_fill_tx_status(dev, msta, status.info,
 573                                            &msta->status, msta->n_frames);
 574 
 575                 msta->status = *stat;
 576                 msta->n_frames = 1;
 577                 *update = 0;
 578         } else {
 579                 mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
 580                 *update = 1;
 581         }
 582 
 583         if (status.skb)
 584                 mt76_tx_status_skb_done(mdev, status.skb, &list);
 585         mt76_tx_status_unlock(mdev, &list);
 586 
 587         if (!status.skb)
 588                 ieee80211_tx_status_ext(mt76_hw(dev), &status);
 589         rcu_read_unlock();
 590 }
 591 
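      /* Decode the hardware RXWI rate field into mt76_rx_status: legacy
       * CCK/OFDM index, HT/VHT MCS and NSS, plus bandwidth, short GI, LDPC
       * and STBC flags.
       */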
 592 static int
 593 mt76x02_mac_process_rate(struct mt76x02_dev *dev,
 594                          struct mt76_rx_status *status,
 595                          u16 rate)
 596 {
 597         u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
 598 
 599         switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
 600         case MT_PHY_TYPE_OFDM:
 601                 if (idx >= 8)
 602                         idx = 0;
 603 
 604                 if (status->band == NL80211_BAND_2GHZ)
 605                         idx += 4;
 606 
 607                 status->rate_idx = idx;
 608                 return 0;
 609         case MT_PHY_TYPE_CCK:
 610                 if (idx >= 8) {
 611                         idx -= 8;
 612                         status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
 613                 }
 614 
 615                 if (idx >= 4)
 616                         idx = 0;
 617 
 618                 status->rate_idx = idx;
 619                 return 0;
 620         case MT_PHY_TYPE_HT_GF:
 621                 status->enc_flags |= RX_ENC_FLAG_HT_GF;
 622                 /* fall through */
 623         case MT_PHY_TYPE_HT:
 624                 status->encoding = RX_ENC_HT;
 625                 status->rate_idx = idx;
 626                 break;
 627         case MT_PHY_TYPE_VHT: {
 628                 u8 n_rxstream = dev->mt76.chainmask & 0xf;
 629 
 630                 status->encoding = RX_ENC_VHT;
 631                 status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
 632                 status->nss = min_t(u8, n_rxstream,
 633                                     FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
 634                 break;
 635         }
 636         default:
 637                 return -EINVAL;
 638         }
 639 
 640         if (rate & MT_RXWI_RATE_LDPC)
 641                 status->enc_flags |= RX_ENC_FLAG_LDPC;
 642 
 643         if (rate & MT_RXWI_RATE_SGI)
 644                 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 645 
 646         if (rate & MT_RXWI_RATE_STBC)
 647                 status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
 648 
 649         switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
 650         case MT_PHY_BW_20:
 651                 break;
 652         case MT_PHY_BW_40:
 653                 status->bw = RATE_INFO_BW_40;
 654                 break;
 655         case MT_PHY_BW_80:
 656                 status->bw = RATE_INFO_BW_80;
 657                 break;
 658         default:
 659                 break;
 660         }
 661 
 662         return 0;
 663 }
 664 
 665 void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
 666 {
 667         static const u8 null_addr[ETH_ALEN] = {};
 668         int i;
 669 
 670         ether_addr_copy(dev->mt76.macaddr, addr);
 671 
 672         if (!is_valid_ether_addr(dev->mt76.macaddr)) {
 673                 eth_random_addr(dev->mt76.macaddr);
 674                 dev_info(dev->mt76.dev,
 675                          "Invalid MAC address, using random address %pM\n",
 676                          dev->mt76.macaddr);
 677         }
 678 
 679         mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
 680         mt76_wr(dev, MT_MAC_ADDR_DW1,
 681                 get_unaligned_le16(dev->mt76.macaddr + 4) |
 682                 FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
 683 
 684         mt76_wr(dev, MT_MAC_BSSID_DW0,
 685                 get_unaligned_le32(dev->mt76.macaddr));
 686         mt76_wr(dev, MT_MAC_BSSID_DW1,
 687                 get_unaligned_le16(dev->mt76.macaddr + 4) |
 688                 FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
 689                 MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
 690 
 691         for (i = 0; i < 16; i++)
 692                 mt76x02_mac_set_bssid(dev, i, null_addr);
 693 }
 694 EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
 695 
 696 static int
 697 mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
 698 {
 699         struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;
 700 
 701         rssi += cal->rssi_offset[chain];
 702         rssi -= cal->lna_gain;
 703 
 704         return rssi;
 705 }
 706 
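      /* Parse the RX descriptor (RXWI): strip L2 padding and, for
       * hardware-decrypted non-fragmented frames, the PN/IV bytes (saving
       * the IV for mac80211 replay checks), record per-chain RSSI and
       * channel info, and decode the RX rate.
       */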
 707 int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
 708                            void *rxi)
 709 {
 710         struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
 711         struct mt76x02_rxwi *rxwi = rxi;
 712         struct mt76x02_sta *sta;
 713         u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
 714         u32 ctl = le32_to_cpu(rxwi->ctl);
 715         u16 rate = le16_to_cpu(rxwi->rate);
 716         u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
 717         bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
 718         int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
 719         s8 signal;
 720         u8 pn_len;
 721         u8 wcid;
 722         int len;
 723 
 724         if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
 725                 return -EINVAL;
 726 
 727         if (rxinfo & MT_RXINFO_L2PAD)
 728                 pad_len += 2;
 729 
 730         if (rxinfo & MT_RXINFO_DECRYPT) {
 731                 status->flag |= RX_FLAG_DECRYPTED;
 732                 status->flag |= RX_FLAG_MMIC_STRIPPED;
 733                 status->flag |= RX_FLAG_MIC_STRIPPED;
 734                 status->flag |= RX_FLAG_IV_STRIPPED;
 735         }
 736 
 737         wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
 738         sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
 739         status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);
 740 
 741         len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
 742         pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
 743         if (pn_len) {
 744                 int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
 745                 u8 *data = skb->data + offset;
 746 
 747                 status->iv[0] = data[7];
 748                 status->iv[1] = data[6];
 749                 status->iv[2] = data[5];
 750                 status->iv[3] = data[4];
 751                 status->iv[4] = data[1];
 752                 status->iv[5] = data[0];
 753 
 754                 /*
 755                  * Driver CCMP validation can't deal with fragments.
 756                  * Let mac80211 take care of it.
 757                  */
 758                 if (rxinfo & MT_RXINFO_FRAG) {
 759                         status->flag &= ~RX_FLAG_IV_STRIPPED;
 760                 } else {
 761                         pad_len += pn_len << 2;
 762                         len -= pn_len << 2;
 763                 }
 764         }
 765 
 766         mt76x02_remove_hdr_pad(skb, pad_len);
 767 
 768         if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
 769                 status->aggr = true;
 770 
 771         if (WARN_ON_ONCE(len > skb->len))
 772                 return -EINVAL;
 773 
 774         pskb_trim(skb, len);
 775 
 776         status->chains = BIT(0);
 777         signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
 778         status->chain_signal[0] = signal;
 779         if (nstreams > 1) {
 780                 status->chains |= BIT(1);
 781                 status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
 782                                                                rxwi->rssi[1],
 783                                                                1);
 784                 signal = max_t(s8, signal, status->chain_signal[1]);
 785         }
 786         status->signal = signal;
 787         status->freq = dev->mt76.chandef.chan->center_freq;
 788         status->band = dev->mt76.chandef.chan->band;
 789 
 790         status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
 791         status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
 792 
 793         return mt76x02_mac_process_rate(dev, status, rate);
 794 }
 795 
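      /* Drain the TX status FIFO. In IRQ context entries are pushed into
       * txstatus_fifo for deferred processing; otherwise they are reported
       * to mac80211 directly.
       */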
 796 void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
 797 {
 798         struct mt76x02_tx_status stat = {};
 799         u8 update = 1;
 800         bool ret;
 801 
 802         if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
 803                 return;
 804 
 805         trace_mac_txstat_poll(dev);
 806 
 807         while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
 808                 if (!spin_trylock(&dev->txstatus_fifo_lock))
 809                         break;
 810 
 811                 ret = mt76x02_mac_load_tx_status(dev, &stat);
 812                 spin_unlock(&dev->txstatus_fifo_lock);
 813 
 814                 if (!ret)
 815                         break;
 816 
 817                 if (!irq) {
 818                         mt76x02_send_tx_status(dev, &stat, &update);
 819                         continue;
 820                 }
 821 
 822                 kfifo_put(&dev->txstatus_fifo, stat);
 823         }
 824 }
 825 
 826 void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
 827                              struct mt76_queue_entry *e)
 828 {
 829         struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 830         struct mt76x02_txwi *txwi;
 831         u8 *txwi_ptr;
 832 
 833         if (!e->txwi) {
 834                 dev_kfree_skb_any(e->skb);
 835                 return;
 836         }
 837 
 838         mt76x02_mac_poll_tx_status(dev, false);
 839 
 840         txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
 841         txwi = (struct mt76x02_txwi *)txwi_ptr;
 842         trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
 843 
 844         mt76_tx_complete_skb(mdev, e->skb);
 845 }
 846 EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
 847 
 848 void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
 849 {
 850         u32 data = 0;
 851 
 852         if (val != ~0)
 853                 data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
 854                        MT_PROT_CFG_RTS_THRESH;
 855 
 856         mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
 857 
 858         mt76_rmw(dev, MT_CCK_PROT_CFG,
 859                  MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
 860         mt76_rmw(dev, MT_OFDM_PROT_CFG,
 861                  MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
 862 }
 863 
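      /* Program the six legacy/HT protection registers (starting at
       * MT_CCK_PROT_CFG) and the three VHT protection registers according
       * to the legacy protection flag and the HT operation mode: RTS/CTS
       * vs. CTS-to-self, protection rates, and extra protection when
       * non-greenfield stations are present.
       */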
 864 void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
 865                                    int ht_mode)
 866 {
 867         int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
 868         bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
 869         u32 prot[6];
 870         u32 vht_prot[3];
 871         int i;
 872         u16 rts_thr;
 873 
 874         for (i = 0; i < ARRAY_SIZE(prot); i++) {
 875                 prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
 876                 prot[i] &= ~MT_PROT_CFG_CTRL;
 877                 if (i >= 2)
 878                         prot[i] &= ~MT_PROT_CFG_RATE;
 879         }
 880 
 881         for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
 882                 vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
 883                 vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
 884         }
 885 
 886         rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);
 887 
 888         if (rts_thr != 0xffff)
 889                 prot[0] |= MT_PROT_CTRL_RTS_CTS;
 890 
 891         if (legacy_prot) {
 892                 prot[1] |= MT_PROT_CTRL_CTS2SELF;
 893 
 894                 prot[2] |= MT_PROT_RATE_CCK_11;
 895                 prot[3] |= MT_PROT_RATE_CCK_11;
 896                 prot[4] |= MT_PROT_RATE_CCK_11;
 897                 prot[5] |= MT_PROT_RATE_CCK_11;
 898 
 899                 vht_prot[0] |= MT_PROT_RATE_CCK_11;
 900                 vht_prot[1] |= MT_PROT_RATE_CCK_11;
 901                 vht_prot[2] |= MT_PROT_RATE_CCK_11;
 902         } else {
 903                 if (rts_thr != 0xffff)
 904                         prot[1] |= MT_PROT_CTRL_RTS_CTS;
 905 
 906                 prot[2] |= MT_PROT_RATE_OFDM_24;
 907                 prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
 908                 prot[4] |= MT_PROT_RATE_OFDM_24;
 909                 prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
 910 
 911                 vht_prot[0] |= MT_PROT_RATE_OFDM_24;
 912                 vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
 913                 vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
 914         }
 915 
 916         switch (mode) {
 917         case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
 918         case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
 919                 prot[2] |= MT_PROT_CTRL_RTS_CTS;
 920                 prot[3] |= MT_PROT_CTRL_RTS_CTS;
 921                 prot[4] |= MT_PROT_CTRL_RTS_CTS;
 922                 prot[5] |= MT_PROT_CTRL_RTS_CTS;
 923                 vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
 924                 vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
 925                 vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
 926                 break;
 927         case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
 928                 prot[3] |= MT_PROT_CTRL_RTS_CTS;
 929                 prot[5] |= MT_PROT_CTRL_RTS_CTS;
 930                 vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
 931                 vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
 932                 break;
 933         }
 934 
 935         if (non_gf) {
 936                 prot[4] |= MT_PROT_CTRL_RTS_CTS;
 937                 prot[5] |= MT_PROT_CTRL_RTS_CTS;
 938         }
 939 
 940         for (i = 0; i < ARRAY_SIZE(prot); i++)
 941                 mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
 942 
 943         for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
 944                 mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
 945 }
 946 
 947 void mt76x02_update_channel(struct mt76_dev *mdev)
 948 {
 949         struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 950         struct mt76_channel_state *state;
 951         u32 active, busy;
 952 
 953         state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
 954 
 955         busy = mt76_rr(dev, MT_CH_BUSY);
 956         active = busy + mt76_rr(dev, MT_CH_IDLE);
 957 
 958         spin_lock_bh(&dev->mt76.cc_lock);
 959         state->cc_busy += busy;
 960         state->cc_active += active;
 961         spin_unlock_bh(&dev->mt76.cc_lock);
 962 }
 963 EXPORT_SYMBOL_GPL(mt76x02_update_channel);
 964 
 965 static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
 966 {
 967         u32 val = mt76_rr(dev, 0x10f4);
 968 
 969         if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
 970                 return;
 971 
 972         dev_err(dev->mt76.dev, "mac specific condition occurred\n");
 973 
 974         mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
 975         udelay(10);
 976         mt76_wr(dev, MT_MAC_SYS_CTRL,
 977                 MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
 978 }
 979 
 980 static void
 981 mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
 982 {
 983         if (enable) {
 984                 u32 data;
 985 
 986                 mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
 987                 mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
 988                 /* enable pa-lna */
 989                 data = mt76_rr(dev, MT_TX_PIN_CFG);
 990                 data |= MT_TX_PIN_CFG_TXANT |
 991                         MT_TX_PIN_CFG_RXANT |
 992                         MT_TX_PIN_RFTR_EN |
 993                         MT_TX_PIN_TRSW_EN;
 994                 mt76_wr(dev, MT_TX_PIN_CFG, data);
 995         } else {
 996                 mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
 997                 mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
 998                 /* disable pa-lna */
 999                 mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
1000                 mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
1001         }
1002         dev->ed_tx_blocked = !enable;
1003 }
1004 
1005 void mt76x02_edcca_init(struct mt76x02_dev *dev)
1006 {
1007         dev->ed_trigger = 0;
1008         dev->ed_silent = 0;
1009 
1010         if (dev->ed_monitor) {
1011                 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
1012                 u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
1013 
1014                 mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
1015                 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
1016                 mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
1017                          ed_th << 8 | ed_th);
1018                 mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
1019         } else {
1020                 mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
1021                 mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
1022                 if (is_mt76x2(dev)) {
1023                         mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
1024                         mt76_set(dev, MT_TXOP_HLDR_ET,
1025                                  MT_TXOP_HLDR_TX40M_BLK_EN);
1026                 } else {
1027                         mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
1028                         mt76_clear(dev, MT_TXOP_HLDR_ET,
1029                                    MT_TXOP_HLDR_TX40M_BLK_EN);
1030                 }
1031         }
1032         mt76x02_edcca_tx_enable(dev, true);
1033         dev->ed_monitor_learning = true;
1034 
1035         /* clear previous CCA timer value */
1036         mt76_rr(dev, MT_ED_CCA_TIMER);
1037         dev->ed_time = ktime_get_boottime();
1038 }
1039 EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
1040 
1041 #define MT_EDCCA_TH             92
1042 #define MT_EDCCA_BLOCK_TH       2
1043 #define MT_EDCCA_LEARN_TH       50
1044 #define MT_EDCCA_LEARN_CCA      180
1045 #define MT_EDCCA_LEARN_TIMEOUT  (20 * HZ)
1046 
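      /* Periodic EDCCA check: convert the CCA timer delta since the last
       * run into a busy percentage, update the trigger/silent counters, and
       * block or re-enable TX when they cross MT_EDCCA_BLOCK_TH (skipped
       * while in the learning phase).
       */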
1047 static void mt76x02_edcca_check(struct mt76x02_dev *dev)
1048 {
1049         ktime_t cur_time;
1050         u32 active, val, busy;
1051 
1052         cur_time = ktime_get_boottime();
1053         val = mt76_rr(dev, MT_ED_CCA_TIMER);
1054 
1055         active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
1056         dev->ed_time = cur_time;
1057 
1058         busy = (val * 100) / active;
1059         busy = min_t(u32, busy, 100);
1060 
1061         if (busy > MT_EDCCA_TH) {
1062                 dev->ed_trigger++;
1063                 dev->ed_silent = 0;
1064         } else {
1065                 dev->ed_silent++;
1066                 dev->ed_trigger = 0;
1067         }
1068 
1069         if (dev->cal.agc_lowest_gain &&
1070             dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
1071             dev->ed_trigger > MT_EDCCA_LEARN_TH) {
1072                 dev->ed_monitor_learning = false;
1073                 dev->ed_trigger_timeout = jiffies + 20 * HZ;
1074         } else if (!dev->ed_monitor_learning &&
1075                    time_is_after_jiffies(dev->ed_trigger_timeout)) {
1076                 dev->ed_monitor_learning = true;
1077                 mt76x02_edcca_tx_enable(dev, true);
1078         }
1079 
1080         if (dev->ed_monitor_learning)
1081                 return;
1082 
1083         if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
1084                 mt76x02_edcca_tx_enable(dev, false);
1085         else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
1086                 mt76x02_edcca_tx_enable(dev, true);
1087 }
1088 
1089 void mt76x02_mac_work(struct work_struct *work)
1090 {
1091         struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
1092                                                mt76.mac_work.work);
1093         int i, idx;
1094 
1095         mutex_lock(&dev->mt76.mutex);
1096 
1097         mt76x02_update_channel(&dev->mt76);
1098         for (i = 0, idx = 0; i < 16; i++) {
1099                 u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
1100 
1101                 dev->aggr_stats[idx++] += val & 0xffff;
1102                 dev->aggr_stats[idx++] += val >> 16;
1103         }
1104 
1105         if (!dev->mt76.beacon_mask)
1106                 mt76x02_check_mac_err(dev);
1107 
1108         if (dev->ed_monitor)
1109                 mt76x02_edcca_check(dev);
1110 
1111         mutex_unlock(&dev->mt76.mutex);
1112 
1113         mt76_tx_status_check(&dev->mt76, NULL, false);
1114 
1115         ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
1116                                      MT_MAC_WORK_INTERVAL);
1117 }
1118 
1119 void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
1120 {
1121         idx &= 7;
1122         mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
1123         mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
1124                        get_unaligned_le16(addr + 4));
1125 }
