1/* 2 * Copyright 2002-2005, Instant802 Networks, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc. 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 6 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13#include <linux/jiffies.h> 14#include <linux/slab.h> 15#include <linux/kernel.h> 16#include <linux/skbuff.h> 17#include <linux/netdevice.h> 18#include <linux/etherdevice.h> 19#include <linux/rcupdate.h> 20#include <linux/export.h> 21#include <net/mac80211.h> 22#include <net/ieee80211_radiotap.h> 23#include <asm/unaligned.h> 24 25#include "ieee80211_i.h" 26#include "driver-ops.h" 27#include "led.h" 28#include "mesh.h" 29#include "wep.h" 30#include "wpa.h" 31#include "tkip.h" 32#include "wme.h" 33#include "rate.h" 34 35/* 36 * monitor mode reception 37 * 38 * This function cleans up the SKB, i.e. it removes all the stuff 39 * only useful for monitoring. 40 */ 41static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, 42 struct sk_buff *skb, 43 unsigned int rtap_vendor_space) 44{ 45 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { 46 if (likely(skb->len > FCS_LEN)) 47 __pskb_trim(skb, skb->len - FCS_LEN); 48 else { 49 /* driver bug */ 50 WARN_ON(1); 51 dev_kfree_skb(skb); 52 return NULL; 53 } 54 } 55 56 __pskb_pull(skb, rtap_vendor_space); 57 58 return skb; 59} 60 61static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, 62 unsigned int rtap_vendor_space) 63{ 64 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 65 struct ieee80211_hdr *hdr; 66 67 hdr = (void *)(skb->data + rtap_vendor_space); 68 69 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 70 RX_FLAG_FAILED_PLCP_CRC | 71 RX_FLAG_AMPDU_IS_ZEROLEN)) 72 return true; 73 74 if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space)) 75 return true; 76 77 if (ieee80211_is_ctl(hdr->frame_control) && 78 !ieee80211_is_pspoll(hdr->frame_control) && 79 !ieee80211_is_back_req(hdr->frame_control)) 80 return true; 81 82 return false; 83} 84 85static int 86ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, 87 struct ieee80211_rx_status *status, 88 struct sk_buff *skb) 89{ 90 int len; 91 92 /* always present fields */ 93 len = sizeof(struct ieee80211_radiotap_header) + 8; 94 95 /* allocate extra bitmaps */ 96 if (status->chains) 97 len += 4 * hweight8(status->chains); 98 99 if (ieee80211_have_rx_timestamp(status)) { 100 len = ALIGN(len, 8); 101 len += 8; 102 } 103 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 104 len += 1; 105 106 /* antenna field, if we don't have per-chain info */ 107 if (!status->chains) 108 len += 1; 109 110 /* padding for RX_FLAGS if necessary */ 111 len = ALIGN(len, 2); 112 113 if (status->flag & RX_FLAG_HT) /* HT info */ 114 len += 3; 115 116 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 117 len = ALIGN(len, 4); 118 len += 8; 119 } 120 121 if (status->flag & RX_FLAG_VHT) { 122 len = ALIGN(len, 2); 123 len += 12; 124 } 125 126 if (status->chains) { 127 /* antenna and antenna signal fields */ 128 len += 2 * hweight8(status->chains); 129 } 130 131 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 132 struct ieee80211_vendor_radiotap *rtap = (void *)skb->data; 133 134 /* vendor presence bitmap */ 135 len += 4; 136 /* alignment for fixed 6-byte vendor data header */ 137 len = ALIGN(len, 
2); 138 /* vendor data header */ 139 len += 6; 140 if (WARN_ON(rtap->align == 0)) 141 rtap->align = 1; 142 len = ALIGN(len, rtap->align); 143 len += rtap->len + rtap->pad; 144 } 145 146 return len; 147} 148 149/* 150 * ieee80211_add_rx_radiotap_header - add radiotap header 151 * 152 * add a radiotap header containing all the fields which the hardware provided. 153 */ 154static void 155ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 156 struct sk_buff *skb, 157 struct ieee80211_rate *rate, 158 int rtap_len, bool has_fcs) 159{ 160 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 161 struct ieee80211_radiotap_header *rthdr; 162 unsigned char *pos; 163 __le32 *it_present; 164 u32 it_present_val; 165 u16 rx_flags = 0; 166 u16 channel_flags = 0; 167 int mpdulen, chain; 168 unsigned long chains = status->chains; 169 struct ieee80211_vendor_radiotap rtap = {}; 170 171 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 172 rtap = *(struct ieee80211_vendor_radiotap *)skb->data; 173 /* rtap.len and rtap.pad are undone immediately */ 174 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad); 175 } 176 177 mpdulen = skb->len; 178 if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))) 179 mpdulen += FCS_LEN; 180 181 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); 182 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad); 183 it_present = &rthdr->it_present; 184 185 /* radiotap header, set always present flags */ 186 rthdr->it_len = cpu_to_le16(rtap_len); 187 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | 188 BIT(IEEE80211_RADIOTAP_CHANNEL) | 189 BIT(IEEE80211_RADIOTAP_RX_FLAGS); 190 191 if (!status->chains) 192 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); 193 194 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 195 it_present_val |= 196 BIT(IEEE80211_RADIOTAP_EXT) | 197 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); 198 put_unaligned_le32(it_present_val, it_present); 199 it_present++; 200 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | 201 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 202 } 203 204 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 205 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) | 206 BIT(IEEE80211_RADIOTAP_EXT); 207 put_unaligned_le32(it_present_val, it_present); 208 it_present++; 209 it_present_val = rtap.present; 210 } 211 212 put_unaligned_le32(it_present_val, it_present); 213 214 pos = (void *)(it_present + 1); 215 216 /* the order of the following fields is important */ 217 218 /* IEEE80211_RADIOTAP_TSFT */ 219 if (ieee80211_have_rx_timestamp(status)) { 220 /* padding */ 221 while ((pos - (u8 *)rthdr) & 7) 222 *pos++ = 0; 223 put_unaligned_le64( 224 ieee80211_calculate_rx_timestamp(local, status, 225 mpdulen, 0), 226 pos); 227 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 228 pos += 8; 229 } 230 231 /* IEEE80211_RADIOTAP_FLAGS */ 232 if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)) 233 *pos |= IEEE80211_RADIOTAP_F_FCS; 234 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 235 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 236 if (status->flag & RX_FLAG_SHORTPRE) 237 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 238 pos++; 239 240 /* IEEE80211_RADIOTAP_RATE */ 241 if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) { 242 /* 243 * Without rate information don't add it. If we have, 244 * MCS information is a separate field in radiotap, 245 * added below. The byte here is needed as padding 246 * for the channel though, so initialise it to 0. 
247 */ 248 *pos = 0; 249 } else { 250 int shift = 0; 251 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 252 if (status->flag & RX_FLAG_10MHZ) 253 shift = 1; 254 else if (status->flag & RX_FLAG_5MHZ) 255 shift = 2; 256 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); 257 } 258 pos++; 259 260 /* IEEE80211_RADIOTAP_CHANNEL */ 261 put_unaligned_le16(status->freq, pos); 262 pos += 2; 263 if (status->flag & RX_FLAG_10MHZ) 264 channel_flags |= IEEE80211_CHAN_HALF; 265 else if (status->flag & RX_FLAG_5MHZ) 266 channel_flags |= IEEE80211_CHAN_QUARTER; 267 268 if (status->band == IEEE80211_BAND_5GHZ) 269 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; 270 else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) 271 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 272 else if (rate && rate->flags & IEEE80211_RATE_ERP_G) 273 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; 274 else if (rate) 275 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; 276 else 277 channel_flags |= IEEE80211_CHAN_2GHZ; 278 put_unaligned_le16(channel_flags, pos); 279 pos += 2; 280 281 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 282 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM && 283 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 284 *pos = status->signal; 285 rthdr->it_present |= 286 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 287 pos++; 288 } 289 290 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 291 292 if (!status->chains) { 293 /* IEEE80211_RADIOTAP_ANTENNA */ 294 *pos = status->antenna; 295 pos++; 296 } 297 298 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 299 300 /* IEEE80211_RADIOTAP_RX_FLAGS */ 301 /* ensure 2 byte alignment for the 2 byte field as required */ 302 if ((pos - (u8 *)rthdr) & 1) 303 *pos++ = 0; 304 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 305 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 306 put_unaligned_le16(rx_flags, pos); 307 pos += 2; 308 309 if (status->flag & RX_FLAG_HT) { 310 unsigned int stbc; 311 312 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); 313 *pos++ = local->hw.radiotap_mcs_details; 314 *pos = 0; 315 if (status->flag & RX_FLAG_SHORT_GI) 316 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 317 if (status->flag & RX_FLAG_40MHZ) 318 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 319 if (status->flag & RX_FLAG_HT_GF) 320 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; 321 if (status->flag & RX_FLAG_LDPC) 322 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; 323 stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT; 324 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; 325 pos++; 326 *pos++ = status->rate_idx; 327 } 328 329 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 330 u16 flags = 0; 331 332 /* ensure 4 byte alignment */ 333 while ((pos - (u8 *)rthdr) & 3) 334 pos++; 335 rthdr->it_present |= 336 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); 337 put_unaligned_le32(status->ampdu_reference, pos); 338 pos += 4; 339 if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN) 340 flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN; 341 if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN) 342 flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN; 343 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) 344 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; 345 if (status->flag & RX_FLAG_AMPDU_IS_LAST) 346 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; 347 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) 348 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; 349 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) 350 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN; 351 
put_unaligned_le16(flags, pos); 352 pos += 2; 353 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) 354 *pos++ = status->ampdu_delimiter_crc; 355 else 356 *pos++ = 0; 357 *pos++ = 0; 358 } 359 360 if (status->flag & RX_FLAG_VHT) { 361 u16 known = local->hw.radiotap_vht_details; 362 363 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); 364 put_unaligned_le16(known, pos); 365 pos += 2; 366 /* flags */ 367 if (status->flag & RX_FLAG_SHORT_GI) 368 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; 369 /* in VHT, STBC is binary */ 370 if (status->flag & RX_FLAG_STBC_MASK) 371 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; 372 if (status->vht_flag & RX_VHT_FLAG_BF) 373 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; 374 pos++; 375 /* bandwidth */ 376 if (status->vht_flag & RX_VHT_FLAG_80MHZ) 377 *pos++ = 4; 378 else if (status->vht_flag & RX_VHT_FLAG_160MHZ) 379 *pos++ = 11; 380 else if (status->flag & RX_FLAG_40MHZ) 381 *pos++ = 1; 382 else /* 20 MHz */ 383 *pos++ = 0; 384 /* MCS/NSS */ 385 *pos = (status->rate_idx << 4) | status->vht_nss; 386 pos += 4; 387 /* coding field */ 388 if (status->flag & RX_FLAG_LDPC) 389 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; 390 pos++; 391 /* group ID */ 392 pos++; 393 /* partial_aid */ 394 pos += 2; 395 } 396 397 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 398 *pos++ = status->chain_signal[chain]; 399 *pos++ = chain; 400 } 401 402 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 403 /* ensure 2 byte alignment for the vendor field as required */ 404 if ((pos - (u8 *)rthdr) & 1) 405 *pos++ = 0; 406 *pos++ = rtap.oui[0]; 407 *pos++ = rtap.oui[1]; 408 *pos++ = rtap.oui[2]; 409 *pos++ = rtap.subns; 410 put_unaligned_le16(rtap.len, pos); 411 pos += 2; 412 /* align the actual payload as requested */ 413 while ((pos - (u8 *)rthdr) & (rtap.align - 1)) 414 *pos++ = 0; 415 /* data (and possible padding) already follows */ 416 } 417} 418 419/* 420 * This function copies a received frame to all monitor interfaces and 421 * returns a cleaned-up SKB that no longer includes the FCS nor the 422 * radiotap header the driver might have added. 423 */ 424static struct sk_buff * 425ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 426 struct ieee80211_rate *rate) 427{ 428 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 429 struct ieee80211_sub_if_data *sdata; 430 int rt_hdrlen, needed_headroom; 431 struct sk_buff *skb, *skb2; 432 struct net_device *prev_dev = NULL; 433 int present_fcs_len = 0; 434 unsigned int rtap_vendor_space = 0; 435 436 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { 437 struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data; 438 439 rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad; 440 } 441 442 /* 443 * First, we may need to make a copy of the skb because 444 * (1) we need to modify it for radiotap (if not present), and 445 * (2) the other RX handlers will modify the skb we got. 446 * 447 * We don't need to, of course, if we aren't going to return 448 * the SKB because it has a bad FCS/PLCP checksum. 
449 */ 450 451 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 452 present_fcs_len = FCS_LEN; 453 454 /* ensure hdr->frame_control and vendor radiotap data are in skb head */ 455 if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) { 456 dev_kfree_skb(origskb); 457 return NULL; 458 } 459 460 if (!local->monitors) { 461 if (should_drop_frame(origskb, present_fcs_len, 462 rtap_vendor_space)) { 463 dev_kfree_skb(origskb); 464 return NULL; 465 } 466 467 return remove_monitor_info(local, origskb, rtap_vendor_space); 468 } 469 470 /* room for the radiotap header based on driver features */ 471 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb); 472 needed_headroom = rt_hdrlen - rtap_vendor_space; 473 474 if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) { 475 /* only need to expand headroom if necessary */ 476 skb = origskb; 477 origskb = NULL; 478 479 /* 480 * This shouldn't trigger often because most devices have an 481 * RX header they pull before we get here, and that should 482 * be big enough for our radiotap information. We should 483 * probably export the length to drivers so that we can have 484 * them allocate enough headroom to start with. 485 */ 486 if (skb_headroom(skb) < needed_headroom && 487 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 488 dev_kfree_skb(skb); 489 return NULL; 490 } 491 } else { 492 /* 493 * Need to make a copy and possibly remove radiotap header 494 * and FCS from the original. 495 */ 496 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); 497 498 origskb = remove_monitor_info(local, origskb, 499 rtap_vendor_space); 500 501 if (!skb) 502 return origskb; 503 } 504 505 /* prepend radiotap information */ 506 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); 507 508 skb_reset_mac_header(skb); 509 skb->ip_summed = CHECKSUM_UNNECESSARY; 510 skb->pkt_type = PACKET_OTHERHOST; 511 skb->protocol = htons(ETH_P_802_2); 512 513 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 514 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 515 continue; 516 517 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 518 continue; 519 520 if (!ieee80211_sdata_running(sdata)) 521 continue; 522 523 if (prev_dev) { 524 skb2 = skb_clone(skb, GFP_ATOMIC); 525 if (skb2) { 526 skb2->dev = prev_dev; 527 netif_receive_skb(skb2); 528 } 529 } 530 531 prev_dev = sdata->dev; 532 sdata->dev->stats.rx_packets++; 533 sdata->dev->stats.rx_bytes += skb->len; 534 } 535 536 if (prev_dev) { 537 skb->dev = prev_dev; 538 netif_receive_skb(skb); 539 } else 540 dev_kfree_skb(skb); 541 542 return origskb; 543} 544 545static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 546{ 547 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 548 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 549 int tid, seqno_idx, security_idx; 550 551 /* does the frame have a qos control field? 
*/ 552 if (ieee80211_is_data_qos(hdr->frame_control)) { 553 u8 *qc = ieee80211_get_qos_ctl(hdr); 554 /* frame has qos control */ 555 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 556 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) 557 status->rx_flags |= IEEE80211_RX_AMSDU; 558 559 seqno_idx = tid; 560 security_idx = tid; 561 } else { 562 /* 563 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 564 * 565 * Sequence numbers for management frames, QoS data 566 * frames with a broadcast/multicast address in the 567 * Address 1 field, and all non-QoS data frames sent 568 * by QoS STAs are assigned using an additional single 569 * modulo-4096 counter, [...] 570 * 571 * We also use that counter for non-QoS STAs. 572 */ 573 seqno_idx = IEEE80211_NUM_TIDS; 574 security_idx = 0; 575 if (ieee80211_is_mgmt(hdr->frame_control)) 576 security_idx = IEEE80211_NUM_TIDS; 577 tid = 0; 578 } 579 580 rx->seqno_idx = seqno_idx; 581 rx->security_idx = security_idx; 582 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 583 * For now, set skb->priority to 0 for other cases. */ 584 rx->skb->priority = (tid > 7) ? 0 : tid; 585} 586 587/** 588 * DOC: Packet alignment 589 * 590 * Drivers always need to pass packets that are aligned to two-byte boundaries 591 * to the stack. 592 * 593 * Additionally, should, if possible, align the payload data in a way that 594 * guarantees that the contained IP header is aligned to a four-byte 595 * boundary. In the case of regular frames, this simply means aligning the 596 * payload to a four-byte boundary (because either the IP header is directly 597 * contained, or IV/RFC1042 headers that have a length divisible by four are 598 * in front of it). If the payload data is not properly aligned and the 599 * architecture doesn't support efficient unaligned operations, mac80211 600 * will align the data. 601 * 602 * With A-MSDU frames, however, the payload data address must yield two modulo 603 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 604 * push the IP header further back to a multiple of four again. Thankfully, the 605 * specs were sane enough this time around to require padding each A-MSDU 606 * subframe to a length that is a multiple of four. 607 * 608 * Padding like Atheros hardware adds which is between the 802.11 header and 609 * the payload is not supported, the driver is required to move the 802.11 610 * header to be directly in front of the payload in that case. 
611 */ 612static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 613{ 614#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 615 WARN_ONCE((unsigned long)rx->skb->data & 1, 616 "unaligned packet at 0x%p\n", rx->skb->data); 617#endif 618} 619 620 621/* rx handlers */ 622 623static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) 624{ 625 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 626 627 if (is_multicast_ether_addr(hdr->addr1)) 628 return 0; 629 630 return ieee80211_is_robust_mgmt_frame(skb); 631} 632 633 634static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) 635{ 636 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 637 638 if (!is_multicast_ether_addr(hdr->addr1)) 639 return 0; 640 641 return ieee80211_is_robust_mgmt_frame(skb); 642} 643 644 645/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ 646static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) 647{ 648 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; 649 struct ieee80211_mmie *mmie; 650 struct ieee80211_mmie_16 *mmie16; 651 652 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) 653 return -1; 654 655 if (!ieee80211_is_robust_mgmt_frame(skb)) 656 return -1; /* not a robust management frame */ 657 658 mmie = (struct ieee80211_mmie *) 659 (skb->data + skb->len - sizeof(*mmie)); 660 if (mmie->element_id == WLAN_EID_MMIE && 661 mmie->length == sizeof(*mmie) - 2) 662 return le16_to_cpu(mmie->key_id); 663 664 mmie16 = (struct ieee80211_mmie_16 *) 665 (skb->data + skb->len - sizeof(*mmie16)); 666 if (skb->len >= 24 + sizeof(*mmie16) && 667 mmie16->element_id == WLAN_EID_MMIE && 668 mmie16->length == sizeof(*mmie16) - 2) 669 return le16_to_cpu(mmie16->key_id); 670 671 return -1; 672} 673 674static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs, 675 struct sk_buff *skb) 676{ 677 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 678 __le16 fc; 679 int hdrlen; 680 u8 keyid; 681 682 fc = hdr->frame_control; 683 hdrlen = ieee80211_hdrlen(fc); 684 685 if (skb->len < hdrlen + cs->hdr_len) 686 return -EINVAL; 687 688 skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1); 689 keyid &= cs->key_idx_mask; 690 keyid >>= cs->key_idx_shift; 691 692 return keyid; 693} 694 695static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 696{ 697 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 698 char *dev_addr = rx->sdata->vif.addr; 699 700 if (ieee80211_is_data(hdr->frame_control)) { 701 if (is_multicast_ether_addr(hdr->addr1)) { 702 if (ieee80211_has_tods(hdr->frame_control) || 703 !ieee80211_has_fromds(hdr->frame_control)) 704 return RX_DROP_MONITOR; 705 if (ether_addr_equal(hdr->addr3, dev_addr)) 706 return RX_DROP_MONITOR; 707 } else { 708 if (!ieee80211_has_a4(hdr->frame_control)) 709 return RX_DROP_MONITOR; 710 if (ether_addr_equal(hdr->addr4, dev_addr)) 711 return RX_DROP_MONITOR; 712 } 713 } 714 715 /* If there is not an established peer link and this is not a peer link 716 * establisment frame, beacon or probe, drop the frame. 
717 */ 718 719 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { 720 struct ieee80211_mgmt *mgmt; 721 722 if (!ieee80211_is_mgmt(hdr->frame_control)) 723 return RX_DROP_MONITOR; 724 725 if (ieee80211_is_action(hdr->frame_control)) { 726 u8 category; 727 728 /* make sure category field is present */ 729 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) 730 return RX_DROP_MONITOR; 731 732 mgmt = (struct ieee80211_mgmt *)hdr; 733 category = mgmt->u.action.category; 734 if (category != WLAN_CATEGORY_MESH_ACTION && 735 category != WLAN_CATEGORY_SELF_PROTECTED) 736 return RX_DROP_MONITOR; 737 return RX_CONTINUE; 738 } 739 740 if (ieee80211_is_probe_req(hdr->frame_control) || 741 ieee80211_is_probe_resp(hdr->frame_control) || 742 ieee80211_is_beacon(hdr->frame_control) || 743 ieee80211_is_auth(hdr->frame_control)) 744 return RX_CONTINUE; 745 746 return RX_DROP_MONITOR; 747 } 748 749 return RX_CONTINUE; 750} 751 752static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, 753 struct tid_ampdu_rx *tid_agg_rx, 754 int index, 755 struct sk_buff_head *frames) 756{ 757 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index]; 758 struct sk_buff *skb; 759 struct ieee80211_rx_status *status; 760 761 lockdep_assert_held(&tid_agg_rx->reorder_lock); 762 763 if (skb_queue_empty(skb_list)) 764 goto no_frame; 765 766 if (!ieee80211_rx_reorder_ready(skb_list)) { 767 __skb_queue_purge(skb_list); 768 goto no_frame; 769 } 770 771 /* release frames from the reorder ring buffer */ 772 tid_agg_rx->stored_mpdu_num--; 773 while ((skb = __skb_dequeue(skb_list))) { 774 status = IEEE80211_SKB_RXCB(skb); 775 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; 776 __skb_queue_tail(frames, skb); 777 } 778 779no_frame: 780 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); 781} 782 783static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, 784 struct tid_ampdu_rx *tid_agg_rx, 785 u16 head_seq_num, 786 struct sk_buff_head *frames) 787{ 788 int index; 789 790 lockdep_assert_held(&tid_agg_rx->reorder_lock); 791 792 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { 793 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 794 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 795 frames); 796 } 797} 798 799/* 800 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 801 * the skb was added to the buffer longer than this time ago, the earlier 802 * frames that have not yet been received are assumed to be lost and the skb 803 * can be released for processing. This may also release other skb's from the 804 * reorder buffer if there are no additional gaps between the frames. 805 * 806 * Callers must hold tid_agg_rx->reorder_lock. 807 */ 808#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 809 810static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, 811 struct tid_ampdu_rx *tid_agg_rx, 812 struct sk_buff_head *frames) 813{ 814 int index, i, j; 815 816 lockdep_assert_held(&tid_agg_rx->reorder_lock); 817 818 /* release the buffer until next missing frame */ 819 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 820 if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) && 821 tid_agg_rx->stored_mpdu_num) { 822 /* 823 * No buffers ready to be released, but check whether any 824 * frames in the reorder buffer have timed out. 
825 */ 826 int skipped = 1; 827 for (j = (index + 1) % tid_agg_rx->buf_size; j != index; 828 j = (j + 1) % tid_agg_rx->buf_size) { 829 if (!ieee80211_rx_reorder_ready( 830 &tid_agg_rx->reorder_buf[j])) { 831 skipped++; 832 continue; 833 } 834 if (skipped && 835 !time_after(jiffies, tid_agg_rx->reorder_time[j] + 836 HT_RX_REORDER_BUF_TIMEOUT)) 837 goto set_release_timer; 838 839 /* don't leave incomplete A-MSDUs around */ 840 for (i = (index + 1) % tid_agg_rx->buf_size; i != j; 841 i = (i + 1) % tid_agg_rx->buf_size) 842 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); 843 844 ht_dbg_ratelimited(sdata, 845 "release an RX reorder frame due to timeout on earlier frames\n"); 846 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, 847 frames); 848 849 /* 850 * Increment the head seq# also for the skipped slots. 851 */ 852 tid_agg_rx->head_seq_num = 853 (tid_agg_rx->head_seq_num + 854 skipped) & IEEE80211_SN_MASK; 855 skipped = 0; 856 } 857 } else while (ieee80211_rx_reorder_ready( 858 &tid_agg_rx->reorder_buf[index])) { 859 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 860 frames); 861 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 862 } 863 864 if (tid_agg_rx->stored_mpdu_num) { 865 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 866 867 for (; j != (index - 1) % tid_agg_rx->buf_size; 868 j = (j + 1) % tid_agg_rx->buf_size) { 869 if (ieee80211_rx_reorder_ready( 870 &tid_agg_rx->reorder_buf[j])) 871 break; 872 } 873 874 set_release_timer: 875 876 if (!tid_agg_rx->removed) 877 mod_timer(&tid_agg_rx->reorder_timer, 878 tid_agg_rx->reorder_time[j] + 1 + 879 HT_RX_REORDER_BUF_TIMEOUT); 880 } else { 881 del_timer(&tid_agg_rx->reorder_timer); 882 } 883} 884 885/* 886 * As this function belongs to the RX path it must be under 887 * rcu_read_lock protection. It returns false if the frame 888 * can be processed immediately, true if it was consumed. 889 */ 890static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, 891 struct tid_ampdu_rx *tid_agg_rx, 892 struct sk_buff *skb, 893 struct sk_buff_head *frames) 894{ 895 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 896 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 897 u16 sc = le16_to_cpu(hdr->seq_ctrl); 898 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 899 u16 head_seq_num, buf_size; 900 int index; 901 bool ret = true; 902 903 spin_lock(&tid_agg_rx->reorder_lock); 904 905 /* 906 * Offloaded BA sessions have no known starting sequence number so pick 907 * one from first Rxed frame for this tid after BA was started. 908 */ 909 if (unlikely(tid_agg_rx->auto_seq)) { 910 tid_agg_rx->auto_seq = false; 911 tid_agg_rx->ssn = mpdu_seq_num; 912 tid_agg_rx->head_seq_num = mpdu_seq_num; 913 } 914 915 buf_size = tid_agg_rx->buf_size; 916 head_seq_num = tid_agg_rx->head_seq_num; 917 918 /* frame with out of date sequence number */ 919 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { 920 dev_kfree_skb(skb); 921 goto out; 922 } 923 924 /* 925 * If frame the sequence number exceeds our buffering window 926 * size release some previous frames to make room for this one. 
927 */ 928 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) { 929 head_seq_num = ieee80211_sn_inc( 930 ieee80211_sn_sub(mpdu_seq_num, buf_size)); 931 /* release stored frames up to new head to stack */ 932 ieee80211_release_reorder_frames(sdata, tid_agg_rx, 933 head_seq_num, frames); 934 } 935 936 /* Now the new frame is always in the range of the reordering buffer */ 937 938 index = mpdu_seq_num % tid_agg_rx->buf_size; 939 940 /* check if we already stored this frame */ 941 if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) { 942 dev_kfree_skb(skb); 943 goto out; 944 } 945 946 /* 947 * If the current MPDU is in the right order and nothing else 948 * is stored we can process it directly, no need to buffer it. 949 * If it is first but there's something stored, we may be able 950 * to release frames after this one. 951 */ 952 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 953 tid_agg_rx->stored_mpdu_num == 0) { 954 if (!(status->flag & RX_FLAG_AMSDU_MORE)) 955 tid_agg_rx->head_seq_num = 956 ieee80211_sn_inc(tid_agg_rx->head_seq_num); 957 ret = false; 958 goto out; 959 } 960 961 /* put the frame in the reordering buffer */ 962 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb); 963 if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 964 tid_agg_rx->reorder_time[index] = jiffies; 965 tid_agg_rx->stored_mpdu_num++; 966 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames); 967 } 968 969 out: 970 spin_unlock(&tid_agg_rx->reorder_lock); 971 return ret; 972} 973 974/* 975 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns 976 * true if the MPDU was buffered, false if it should be processed. 977 */ 978static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, 979 struct sk_buff_head *frames) 980{ 981 struct sk_buff *skb = rx->skb; 982 struct ieee80211_local *local = rx->local; 983 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 984 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 985 struct sta_info *sta = rx->sta; 986 struct tid_ampdu_rx *tid_agg_rx; 987 u16 sc; 988 u8 tid, ack_policy; 989 990 if (!ieee80211_is_data_qos(hdr->frame_control) || 991 is_multicast_ether_addr(hdr->addr1)) 992 goto dont_reorder; 993 994 /* 995 * filter the QoS data rx stream according to 996 * STA/TID and check if this STA/TID is on aggregation 997 */ 998 999 if (!sta) 1000 goto dont_reorder; 1001 1002 ack_policy = *ieee80211_get_qos_ctl(hdr) & 1003 IEEE80211_QOS_CTL_ACK_POLICY_MASK; 1004 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 1005 1006 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 1007 if (!tid_agg_rx) 1008 goto dont_reorder; 1009 1010 /* qos null data frames are excluded */ 1011 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 1012 goto dont_reorder; 1013 1014 /* not part of a BA session */ 1015 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1016 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) 1017 goto dont_reorder; 1018 1019 /* not actually part of this BA session */ 1020 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1021 goto dont_reorder; 1022 1023 /* new, potentially un-ordered, ampdu frame - process it */ 1024 1025 /* reset session timer */ 1026 if (tid_agg_rx->timeout) 1027 tid_agg_rx->last_rx = jiffies; 1028 1029 /* if this mpdu is fragmented - terminate rx aggregation session */ 1030 sc = le16_to_cpu(hdr->seq_ctrl); 1031 if (sc & IEEE80211_SCTL_FRAG) { 1032 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 1033 skb_queue_tail(&rx->sdata->skb_queue, 
skb); 1034 ieee80211_queue_work(&local->hw, &rx->sdata->work); 1035 return; 1036 } 1037 1038 /* 1039 * No locking needed -- we will only ever process one 1040 * RX packet at a time, and thus own tid_agg_rx. All 1041 * other code manipulating it needs to (and does) make 1042 * sure that we cannot get to it any more before doing 1043 * anything with it. 1044 */ 1045 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb, 1046 frames)) 1047 return; 1048 1049 dont_reorder: 1050 __skb_queue_tail(frames, skb); 1051} 1052 1053static ieee80211_rx_result debug_noinline 1054ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) 1055{ 1056 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1057 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1058 1059 /* 1060 * Drop duplicate 802.11 retransmissions 1061 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") 1062 */ 1063 1064 if (rx->skb->len < 24) 1065 return RX_CONTINUE; 1066 1067 if (ieee80211_is_ctl(hdr->frame_control) || 1068 ieee80211_is_qos_nullfunc(hdr->frame_control) || 1069 is_multicast_ether_addr(hdr->addr1)) 1070 return RX_CONTINUE; 1071 1072 if (rx->sta) { 1073 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 1074 rx->sta->last_seq_ctrl[rx->seqno_idx] == 1075 hdr->seq_ctrl)) { 1076 if (status->rx_flags & IEEE80211_RX_RA_MATCH) { 1077 rx->local->dot11FrameDuplicateCount++; 1078 rx->sta->num_duplicates++; 1079 } 1080 return RX_DROP_UNUSABLE; 1081 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 1082 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; 1083 } 1084 } 1085 1086 return RX_CONTINUE; 1087} 1088 1089static ieee80211_rx_result debug_noinline 1090ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 1091{ 1092 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1093 1094 if (unlikely(rx->skb->len < 16)) { 1095 I802_DEBUG_INC(rx->local->rx_handlers_drop_short); 1096 return RX_DROP_MONITOR; 1097 } 1098 1099 /* Drop disallowed frame classes based on STA auth/assoc state; 1100 * IEEE 802.11, Chap 5.5. 1101 * 1102 * mac80211 filters only based on association state, i.e. it drops 1103 * Class 3 frames from not associated stations. hostapd sends 1104 * deauth/disassoc frames when needed. In addition, hostapd is 1105 * responsible for filtering on both auth and assoc states. 
1106 */ 1107 1108 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 1109 return ieee80211_rx_mesh_check(rx); 1110 1111 if (unlikely((ieee80211_is_data(hdr->frame_control) || 1112 ieee80211_is_pspoll(hdr->frame_control)) && 1113 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 1114 rx->sdata->vif.type != NL80211_IFTYPE_WDS && 1115 rx->sdata->vif.type != NL80211_IFTYPE_OCB && 1116 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) { 1117 /* 1118 * accept port control frames from the AP even when it's not 1119 * yet marked ASSOC to prevent a race where we don't set the 1120 * assoc bit quickly enough before it sends the first frame 1121 */ 1122 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION && 1123 ieee80211_is_data_present(hdr->frame_control)) { 1124 unsigned int hdrlen; 1125 __be16 ethertype; 1126 1127 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1128 1129 if (rx->skb->len < hdrlen + 8) 1130 return RX_DROP_MONITOR; 1131 1132 skb_copy_bits(rx->skb, hdrlen + 6, ðertype, 2); 1133 if (ethertype == rx->sdata->control_port_protocol) 1134 return RX_CONTINUE; 1135 } 1136 1137 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 1138 cfg80211_rx_spurious_frame(rx->sdata->dev, 1139 hdr->addr2, 1140 GFP_ATOMIC)) 1141 return RX_DROP_UNUSABLE; 1142 1143 return RX_DROP_MONITOR; 1144 } 1145 1146 return RX_CONTINUE; 1147} 1148 1149 1150static ieee80211_rx_result debug_noinline 1151ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx) 1152{ 1153 struct ieee80211_local *local; 1154 struct ieee80211_hdr *hdr; 1155 struct sk_buff *skb; 1156 1157 local = rx->local; 1158 skb = rx->skb; 1159 hdr = (struct ieee80211_hdr *) skb->data; 1160 1161 if (!local->pspolling) 1162 return RX_CONTINUE; 1163 1164 if (!ieee80211_has_fromds(hdr->frame_control)) 1165 /* this is not from AP */ 1166 return RX_CONTINUE; 1167 1168 if (!ieee80211_is_data(hdr->frame_control)) 1169 return RX_CONTINUE; 1170 1171 if (!ieee80211_has_moredata(hdr->frame_control)) { 1172 /* AP has no more frames buffered for us */ 1173 local->pspolling = false; 1174 return RX_CONTINUE; 1175 } 1176 1177 /* more data bit is set, let's request a new frame from the AP */ 1178 ieee80211_send_pspoll(local, rx->sdata); 1179 1180 return RX_CONTINUE; 1181} 1182 1183static void sta_ps_start(struct sta_info *sta) 1184{ 1185 struct ieee80211_sub_if_data *sdata = sta->sdata; 1186 struct ieee80211_local *local = sdata->local; 1187 struct ps_data *ps; 1188 int tid; 1189 1190 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 1191 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1192 ps = &sdata->bss->ps; 1193 else 1194 return; 1195 1196 atomic_inc(&ps->num_sta_ps); 1197 set_sta_flag(sta, WLAN_STA_PS_STA); 1198 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1199 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1200 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n", 1201 sta->sta.addr, sta->sta.aid); 1202 1203 if (!sta->sta.txq[0]) 1204 return; 1205 1206 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 1207 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); 1208 1209 if (!skb_queue_len(&txqi->queue)) 1210 set_bit(tid, &sta->txq_buffered_tids); 1211 else 1212 clear_bit(tid, &sta->txq_buffered_tids); 1213 } 1214} 1215 1216static void sta_ps_end(struct sta_info *sta) 1217{ 1218 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n", 1219 sta->sta.addr, sta->sta.aid); 1220 1221 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1222 /* 1223 * Clear the flag only if the other one is still set 1224 * so that the TX path won't start TX'ing 
new frames 1225 * directly ... In the case that the driver flag isn't 1226 * set ieee80211_sta_ps_deliver_wakeup() will clear it. 1227 */ 1228 clear_sta_flag(sta, WLAN_STA_PS_STA); 1229 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", 1230 sta->sta.addr, sta->sta.aid); 1231 return; 1232 } 1233 1234 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 1235 clear_sta_flag(sta, WLAN_STA_PS_STA); 1236 ieee80211_sta_ps_deliver_wakeup(sta); 1237} 1238 1239int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start) 1240{ 1241 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta); 1242 bool in_ps; 1243 1244 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS)); 1245 1246 /* Don't let the same PS state be set twice */ 1247 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA); 1248 if ((start && in_ps) || (!start && !in_ps)) 1249 return -EINVAL; 1250 1251 if (start) 1252 sta_ps_start(sta_inf); 1253 else 1254 sta_ps_end(sta_inf); 1255 1256 return 0; 1257} 1258EXPORT_SYMBOL(ieee80211_sta_ps_transition); 1259 1260static ieee80211_rx_result debug_noinline 1261ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) 1262{ 1263 struct ieee80211_sub_if_data *sdata = rx->sdata; 1264 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 1265 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1266 int tid, ac; 1267 1268 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1269 return RX_CONTINUE; 1270 1271 if (sdata->vif.type != NL80211_IFTYPE_AP && 1272 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 1273 return RX_CONTINUE; 1274 1275 /* 1276 * The device handles station powersave, so don't do anything about 1277 * uAPSD and PS-Poll frames (the latter shouldn't even come up from 1278 * it to mac80211 since they're handled.) 1279 */ 1280 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS) 1281 return RX_CONTINUE; 1282 1283 /* 1284 * Don't do anything if the station isn't already asleep. In 1285 * the uAPSD case, the station will probably be marked asleep, 1286 * in the PS-Poll case the station must be confused ... 1287 */ 1288 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA)) 1289 return RX_CONTINUE; 1290 1291 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) { 1292 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) { 1293 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER)) 1294 ieee80211_sta_ps_deliver_poll_response(rx->sta); 1295 else 1296 set_sta_flag(rx->sta, WLAN_STA_PSPOLL); 1297 } 1298 1299 /* Free PS Poll skb here instead of returning RX_DROP that would 1300 * count as an dropped frame. */ 1301 dev_kfree_skb(rx->skb); 1302 1303 return RX_QUEUED; 1304 } else if (!ieee80211_has_morefrags(hdr->frame_control) && 1305 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1306 ieee80211_has_pm(hdr->frame_control) && 1307 (ieee80211_is_data_qos(hdr->frame_control) || 1308 ieee80211_is_qos_nullfunc(hdr->frame_control))) { 1309 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 1310 ac = ieee802_1d_to_ac[tid & 7]; 1311 1312 /* 1313 * If this AC is not trigger-enabled do nothing. 1314 * 1315 * NB: This could/should check a separate bitmap of trigger- 1316 * enabled queues, but for now we only implement uAPSD w/o 1317 * TSPEC changes to the ACs, so they're always the same. 
1318 */ 1319 if (!(rx->sta->sta.uapsd_queues & BIT(ac))) 1320 return RX_CONTINUE; 1321 1322 /* if we are in a service period, do nothing */ 1323 if (test_sta_flag(rx->sta, WLAN_STA_SP)) 1324 return RX_CONTINUE; 1325 1326 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER)) 1327 ieee80211_sta_ps_deliver_uapsd(rx->sta); 1328 else 1329 set_sta_flag(rx->sta, WLAN_STA_UAPSD); 1330 } 1331 1332 return RX_CONTINUE; 1333} 1334 1335static ieee80211_rx_result debug_noinline 1336ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) 1337{ 1338 struct sta_info *sta = rx->sta; 1339 struct sk_buff *skb = rx->skb; 1340 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1341 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1342 int i; 1343 1344 if (!sta) 1345 return RX_CONTINUE; 1346 1347 /* 1348 * Update last_rx only for IBSS packets which are for the current 1349 * BSSID and for station already AUTHORIZED to avoid keeping the 1350 * current IBSS network alive in cases where other STAs start 1351 * using different BSSID. This will also give the station another 1352 * chance to restart the authentication/authorization in case 1353 * something went wrong the first time. 1354 */ 1355 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { 1356 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 1357 NL80211_IFTYPE_ADHOC); 1358 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && 1359 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { 1360 sta->last_rx = jiffies; 1361 if (ieee80211_is_data(hdr->frame_control) && 1362 !is_multicast_ether_addr(hdr->addr1)) { 1363 sta->last_rx_rate_idx = status->rate_idx; 1364 sta->last_rx_rate_flag = status->flag; 1365 sta->last_rx_rate_vht_flag = status->vht_flag; 1366 sta->last_rx_rate_vht_nss = status->vht_nss; 1367 } 1368 } 1369 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { 1370 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 1371 NL80211_IFTYPE_OCB); 1372 /* OCB uses wild-card BSSID */ 1373 if (is_broadcast_ether_addr(bssid)) 1374 sta->last_rx = jiffies; 1375 } else if (!is_multicast_ether_addr(hdr->addr1)) { 1376 /* 1377 * Mesh beacons will update last_rx when if they are found to 1378 * match the current local configuration when processed. 1379 */ 1380 sta->last_rx = jiffies; 1381 if (ieee80211_is_data(hdr->frame_control)) { 1382 sta->last_rx_rate_idx = status->rate_idx; 1383 sta->last_rx_rate_flag = status->flag; 1384 sta->last_rx_rate_vht_flag = status->vht_flag; 1385 sta->last_rx_rate_vht_nss = status->vht_nss; 1386 } 1387 } 1388 1389 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1390 return RX_CONTINUE; 1391 1392 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) 1393 ieee80211_sta_rx_notify(rx->sdata, hdr); 1394 1395 sta->rx_fragments++; 1396 sta->rx_bytes += rx->skb->len; 1397 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 1398 sta->last_signal = status->signal; 1399 ewma_add(&sta->avg_signal, -status->signal); 1400 } 1401 1402 if (status->chains) { 1403 sta->chains = status->chains; 1404 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 1405 int signal = status->chain_signal[i]; 1406 1407 if (!(status->chains & BIT(i))) 1408 continue; 1409 1410 sta->chain_signal_last[i] = signal; 1411 ewma_add(&sta->chain_signal_avg[i], -signal); 1412 } 1413 } 1414 1415 /* 1416 * Change STA power saving mode only at the end of a frame 1417 * exchange sequence. 
1418 */ 1419 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) && 1420 !ieee80211_has_morefrags(hdr->frame_control) && 1421 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1422 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1423 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1424 /* PM bit is only checked in frames where it isn't reserved, 1425 * in AP mode it's reserved in non-bufferable management frames 1426 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) 1427 */ 1428 (!ieee80211_is_mgmt(hdr->frame_control) || 1429 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { 1430 if (test_sta_flag(sta, WLAN_STA_PS_STA)) { 1431 if (!ieee80211_has_pm(hdr->frame_control)) 1432 sta_ps_end(sta); 1433 } else { 1434 if (ieee80211_has_pm(hdr->frame_control)) 1435 sta_ps_start(sta); 1436 } 1437 } 1438 1439 /* mesh power save support */ 1440 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 1441 ieee80211_mps_rx_h_sta_process(sta, hdr); 1442 1443 /* 1444 * Drop (qos-)data::nullfunc frames silently, since they 1445 * are used only to control station power saving mode. 1446 */ 1447 if (ieee80211_is_nullfunc(hdr->frame_control) || 1448 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 1449 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1450 1451 /* 1452 * If we receive a 4-addr nullfunc frame from a STA 1453 * that was not moved to a 4-addr STA vlan yet send 1454 * the event to userspace and for older hostapd drop 1455 * the frame to the monitor interface. 1456 */ 1457 if (ieee80211_has_a4(hdr->frame_control) && 1458 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1459 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1460 !rx->sdata->u.vlan.sta))) { 1461 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT)) 1462 cfg80211_rx_unexpected_4addr_frame( 1463 rx->sdata->dev, sta->sta.addr, 1464 GFP_ATOMIC); 1465 return RX_DROP_MONITOR; 1466 } 1467 /* 1468 * Update counter and free packet here to avoid 1469 * counting this as a dropped packed. 1470 */ 1471 sta->rx_packets++; 1472 dev_kfree_skb(rx->skb); 1473 return RX_QUEUED; 1474 } 1475 1476 return RX_CONTINUE; 1477} /* ieee80211_rx_h_sta_process */ 1478 1479static ieee80211_rx_result debug_noinline 1480ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 1481{ 1482 struct sk_buff *skb = rx->skb; 1483 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1484 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1485 int keyidx; 1486 int hdrlen; 1487 ieee80211_rx_result result = RX_DROP_UNUSABLE; 1488 struct ieee80211_key *sta_ptk = NULL; 1489 int mmie_keyidx = -1; 1490 __le16 fc; 1491 const struct ieee80211_cipher_scheme *cs = NULL; 1492 1493 /* 1494 * Key selection 101 1495 * 1496 * There are four types of keys: 1497 * - GTK (group keys) 1498 * - IGTK (group keys for management frames) 1499 * - PTK (pairwise keys) 1500 * - STK (station-to-station pairwise keys) 1501 * 1502 * When selecting a key, we have to distinguish between multicast 1503 * (including broadcast) and unicast frames, the latter can only 1504 * use PTKs and STKs while the former always use GTKs and IGTKs. 1505 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then 1506 * unicast frames can also use key indices like GTKs. Hence, if we 1507 * don't have a PTK/STK we check the key index for a WEP key. 1508 * 1509 * Note that in a regular BSS, multicast frames are sent by the 1510 * AP only, associated stations unicast the frame to the AP first 1511 * which then multicasts it on their behalf. 
1512 * 1513 * There is also a slight problem in IBSS mode: GTKs are negotiated 1514 * with each station, that is something we don't currently handle. 1515 * The spec seems to expect that one negotiates the same key with 1516 * every station but there's no such requirement; VLANs could be 1517 * possible. 1518 */ 1519 1520 /* 1521 * No point in finding a key and decrypting if the frame is neither 1522 * addressed to us nor a multicast frame. 1523 */ 1524 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1525 return RX_CONTINUE; 1526 1527 /* start without a key */ 1528 rx->key = NULL; 1529 fc = hdr->frame_control; 1530 1531 if (rx->sta) { 1532 int keyid = rx->sta->ptk_idx; 1533 1534 if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) { 1535 cs = rx->sta->cipher_scheme; 1536 keyid = iwl80211_get_cs_keyid(cs, rx->skb); 1537 if (unlikely(keyid < 0)) 1538 return RX_DROP_UNUSABLE; 1539 } 1540 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 1541 } 1542 1543 if (!ieee80211_has_protected(fc)) 1544 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 1545 1546 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 1547 rx->key = sta_ptk; 1548 if ((status->flag & RX_FLAG_DECRYPTED) && 1549 (status->flag & RX_FLAG_IV_STRIPPED)) 1550 return RX_CONTINUE; 1551 /* Skip decryption if the frame is not protected. */ 1552 if (!ieee80211_has_protected(fc)) 1553 return RX_CONTINUE; 1554 } else if (mmie_keyidx >= 0) { 1555 /* Broadcast/multicast robust management frame / BIP */ 1556 if ((status->flag & RX_FLAG_DECRYPTED) && 1557 (status->flag & RX_FLAG_IV_STRIPPED)) 1558 return RX_CONTINUE; 1559 1560 if (mmie_keyidx < NUM_DEFAULT_KEYS || 1561 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1562 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1563 if (rx->sta) 1564 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 1565 if (!rx->key) 1566 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 1567 } else if (!ieee80211_has_protected(fc)) { 1568 /* 1569 * The frame was not protected, so skip decryption. However, we 1570 * need to set rx->key if there is a key that could have been 1571 * used so that the frame may be dropped if encryption would 1572 * have been expected. 1573 */ 1574 struct ieee80211_key *key = NULL; 1575 struct ieee80211_sub_if_data *sdata = rx->sdata; 1576 int i; 1577 1578 if (ieee80211_is_mgmt(fc) && 1579 is_multicast_ether_addr(hdr->addr1) && 1580 (key = rcu_dereference(rx->sdata->default_mgmt_key))) 1581 rx->key = key; 1582 else { 1583 if (rx->sta) { 1584 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1585 key = rcu_dereference(rx->sta->gtk[i]); 1586 if (key) 1587 break; 1588 } 1589 } 1590 if (!key) { 1591 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1592 key = rcu_dereference(sdata->keys[i]); 1593 if (key) 1594 break; 1595 } 1596 } 1597 if (key) 1598 rx->key = key; 1599 } 1600 return RX_CONTINUE; 1601 } else { 1602 u8 keyid; 1603 1604 /* 1605 * The device doesn't give us the IV so we won't be 1606 * able to look up the key. That's ok though, we 1607 * don't need to decrypt the frame, we just won't 1608 * be able to keep statistics accurate. 1609 * Except for key threshold notifications, should 1610 * we somehow allow the driver to tell us which key 1611 * the hardware used if this flag is set? 
1612 */ 1613 if ((status->flag & RX_FLAG_DECRYPTED) && 1614 (status->flag & RX_FLAG_IV_STRIPPED)) 1615 return RX_CONTINUE; 1616 1617 hdrlen = ieee80211_hdrlen(fc); 1618 1619 if (cs) { 1620 keyidx = iwl80211_get_cs_keyid(cs, rx->skb); 1621 1622 if (unlikely(keyidx < 0)) 1623 return RX_DROP_UNUSABLE; 1624 } else { 1625 if (rx->skb->len < 8 + hdrlen) 1626 return RX_DROP_UNUSABLE; /* TODO: count this? */ 1627 /* 1628 * no need to call ieee80211_wep_get_keyidx, 1629 * it verifies a bunch of things we've done already 1630 */ 1631 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); 1632 keyidx = keyid >> 6; 1633 } 1634 1635 /* check per-station GTK first, if multicast packet */ 1636 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 1637 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 1638 1639 /* if not found, try default key */ 1640 if (!rx->key) { 1641 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 1642 1643 /* 1644 * RSNA-protected unicast frames should always be 1645 * sent with pairwise or station-to-station keys, 1646 * but for WEP we allow using a key index as well. 1647 */ 1648 if (rx->key && 1649 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 1650 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 1651 !is_multicast_ether_addr(hdr->addr1)) 1652 rx->key = NULL; 1653 } 1654 } 1655 1656 if (rx->key) { 1657 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 1658 return RX_DROP_MONITOR; 1659 1660 rx->key->tx_rx_count++; 1661 /* TODO: add threshold stuff again */ 1662 } else { 1663 return RX_DROP_MONITOR; 1664 } 1665 1666 switch (rx->key->conf.cipher) { 1667 case WLAN_CIPHER_SUITE_WEP40: 1668 case WLAN_CIPHER_SUITE_WEP104: 1669 result = ieee80211_crypto_wep_decrypt(rx); 1670 break; 1671 case WLAN_CIPHER_SUITE_TKIP: 1672 result = ieee80211_crypto_tkip_decrypt(rx); 1673 break; 1674 case WLAN_CIPHER_SUITE_CCMP: 1675 result = ieee80211_crypto_ccmp_decrypt( 1676 rx, IEEE80211_CCMP_MIC_LEN); 1677 break; 1678 case WLAN_CIPHER_SUITE_CCMP_256: 1679 result = ieee80211_crypto_ccmp_decrypt( 1680 rx, IEEE80211_CCMP_256_MIC_LEN); 1681 break; 1682 case WLAN_CIPHER_SUITE_AES_CMAC: 1683 result = ieee80211_crypto_aes_cmac_decrypt(rx); 1684 break; 1685 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 1686 result = ieee80211_crypto_aes_cmac_256_decrypt(rx); 1687 break; 1688 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 1689 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 1690 result = ieee80211_crypto_aes_gmac_decrypt(rx); 1691 break; 1692 case WLAN_CIPHER_SUITE_GCMP: 1693 case WLAN_CIPHER_SUITE_GCMP_256: 1694 result = ieee80211_crypto_gcmp_decrypt(rx); 1695 break; 1696 default: 1697 result = ieee80211_crypto_hw_decrypt(rx); 1698 } 1699 1700 /* the hdr variable is invalid after the decrypt handlers */ 1701 1702 /* either the frame has been decrypted or will be dropped */ 1703 status->flag |= RX_FLAG_DECRYPTED; 1704 1705 return result; 1706} 1707 1708static inline struct ieee80211_fragment_entry * 1709ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 1710 unsigned int frag, unsigned int seq, int rx_queue, 1711 struct sk_buff **skb) 1712{ 1713 struct ieee80211_fragment_entry *entry; 1714 1715 entry = &sdata->fragments[sdata->fragment_next++]; 1716 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 1717 sdata->fragment_next = 0; 1718 1719 if (!skb_queue_empty(&entry->skb_list)) 1720 __skb_queue_purge(&entry->skb_list); 1721 1722 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 1723 *skb = NULL; 1724 entry->first_frag_time = jiffies; 1725 entry->seq = seq; 1726 entry->rx_queue = rx_queue; 1727 entry->last_frag 
= frag; 1728 entry->check_sequential_pn = false; 1729 entry->extra_len = 0; 1730 1731 return entry; 1732} 1733 1734static inline struct ieee80211_fragment_entry * 1735ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 1736 unsigned int frag, unsigned int seq, 1737 int rx_queue, struct ieee80211_hdr *hdr) 1738{ 1739 struct ieee80211_fragment_entry *entry; 1740 int i, idx; 1741 1742 idx = sdata->fragment_next; 1743 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 1744 struct ieee80211_hdr *f_hdr; 1745 1746 idx--; 1747 if (idx < 0) 1748 idx = IEEE80211_FRAGMENT_MAX - 1; 1749 1750 entry = &sdata->fragments[idx]; 1751 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 1752 entry->rx_queue != rx_queue || 1753 entry->last_frag + 1 != frag) 1754 continue; 1755 1756 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; 1757 1758 /* 1759 * Check ftype and addresses are equal, else check next fragment 1760 */ 1761 if (((hdr->frame_control ^ f_hdr->frame_control) & 1762 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 1763 !ether_addr_equal(hdr->addr1, f_hdr->addr1) || 1764 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 1765 continue; 1766 1767 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 1768 __skb_queue_purge(&entry->skb_list); 1769 continue; 1770 } 1771 return entry; 1772 } 1773 1774 return NULL; 1775} 1776 1777static ieee80211_rx_result debug_noinline 1778ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 1779{ 1780 struct ieee80211_hdr *hdr; 1781 u16 sc; 1782 __le16 fc; 1783 unsigned int frag, seq; 1784 struct ieee80211_fragment_entry *entry; 1785 struct sk_buff *skb; 1786 struct ieee80211_rx_status *status; 1787 1788 hdr = (struct ieee80211_hdr *)rx->skb->data; 1789 fc = hdr->frame_control; 1790 1791 if (ieee80211_is_ctl(fc)) 1792 return RX_CONTINUE; 1793 1794 sc = le16_to_cpu(hdr->seq_ctrl); 1795 frag = sc & IEEE80211_SCTL_FRAG; 1796 1797 if (is_multicast_ether_addr(hdr->addr1)) { 1798 rx->local->dot11MulticastReceivedFrameCount++; 1799 goto out_no_led; 1800 } 1801 1802 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 1803 goto out; 1804 1805 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 1806 1807 if (skb_linearize(rx->skb)) 1808 return RX_DROP_UNUSABLE; 1809 1810 /* 1811 * skb_linearize() might change the skb->data and 1812 * previously cached variables (in this case, hdr) need to 1813 * be refreshed with the new data. 1814 */ 1815 hdr = (struct ieee80211_hdr *)rx->skb->data; 1816 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 1817 1818 if (frag == 0) { 1819 /* This is the first fragment of a new frame. */ 1820 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 1821 rx->seqno_idx, &(rx->skb)); 1822 if (rx->key && 1823 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 1824 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 1825 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 1826 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 1827 ieee80211_has_protected(fc)) { 1828 int queue = rx->security_idx; 1829 1830 /* Store CCMP/GCMP PN so that we can verify that the 1831 * next fragment has a sequential PN value. 
1832 */ 1833 entry->check_sequential_pn = true; 1834 memcpy(entry->last_pn, 1835 rx->key->u.ccmp.rx_pn[queue], 1836 IEEE80211_CCMP_PN_LEN); 1837 BUILD_BUG_ON(offsetof(struct ieee80211_key, 1838 u.ccmp.rx_pn) != 1839 offsetof(struct ieee80211_key, 1840 u.gcmp.rx_pn)); 1841 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 1842 sizeof(rx->key->u.gcmp.rx_pn[queue])); 1843 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 1844 IEEE80211_GCMP_PN_LEN); 1845 } 1846 return RX_QUEUED; 1847 } 1848 1849 /* This is a fragment for a frame that should already be pending in 1850 * fragment cache. Add this fragment to the end of the pending entry. 1851 */ 1852 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, 1853 rx->seqno_idx, hdr); 1854 if (!entry) { 1855 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1856 return RX_DROP_MONITOR; 1857 } 1858 1859 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 1860 * MPDU PN values are not incrementing in steps of 1." 1861 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 1862 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 1863 */ 1864 if (entry->check_sequential_pn) { 1865 int i; 1866 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 1867 int queue; 1868 1869 if (!rx->key || 1870 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && 1871 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && 1872 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && 1873 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) 1874 return RX_DROP_UNUSABLE; 1875 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 1876 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 1877 pn[i]++; 1878 if (pn[i]) 1879 break; 1880 } 1881 queue = rx->security_idx; 1882 rpn = rx->key->u.ccmp.rx_pn[queue]; 1883 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 1884 return RX_DROP_UNUSABLE; 1885 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 1886 } 1887 1888 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 1889 __skb_queue_tail(&entry->skb_list, rx->skb); 1890 entry->last_frag = frag; 1891 entry->extra_len += rx->skb->len; 1892 if (ieee80211_has_morefrags(fc)) { 1893 rx->skb = NULL; 1894 return RX_QUEUED; 1895 } 1896 1897 rx->skb = __skb_dequeue(&entry->skb_list); 1898 if (skb_tailroom(rx->skb) < entry->extra_len) { 1899 I802_DEBUG_INC(rx->local->rx_expand_skb_head2); 1900 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 1901 GFP_ATOMIC))) { 1902 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1903 __skb_queue_purge(&entry->skb_list); 1904 return RX_DROP_UNUSABLE; 1905 } 1906 } 1907 while ((skb = __skb_dequeue(&entry->skb_list))) { 1908 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); 1909 dev_kfree_skb(skb); 1910 } 1911 1912 /* Complete frame has been reassembled - process it now */ 1913 status = IEEE80211_SKB_RXCB(rx->skb); 1914 status->rx_flags |= IEEE80211_RX_FRAGMENTED; 1915 1916 out: 1917 ieee80211_led_rx(rx->local); 1918 out_no_led: 1919 if (rx->sta) 1920 rx->sta->rx_packets++; 1921 return RX_CONTINUE; 1922} 1923 1924static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1925{ 1926 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 1927 return -EACCES; 1928 1929 return 0; 1930} 1931 1932static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 1933{ 1934 struct sk_buff *skb = rx->skb; 1935 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1936 1937 /* 1938 * Pass through unencrypted frames if the hardware has 1939 * decrypted them already. 
1940 */ 1941 if (status->flag & RX_FLAG_DECRYPTED) 1942 return 0; 1943 1944 /* Drop unencrypted frames if key is set. */ 1945 if (unlikely(!ieee80211_has_protected(fc) && 1946 !ieee80211_is_nullfunc(fc) && 1947 ieee80211_is_data(fc) && rx->key)) 1948 return -EACCES; 1949 1950 return 0; 1951} 1952 1953static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 1954{ 1955 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1956 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1957 __le16 fc = hdr->frame_control; 1958 1959 /* 1960 * Pass through unencrypted frames if the hardware has 1961 * decrypted them already. 1962 */ 1963 if (status->flag & RX_FLAG_DECRYPTED) 1964 return 0; 1965 1966 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 1967 if (unlikely(!ieee80211_has_protected(fc) && 1968 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1969 rx->key)) { 1970 if (ieee80211_is_deauth(fc) || 1971 ieee80211_is_disassoc(fc)) 1972 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1973 rx->skb->data, 1974 rx->skb->len); 1975 return -EACCES; 1976 } 1977 /* BIP does not use Protected field, so need to check MMIE */ 1978 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 1979 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 1980 if (ieee80211_is_deauth(fc) || 1981 ieee80211_is_disassoc(fc)) 1982 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1983 rx->skb->data, 1984 rx->skb->len); 1985 return -EACCES; 1986 } 1987 /* 1988 * When using MFP, Action frames are not allowed prior to 1989 * having configured keys. 1990 */ 1991 if (unlikely(ieee80211_is_action(fc) && !rx->key && 1992 ieee80211_is_robust_mgmt_frame(rx->skb))) 1993 return -EACCES; 1994 } 1995 1996 return 0; 1997} 1998 1999static int 2000__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2001{ 2002 struct ieee80211_sub_if_data *sdata = rx->sdata; 2003 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2004 bool check_port_control = false; 2005 struct ethhdr *ehdr; 2006 int ret; 2007 2008 *port_control = false; 2009 if (ieee80211_has_a4(hdr->frame_control) && 2010 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2011 return -1; 2012 2013 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2014 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2015 2016 if (!sdata->u.mgd.use_4addr) 2017 return -1; 2018 else 2019 check_port_control = true; 2020 } 2021 2022 if (is_multicast_ether_addr(hdr->addr1) && 2023 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2024 return -1; 2025 2026 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2027 if (ret < 0) 2028 return ret; 2029 2030 ehdr = (struct ethhdr *) rx->skb->data; 2031 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2032 *port_control = true; 2033 else if (check_port_control) 2034 return -1; 2035 2036 return 0; 2037} 2038 2039/* 2040 * requires that rx->skb is a frame with ethernet header 2041 */ 2042static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2043{ 2044 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2045 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2046 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2047 2048 /* 2049 * Allow EAPOL frames to us/the PAE group address regardless 2050 * of whether the frame was encrypted or not. 
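 * This is what lets the 802.1X/EAPOL handshake complete before any
 * keys have been installed.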
2051 */ 2052 if (ehdr->h_proto == rx->sdata->control_port_protocol && 2053 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2054 ether_addr_equal(ehdr->h_dest, pae_group_addr))) 2055 return true; 2056 2057 if (ieee80211_802_1x_port_control(rx) || 2058 ieee80211_drop_unencrypted(rx, fc)) 2059 return false; 2060 2061 return true; 2062} 2063 2064/* 2065 * requires that rx->skb is a frame with ethernet header 2066 */ 2067static void 2068ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2069{ 2070 struct ieee80211_sub_if_data *sdata = rx->sdata; 2071 struct net_device *dev = sdata->dev; 2072 struct sk_buff *skb, *xmit_skb; 2073 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2074 struct sta_info *dsta; 2075 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2076 2077 dev->stats.rx_packets++; 2078 dev->stats.rx_bytes += rx->skb->len; 2079 2080 skb = rx->skb; 2081 xmit_skb = NULL; 2082 2083 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2084 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2085 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2086 (status->rx_flags & IEEE80211_RX_RA_MATCH) && 2087 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2088 if (is_multicast_ether_addr(ehdr->h_dest)) { 2089 /* 2090 * send multicast frames both to higher layers in 2091 * local net stack and back to the wireless medium 2092 */ 2093 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2094 if (!xmit_skb) 2095 net_info_ratelimited("%s: failed to clone multicast frame\n", 2096 dev->name); 2097 } else { 2098 dsta = sta_info_get(sdata, skb->data); 2099 if (dsta) { 2100 /* 2101 * The destination station is associated to 2102 * this AP (in this VLAN), so send the frame 2103 * directly to it and do not pass it to local 2104 * net stack. 2105 */ 2106 xmit_skb = skb; 2107 skb = NULL; 2108 } 2109 } 2110 } 2111 2112#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2113 if (skb) { 2114 /* 'align' will only take the values 0 or 2 here since all 2115 * frames are required to be aligned to 2-byte boundaries 2116 * when being passed to mac80211; the code here works just 2117 * as well if that isn't true, but mac80211 assumes it can 2118 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2119 */ 2120 int align; 2121 2122 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2123 if (align) { 2124 if (WARN_ON(skb_headroom(skb) < 3)) { 2125 dev_kfree_skb(skb); 2126 skb = NULL; 2127 } else { 2128 u8 *data = skb->data; 2129 size_t len = skb_headlen(skb); 2130 skb->data -= align; 2131 memmove(skb->data, data, len); 2132 skb_set_tail_pointer(skb, len); 2133 } 2134 } 2135 } 2136#endif 2137 2138 if (skb) { 2139 /* deliver to local stack */ 2140 skb->protocol = eth_type_trans(skb, dev); 2141 memset(skb->cb, 0, sizeof(skb->cb)); 2142 if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) && 2143 rx->local->napi) 2144 napi_gro_receive(rx->local->napi, skb); 2145 else 2146 netif_receive_skb(skb); 2147 } 2148 2149 if (xmit_skb) { 2150 /* 2151 * Send to wireless media and increase priority by 256 to 2152 * keep the received priority instead of reclassifying 2153 * the frame (see cfg80211_classify8021d). 
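 * (cfg80211_classify8021d treats priorities 256-263 as a direct 802.1d
 * priority and simply subtracts 256 instead of classifying again)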
2154 */ 2155 xmit_skb->priority += 256; 2156 xmit_skb->protocol = htons(ETH_P_802_3); 2157 skb_reset_network_header(xmit_skb); 2158 skb_reset_mac_header(xmit_skb); 2159 dev_queue_xmit(xmit_skb); 2160 } 2161} 2162 2163static ieee80211_rx_result debug_noinline 2164ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 2165{ 2166 struct net_device *dev = rx->sdata->dev; 2167 struct sk_buff *skb = rx->skb; 2168 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2169 __le16 fc = hdr->frame_control; 2170 struct sk_buff_head frame_list; 2171 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2172 2173 if (unlikely(!ieee80211_is_data(fc))) 2174 return RX_CONTINUE; 2175 2176 if (unlikely(!ieee80211_is_data_present(fc))) 2177 return RX_DROP_MONITOR; 2178 2179 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2180 return RX_CONTINUE; 2181 2182 if (ieee80211_has_a4(hdr->frame_control) && 2183 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2184 !rx->sdata->u.vlan.sta) 2185 return RX_DROP_UNUSABLE; 2186 2187 if (is_multicast_ether_addr(hdr->addr1) && 2188 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2189 rx->sdata->u.vlan.sta) || 2190 (rx->sdata->vif.type == NL80211_IFTYPE_STATION && 2191 rx->sdata->u.mgd.use_4addr))) 2192 return RX_DROP_UNUSABLE; 2193 2194 skb->dev = dev; 2195 __skb_queue_head_init(&frame_list); 2196 2197 if (skb_linearize(skb)) 2198 return RX_DROP_UNUSABLE; 2199 2200 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2201 rx->sdata->vif.type, 2202 rx->local->hw.extra_tx_headroom, true); 2203 2204 while (!skb_queue_empty(&frame_list)) { 2205 rx->skb = __skb_dequeue(&frame_list); 2206 2207 if (!ieee80211_frame_allowed(rx, fc)) { 2208 dev_kfree_skb(rx->skb); 2209 continue; 2210 } 2211 2212 ieee80211_deliver_skb(rx); 2213 } 2214 2215 return RX_QUEUED; 2216} 2217 2218#ifdef CONFIG_MAC80211_MESH 2219static ieee80211_rx_result 2220ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2221{ 2222 struct ieee80211_hdr *fwd_hdr, *hdr; 2223 struct ieee80211_tx_info *info; 2224 struct ieee80211s_hdr *mesh_hdr; 2225 struct sk_buff *skb = rx->skb, *fwd_skb; 2226 struct ieee80211_local *local = rx->local; 2227 struct ieee80211_sub_if_data *sdata = rx->sdata; 2228 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2229 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2230 u16 q, hdrlen; 2231 2232 hdr = (struct ieee80211_hdr *) skb->data; 2233 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2234 2235 /* make sure fixed part of mesh header is there, also checks skb len */ 2236 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2237 return RX_DROP_MONITOR; 2238 2239 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2240 2241 /* make sure full mesh header is there, also checks skb len */ 2242 if (!pskb_may_pull(rx->skb, 2243 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2244 return RX_DROP_MONITOR; 2245 2246 /* reload pointers */ 2247 hdr = (struct ieee80211_hdr *) skb->data; 2248 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2249 2250 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2251 return RX_DROP_MONITOR; 2252 2253 /* frame is in RMC, don't forward */ 2254 if (ieee80211_is_data(hdr->frame_control) && 2255 is_multicast_ether_addr(hdr->addr1) && 2256 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2257 return RX_DROP_MONITOR; 2258 2259 if (!ieee80211_is_data(hdr->frame_control) || 2260 !(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2261 return RX_CONTINUE; 2262 2263 if (!mesh_hdr->ttl) 2264 return RX_DROP_MONITOR; 2265 2266 if 
(mesh_hdr->flags & MESH_FLAGS_AE) { 2267 struct mesh_path *mppath; 2268 char *proxied_addr; 2269 char *mpp_addr; 2270 2271 if (is_multicast_ether_addr(hdr->addr1)) { 2272 mpp_addr = hdr->addr3; 2273 proxied_addr = mesh_hdr->eaddr1; 2274 } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { 2275 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2276 mpp_addr = hdr->addr4; 2277 proxied_addr = mesh_hdr->eaddr2; 2278 } else { 2279 return RX_DROP_MONITOR; 2280 } 2281 2282 rcu_read_lock(); 2283 mppath = mpp_path_lookup(sdata, proxied_addr); 2284 if (!mppath) { 2285 mpp_path_add(sdata, proxied_addr, mpp_addr); 2286 } else { 2287 spin_lock_bh(&mppath->state_lock); 2288 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2289 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2290 spin_unlock_bh(&mppath->state_lock); 2291 } 2292 rcu_read_unlock(); 2293 } 2294 2295 /* Frame has reached destination. Don't forward */ 2296 if (!is_multicast_ether_addr(hdr->addr1) && 2297 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2298 return RX_CONTINUE; 2299 2300 q = ieee80211_select_queue_80211(sdata, skb, hdr); 2301 if (ieee80211_queue_stopped(&local->hw, q)) { 2302 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2303 return RX_DROP_MONITOR; 2304 } 2305 skb_set_queue_mapping(skb, q); 2306 2307 if (!--mesh_hdr->ttl) { 2308 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 2309 goto out; 2310 } 2311 2312 if (!ifmsh->mshcfg.dot11MeshForwarding) 2313 goto out; 2314 2315 fwd_skb = skb_copy(skb, GFP_ATOMIC); 2316 if (!fwd_skb) { 2317 net_info_ratelimited("%s: failed to clone mesh frame\n", 2318 sdata->name); 2319 goto out; 2320 } 2321 2322 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2323 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2324 info = IEEE80211_SKB_CB(fwd_skb); 2325 memset(info, 0, sizeof(*info)); 2326 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 2327 info->control.vif = &rx->sdata->vif; 2328 info->control.jiffies = jiffies; 2329 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2330 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2331 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2332 /* update power mode indication when forwarding */ 2333 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2334 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2335 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2336 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2337 } else { 2338 /* unable to resolve next hop */ 2339 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2340 fwd_hdr->addr3, 0, 2341 WLAN_REASON_MESH_PATH_NOFORWARD, 2342 fwd_hdr->addr2); 2343 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2344 kfree_skb(fwd_skb); 2345 return RX_DROP_MONITOR; 2346 } 2347 2348 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2349 ieee80211_add_pending_skb(local, fwd_skb); 2350 out: 2351 if (is_multicast_ether_addr(hdr->addr1) || 2352 sdata->dev->flags & IFF_PROMISC) 2353 return RX_CONTINUE; 2354 else 2355 return RX_DROP_MONITOR; 2356} 2357#endif 2358 2359static ieee80211_rx_result debug_noinline 2360ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2361{ 2362 struct ieee80211_sub_if_data *sdata = rx->sdata; 2363 struct ieee80211_local *local = rx->local; 2364 struct net_device *dev = sdata->dev; 2365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2366 __le16 fc = hdr->frame_control; 2367 bool port_control; 2368 int err; 2369 2370 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2371 return RX_CONTINUE; 2372 2373 if 
(unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2374 return RX_DROP_MONITOR; 2375 2376 if (rx->sta) { 2377 /* The seqno index has the same property as needed 2378 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2379 * for non-QoS-data frames. Here we know it's a data 2380 * frame, so count MSDUs. 2381 */ 2382 rx->sta->rx_msdu[rx->seqno_idx]++; 2383 } 2384 2385 /* 2386 * Send unexpected-4addr-frame event to hostapd. For older versions, 2387 * also drop the frame to cooked monitor interfaces. 2388 */ 2389 if (ieee80211_has_a4(hdr->frame_control) && 2390 sdata->vif.type == NL80211_IFTYPE_AP) { 2391 if (rx->sta && 2392 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2393 cfg80211_rx_unexpected_4addr_frame( 2394 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2395 return RX_DROP_MONITOR; 2396 } 2397 2398 err = __ieee80211_data_to_8023(rx, &port_control); 2399 if (unlikely(err)) 2400 return RX_DROP_UNUSABLE; 2401 2402 if (!ieee80211_frame_allowed(rx, fc)) 2403 return RX_DROP_MONITOR; 2404 2405 /* directly handle TDLS channel switch requests/responses */ 2406 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 2407 cpu_to_be16(ETH_P_TDLS))) { 2408 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 2409 2410 if (pskb_may_pull(rx->skb, 2411 offsetof(struct ieee80211_tdls_data, u)) && 2412 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 2413 tf->category == WLAN_CATEGORY_TDLS && 2414 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 2415 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 2416 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW; 2417 skb_queue_tail(&sdata->skb_queue, rx->skb); 2418 ieee80211_queue_work(&rx->local->hw, &sdata->work); 2419 if (rx->sta) 2420 rx->sta->rx_packets++; 2421 2422 return RX_QUEUED; 2423 } 2424 } 2425 2426 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2427 unlikely(port_control) && sdata->bss) { 2428 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2429 u.ap); 2430 dev = sdata->dev; 2431 rx->sdata = sdata; 2432 } 2433 2434 rx->skb->dev = dev; 2435 2436 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2437 !is_multicast_ether_addr( 2438 ((struct ethhdr *)rx->skb->data)->h_dest) && 2439 (!local->scanning && 2440 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { 2441 mod_timer(&local->dynamic_ps_timer, jiffies + 2442 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2443 } 2444 2445 ieee80211_deliver_skb(rx); 2446 2447 return RX_QUEUED; 2448} 2449 2450static ieee80211_rx_result debug_noinline 2451ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 2452{ 2453 struct sk_buff *skb = rx->skb; 2454 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2455 struct tid_ampdu_rx *tid_agg_rx; 2456 u16 start_seq_num; 2457 u16 tid; 2458 2459 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2460 return RX_CONTINUE; 2461 2462 if (ieee80211_is_back_req(bar->frame_control)) { 2463 struct { 2464 __le16 control, start_seq_num; 2465 } __packed bar_data; 2466 2467 if (!rx->sta) 2468 return RX_DROP_MONITOR; 2469 2470 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2471 &bar_data, sizeof(bar_data))) 2472 return RX_DROP_MONITOR; 2473 2474 tid = le16_to_cpu(bar_data.control) >> 12; 2475 2476 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2477 if (!tid_agg_rx) 2478 return RX_DROP_MONITOR; 2479 2480 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2481 2482 /* reset session timer */ 2483 if (tid_agg_rx->timeout) 2484 
mod_timer(&tid_agg_rx->session_timer, 2485 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2486 2487 spin_lock(&tid_agg_rx->reorder_lock); 2488 /* release stored frames up to start of BAR */ 2489 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2490 start_seq_num, frames); 2491 spin_unlock(&tid_agg_rx->reorder_lock); 2492 2493 kfree_skb(skb); 2494 return RX_QUEUED; 2495 } 2496 2497 /* 2498 * After this point, we only want management frames, 2499 * so we can drop all remaining control frames to 2500 * cooked monitor interfaces. 2501 */ 2502 return RX_DROP_MONITOR; 2503} 2504 2505static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2506 struct ieee80211_mgmt *mgmt, 2507 size_t len) 2508{ 2509 struct ieee80211_local *local = sdata->local; 2510 struct sk_buff *skb; 2511 struct ieee80211_mgmt *resp; 2512 2513 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 2514 /* Not to own unicast address */ 2515 return; 2516 } 2517 2518 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 2519 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 2520 /* Not from the current AP or not associated yet. */ 2521 return; 2522 } 2523 2524 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2525 /* Too short SA Query request frame */ 2526 return; 2527 } 2528 2529 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2530 if (skb == NULL) 2531 return; 2532 2533 skb_reserve(skb, local->hw.extra_tx_headroom); 2534 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 2535 memset(resp, 0, 24); 2536 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2537 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2538 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2539 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2540 IEEE80211_STYPE_ACTION); 2541 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2542 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2543 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2544 memcpy(resp->u.action.u.sa_query.trans_id, 2545 mgmt->u.action.u.sa_query.trans_id, 2546 WLAN_SA_QUERY_TR_ID_LEN); 2547 2548 ieee80211_tx_skb(sdata, skb); 2549} 2550 2551static ieee80211_rx_result debug_noinline 2552ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2553{ 2554 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2555 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2556 2557 /* 2558 * From here on, look only at management frames. 2559 * Data and control frames are already handled, 2560 * and unknown (reserved) frames are useless. 
2561 */ 2562 if (rx->skb->len < 24) 2563 return RX_DROP_MONITOR; 2564 2565 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2566 return RX_DROP_MONITOR; 2567 2568 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2569 ieee80211_is_beacon(mgmt->frame_control) && 2570 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2571 int sig = 0; 2572 2573 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 2574 sig = status->signal; 2575 2576 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2577 rx->skb->data, rx->skb->len, 2578 status->freq, sig); 2579 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2580 } 2581 2582 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2583 return RX_DROP_MONITOR; 2584 2585 if (ieee80211_drop_unencrypted_mgmt(rx)) 2586 return RX_DROP_UNUSABLE; 2587 2588 return RX_CONTINUE; 2589} 2590 2591static ieee80211_rx_result debug_noinline 2592ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2593{ 2594 struct ieee80211_local *local = rx->local; 2595 struct ieee80211_sub_if_data *sdata = rx->sdata; 2596 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2597 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2598 int len = rx->skb->len; 2599 2600 if (!ieee80211_is_action(mgmt->frame_control)) 2601 return RX_CONTINUE; 2602 2603 /* drop too small frames */ 2604 if (len < IEEE80211_MIN_ACTION_SIZE) 2605 return RX_DROP_UNUSABLE; 2606 2607 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 2608 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 2609 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 2610 return RX_DROP_UNUSABLE; 2611 2612 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2613 return RX_DROP_UNUSABLE; 2614 2615 switch (mgmt->u.action.category) { 2616 case WLAN_CATEGORY_HT: 2617 /* reject HT action frames from stations not supporting HT */ 2618 if (!rx->sta->sta.ht_cap.ht_supported) 2619 goto invalid; 2620 2621 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2622 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2623 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2624 sdata->vif.type != NL80211_IFTYPE_AP && 2625 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2626 break; 2627 2628 /* verify action & smps_control/chanwidth are present */ 2629 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2630 goto invalid; 2631 2632 switch (mgmt->u.action.u.ht_smps.action) { 2633 case WLAN_HT_ACTION_SMPS: { 2634 struct ieee80211_supported_band *sband; 2635 enum ieee80211_smps_mode smps_mode; 2636 2637 /* convert to HT capability */ 2638 switch (mgmt->u.action.u.ht_smps.smps_control) { 2639 case WLAN_HT_SMPS_CONTROL_DISABLED: 2640 smps_mode = IEEE80211_SMPS_OFF; 2641 break; 2642 case WLAN_HT_SMPS_CONTROL_STATIC: 2643 smps_mode = IEEE80211_SMPS_STATIC; 2644 break; 2645 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 2646 smps_mode = IEEE80211_SMPS_DYNAMIC; 2647 break; 2648 default: 2649 goto invalid; 2650 } 2651 2652 /* if no change do nothing */ 2653 if (rx->sta->sta.smps_mode == smps_mode) 2654 goto handled; 2655 rx->sta->sta.smps_mode = smps_mode; 2656 2657 sband = rx->local->hw.wiphy->bands[status->band]; 2658 2659 rate_control_rate_update(local, sband, rx->sta, 2660 IEEE80211_RC_SMPS_CHANGED); 2661 goto handled; 2662 } 2663 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 2664 struct ieee80211_supported_band *sband; 2665 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 2666 enum ieee80211_sta_rx_bandwidth max_bw, new_bw; 2667 2668 /* If it doesn't support 40 MHz it can't change ... 
*/ 2669 if (!(rx->sta->sta.ht_cap.cap & 2670 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 2671 goto handled; 2672 2673 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 2674 max_bw = IEEE80211_STA_RX_BW_20; 2675 else 2676 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 2677 2678 /* set cur_max_bandwidth and recalc sta bw */ 2679 rx->sta->cur_max_bandwidth = max_bw; 2680 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 2681 2682 if (rx->sta->sta.bandwidth == new_bw) 2683 goto handled; 2684 2685 rx->sta->sta.bandwidth = new_bw; 2686 sband = rx->local->hw.wiphy->bands[status->band]; 2687 2688 rate_control_rate_update(local, sband, rx->sta, 2689 IEEE80211_RC_BW_CHANGED); 2690 goto handled; 2691 } 2692 default: 2693 goto invalid; 2694 } 2695 2696 break; 2697 case WLAN_CATEGORY_PUBLIC: 2698 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2699 goto invalid; 2700 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2701 break; 2702 if (!rx->sta) 2703 break; 2704 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 2705 break; 2706 if (mgmt->u.action.u.ext_chan_switch.action_code != 2707 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 2708 break; 2709 if (len < offsetof(struct ieee80211_mgmt, 2710 u.action.u.ext_chan_switch.variable)) 2711 goto invalid; 2712 goto queue; 2713 case WLAN_CATEGORY_VHT: 2714 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2715 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2716 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2717 sdata->vif.type != NL80211_IFTYPE_AP && 2718 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2719 break; 2720 2721 /* verify action code is present */ 2722 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2723 goto invalid; 2724 2725 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 2726 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 2727 u8 opmode; 2728 2729 /* verify opmode is present */ 2730 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2731 goto invalid; 2732 2733 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; 2734 2735 ieee80211_vht_handle_opmode(rx->sdata, rx->sta, 2736 opmode, status->band, 2737 false); 2738 goto handled; 2739 } 2740 default: 2741 break; 2742 } 2743 break; 2744 case WLAN_CATEGORY_BACK: 2745 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2746 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2747 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2748 sdata->vif.type != NL80211_IFTYPE_AP && 2749 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2750 break; 2751 2752 /* verify action_code is present */ 2753 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2754 break; 2755 2756 switch (mgmt->u.action.u.addba_req.action_code) { 2757 case WLAN_ACTION_ADDBA_REQ: 2758 if (len < (IEEE80211_MIN_ACTION_SIZE + 2759 sizeof(mgmt->u.action.u.addba_req))) 2760 goto invalid; 2761 break; 2762 case WLAN_ACTION_ADDBA_RESP: 2763 if (len < (IEEE80211_MIN_ACTION_SIZE + 2764 sizeof(mgmt->u.action.u.addba_resp))) 2765 goto invalid; 2766 break; 2767 case WLAN_ACTION_DELBA: 2768 if (len < (IEEE80211_MIN_ACTION_SIZE + 2769 sizeof(mgmt->u.action.u.delba))) 2770 goto invalid; 2771 break; 2772 default: 2773 goto invalid; 2774 } 2775 2776 goto queue; 2777 case WLAN_CATEGORY_SPECTRUM_MGMT: 2778 /* verify action_code is present */ 2779 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2780 break; 2781 2782 switch (mgmt->u.action.u.measurement.action_code) { 2783 case WLAN_ACTION_SPCT_MSR_REQ: 2784 if (status->band != IEEE80211_BAND_5GHZ) 2785 break; 2786 2787 if (len < (IEEE80211_MIN_ACTION_SIZE + 2788 sizeof(mgmt->u.action.u.measurement))) 2789 break; 2790 2791 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2792 break; 2793 2794 
ieee80211_process_measurement_req(sdata, mgmt, len); 2795 goto handled; 2796 case WLAN_ACTION_SPCT_CHL_SWITCH: { 2797 u8 *bssid; 2798 if (len < (IEEE80211_MIN_ACTION_SIZE + 2799 sizeof(mgmt->u.action.u.chan_switch))) 2800 break; 2801 2802 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2803 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2804 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 2805 break; 2806 2807 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2808 bssid = sdata->u.mgd.bssid; 2809 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 2810 bssid = sdata->u.ibss.bssid; 2811 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 2812 bssid = mgmt->sa; 2813 else 2814 break; 2815 2816 if (!ether_addr_equal(mgmt->bssid, bssid)) 2817 break; 2818 2819 goto queue; 2820 } 2821 } 2822 break; 2823 case WLAN_CATEGORY_SA_QUERY: 2824 if (len < (IEEE80211_MIN_ACTION_SIZE + 2825 sizeof(mgmt->u.action.u.sa_query))) 2826 break; 2827 2828 switch (mgmt->u.action.u.sa_query.action) { 2829 case WLAN_ACTION_SA_QUERY_REQUEST: 2830 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2831 break; 2832 ieee80211_process_sa_query_req(sdata, mgmt, len); 2833 goto handled; 2834 } 2835 break; 2836 case WLAN_CATEGORY_SELF_PROTECTED: 2837 if (len < (IEEE80211_MIN_ACTION_SIZE + 2838 sizeof(mgmt->u.action.u.self_prot.action_code))) 2839 break; 2840 2841 switch (mgmt->u.action.u.self_prot.action_code) { 2842 case WLAN_SP_MESH_PEERING_OPEN: 2843 case WLAN_SP_MESH_PEERING_CLOSE: 2844 case WLAN_SP_MESH_PEERING_CONFIRM: 2845 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2846 goto invalid; 2847 if (sdata->u.mesh.user_mpm) 2848 /* userspace handles this frame */ 2849 break; 2850 goto queue; 2851 case WLAN_SP_MGK_INFORM: 2852 case WLAN_SP_MGK_ACK: 2853 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2854 goto invalid; 2855 break; 2856 } 2857 break; 2858 case WLAN_CATEGORY_MESH_ACTION: 2859 if (len < (IEEE80211_MIN_ACTION_SIZE + 2860 sizeof(mgmt->u.action.u.mesh_action.action_code))) 2861 break; 2862 2863 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2864 break; 2865 if (mesh_action_is_path_sel(mgmt) && 2866 !mesh_path_sel_is_hwmp(sdata)) 2867 break; 2868 goto queue; 2869 } 2870 2871 return RX_CONTINUE; 2872 2873 invalid: 2874 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 2875 /* will return in the next handlers */ 2876 return RX_CONTINUE; 2877 2878 handled: 2879 if (rx->sta) 2880 rx->sta->rx_packets++; 2881 dev_kfree_skb(rx->skb); 2882 return RX_QUEUED; 2883 2884 queue: 2885 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2886 skb_queue_tail(&sdata->skb_queue, rx->skb); 2887 ieee80211_queue_work(&local->hw, &sdata->work); 2888 if (rx->sta) 2889 rx->sta->rx_packets++; 2890 return RX_QUEUED; 2891} 2892 2893static ieee80211_rx_result debug_noinline 2894ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2895{ 2896 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2897 int sig = 0; 2898 2899 /* skip known-bad action frames and return them in the next handler */ 2900 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2901 return RX_CONTINUE; 2902 2903 /* 2904 * Getting here means the kernel doesn't know how to handle 2905 * it, but maybe userspace does ... include returned frames 2906 * so userspace can register for those to know whether ones 2907 * it transmitted were processed or returned. 
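 * cfg80211_rx_mgmt() only consumes the frame if some userspace process
 * has registered for this frame type; otherwise we continue with the
 * remaining handlers.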
2908 */ 2909 2910 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 2911 sig = status->signal; 2912 2913 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, 2914 rx->skb->data, rx->skb->len, 0)) { 2915 if (rx->sta) 2916 rx->sta->rx_packets++; 2917 dev_kfree_skb(rx->skb); 2918 return RX_QUEUED; 2919 } 2920 2921 return RX_CONTINUE; 2922} 2923 2924static ieee80211_rx_result debug_noinline 2925ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 2926{ 2927 struct ieee80211_local *local = rx->local; 2928 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2929 struct sk_buff *nskb; 2930 struct ieee80211_sub_if_data *sdata = rx->sdata; 2931 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2932 2933 if (!ieee80211_is_action(mgmt->frame_control)) 2934 return RX_CONTINUE; 2935 2936 /* 2937 * For AP mode, hostapd is responsible for handling any action 2938 * frames that we didn't handle, including returning unknown 2939 * ones. For all other modes we will return them to the sender, 2940 * setting the 0x80 bit in the action category, as required by 2941 * 802.11-2012 9.24.4. 2942 * Newer versions of hostapd shall also use the management frame 2943 * registration mechanisms, but older ones still use cooked 2944 * monitor interfaces so push all frames there. 2945 */ 2946 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 2947 (sdata->vif.type == NL80211_IFTYPE_AP || 2948 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2949 return RX_DROP_MONITOR; 2950 2951 if (is_multicast_ether_addr(mgmt->da)) 2952 return RX_DROP_MONITOR; 2953 2954 /* do not return rejected action frames */ 2955 if (mgmt->u.action.category & 0x80) 2956 return RX_DROP_UNUSABLE; 2957 2958 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2959 GFP_ATOMIC); 2960 if (nskb) { 2961 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 2962 2963 nmgmt->u.action.category |= 0x80; 2964 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 2965 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2966 2967 memset(nskb->cb, 0, sizeof(nskb->cb)); 2968 2969 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 2970 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 2971 2972 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 2973 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 2974 IEEE80211_TX_CTL_NO_CCK_RATE; 2975 if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) 2976 info->hw_queue = 2977 local->hw.offchannel_tx_hw_queue; 2978 } 2979 2980 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 2981 status->band); 2982 } 2983 dev_kfree_skb(rx->skb); 2984 return RX_QUEUED; 2985} 2986 2987static ieee80211_rx_result debug_noinline 2988ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2989{ 2990 struct ieee80211_sub_if_data *sdata = rx->sdata; 2991 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2992 __le16 stype; 2993 2994 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2995 2996 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2997 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2998 sdata->vif.type != NL80211_IFTYPE_OCB && 2999 sdata->vif.type != NL80211_IFTYPE_STATION) 3000 return RX_DROP_MONITOR; 3001 3002 switch (stype) { 3003 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3004 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3005 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3006 /* process for all: mesh, mlme, ibss */ 3007 break; 3008 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3009 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3010 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3011 case 
cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3012 if (is_multicast_ether_addr(mgmt->da) && 3013 !is_broadcast_ether_addr(mgmt->da)) 3014 return RX_DROP_MONITOR; 3015 3016 /* process only for station */ 3017 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3018 return RX_DROP_MONITOR; 3019 break; 3020 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3021 /* process only for ibss and mesh */ 3022 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 3023 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3024 return RX_DROP_MONITOR; 3025 break; 3026 default: 3027 return RX_DROP_MONITOR; 3028 } 3029 3030 /* queue up frame and kick off work to process it */ 3031 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 3032 skb_queue_tail(&sdata->skb_queue, rx->skb); 3033 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3034 if (rx->sta) 3035 rx->sta->rx_packets++; 3036 3037 return RX_QUEUED; 3038} 3039 3040/* TODO: use IEEE80211_RX_FRAGMENTED */ 3041static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3042 struct ieee80211_rate *rate) 3043{ 3044 struct ieee80211_sub_if_data *sdata; 3045 struct ieee80211_local *local = rx->local; 3046 struct sk_buff *skb = rx->skb, *skb2; 3047 struct net_device *prev_dev = NULL; 3048 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3049 int needed_headroom; 3050 3051 /* 3052 * If cooked monitor has been processed already, then 3053 * don't do it again. If not, set the flag. 3054 */ 3055 if (rx->flags & IEEE80211_RX_CMNTR) 3056 goto out_free_skb; 3057 rx->flags |= IEEE80211_RX_CMNTR; 3058 3059 /* If there are no cooked monitor interfaces, just free the SKB */ 3060 if (!local->cooked_mntrs) 3061 goto out_free_skb; 3062 3063 /* vendor data is long removed here */ 3064 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3065 /* room for the radiotap header based on driver features */ 3066 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3067 3068 if (skb_headroom(skb) < needed_headroom && 3069 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3070 goto out_free_skb; 3071 3072 /* prepend radiotap information */ 3073 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3074 false); 3075 3076 skb_set_mac_header(skb, 0); 3077 skb->ip_summed = CHECKSUM_UNNECESSARY; 3078 skb->pkt_type = PACKET_OTHERHOST; 3079 skb->protocol = htons(ETH_P_802_2); 3080 3081 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3082 if (!ieee80211_sdata_running(sdata)) 3083 continue; 3084 3085 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3086 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 3087 continue; 3088 3089 if (prev_dev) { 3090 skb2 = skb_clone(skb, GFP_ATOMIC); 3091 if (skb2) { 3092 skb2->dev = prev_dev; 3093 netif_receive_skb(skb2); 3094 } 3095 } 3096 3097 prev_dev = sdata->dev; 3098 sdata->dev->stats.rx_packets++; 3099 sdata->dev->stats.rx_bytes += skb->len; 3100 } 3101 3102 if (prev_dev) { 3103 skb->dev = prev_dev; 3104 netif_receive_skb(skb); 3105 return; 3106 } 3107 3108 out_free_skb: 3109 dev_kfree_skb(skb); 3110} 3111 3112static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3113 ieee80211_rx_result res) 3114{ 3115 switch (res) { 3116 case RX_DROP_MONITOR: 3117 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3118 if (rx->sta) 3119 rx->sta->rx_dropped++; 3120 /* fall through */ 3121 case RX_CONTINUE: { 3122 struct ieee80211_rate *rate = NULL; 3123 struct ieee80211_supported_band *sband; 3124 struct ieee80211_rx_status *status; 3125 3126 status = IEEE80211_SKB_RXCB((rx->skb)); 3127 3128 sband = 
rx->local->hw.wiphy->bands[status->band]; 3129 if (!(status->flag & RX_FLAG_HT) && 3130 !(status->flag & RX_FLAG_VHT)) 3131 rate = &sband->bitrates[status->rate_idx]; 3132 3133 ieee80211_rx_cooked_monitor(rx, rate); 3134 break; 3135 } 3136 case RX_DROP_UNUSABLE: 3137 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3138 if (rx->sta) 3139 rx->sta->rx_dropped++; 3140 dev_kfree_skb(rx->skb); 3141 break; 3142 case RX_QUEUED: 3143 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3144 break; 3145 } 3146} 3147 3148static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3149 struct sk_buff_head *frames) 3150{ 3151 ieee80211_rx_result res = RX_DROP_MONITOR; 3152 struct sk_buff *skb; 3153 3154#define CALL_RXH(rxh) \ 3155 do { \ 3156 res = rxh(rx); \ 3157 if (res != RX_CONTINUE) \ 3158 goto rxh_next; \ 3159 } while (0); 3160 3161 /* Lock here to avoid hitting all of the data used in the RX 3162 * path (e.g. key data, station data, ...) concurrently when 3163 * a frame is released from the reorder buffer due to timeout 3164 * from the timer, potentially concurrently with RX from the 3165 * driver. 3166 */ 3167 spin_lock_bh(&rx->local->rx_path_lock); 3168 3169 while ((skb = __skb_dequeue(frames))) { 3170 /* 3171 * all the other fields are valid across frames 3172 * that belong to an aMPDU since they are on the 3173 * same TID from the same station 3174 */ 3175 rx->skb = skb; 3176 3177 CALL_RXH(ieee80211_rx_h_check_more_data) 3178 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) 3179 CALL_RXH(ieee80211_rx_h_sta_process) 3180 CALL_RXH(ieee80211_rx_h_decrypt) 3181 CALL_RXH(ieee80211_rx_h_defragment) 3182 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 3183 /* must be after MMIC verify so header is counted in MPDU mic */ 3184#ifdef CONFIG_MAC80211_MESH 3185 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3186 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3187#endif 3188 CALL_RXH(ieee80211_rx_h_amsdu) 3189 CALL_RXH(ieee80211_rx_h_data) 3190 3191 /* special treatment -- needs the queue */ 3192 res = ieee80211_rx_h_ctrl(rx, frames); 3193 if (res != RX_CONTINUE) 3194 goto rxh_next; 3195 3196 CALL_RXH(ieee80211_rx_h_mgmt_check) 3197 CALL_RXH(ieee80211_rx_h_action) 3198 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 3199 CALL_RXH(ieee80211_rx_h_action_return) 3200 CALL_RXH(ieee80211_rx_h_mgmt) 3201 3202 rxh_next: 3203 ieee80211_rx_handlers_result(rx, res); 3204 3205#undef CALL_RXH 3206 } 3207 3208 spin_unlock_bh(&rx->local->rx_path_lock); 3209} 3210 3211static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3212{ 3213 struct sk_buff_head reorder_release; 3214 ieee80211_rx_result res = RX_DROP_MONITOR; 3215 3216 __skb_queue_head_init(&reorder_release); 3217 3218#define CALL_RXH(rxh) \ 3219 do { \ 3220 res = rxh(rx); \ 3221 if (res != RX_CONTINUE) \ 3222 goto rxh_next; \ 3223 } while (0); 3224 3225 CALL_RXH(ieee80211_rx_h_check_dup) 3226 CALL_RXH(ieee80211_rx_h_check) 3227 3228 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3229 3230 ieee80211_rx_handlers(rx, &reorder_release); 3231 return; 3232 3233 rxh_next: 3234 ieee80211_rx_handlers_result(rx, res); 3235 3236#undef CALL_RXH 3237} 3238 3239/* 3240 * This function makes calls into the RX path, therefore 3241 * it has to be invoked under RCU read lock. 
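 * (the A-MPDU reorder timeout timer takes rcu_read_lock() before
 * calling in here)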
3242 */ 3243void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3244{ 3245 struct sk_buff_head frames; 3246 struct ieee80211_rx_data rx = { 3247 .sta = sta, 3248 .sdata = sta->sdata, 3249 .local = sta->local, 3250 /* This is OK -- must be QoS data frame */ 3251 .security_idx = tid, 3252 .seqno_idx = tid, 3253 .flags = IEEE80211_RX_REORDER_TIMER, 3254 }; 3255 struct tid_ampdu_rx *tid_agg_rx; 3256 3257 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3258 if (!tid_agg_rx) 3259 return; 3260 3261 __skb_queue_head_init(&frames); 3262 3263 spin_lock(&tid_agg_rx->reorder_lock); 3264 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3265 spin_unlock(&tid_agg_rx->reorder_lock); 3266 3267 ieee80211_rx_handlers(&rx, &frames); 3268} 3269 3270/* main receive path */ 3271 3272static bool prepare_for_handlers(struct ieee80211_rx_data *rx, 3273 struct ieee80211_hdr *hdr) 3274{ 3275 struct ieee80211_sub_if_data *sdata = rx->sdata; 3276 struct sk_buff *skb = rx->skb; 3277 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3278 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 3279 int multicast = is_multicast_ether_addr(hdr->addr1); 3280 3281 switch (sdata->vif.type) { 3282 case NL80211_IFTYPE_STATION: 3283 if (!bssid && !sdata->u.mgd.use_4addr) 3284 return false; 3285 if (!multicast && 3286 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { 3287 if (!(sdata->dev->flags & IFF_PROMISC) || 3288 sdata->u.mgd.use_4addr) 3289 return false; 3290 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 3291 } 3292 break; 3293 case NL80211_IFTYPE_ADHOC: 3294 if (!bssid) 3295 return false; 3296 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 3297 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 3298 return false; 3299 if (ieee80211_is_beacon(hdr->frame_control)) { 3300 return true; 3301 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { 3302 return false; 3303 } else if (!multicast && 3304 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { 3305 if (!(sdata->dev->flags & IFF_PROMISC)) 3306 return false; 3307 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 3308 } else if (!rx->sta) { 3309 int rate_idx; 3310 if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) 3311 rate_idx = 0; /* TODO: HT/VHT rates */ 3312 else 3313 rate_idx = status->rate_idx; 3314 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 3315 BIT(rate_idx)); 3316 } 3317 break; 3318 case NL80211_IFTYPE_OCB: 3319 if (!bssid) 3320 return false; 3321 if (ieee80211_is_beacon(hdr->frame_control)) { 3322 return false; 3323 } else if (!is_broadcast_ether_addr(bssid)) { 3324 ocb_dbg(sdata, "BSSID mismatch in OCB mode!\n"); 3325 return false; 3326 } else if (!multicast && 3327 !ether_addr_equal(sdata->dev->dev_addr, 3328 hdr->addr1)) { 3329 /* if we are in promisc mode we also accept 3330 * packets not destined for us 3331 */ 3332 if (!(sdata->dev->flags & IFF_PROMISC)) 3333 return false; 3334 rx->flags &= ~IEEE80211_RX_RA_MATCH; 3335 } else if (!rx->sta) { 3336 int rate_idx; 3337 if (status->flag & RX_FLAG_HT) 3338 rate_idx = 0; /* TODO: HT rates */ 3339 else 3340 rate_idx = status->rate_idx; 3341 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 3342 BIT(rate_idx)); 3343 } 3344 break; 3345 case NL80211_IFTYPE_MESH_POINT: 3346 if (!multicast && 3347 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { 3348 if (!(sdata->dev->flags & IFF_PROMISC)) 3349 return false; 3350 3351 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 3352 } 3353 break; 3354 case NL80211_IFTYPE_AP_VLAN: 3355 case NL80211_IFTYPE_AP: 3356 
if (!bssid) { 3357 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3358 return false; 3359 } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 3360 /* 3361 * Accept public action frames even when the 3362 * BSSID doesn't match, this is used for P2P 3363 * and location updates. Note that mac80211 3364 * itself never looks at these frames. 3365 */ 3366 if (!multicast && 3367 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3368 return false; 3369 if (ieee80211_is_public_action(hdr, skb->len)) 3370 return true; 3371 if (!ieee80211_is_beacon(hdr->frame_control)) 3372 return false; 3373 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 3374 } else if (!ieee80211_has_tods(hdr->frame_control)) { 3375 /* ignore data frames to TDLS-peers */ 3376 if (ieee80211_is_data(hdr->frame_control)) 3377 return false; 3378 /* ignore action frames to TDLS-peers */ 3379 if (ieee80211_is_action(hdr->frame_control) && 3380 !is_broadcast_ether_addr(bssid) && 3381 !ether_addr_equal(bssid, hdr->addr1)) 3382 return false; 3383 } 3384 break; 3385 case NL80211_IFTYPE_WDS: 3386 if (bssid || !ieee80211_is_data(hdr->frame_control)) 3387 return false; 3388 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2)) 3389 return false; 3390 break; 3391 case NL80211_IFTYPE_P2P_DEVICE: 3392 if (!ieee80211_is_public_action(hdr, skb->len) && 3393 !ieee80211_is_probe_req(hdr->frame_control) && 3394 !ieee80211_is_probe_resp(hdr->frame_control) && 3395 !ieee80211_is_beacon(hdr->frame_control)) 3396 return false; 3397 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1) && 3398 !multicast) 3399 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 3400 break; 3401 default: 3402 /* should never get here */ 3403 WARN_ON_ONCE(1); 3404 break; 3405 } 3406 3407 return true; 3408} 3409 3410/* 3411 * This function returns whether or not the SKB 3412 * was destined for RX processing or not, which, 3413 * if consume is true, is equivalent to whether 3414 * or not the skb was consumed. 3415 */ 3416static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 3417 struct sk_buff *skb, bool consume) 3418{ 3419 struct ieee80211_local *local = rx->local; 3420 struct ieee80211_sub_if_data *sdata = rx->sdata; 3421 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3422 struct ieee80211_hdr *hdr = (void *)skb->data; 3423 3424 rx->skb = skb; 3425 status->rx_flags |= IEEE80211_RX_RA_MATCH; 3426 3427 if (!prepare_for_handlers(rx, hdr)) 3428 return false; 3429 3430 if (!consume) { 3431 skb = skb_copy(skb, GFP_ATOMIC); 3432 if (!skb) { 3433 if (net_ratelimit()) 3434 wiphy_debug(local->hw.wiphy, 3435 "failed to copy skb for %s\n", 3436 sdata->name); 3437 return true; 3438 } 3439 3440 rx->skb = skb; 3441 } 3442 3443 ieee80211_invoke_rx_handlers(rx); 3444 return true; 3445} 3446 3447/* 3448 * This is the actual Rx frames handler. as it belongs to Rx path it must 3449 * be called with rcu_read_lock protection. 
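 * ieee80211_rx() takes rcu_read_lock() before invoking it.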
3450 */ 3451static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 3452 struct sk_buff *skb) 3453{ 3454 struct ieee80211_local *local = hw_to_local(hw); 3455 struct ieee80211_sub_if_data *sdata; 3456 struct ieee80211_hdr *hdr; 3457 __le16 fc; 3458 struct ieee80211_rx_data rx; 3459 struct ieee80211_sub_if_data *prev; 3460 struct sta_info *sta, *prev_sta; 3461 struct rhash_head *tmp; 3462 int err = 0; 3463 3464 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 3465 memset(&rx, 0, sizeof(rx)); 3466 rx.skb = skb; 3467 rx.local = local; 3468 3469 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 3470 local->dot11ReceivedFragmentCount++; 3471 3472 if (ieee80211_is_mgmt(fc)) { 3473 /* drop frame if too short for header */ 3474 if (skb->len < ieee80211_hdrlen(fc)) 3475 err = -ENOBUFS; 3476 else 3477 err = skb_linearize(skb); 3478 } else { 3479 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 3480 } 3481 3482 if (err) { 3483 dev_kfree_skb(skb); 3484 return; 3485 } 3486 3487 hdr = (struct ieee80211_hdr *)skb->data; 3488 ieee80211_parse_qos(&rx); 3489 ieee80211_verify_alignment(&rx); 3490 3491 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 3492 ieee80211_is_beacon(hdr->frame_control))) 3493 ieee80211_scan_rx(local, skb); 3494 3495 if (ieee80211_is_data(fc)) { 3496 const struct bucket_table *tbl; 3497 3498 prev_sta = NULL; 3499 3500 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); 3501 3502 for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) { 3503 if (!prev_sta) { 3504 prev_sta = sta; 3505 continue; 3506 } 3507 3508 rx.sta = prev_sta; 3509 rx.sdata = prev_sta->sdata; 3510 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3511 3512 prev_sta = sta; 3513 } 3514 3515 if (prev_sta) { 3516 rx.sta = prev_sta; 3517 rx.sdata = prev_sta->sdata; 3518 3519 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3520 return; 3521 goto out; 3522 } 3523 } 3524 3525 prev = NULL; 3526 3527 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3528 if (!ieee80211_sdata_running(sdata)) 3529 continue; 3530 3531 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 3532 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 3533 continue; 3534 3535 /* 3536 * frame is destined for this interface, but if it's 3537 * not also for the previous one we handle that after 3538 * the loop to avoid copying the SKB once too much 3539 */ 3540 3541 if (!prev) { 3542 prev = sdata; 3543 continue; 3544 } 3545 3546 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3547 rx.sdata = prev; 3548 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3549 3550 prev = sdata; 3551 } 3552 3553 if (prev) { 3554 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3555 rx.sdata = prev; 3556 3557 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3558 return; 3559 } 3560 3561 out: 3562 dev_kfree_skb(skb); 3563} 3564 3565/* 3566 * This is the receive path handler. It is called by a low level driver when an 3567 * 802.11 MPDU is received from the hardware. 
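 * It must run in softirq/BH context (note the softirq_count() warning
 * below); drivers receiving frames in hard-IRQ or process context should
 * use ieee80211_rx_irqsafe() or ieee80211_rx_ni() instead.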
3568 */ 3569void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) 3570{ 3571 struct ieee80211_local *local = hw_to_local(hw); 3572 struct ieee80211_rate *rate = NULL; 3573 struct ieee80211_supported_band *sband; 3574 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3575 3576 WARN_ON_ONCE(softirq_count() == 0); 3577 3578 if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) 3579 goto drop; 3580 3581 sband = local->hw.wiphy->bands[status->band]; 3582 if (WARN_ON(!sband)) 3583 goto drop; 3584 3585 /* 3586 * If we're suspending, it is possible although not too likely 3587 * that we'd be receiving frames after having already partially 3588 * quiesced the stack. We can't process such frames then since 3589 * that might, for example, cause stations to be added or other 3590 * driver callbacks be invoked. 3591 */ 3592 if (unlikely(local->quiescing || local->suspended)) 3593 goto drop; 3594 3595 /* We might be during a HW reconfig, prevent Rx for the same reason */ 3596 if (unlikely(local->in_reconfig)) 3597 goto drop; 3598 3599 /* 3600 * The same happens when we're not even started, 3601 * but that's worth a warning. 3602 */ 3603 if (WARN_ON(!local->started)) 3604 goto drop; 3605 3606 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 3607 /* 3608 * Validate the rate, unless a PLCP error means that 3609 * we probably can't have a valid rate here anyway. 3610 */ 3611 3612 if (status->flag & RX_FLAG_HT) { 3613 /* 3614 * rate_idx is MCS index, which can be [0-76] 3615 * as documented on: 3616 * 3617 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 3618 * 3619 * Anything else would be some sort of driver or 3620 * hardware error. The driver should catch hardware 3621 * errors. 3622 */ 3623 if (WARN(status->rate_idx > 76, 3624 "Rate marked as an HT rate but passed " 3625 "status->rate_idx is not " 3626 "an MCS index [0-76]: %d (0x%02x)\n", 3627 status->rate_idx, 3628 status->rate_idx)) 3629 goto drop; 3630 } else if (status->flag & RX_FLAG_VHT) { 3631 if (WARN_ONCE(status->rate_idx > 9 || 3632 !status->vht_nss || 3633 status->vht_nss > 8, 3634 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 3635 status->rate_idx, status->vht_nss)) 3636 goto drop; 3637 } else { 3638 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 3639 goto drop; 3640 rate = &sband->bitrates[status->rate_idx]; 3641 } 3642 } 3643 3644 status->rx_flags = 0; 3645 3646 /* 3647 * key references and virtual interfaces are protected using RCU 3648 * and this requires that we are in a read-side RCU section during 3649 * receive processing 3650 */ 3651 rcu_read_lock(); 3652 3653 /* 3654 * Frames with failed FCS/PLCP checksum are not returned, 3655 * all other frames are returned without radiotap header 3656 * if it was previously present. 3657 * Also, frames with less than 16 bytes are dropped. 3658 */ 3659 skb = ieee80211_rx_monitor(local, skb, rate); 3660 if (!skb) { 3661 rcu_read_unlock(); 3662 return; 3663 } 3664 3665 ieee80211_tpt_led_trig_rx(local, 3666 ((struct ieee80211_hdr *)skb->data)->frame_control, 3667 skb->len); 3668 __ieee80211_rx_handle_packet(hw, skb); 3669 3670 rcu_read_unlock(); 3671 3672 return; 3673 drop: 3674 kfree_skb(skb); 3675} 3676EXPORT_SYMBOL(ieee80211_rx); 3677 3678/* This is a version of the rx handler that can be called from hard irq 3679 * context. 
Post the skb on the queue and schedule the tasklet */ 3680void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 3681{ 3682 struct ieee80211_local *local = hw_to_local(hw); 3683 3684 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 3685 3686 skb->pkt_type = IEEE80211_RX_MSG; 3687 skb_queue_tail(&local->skb_queue, skb); 3688 tasklet_schedule(&local->tasklet); 3689} 3690EXPORT_SYMBOL(ieee80211_rx_irqsafe); 3691
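/*
 * Illustrative sketch (not part of mac80211): roughly how a low level
 * driver would hand a received MPDU to the receive path above.  The
 * driver name, the mydrv_rx_mpdu() helper and the hard coded channel,
 * signal and rate values are made up for illustration; only the use of
 * IEEE80211_SKB_RXCB() and ieee80211_rx_irqsafe() reflects the real API.
 *
 *	static void mydrv_rx_mpdu(struct ieee80211_hw *hw,
 *				  const void *buf, unsigned int len)
 *	{
 *		struct ieee80211_rx_status *status;
 *		struct sk_buff *skb = dev_alloc_skb(len);
 *
 *		if (!skb)
 *			return;
 *		memcpy(skb_put(skb, len), buf, len);
 *
 *		status = IEEE80211_SKB_RXCB(skb);
 *		memset(status, 0, sizeof(*status));
 *		status->band = IEEE80211_BAND_2GHZ;
 *		status->freq = 2412;
 *		status->signal = -55;
 *		status->rate_idx = 0;
 *
 *		ieee80211_rx_irqsafe(hw, skb);
 *	}
 *
 * ieee80211_rx_irqsafe() is safe from hard interrupt context because it
 * only queues the skb and schedules the RX tasklet; the tasklet later
 * calls ieee80211_rx() in BH context.
 */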