/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

#include "common.h"

int
_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
{
	const int interval = 10;	/* microseconds */
	int t = 0;

	do {
		if ((_il_rd(il, addr) & mask) == (bits & mask))
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(_il_poll_bit);
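/*
 * Illustrative note (not new API): the timeout is expressed in microseconds,
 * so a caller such as _il_grab_nic_access() below passes timeout = 15000 and
 * the loop polls at most 1500 times at 10 us intervals.  On success the
 * return value is the elapsed time in microseconds (>= 0); on failure it is
 * -ETIMEDOUT, which is why callers test "ret < 0" rather than "!ret".
 */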
void
il_set_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_set_bit);

void
il_clear_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_clear_bit);

bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQ if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			     "(CSR_GP_CNTRL 0x%08x)\n", val);
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
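/*
 * A minimal usage sketch of the grab-and-hold pattern recommended above;
 * il_rd_prph() and il_wr_prph() below are the canonical in-tree examples:
 *
 *	spin_lock_irqsave(&il->reg_lock, reg_flags);
 *	if (likely(_il_grab_nic_access(il))) {
 *		... one or more _il_rd()/_il_wr() accesses ...
 *		_il_release_nic_access(il);
 *	}
 *	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
 */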
"UNKNOWN"; 251 252 } 253} 254EXPORT_SYMBOL(il_get_cmd_string); 255 256#define HOST_COMPLETE_TIMEOUT (HZ / 2) 257 258static void 259il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd, 260 struct il_rx_pkt *pkt) 261{ 262 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 263 IL_ERR("Bad return from %s (0x%08X)\n", 264 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 265 return; 266 } 267#ifdef CONFIG_IWLEGACY_DEBUG 268 switch (cmd->hdr.cmd) { 269 case C_TX_LINK_QUALITY_CMD: 270 case C_SENSITIVITY: 271 D_HC_DUMP("back from %s (0x%08X)\n", 272 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 273 break; 274 default: 275 D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd), 276 pkt->hdr.flags); 277 } 278#endif 279} 280 281static int 282il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd) 283{ 284 int ret; 285 286 BUG_ON(!(cmd->flags & CMD_ASYNC)); 287 288 /* An asynchronous command can not expect an SKB to be set. */ 289 BUG_ON(cmd->flags & CMD_WANT_SKB); 290 291 /* Assign a generic callback if one is not provided */ 292 if (!cmd->callback) 293 cmd->callback = il_generic_cmd_callback; 294 295 if (test_bit(S_EXIT_PENDING, &il->status)) 296 return -EBUSY; 297 298 ret = il_enqueue_hcmd(il, cmd); 299 if (ret < 0) { 300 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", 301 il_get_cmd_string(cmd->id), ret); 302 return ret; 303 } 304 return 0; 305} 306 307int 308il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd) 309{ 310 int cmd_idx; 311 int ret; 312 313 lockdep_assert_held(&il->mutex); 314 315 BUG_ON(cmd->flags & CMD_ASYNC); 316 317 /* A synchronous command can not have a callback set. */ 318 BUG_ON(cmd->callback); 319 320 D_INFO("Attempting to send sync command %s\n", 321 il_get_cmd_string(cmd->id)); 322 323 set_bit(S_HCMD_ACTIVE, &il->status); 324 D_INFO("Setting HCMD_ACTIVE for command %s\n", 325 il_get_cmd_string(cmd->id)); 326 327 cmd_idx = il_enqueue_hcmd(il, cmd); 328 if (cmd_idx < 0) { 329 ret = cmd_idx; 330 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", 331 il_get_cmd_string(cmd->id), ret); 332 goto out; 333 } 334 335 ret = wait_event_timeout(il->wait_command_queue, 336 !test_bit(S_HCMD_ACTIVE, &il->status), 337 HOST_COMPLETE_TIMEOUT); 338 if (!ret) { 339 if (test_bit(S_HCMD_ACTIVE, &il->status)) { 340 IL_ERR("Error sending %s: time out after %dms.\n", 341 il_get_cmd_string(cmd->id), 342 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 343 344 clear_bit(S_HCMD_ACTIVE, &il->status); 345 D_INFO("Clearing HCMD_ACTIVE for command %s\n", 346 il_get_cmd_string(cmd->id)); 347 ret = -ETIMEDOUT; 348 goto cancel; 349 } 350 } 351 352 if (test_bit(S_RFKILL, &il->status)) { 353 IL_ERR("Command %s aborted: RF KILL Switch\n", 354 il_get_cmd_string(cmd->id)); 355 ret = -ECANCELED; 356 goto fail; 357 } 358 if (test_bit(S_FW_ERROR, &il->status)) { 359 IL_ERR("Command %s failed: FW Error\n", 360 il_get_cmd_string(cmd->id)); 361 ret = -EIO; 362 goto fail; 363 } 364 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { 365 IL_ERR("Error: Response NULL in '%s'\n", 366 il_get_cmd_string(cmd->id)); 367 ret = -EIO; 368 goto cancel; 369 } 370 371 ret = 0; 372 goto out; 373 374cancel: 375 if (cmd->flags & CMD_WANT_SKB) { 376 /* 377 * Cancel the CMD_WANT_SKB flag for the cmd in the 378 * TX cmd queue. Otherwise in case the cmd comes 379 * in later, it will possibly set an invalid 380 * address (cmd->meta.source). 
#define HOST_COMPLETE_TIMEOUT (HZ / 2)

static void
il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
			struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
		     pkt->hdr.flags);
	}
#endif
}

static int
il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->flags & CMD_ASYNC));

	/* An asynchronous command cannot expect an SKB to be set. */
	BUG_ON(cmd->flags & CMD_WANT_SKB);

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = il_generic_cmd_callback;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EBUSY;

	ret = il_enqueue_hcmd(il, cmd);
	if (ret < 0) {
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command cannot have a callback set. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RFKILL, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue.  Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);

int
il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return il_send_cmd_async(il, cmd);

	return il_send_cmd_sync(il, cmd);
}
EXPORT_SYMBOL(il_send_cmd);

int
il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return il_send_cmd_sync(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu);

int
il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
		      void (*callback) (struct il_priv *il,
					struct il_device_cmd *cmd,
					struct il_rx_pkt *pkt))
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	cmd.flags |= CMD_ASYNC;
	cmd.callback = callback;

	return il_send_cmd_async(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu_async);
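/*
 * Caller-side sketch (illustrative only; struct il_bt_cmd is assumed from
 * the command set above, not defined in this file).  A synchronous send
 * blocks under il->mutex until the firmware responds or HOST_COMPLETE_TIMEOUT
 * expires; the async variant only enqueues and delivers the response to the
 * callback (or il_generic_cmd_callback if none is given):
 *
 *	struct il_bt_cmd bt_cmd = { ... };
 *
 *	// sync: returns 0 or a -errno such as -ETIMEDOUT
 *	ret = il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(bt_cmd), &bt_cmd);
 *
 *	// async: completion handled in the callback
 *	ret = il_send_cmd_pdu_async(il, C_BT_CONFIG, sizeof(bt_cmd),
 *				    &bt_cmd, NULL);
 */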
/* default: IL_LED_BLINK(0) using blinking idx table */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode,
		 "0=system default, 1=On(RF On)/Off(RF Off), 2=blinking");

/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0					SOLID ON
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0,		.blink_time = 334},
	{.throughput = 1 * 1024 - 1,	.blink_time = 260},
	{.throughput = 5 * 1024 - 1,	.blink_time = 220},
	{.throughput = 10 * 1024 - 1,	.blink_time = 190},
	{.throughput = 20 * 1024 - 1,	.blink_time = 170},
	{.throughput = 50 * 1024 - 1,	.blink_time = 150},
	{.throughput = 70 * 1024 - 1,	.blink_time = 130},
	{.throughput = 100 * 1024 - 1,	.blink_time = 110},
	{.throughput = 200 * 1024 - 1,	.blink_time = 80},
	{.throughput = 300 * 1024 - 1,	.blink_time = 50},
};

/*
 * Adjust the LED blink rate to compensate for the MAC clock frequency
 * deviation of each type of HW.  Blink rate analysis showed an average
 * deviation of 0% on 3945 and 5% on 4965 HW.
 * The LED on/off time per HW is scaled by the deviation to achieve the
 * desired LED frequency.  The calculation is:
 *	(100 - averageDeviation) / 100 * blinkTime
 * For code efficiency the calculation is done with:
 *	compensation = (100 - averageDeviation) * 64 / 100
 *	NewBlinkTime = (compensation * BlinkTime) / 64
 */
static inline u8
il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
{
	if (!compensation) {
		IL_ERR("undefined blink compensation: "
		       "use pre-defined blinking time\n");
		return time;
	}

	return (u8) ((time * compensation) >> 6);
}
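/*
 * Worked example of the fixed-point math above: for 4965's 5% deviation,
 * compensation = (100 - 5) * 64 / 100 = 60.  A nominal blink time of 110 ms
 * then becomes (110 * 60) >> 6 = 103 ms, i.e. the LED toggles slightly
 * faster to cancel the clock deviation.
 */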
/* Set led pattern command */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n",
	      il->cfg->led_compensation);
	led_cmd.on =
	    il_blink_compensation(il, on, il->cfg->led_compensation);
	led_cmd.off =
	    il_blink_compensation(il, off, il->cfg->led_compensation);

	ret = il->ops->send_led_cmd(il, &led_cmd);
	if (!ret) {
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}

static void
il_led_brightness_set(struct led_classdev *led_cdev,
		      enum led_brightness brightness)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
	unsigned long on = 0;

	if (brightness > 0)
		on = IL_LED_SOLID;

	il_led_cmd(il, on, 0);
}

static int
il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
		 unsigned long *delay_off)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);

	return il_led_cmd(il, *delay_on, *delay_off);
}

void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);

void
il_leds_exit(struct il_priv *il)
{
	if (!il->led_registered)
		return;

	led_classdev_unregister(&il->led);
	kfree(il->led.name);
}
EXPORT_SYMBOL(il_leds_exit);

/************************** EEPROM BANDS ****************************
 *
 * The il_eeprom_band definitions below provide the mapping from the
 * EEPROM contents to the specific channel number supported for each
 * band.
 *
 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
 * The specific geography and calibration information for that channel
 * is contained in the eeprom map itself.
 *
 * During init, we copy the eeprom information and channel map
 * information into il->channel_info_24/52 and il->channel_map_24/52
 *
 * channel_map_24/52 provides the idx in the channel_info array for a
 * given channel.  We have to have two separate maps as there is channel
 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
 * band_2
 *
 * A value of 0xff stored in the channel_map indicates that the channel
 * is not supported by the hardware at all.
 *
 * A value of 0xfe in the channel_map indicates that the channel is not
 * valid for Tx with the current hardware.  This means that
 * while the system can tune and receive on a given channel, it may not
 * be able to associate or transmit any frames on that
 * channel.  There is no corresponding channel information for that
 * entry.
 *
 *********************************************************************/

/* 2.4 GHz */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};

/******************************************************************************
 *
 * EEPROM related functions
 *
******************************************************************************/

static int
il_eeprom_verify_signature(struct il_priv *il)
{
	u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
	int ret = 0;

	D_EEPROM("EEPROM signature=0x%08x\n", gp);
	switch (gp) {
	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
		break;
	default:
		IL_ERR("bad EEPROM signature, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		break;
	}
	return ret;
}

const u8 *
il_eeprom_query_addr(const struct il_priv *il, size_t offset)
{
	BUG_ON(offset >= il->cfg->eeprom_size);
	return &il->eeprom[offset];
}
EXPORT_SYMBOL(il_eeprom_query_addr);

u16
il_eeprom_query16(const struct il_priv *il, size_t offset)
{
	if (!il->eeprom)
		return 0;
	return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
}
EXPORT_SYMBOL(il_eeprom_query16);
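/*
 * Example of the little-endian assembly above: with il->eeprom[offset] = 0x34
 * and il->eeprom[offset + 1] = 0x12, il_eeprom_query16() returns
 * 0x34 | (0x12 << 8) = 0x1234.
 */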
/**
 * il_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into il->eeprom
 *
 * NOTE:  This routine uses the non-debug IO access functions.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	e = (__le16 *) il->eeprom;

	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* Reset chip to save power until we load uCode during "up". */
	il_apm_stop(il);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
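/*
 * The read loop above works one 16-bit word at a time: the byte address is
 * written shifted left by one into CSR_EEPROM_REG (e.g. the word at byte
 * offset 0x10 is requested by writing 0x20), READ_VALID is polled, and the
 * word arrives in the upper half of the same register, hence "r >> 16".
 */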
void
il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);

static void
il_init_band_reference(const struct il_priv *il, int eep_band,
		       int *eeprom_ch_count,
		       const struct il_eeprom_channel **eeprom_ch_info,
		       const u8 **eeprom_ch_idx)
{
	u32 offset = il->cfg->regulatory_bands[eep_band - 1];

	switch (eep_band) {
	case 1:		/* 2.4GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_1;
		break;
	case 2:		/* 4.9GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_2;
		break;
	case 3:		/* 5.2GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_3;
		break;
	case 4:		/* 5.5GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_4;
		break;
	case 5:		/* 5.7GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_5;
		break;
	case 6:		/* 2.4GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_6;
		break;
	case 7:		/* 5 GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_7;
		break;
	default:
		BUG();
	}
}

#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
/**
 * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
 *
 * Does not set up a command, or touch hardware.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}

#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			      ? # x " " : "")
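/*
 * Example expansion: CHECK_AND_PRINT(IBSS) stringizes its argument, so for a
 * channel whose flags include EEPROM_CHANNEL_IBSS it evaluates to "IBSS "
 * and to "" otherwise, letting the debug format strings above concatenate
 * only the flags that are actually set.
 */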
/**
 * il_init_channel_map - Set up driver's info for all possible channels
 */
int
il_init_channel_map(struct il_priv *il)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_idx = NULL;
	const struct il_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct il_channel_info *ch_info;

	if (il->channel_count) {
		D_EEPROM("Channel map already initialized.\n");
		return 0;
	}

	D_EEPROM("Initializing regulatory info from EEPROM\n");

	il->channel_count =
	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
	    ARRAY_SIZE(il_eeprom_band_5);

	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);

	il->channel_info =
	    kzalloc(sizeof(struct il_channel_info) * il->channel_count,
		    GFP_KERNEL);
	if (!il->channel_info) {
		IL_ERR("Could not allocate channel_info\n");
		il->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = il->channel_info;

	/* Loop through the 5 EEPROM bands, adding them in order to the
	 * channel map we maintain (which contains additional information
	 * beyond what is just in the EEPROM). */
	for (band = 1; band <= 5; band++) {

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_idx[ch];
			ch_info->band =
			    (band ==
			     1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 * and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then enable
			 * one by one */
			ch_info->ht40_extension_channel =
			    IEEE80211_CHAN_NO_HT40;

			if (!(il_is_channel_valid(ch_info))) {
				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n", ch_info->channel,
					 ch_info->flags,
					 il_is_channel_a_band(ch_info) ? "5.2" :
					 "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			D_EEPROM("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):"
				 " Ad-Hoc %ssupported\n", ch_info->channel,
				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(DFS),
				 eeprom_ch_info[ch].flags,
				 eeprom_ch_info[ch].max_power_avg,
				 ((eeprom_ch_info[ch].
				   flags & EEPROM_CHANNEL_IBSS) &&
				  !(eeprom_ch_info[ch].
				    flags & EEPROM_CHANNEL_RADAR)) ? "" :
				 "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
	    il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
		    (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(il_init_channel_map);
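/*
 * Worked example for the HT40 loop above: for il_eeprom_band_7 entry 36, the
 * first call marks channel 36 usable as the lower (HT40+) half of the 36/40
 * pair, and the second call marks channel 36 + 4 = 40 usable as the upper
 * (HT40-) half, each by clearing the respective IEEE80211_CHAN_NO_HT40* bit
 * (only when the EEPROM flags the channel valid) that the first pass set on
 * every channel by default.
 */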
/*
 * il_free_channel_map - undo allocations in il_init_channel_map
 */
void
il_free_channel_map(struct il_priv *il)
{
	kfree(il->channel_info);
	il->channel_count = 0;
}
EXPORT_SYMBOL(il_free_channel_map);

/**
 * il_get_channel_info - Find driver's private channel info
 *
 * Based on band and channel number.
 */
const struct il_channel_info *
il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
		    u16 channel)
{
	int i;

	switch (band) {
	case IEEE80211_BAND_5GHZ:
		for (i = 14; i < il->channel_count; i++) {
			if (il->channel_info[i].channel == channel)
				return &il->channel_info[i];
		}
		break;
	case IEEE80211_BAND_2GHZ:
		if (channel >= 1 && channel <= 14)
			return &il->channel_info[channel - 1];
		break;
	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL(il_get_channel_info);

/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211.
 */

#define SLP_VEC(X0, X1, X2, X3, X4) { \
	cpu_to_le32(X0), \
	cpu_to_le32(X1), \
	cpu_to_le32(X2), \
	cpu_to_le32(X3), \
	cpu_to_le32(X4) \
}

static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	const __le32 interval[3][IL_POWER_VEC_SIZE] = {
		SLP_VEC(2, 2, 4, 6, 0xFF),
		SLP_VEC(2, 4, 7, 10, 10),
		SLP_VEC(4, 7, 10, 10, 0xFF)
	};
	int i, dtim_period, no_dtim;
	u32 max_sleep;
	bool skip;

	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	/* if no Power Save, we are done */
	if (il->power_data.ps_disabled)
		return;

	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->keep_alive_seconds = 0;
	cmd->debug_flags = 0;
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->keep_alive_beacons = 0;

	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;

	if (dtim_period <= 2) {
		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
		no_dtim = 2;
	} else if (dtim_period <= 10) {
		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
		no_dtim = 2;
	} else {
		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
		no_dtim = 0;
	}

	if (dtim_period == 0) {
		dtim_period = 1;
		skip = false;
	} else {
		skip = !!no_dtim;
	}

	if (skip) {
		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];

		max_sleep = le32_to_cpu(tmp);
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * (skip + 1);
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		max_sleep = dtim_period;
		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}
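/*
 * Worked example of the clamping above: with dtim_period = 3 the middle
 * vector {2, 4, 7, 10, 10} is chosen and skip is true; the last entry (10)
 * is not 0xFF and is larger than the DTIM period, so max_sleep is rounded
 * down to a DTIM multiple, (10 / 3) * 3 = 9, and the vector sent to the
 * firmware becomes {2, 4, 7, 9, 9}.
 */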
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}

static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* Scan completion uses sleep_cmd_next, so keep it updated */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}

int
il_power_update_mode(struct il_priv *il, bool force)
{
	struct il_powertable_cmd cmd;

	il_build_powertable_cmd(il, &cmd);

	return il_power_set_mode(il, &cmd, force);
}
EXPORT_SYMBOL(il_power_update_mode);

/* initialize to default */
void
il_power_initialize(struct il_priv *il)
{
	u16 lctl;

	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
	il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	il->power_data.debug_sleep_level_override = -1;

	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
}
EXPORT_SYMBOL(il_power_initialize);
/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
 * sending probe req.  This should be set long enough to hear probe responses
 * from more than one AP.  */
#define IL_ACTIVE_DWELL_TIME_24    (30)	/* all times in msec */
#define IL_ACTIVE_DWELL_TIME_52    (20)

#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
 * Must be set longer than active dwell time.
 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
#define IL_PASSIVE_DWELL_TIME_24   (20)	/* all times in msec */
#define IL_PASSIVE_DWELL_TIME_52   (10)
#define IL_PASSIVE_DWELL_BASE      (100)
#define IL_CHANNEL_TUNE_TIME       5

static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with an error if the device is not ready to
	 * receive the scan abort command, or if it is not currently
	 * performing a hardware scan. */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan, which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}

static void
il_complete_scan(struct il_priv *il, bool aborted)
{
	/* check if scan was requested from mac80211 */
	if (il->scan_request) {
		D_SCAN("Complete scan in mac80211\n");
		ieee80211_scan_completed(il->hw, aborted);
	}

	il->scan_vif = NULL;
	il->scan_request = NULL;
}

void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}

static void
il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully sent scan abort\n");
}

/**
 * il_scan_cancel - Cancel any currently executing HW scan
 */
int
il_scan_cancel(struct il_priv *il)
{
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
	return 0;
}
EXPORT_SYMBOL(il_scan_cancel);

/**
 * il_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 */
int
il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&il->mutex);

	D_SCAN("Scan cancel timeout\n");

	il_do_scan_abort(il);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(S_SCAN_HW, &il->status))
			break;
		msleep(20);
	}

	return test_bit(S_SCAN_HW, &il->status);
}
EXPORT_SYMBOL(il_scan_cancel_timeout);
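/*
 * Note the asymmetry between the two cancel paths above: il_scan_cancel()
 * only queues il->abort_scan and does not sleep, while
 * il_scan_cancel_timeout() runs the abort under il->mutex and then waits in
 * 20 ms steps for S_SCAN_HW to clear; its return value is the state of
 * S_SCAN_HW, i.e. non-zero means the scan did NOT stop within @ms.
 */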
/* Service response to C_SCAN (0x80) */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}

/* Service N_SCAN_START (0x82) */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;

	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: " "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}

/* Service N_SCAN_RESULTS (0x83) */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}

/* Service N_SCAN_COMPLETE (0x84) */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}

void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);

u16
il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
			 u8 n_probes)
{
	if (band == IEEE80211_BAND_5GHZ)
		return IL_ACTIVE_DWELL_TIME_52 +
		    IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
	else
		return IL_ACTIVE_DWELL_TIME_24 +
		    IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
EXPORT_SYMBOL(il_get_active_dwell_time);

u16
il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
			  struct ieee80211_vif *vif)
{
	u16 value;

	u16 passive =
	    (band ==
	     IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
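/*
 * Worked dwell-time examples: an active 2.4 GHz scan with n_probes = 2 dwells
 * 30 + 3 * (2 + 1) = 39 ms per channel.  A passive 2.4 GHz dwell starts from
 * 100 + 20 = 120 ms; when associated with beacon_int = 100, the clamp gives
 * (100 * 98) / 100 - 2 * 5 = 88 ms, so min(88, 120) = 88 ms is used.
 */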
"bg" : "a", le32_to_cpu(notif->tsf_high), 1424 le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); 1425} 1426 1427/* Service N_SCAN_RESULTS (0x83) */ 1428static void 1429il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb) 1430{ 1431#ifdef CONFIG_IWLEGACY_DEBUG 1432 struct il_rx_pkt *pkt = rxb_addr(rxb); 1433 struct il_scanresults_notification *notif = 1434 (struct il_scanresults_notification *)pkt->u.raw; 1435 1436 D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d " 1437 "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a", 1438 le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), 1439 le32_to_cpu(notif->stats[0]), 1440 le32_to_cpu(notif->tsf_low) - il->scan_start_tsf); 1441#endif 1442} 1443 1444/* Service N_SCAN_COMPLETE (0x84) */ 1445static void 1446il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) 1447{ 1448 1449#ifdef CONFIG_IWLEGACY_DEBUG 1450 struct il_rx_pkt *pkt = rxb_addr(rxb); 1451 struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 1452#endif 1453 1454 D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 1455 scan_notif->scanned_channels, scan_notif->tsf_low, 1456 scan_notif->tsf_high, scan_notif->status); 1457 1458 /* The HW is no longer scanning */ 1459 clear_bit(S_SCAN_HW, &il->status); 1460 1461 D_SCAN("Scan on %sGHz took %dms\n", 1462 (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", 1463 jiffies_to_msecs(jiffies - il->scan_start)); 1464 1465 queue_work(il->workqueue, &il->scan_completed); 1466} 1467 1468void 1469il_setup_rx_scan_handlers(struct il_priv *il) 1470{ 1471 /* scan handlers */ 1472 il->handlers[C_SCAN] = il_hdl_scan; 1473 il->handlers[N_SCAN_START] = il_hdl_scan_start; 1474 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results; 1475 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete; 1476} 1477EXPORT_SYMBOL(il_setup_rx_scan_handlers); 1478 1479u16 1480il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, 1481 u8 n_probes) 1482{ 1483 if (band == IEEE80211_BAND_5GHZ) 1484 return IL_ACTIVE_DWELL_TIME_52 + 1485 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); 1486 else 1487 return IL_ACTIVE_DWELL_TIME_24 + 1488 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); 1489} 1490EXPORT_SYMBOL(il_get_active_dwell_time); 1491 1492u16 1493il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, 1494 struct ieee80211_vif *vif) 1495{ 1496 u16 value; 1497 1498 u16 passive = 1499 (band == 1500 IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + 1501 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + 1502 IL_PASSIVE_DWELL_TIME_52; 1503 1504 if (il_is_any_associated(il)) { 1505 /* 1506 * If we're associated, we clamp the maximum passive 1507 * dwell time to be 98% of the smallest beacon interval 1508 * (minus 2 * channel tune time) 1509 */ 1510 value = il->vif ? 
static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}

int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Cannot scan with no channels.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);

static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* If we get here, the firmware did not finish the scan and is
	 * most likely in bad shape, so we don't bother to send an abort
	 * command; just force scan complete to mac80211. */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}

/**
 * il_fill_probe_req - fill in all required fields and IE for probe request
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
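/*
 * Layout produced by il_fill_probe_req() (sizes in bytes): a 24-byte 802.11
 * management header with broadcast DA/BSSID, a 2-byte wildcard SSID IE
 * (WLAN_EID_SSID with zero length), then the caller-supplied IEs.  With
 * ie_len = 0 the minimum frame is therefore 26 bytes, and "left" must cover
 * the whole frame or the function returns 0.
 */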
static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep the scan_check work queued in case the firmware fails to
	 * report back a scan completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}

static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}

void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);

void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);
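/*
 * How the three work items above relate: scan_check is a delayed-work
 * watchdog armed by il_scan_initiate() and cancelled by il_bg_scan_completed()
 * when the firmware reports completion; abort_scan is the asynchronous path
 * behind il_scan_cancel(); and scan_completed is queued by the
 * N_SCAN_COMPLETE handler.  If the watchdog fires first, il_bg_scan_check()
 * gives up on the firmware and forces the scan to end.
 */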
/* il->sta_lock must be held */
static void
il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
{
	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
		       sta_id, il->stations[sta_id].sta.sta.addr);

	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
		D_ASSOC("STA id %u addr %pM already present"
			" in uCode (according to driver)\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	} else {
		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	}
}

static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device.  That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns.  This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}

static void
il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
		    struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);
}
"Modified" : "Added", addsta->sta.addr); 1828 spin_unlock_irqrestore(&il->sta_lock, flags); 1829 1830 return ret; 1831} 1832 1833static void 1834il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd, 1835 struct il_rx_pkt *pkt) 1836{ 1837 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload; 1838 1839 il_process_add_sta_resp(il, addsta, pkt, false); 1840 1841} 1842 1843int 1844il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags) 1845{ 1846 struct il_rx_pkt *pkt = NULL; 1847 int ret = 0; 1848 u8 data[sizeof(*sta)]; 1849 struct il_host_cmd cmd = { 1850 .id = C_ADD_STA, 1851 .flags = flags, 1852 .data = data, 1853 }; 1854 u8 sta_id __maybe_unused = sta->sta.sta_id; 1855 1856 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, 1857 flags & CMD_ASYNC ? "a" : ""); 1858 1859 if (flags & CMD_ASYNC) 1860 cmd.callback = il_add_sta_callback; 1861 else { 1862 cmd.flags |= CMD_WANT_SKB; 1863 might_sleep(); 1864 } 1865 1866 cmd.len = il->ops->build_addsta_hcmd(sta, data); 1867 ret = il_send_cmd(il, &cmd); 1868 1869 if (ret || (flags & CMD_ASYNC)) 1870 return ret; 1871 1872 if (ret == 0) { 1873 pkt = (struct il_rx_pkt *)cmd.reply_page; 1874 ret = il_process_add_sta_resp(il, sta, pkt, true); 1875 } 1876 il_free_pages(il, cmd.reply_page); 1877 1878 return ret; 1879} 1880EXPORT_SYMBOL(il_send_add_sta); 1881 1882static void 1883il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta) 1884{ 1885 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; 1886 __le32 sta_flags; 1887 1888 if (!sta || !sta_ht_inf->ht_supported) 1889 goto done; 1890 1891 D_ASSOC("spatial multiplexing power save mode: %s\n", 1892 (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" : 1893 (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? 
"dynamic" : 1894 "disabled"); 1895 1896 sta_flags = il->stations[idx].sta.station_flags; 1897 1898 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 1899 1900 switch (sta->smps_mode) { 1901 case IEEE80211_SMPS_STATIC: 1902 sta_flags |= STA_FLG_MIMO_DIS_MSK; 1903 break; 1904 case IEEE80211_SMPS_DYNAMIC: 1905 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; 1906 break; 1907 case IEEE80211_SMPS_OFF: 1908 break; 1909 default: 1910 IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode); 1911 break; 1912 } 1913 1914 sta_flags |= 1915 cpu_to_le32((u32) sta_ht_inf-> 1916 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); 1917 1918 sta_flags |= 1919 cpu_to_le32((u32) sta_ht_inf-> 1920 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); 1921 1922 if (il_is_ht40_tx_allowed(il, &sta->ht_cap)) 1923 sta_flags |= STA_FLG_HT40_EN_MSK; 1924 else 1925 sta_flags &= ~STA_FLG_HT40_EN_MSK; 1926 1927 il->stations[idx].sta.station_flags = sta_flags; 1928done: 1929 return; 1930} 1931 1932/** 1933 * il_prep_station - Prepare station information for addition 1934 * 1935 * should be called with sta_lock held 1936 */ 1937u8 1938il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap, 1939 struct ieee80211_sta *sta) 1940{ 1941 struct il_station_entry *station; 1942 int i; 1943 u8 sta_id = IL_INVALID_STATION; 1944 u16 rate; 1945 1946 if (is_ap) 1947 sta_id = IL_AP_ID; 1948 else if (is_broadcast_ether_addr(addr)) 1949 sta_id = il->hw_params.bcast_id; 1950 else 1951 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { 1952 if (ether_addr_equal(il->stations[i].sta.sta.addr, 1953 addr)) { 1954 sta_id = i; 1955 break; 1956 } 1957 1958 if (!il->stations[i].used && 1959 sta_id == IL_INVALID_STATION) 1960 sta_id = i; 1961 } 1962 1963 /* 1964 * These two conditions have the same outcome, but keep them 1965 * separate 1966 */ 1967 if (unlikely(sta_id == IL_INVALID_STATION)) 1968 return sta_id; 1969 1970 /* 1971 * uCode is not able to deal with multiple requests to add a 1972 * station. Keep track if one is in progress so that we do not send 1973 * another. 1974 */ 1975 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { 1976 D_INFO("STA %d already in process of being added.\n", sta_id); 1977 return sta_id; 1978 } 1979 1980 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && 1981 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && 1982 ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) { 1983 D_ASSOC("STA %d (%pM) already added, not adding again.\n", 1984 sta_id, addr); 1985 return sta_id; 1986 } 1987 1988 station = &il->stations[sta_id]; 1989 station->used = IL_STA_DRIVER_ACTIVE; 1990 D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr); 1991 il->num_stations++; 1992 1993 /* Set up the C_ADD_STA command to send to device */ 1994 memset(&station->sta, 0, sizeof(struct il_addsta_cmd)); 1995 memcpy(station->sta.sta.addr, addr, ETH_ALEN); 1996 station->sta.mode = 0; 1997 station->sta.sta.sta_id = sta_id; 1998 station->sta.station_flags = 0; 1999 2000 /* 2001 * OK to call unconditionally, since local stations (IBSS BSSID 2002 * STA and broadcast STA) pass in a NULL sta, and mac80211 2003 * doesn't allow HT IBSS. 2004 */ 2005 il_set_ht_add_station(il, sta_id, sta); 2006 2007 /* 3945 only */ 2008 rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; 2009 /* Turn on both antennas for the station... 
*/ 2010 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 2011 2012 return sta_id; 2013 2014} 2015EXPORT_SYMBOL_GPL(il_prep_station); 2016 2017#define STA_WAIT_TIMEOUT (HZ/2) 2018 2019/** 2020 * il_add_station_common - 2021 */ 2022int 2023il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap, 2024 struct ieee80211_sta *sta, u8 *sta_id_r) 2025{ 2026 unsigned long flags_spin; 2027 int ret = 0; 2028 u8 sta_id; 2029 struct il_addsta_cmd sta_cmd; 2030 2031 *sta_id_r = 0; 2032 spin_lock_irqsave(&il->sta_lock, flags_spin); 2033 sta_id = il_prep_station(il, addr, is_ap, sta); 2034 if (sta_id == IL_INVALID_STATION) { 2035 IL_ERR("Unable to prepare station %pM for addition\n", addr); 2036 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2037 return -EINVAL; 2038 } 2039 2040 /* 2041 * uCode is not able to deal with multiple requests to add a 2042 * station. Keep track if one is in progress so that we do not send 2043 * another. 2044 */ 2045 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { 2046 D_INFO("STA %d already in process of being added.\n", sta_id); 2047 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2048 return -EEXIST; 2049 } 2050 2051 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && 2052 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2053 D_ASSOC("STA %d (%pM) already added, not adding again.\n", 2054 sta_id, addr); 2055 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2056 return -EEXIST; 2057 } 2058 2059 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; 2060 memcpy(&sta_cmd, &il->stations[sta_id].sta, 2061 sizeof(struct il_addsta_cmd)); 2062 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2063 2064 /* Add station to device's station table */ 2065 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2066 if (ret) { 2067 spin_lock_irqsave(&il->sta_lock, flags_spin); 2068 IL_ERR("Adding station %pM failed.\n", 2069 il->stations[sta_id].sta.sta.addr); 2070 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2071 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; 2072 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2073 } 2074 *sta_id_r = sta_id; 2075 return ret; 2076} 2077EXPORT_SYMBOL(il_add_station_common); 2078 2079/** 2080 * il_sta_ucode_deactivate - deactivate ucode status for a station 2081 * 2082 * il->sta_lock must be held 2083 */ 2084static void 2085il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id) 2086{ 2087 /* Ucode must be active and driver must be non active */ 2088 if ((il->stations[sta_id]. 
2089 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != 2090 IL_STA_UCODE_ACTIVE) 2091 IL_ERR("removed non active STA %u\n", sta_id); 2092 2093 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; 2094 2095 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); 2096 D_ASSOC("Removed STA %u\n", sta_id); 2097} 2098 2099static int 2100il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, 2101 bool temporary) 2102{ 2103 struct il_rx_pkt *pkt; 2104 int ret; 2105 2106 unsigned long flags_spin; 2107 struct il_rem_sta_cmd rm_sta_cmd; 2108 2109 struct il_host_cmd cmd = { 2110 .id = C_REM_STA, 2111 .len = sizeof(struct il_rem_sta_cmd), 2112 .flags = CMD_SYNC, 2113 .data = &rm_sta_cmd, 2114 }; 2115 2116 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 2117 rm_sta_cmd.num_sta = 1; 2118 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); 2119 2120 cmd.flags |= CMD_WANT_SKB; 2121 2122 ret = il_send_cmd(il, &cmd); 2123 2124 if (ret) 2125 return ret; 2126 2127 pkt = (struct il_rx_pkt *)cmd.reply_page; 2128 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 2129 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); 2130 ret = -EIO; 2131 } 2132 2133 if (!ret) { 2134 switch (pkt->u.rem_sta.status) { 2135 case REM_STA_SUCCESS_MSK: 2136 if (!temporary) { 2137 spin_lock_irqsave(&il->sta_lock, flags_spin); 2138 il_sta_ucode_deactivate(il, sta_id); 2139 spin_unlock_irqrestore(&il->sta_lock, 2140 flags_spin); 2141 } 2142 D_ASSOC("C_REM_STA PASSED\n"); 2143 break; 2144 default: 2145 ret = -EIO; 2146 IL_ERR("C_REM_STA failed\n"); 2147 break; 2148 } 2149 } 2150 il_free_pages(il, cmd.reply_page); 2151 2152 return ret; 2153} 2154 2155/** 2156 * il_remove_station - Remove driver's knowledge of station. 2157 */ 2158int 2159il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) 2160{ 2161 unsigned long flags; 2162 2163 if (!il_is_ready(il)) { 2164 D_INFO("Unable to remove station %pM, device not ready.\n", 2165 addr); 2166 /* 2167 * It is typical for stations to be removed when we are 2168 * going down. Return success since device will be down 2169 * soon anyway 2170 */ 2171 return 0; 2172 } 2173 2174 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr); 2175 2176 if (WARN_ON(sta_id == IL_INVALID_STATION)) 2177 return -EINVAL; 2178 2179 spin_lock_irqsave(&il->sta_lock, flags); 2180 2181 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2182 D_INFO("Removing %pM but non DRIVER active\n", addr); 2183 goto out_err; 2184 } 2185 2186 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2187 D_INFO("Removing %pM but non UCODE active\n", addr); 2188 goto out_err; 2189 } 2190 2191 if (il->stations[sta_id].used & IL_STA_LOCAL) { 2192 kfree(il->stations[sta_id].lq); 2193 il->stations[sta_id].lq = NULL; 2194 } 2195 2196 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2197 2198 il->num_stations--; 2199 2200 BUG_ON(il->num_stations < 0); 2201 2202 spin_unlock_irqrestore(&il->sta_lock, flags); 2203 2204 return il_send_remove_station(il, addr, sta_id, false); 2205out_err: 2206 spin_unlock_irqrestore(&il->sta_lock, flags); 2207 return -EINVAL; 2208} 2209EXPORT_SYMBOL_GPL(il_remove_station); 2210 2211/** 2212 * il_clear_ucode_stations - clear ucode station table bits 2213 * 2214 * This function clears all the bits in the driver indicating 2215 * which stations are active in the ucode. Call when something 2216 * other than explicit station management would cause this in 2217 * the ucode, e.g. unassociated RXON. 
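 *
 * For example: after an unassociated RXON the ucode no longer knows any
 * stations, so the driver clears IL_STA_UCODE_ACTIVE for every entry
 * here and relies on a later il_restore_stations() to re-add whatever
 * is still marked IL_STA_DRIVER_ACTIVE.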
2218 */ 2219void 2220il_clear_ucode_stations(struct il_priv *il) 2221{ 2222 int i; 2223 unsigned long flags_spin; 2224 bool cleared = false; 2225 2226 D_INFO("Clearing ucode stations in driver\n"); 2227 2228 spin_lock_irqsave(&il->sta_lock, flags_spin); 2229 for (i = 0; i < il->hw_params.max_stations; i++) { 2230 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { 2231 D_INFO("Clearing ucode active for station %d\n", i); 2232 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2233 cleared = true; 2234 } 2235 } 2236 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2237 2238 if (!cleared) 2239 D_INFO("No active stations found to be cleared\n"); 2240} 2241EXPORT_SYMBOL(il_clear_ucode_stations); 2242 2243/** 2244 * il_restore_stations() - Restore driver known stations to device 2245 * 2246 * All stations considered active by driver, but not present in ucode, is 2247 * restored. 2248 * 2249 * Function sleeps. 2250 */ 2251void 2252il_restore_stations(struct il_priv *il) 2253{ 2254 struct il_addsta_cmd sta_cmd; 2255 struct il_link_quality_cmd lq; 2256 unsigned long flags_spin; 2257 int i; 2258 bool found = false; 2259 int ret; 2260 bool send_lq; 2261 2262 if (!il_is_ready(il)) { 2263 D_INFO("Not ready yet, not restoring any stations.\n"); 2264 return; 2265 } 2266 2267 D_ASSOC("Restoring all known stations ... start.\n"); 2268 spin_lock_irqsave(&il->sta_lock, flags_spin); 2269 for (i = 0; i < il->hw_params.max_stations; i++) { 2270 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && 2271 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { 2272 D_ASSOC("Restoring sta %pM\n", 2273 il->stations[i].sta.sta.addr); 2274 il->stations[i].sta.mode = 0; 2275 il->stations[i].used |= IL_STA_UCODE_INPROGRESS; 2276 found = true; 2277 } 2278 } 2279 2280 for (i = 0; i < il->hw_params.max_stations; i++) { 2281 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { 2282 memcpy(&sta_cmd, &il->stations[i].sta, 2283 sizeof(struct il_addsta_cmd)); 2284 send_lq = false; 2285 if (il->stations[i].lq) { 2286 memcpy(&lq, il->stations[i].lq, 2287 sizeof(struct il_link_quality_cmd)); 2288 send_lq = true; 2289 } 2290 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2291 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2292 if (ret) { 2293 spin_lock_irqsave(&il->sta_lock, flags_spin); 2294 IL_ERR("Adding station %pM failed.\n", 2295 il->stations[i].sta.sta.addr); 2296 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; 2297 il->stations[i].used &= 2298 ~IL_STA_UCODE_INPROGRESS; 2299 spin_unlock_irqrestore(&il->sta_lock, 2300 flags_spin); 2301 } 2302 /* 2303 * Rate scaling has already been initialized, send 2304 * current LQ command 2305 */ 2306 if (send_lq) 2307 il_send_lq_cmd(il, &lq, CMD_SYNC, true); 2308 spin_lock_irqsave(&il->sta_lock, flags_spin); 2309 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; 2310 } 2311 } 2312 2313 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2314 if (!found) 2315 D_INFO("Restoring all known stations" 2316 " .... no stations to be restored.\n"); 2317 else 2318 D_INFO("Restoring all known stations" " .... 
complete.\n"); 2319} 2320EXPORT_SYMBOL(il_restore_stations); 2321 2322int 2323il_get_free_ucode_key_idx(struct il_priv *il) 2324{ 2325 int i; 2326 2327 for (i = 0; i < il->sta_key_max_num; i++) 2328 if (!test_and_set_bit(i, &il->ucode_key_table)) 2329 return i; 2330 2331 return WEP_INVALID_OFFSET; 2332} 2333EXPORT_SYMBOL(il_get_free_ucode_key_idx); 2334 2335void 2336il_dealloc_bcast_stations(struct il_priv *il) 2337{ 2338 unsigned long flags; 2339 int i; 2340 2341 spin_lock_irqsave(&il->sta_lock, flags); 2342 for (i = 0; i < il->hw_params.max_stations; i++) { 2343 if (!(il->stations[i].used & IL_STA_BCAST)) 2344 continue; 2345 2346 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2347 il->num_stations--; 2348 BUG_ON(il->num_stations < 0); 2349 kfree(il->stations[i].lq); 2350 il->stations[i].lq = NULL; 2351 } 2352 spin_unlock_irqrestore(&il->sta_lock, flags); 2353} 2354EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); 2355 2356#ifdef CONFIG_IWLEGACY_DEBUG 2357static void 2358il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2359{ 2360 int i; 2361 D_RATE("lq station id 0x%x\n", lq->sta_id); 2362 D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, 2363 lq->general_params.dual_stream_ant_msk); 2364 2365 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 2366 D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); 2367} 2368#else 2369static inline void 2370il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2371{ 2372} 2373#endif 2374 2375/** 2376 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity 2377 * 2378 * It sometimes happens when a HT rate has been in use and we 2379 * loose connectivity with AP then mac80211 will first tell us that the 2380 * current channel is not HT anymore before removing the station. In such a 2381 * scenario the RXON flags will be updated to indicate we are not 2382 * communicating HT anymore, but the LQ command may still contain HT rates. 2383 * Test for this to prevent driver from sending LQ command between the time 2384 * RXON flags are updated and when LQ command is updated. 2385 */ 2386static bool 2387il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq) 2388{ 2389 int i; 2390 2391 if (il->ht.enabled) 2392 return true; 2393 2394 D_INFO("Channel %u is not an HT channel\n", il->active.channel); 2395 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 2396 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { 2397 D_INFO("idx %d of LQ expects HT channel\n", i); 2398 return false; 2399 } 2400 } 2401 return true; 2402} 2403 2404/** 2405 * il_send_lq_cmd() - Send link quality command 2406 * @init: This command is sent as part of station initialization right 2407 * after station has been added. 2408 * 2409 * The link quality command is sent as the last step of station creation. 2410 * This is the special case in which init is set and we call a callback in 2411 * this case to clear the state indicating that station creation is in 2412 * progress. 
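 *
 * A rough sketch of the usual call order during station bring-up
 * (il_add_station_common() is defined earlier in this file):
 *
 *	il_add_station_common(il, addr, ...);
 *		(marks the station IL_STA_UCODE_INPROGRESS)
 *	... rate scaling fills in a struct il_link_quality_cmd ...
 *	il_send_lq_cmd(il, &lq, CMD_SYNC, true);
 *		(init == true clears IL_STA_UCODE_INPROGRESS again)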
2413 */ 2414int 2415il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq, 2416 u8 flags, bool init) 2417{ 2418 int ret = 0; 2419 unsigned long flags_spin; 2420 2421 struct il_host_cmd cmd = { 2422 .id = C_TX_LINK_QUALITY_CMD, 2423 .len = sizeof(struct il_link_quality_cmd), 2424 .flags = flags, 2425 .data = lq, 2426 }; 2427 2428 if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) 2429 return -EINVAL; 2430 2431 spin_lock_irqsave(&il->sta_lock, flags_spin); 2432 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2433 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2434 return -EINVAL; 2435 } 2436 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2437 2438 il_dump_lq_cmd(il, lq); 2439 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 2440 2441 if (il_is_lq_table_valid(il, lq)) 2442 ret = il_send_cmd(il, &cmd); 2443 else 2444 ret = -EINVAL; 2445 2446 if (cmd.flags & CMD_ASYNC) 2447 return ret; 2448 2449 if (init) { 2450 D_INFO("init LQ command complete," 2451 " clearing sta addition status for sta %d\n", 2452 lq->sta_id); 2453 spin_lock_irqsave(&il->sta_lock, flags_spin); 2454 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; 2455 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2456 } 2457 return ret; 2458} 2459EXPORT_SYMBOL(il_send_lq_cmd); 2460 2461int 2462il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2463 struct ieee80211_sta *sta) 2464{ 2465 struct il_priv *il = hw->priv; 2466 struct il_station_priv_common *sta_common = (void *)sta->drv_priv; 2467 int ret; 2468 2469 mutex_lock(&il->mutex); 2470 D_MAC80211("enter station %pM\n", sta->addr); 2471 2472 ret = il_remove_station(il, sta_common->sta_id, sta->addr); 2473 if (ret) 2474 IL_ERR("Error removing station %pM\n", sta->addr); 2475 2476 D_MAC80211("leave ret %d\n", ret); 2477 mutex_unlock(&il->mutex); 2478 2479 return ret; 2480} 2481EXPORT_SYMBOL(il_mac_sta_remove); 2482 2483/************************** RX-FUNCTIONS ****************************/ 2484/* 2485 * Rx theory of operation 2486 * 2487 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 2488 * each of which point to Receive Buffers to be filled by the NIC. These get 2489 * used not only for Rx frames, but for any command response or notification 2490 * from the NIC. The driver and NIC manage the Rx buffers by means 2491 * of idxes into the circular buffer. 2492 * 2493 * Rx Queue Indexes 2494 * The host/firmware share two idx registers for managing the Rx buffers. 2495 * 2496 * The READ idx maps to the first position that the firmware may be writing 2497 * to -- the driver can read up to (but not including) this position and get 2498 * good data. 2499 * The READ idx is managed by the firmware once the card is enabled. 2500 * 2501 * The WRITE idx maps to the last position the driver has read from -- the 2502 * position preceding WRITE is the last slot the firmware can place a packet. 2503 * 2504 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 2505 * WRITE = READ. 2506 * 2507 * During initialization, the host sets up the READ queue position to the first 2508 * IDX position, and WRITE to the last (READ - 1 wrapped) 2509 * 2510 * When the firmware places a packet in a buffer, it will advance the READ idx 2511 * and fire the RX interrupt. The driver can then query the READ idx and 2512 * process as many packets as possible, moving the WRITE idx forward as it 2513 * resets the Rx queue buffers with new memory. 
2514 * 2515 * The management in the driver is as follows: 2516 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 2517 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 2518 * to replenish the iwl->rxq->rx_free. 2519 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the 2520 * iwl->rxq is replenished and the READ IDX is updated (updating the 2521 * 'processed' and 'read' driver idxes as well) 2522 * + A received packet is processed and handed to the kernel network stack, 2523 * detached from the iwl->rxq. The driver 'processed' idx is updated. 2524 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free 2525 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ 2526 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there 2527 * were enough free buffers and RX_STALLED is set it is cleared. 2528 * 2529 * 2530 * Driver sequence: 2531 * 2532 * il_rx_queue_alloc() Allocates rx_free 2533 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls 2534 * il_rx_queue_restock 2535 * il_rx_queue_restock() Moves available buffers from rx_free into Rx 2536 * queue, updates firmware pointers, and updates 2537 * the WRITE idx. If insufficient rx_free buffers 2538 * are available, schedules il_rx_replenish 2539 * 2540 * -- enable interrupts -- 2541 * ISR - il_rx() Detach il_rx_bufs from pool up to the 2542 * READ IDX, detaching the SKB from the pool. 2543 * Moves the packet buffer from queue to rx_used. 2544 * Calls il_rx_queue_restock to refill any empty 2545 * slots. 2546 * ... 2547 * 2548 */ 2549 2550/** 2551 * il_rx_queue_space - Return number of free slots available in queue. 2552 */ 2553int 2554il_rx_queue_space(const struct il_rx_queue *q) 2555{ 2556 int s = q->read - q->write; 2557 if (s <= 0) 2558 s += RX_QUEUE_SIZE; 2559 /* keep some buffer to not confuse full and empty queue */ 2560 s -= 2; 2561 if (s < 0) 2562 s = 0; 2563 return s; 2564} 2565EXPORT_SYMBOL(il_rx_queue_space); 2566 2567/** 2568 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue 2569 */ 2570void 2571il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) 2572{ 2573 unsigned long flags; 2574 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; 2575 u32 reg; 2576 2577 spin_lock_irqsave(&q->lock, flags); 2578 2579 if (q->need_update == 0) 2580 goto exit_unlock; 2581 2582 /* If power-saving is in use, make sure device is awake */ 2583 if (test_bit(S_POWER_PMI, &il->status)) { 2584 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2585 2586 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2587 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n", 2588 reg); 2589 il_set_bit(il, CSR_GP_CNTRL, 2590 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2591 goto exit_unlock; 2592 } 2593 2594 q->write_actual = (q->write & ~0x7); 2595 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2596 2597 /* Else device is assumed to be awake */ 2598 } else { 2599 /* Device expects a multiple of 8 */ 2600 q->write_actual = (q->write & ~0x7); 2601 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2602 } 2603 2604 q->need_update = 0; 2605 2606exit_unlock: 2607 spin_unlock_irqrestore(&q->lock, flags); 2608} 2609EXPORT_SYMBOL(il_rx_queue_update_write_ptr); 2610 2611int 2612il_rx_queue_alloc(struct il_priv *il) 2613{ 2614 struct il_rx_queue *rxq = &il->rxq; 2615 struct device *dev = &il->pci_dev->dev; 2616 int i; 2617 2618 spin_lock_init(&rxq->lock); 2619 INIT_LIST_HEAD(&rxq->rx_free); 2620 INIT_LIST_HEAD(&rxq->rx_used); 2621 2622 /* 
	 * Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
				     GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;

err_rb:
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
err_bd:
	return -ENOMEM;
}
EXPORT_SYMBOL(il_rx_queue_alloc);

void
il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);

	if (!report->state) {
		D_11H("Spectrum Measure Notification: Start\n");
		return;
	}

	memcpy(&il->measure_report, report, sizeof(*report));
	il->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(il_hdl_spectrum_measurement);

/*
 * returns non-zero if packet should be dropped
 */
int
il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
		      u32 decrypt_res, struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	D_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode got a bad phase 1 key but still passes the
		 * packet up; decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			D_RX("Packet destroyed\n");
			return -1;
		}
		/* fall through */

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			D_RX("hw decrypt successful\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(il_set_decrypted_flag);

/**
 * il_txq_update_write_ptr - Send new write idx to hardware
 */
void
il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
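		 *
		 * The handshake, in short: if CSR_UCODE_DRV_GP1 reports
		 * MAC_SLEEP, set CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ and
		 * return; the wakeup interrupt brings us back here with
		 * the device awake and the write then goes through.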
*/ 2737 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2738 2739 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2740 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n", 2741 txq_id, reg); 2742 il_set_bit(il, CSR_GP_CNTRL, 2743 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2744 return; 2745 } 2746 2747 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2748 2749 /* 2750 * else not in power-save mode, 2751 * uCode will never sleep when we're 2752 * trying to tx (during RFKILL, we're not trying to tx). 2753 */ 2754 } else 2755 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2756 txq->need_update = 0; 2757} 2758EXPORT_SYMBOL(il_txq_update_write_ptr); 2759 2760/** 2761 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's 2762 */ 2763void 2764il_tx_queue_unmap(struct il_priv *il, int txq_id) 2765{ 2766 struct il_tx_queue *txq = &il->txq[txq_id]; 2767 struct il_queue *q = &txq->q; 2768 2769 if (q->n_bd == 0) 2770 return; 2771 2772 while (q->write_ptr != q->read_ptr) { 2773 il->ops->txq_free_tfd(il, txq); 2774 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2775 } 2776} 2777EXPORT_SYMBOL(il_tx_queue_unmap); 2778 2779/** 2780 * il_tx_queue_free - Deallocate DMA queue. 2781 * @txq: Transmit queue to deallocate. 2782 * 2783 * Empty queue by removing and destroying all BD's. 2784 * Free all buffers. 2785 * 0-fill, but do not free "txq" descriptor structure. 2786 */ 2787void 2788il_tx_queue_free(struct il_priv *il, int txq_id) 2789{ 2790 struct il_tx_queue *txq = &il->txq[txq_id]; 2791 struct device *dev = &il->pci_dev->dev; 2792 int i; 2793 2794 il_tx_queue_unmap(il, txq_id); 2795 2796 /* De-alloc array of command/tx buffers */ 2797 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 2798 kfree(txq->cmd[i]); 2799 2800 /* De-alloc circular buffer of TFDs */ 2801 if (txq->q.n_bd) 2802 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2803 txq->tfds, txq->q.dma_addr); 2804 2805 /* De-alloc array of per-TFD driver data */ 2806 kfree(txq->skbs); 2807 txq->skbs = NULL; 2808 2809 /* deallocate arrays */ 2810 kfree(txq->cmd); 2811 kfree(txq->meta); 2812 txq->cmd = NULL; 2813 txq->meta = NULL; 2814 2815 /* 0-fill queue descriptor structure */ 2816 memset(txq, 0, sizeof(*txq)); 2817} 2818EXPORT_SYMBOL(il_tx_queue_free); 2819 2820/** 2821 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue 2822 */ 2823void 2824il_cmd_queue_unmap(struct il_priv *il) 2825{ 2826 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2827 struct il_queue *q = &txq->q; 2828 int i; 2829 2830 if (q->n_bd == 0) 2831 return; 2832 2833 while (q->read_ptr != q->write_ptr) { 2834 i = il_get_cmd_idx(q, q->read_ptr, 0); 2835 2836 if (txq->meta[i].flags & CMD_MAPPED) { 2837 pci_unmap_single(il->pci_dev, 2838 dma_unmap_addr(&txq->meta[i], mapping), 2839 dma_unmap_len(&txq->meta[i], len), 2840 PCI_DMA_BIDIRECTIONAL); 2841 txq->meta[i].flags = 0; 2842 } 2843 2844 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2845 } 2846 2847 i = q->n_win; 2848 if (txq->meta[i].flags & CMD_MAPPED) { 2849 pci_unmap_single(il->pci_dev, 2850 dma_unmap_addr(&txq->meta[i], mapping), 2851 dma_unmap_len(&txq->meta[i], len), 2852 PCI_DMA_BIDIRECTIONAL); 2853 txq->meta[i].flags = 0; 2854 } 2855} 2856EXPORT_SYMBOL(il_cmd_queue_unmap); 2857 2858/** 2859 * il_cmd_queue_free - Deallocate DMA queue. 2860 * @txq: Transmit queue to deallocate. 2861 * 2862 * Empty queue by removing and destroying all BD's. 2863 * Free all buffers. 2864 * 0-fill, but do not free "txq" descriptor structure. 
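 *
 * The command queue owns TFD_CMD_SLOTS + 1 command buffers; the extra
 * slot is the oversized (IL_MAX_CMD_SIZE) buffer reserved for the scan
 * command, which is why the free loop below uses '<=' rather than '<'.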
2865 */ 2866void 2867il_cmd_queue_free(struct il_priv *il) 2868{ 2869 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2870 struct device *dev = &il->pci_dev->dev; 2871 int i; 2872 2873 il_cmd_queue_unmap(il); 2874 2875 /* De-alloc array of command/tx buffers */ 2876 for (i = 0; i <= TFD_CMD_SLOTS; i++) 2877 kfree(txq->cmd[i]); 2878 2879 /* De-alloc circular buffer of TFDs */ 2880 if (txq->q.n_bd) 2881 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2882 txq->tfds, txq->q.dma_addr); 2883 2884 /* deallocate arrays */ 2885 kfree(txq->cmd); 2886 kfree(txq->meta); 2887 txq->cmd = NULL; 2888 txq->meta = NULL; 2889 2890 /* 0-fill queue descriptor structure */ 2891 memset(txq, 0, sizeof(*txq)); 2892} 2893EXPORT_SYMBOL(il_cmd_queue_free); 2894 2895/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 2896 * DMA services 2897 * 2898 * Theory of operation 2899 * 2900 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 2901 * of buffer descriptors, each of which points to one or more data buffers for 2902 * the device to read from or fill. Driver and device exchange status of each 2903 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 2904 * entries in each circular buffer, to protect against confusing empty and full 2905 * queue states. 2906 * 2907 * The device reads or writes the data in the queues via the device's several 2908 * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 2909 * 2910 * For Tx queue, there are low mark and high mark limits. If, after queuing 2911 * the packet for Tx, free space become < low mark, Tx queue stopped. When 2912 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 2913 * Tx queue resumed. 2914 * 2915 * See more detailed info in 4965.h. 2916 ***************************************************/ 2917 2918int 2919il_queue_space(const struct il_queue *q) 2920{ 2921 int s = q->read_ptr - q->write_ptr; 2922 2923 if (q->read_ptr > q->write_ptr) 2924 s -= q->n_bd; 2925 2926 if (s <= 0) 2927 s += q->n_win; 2928 /* keep some reserve to not confuse empty and full situations */ 2929 s -= 2; 2930 if (s < 0) 2931 s = 0; 2932 return s; 2933} 2934EXPORT_SYMBOL(il_queue_space); 2935 2936 2937/** 2938 * il_queue_init - Initialize queue's high/low-water and read/write idxes 2939 */ 2940static int 2941il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id) 2942{ 2943 /* 2944 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 2945 * il_queue_inc_wrap and il_queue_dec_wrap are broken. 2946 */ 2947 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 2948 /* FIXME: remove q->n_bd */ 2949 q->n_bd = TFD_QUEUE_SIZE_MAX; 2950 2951 q->n_win = slots; 2952 q->id = id; 2953 2954 /* slots_must be power-of-two size, otherwise 2955 * il_get_cmd_idx is broken. */ 2956 BUG_ON(!is_power_of_2(slots)); 2957 2958 q->low_mark = q->n_win / 4; 2959 if (q->low_mark < 4) 2960 q->low_mark = 4; 2961 2962 q->high_mark = q->n_win / 8; 2963 if (q->high_mark < 2) 2964 q->high_mark = 2; 2965 2966 q->write_ptr = q->read_ptr = 0; 2967 2968 return 0; 2969} 2970 2971/** 2972 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue 2973 */ 2974static int 2975il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) 2976{ 2977 struct device *dev = &il->pci_dev->dev; 2978 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 2979 2980 /* Driver ilate data, only for Tx (not command) queues, 2981 * not shared with device. 
*/ 2982 if (id != il->cmd_queue) { 2983 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, 2984 sizeof(struct sk_buff *), 2985 GFP_KERNEL); 2986 if (!txq->skbs) { 2987 IL_ERR("Fail to alloc skbs\n"); 2988 goto error; 2989 } 2990 } else 2991 txq->skbs = NULL; 2992 2993 /* Circular buffer of transmit frame descriptors (TFDs), 2994 * shared with device */ 2995 txq->tfds = 2996 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); 2997 if (!txq->tfds) 2998 goto error; 2999 3000 txq->q.id = id; 3001 3002 return 0; 3003 3004error: 3005 kfree(txq->skbs); 3006 txq->skbs = NULL; 3007 3008 return -ENOMEM; 3009} 3010 3011/** 3012 * il_tx_queue_init - Allocate and initialize one tx/cmd queue 3013 */ 3014int 3015il_tx_queue_init(struct il_priv *il, u32 txq_id) 3016{ 3017 int i, len, ret; 3018 int slots, actual_slots; 3019 struct il_tx_queue *txq = &il->txq[txq_id]; 3020 3021 /* 3022 * Alloc buffer array for commands (Tx or other types of commands). 3023 * For the command queue (#4/#9), allocate command space + one big 3024 * command for scan, since scan command is very huge; the system will 3025 * not have two scans at the same time, so only one is needed. 3026 * For normal Tx queues (all other queues), no super-size command 3027 * space is needed. 3028 */ 3029 if (txq_id == il->cmd_queue) { 3030 slots = TFD_CMD_SLOTS; 3031 actual_slots = slots + 1; 3032 } else { 3033 slots = TFD_TX_CMD_SLOTS; 3034 actual_slots = slots; 3035 } 3036 3037 txq->meta = 3038 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL); 3039 txq->cmd = 3040 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL); 3041 3042 if (!txq->meta || !txq->cmd) 3043 goto out_free_arrays; 3044 3045 len = sizeof(struct il_device_cmd); 3046 for (i = 0; i < actual_slots; i++) { 3047 /* only happens for cmd queue */ 3048 if (i == slots) 3049 len = IL_MAX_CMD_SIZE; 3050 3051 txq->cmd[i] = kmalloc(len, GFP_KERNEL); 3052 if (!txq->cmd[i]) 3053 goto err; 3054 } 3055 3056 /* Alloc driver data array and TFD circular buffer */ 3057 ret = il_tx_queue_alloc(il, txq, txq_id); 3058 if (ret) 3059 goto err; 3060 3061 txq->need_update = 0; 3062 3063 /* 3064 * For the default queues 0-3, set up the swq_id 3065 * already -- all others need to get one later 3066 * (if they need one at all). 
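	 * (il_set_swq_id() below is called with ac == hwq == txq_id;
	 * queues 0-3 are taken here to back the four mac80211 AC queues.)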
3067 */ 3068 if (txq_id < 4) 3069 il_set_swq_id(txq, txq_id, txq_id); 3070 3071 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3072 il_queue_init(il, &txq->q, slots, txq_id); 3073 3074 /* Tell device where to find queue */ 3075 il->ops->txq_init(il, txq); 3076 3077 return 0; 3078err: 3079 for (i = 0; i < actual_slots; i++) 3080 kfree(txq->cmd[i]); 3081out_free_arrays: 3082 kfree(txq->meta); 3083 kfree(txq->cmd); 3084 3085 return -ENOMEM; 3086} 3087EXPORT_SYMBOL(il_tx_queue_init); 3088 3089void 3090il_tx_queue_reset(struct il_priv *il, u32 txq_id) 3091{ 3092 int slots, actual_slots; 3093 struct il_tx_queue *txq = &il->txq[txq_id]; 3094 3095 if (txq_id == il->cmd_queue) { 3096 slots = TFD_CMD_SLOTS; 3097 actual_slots = TFD_CMD_SLOTS + 1; 3098 } else { 3099 slots = TFD_TX_CMD_SLOTS; 3100 actual_slots = TFD_TX_CMD_SLOTS; 3101 } 3102 3103 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); 3104 txq->need_update = 0; 3105 3106 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3107 il_queue_init(il, &txq->q, slots, txq_id); 3108 3109 /* Tell device where to find queue */ 3110 il->ops->txq_init(il, txq); 3111} 3112EXPORT_SYMBOL(il_tx_queue_reset); 3113 3114/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 3115 3116/** 3117 * il_enqueue_hcmd - enqueue a uCode command 3118 * @il: device ilate data point 3119 * @cmd: a point to the ucode command structure 3120 * 3121 * The function returns < 0 values to indicate the operation is 3122 * failed. On success, it turns the idx (> 0) of command in the 3123 * command queue. 3124 */ 3125int 3126il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) 3127{ 3128 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3129 struct il_queue *q = &txq->q; 3130 struct il_device_cmd *out_cmd; 3131 struct il_cmd_meta *out_meta; 3132 dma_addr_t phys_addr; 3133 unsigned long flags; 3134 int len; 3135 u32 idx; 3136 u16 fix_size; 3137 3138 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len); 3139 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); 3140 3141 /* If any of the command structures end up being larger than 3142 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then 3143 * we will need to increase the size of the TFD entries 3144 * Also, check to see if command buffer should not exceed the size 3145 * of device_cmd and max_cmd_size. */ 3146 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 3147 !(cmd->flags & CMD_SIZE_HUGE)); 3148 BUG_ON(fix_size > IL_MAX_CMD_SIZE); 3149 3150 if (il_is_rfkill(il) || il_is_ctkill(il)) { 3151 IL_WARN("Not sending command - %s KILL\n", 3152 il_is_rfkill(il) ? "RF" : "CT"); 3153 return -EIO; 3154 } 3155 3156 spin_lock_irqsave(&il->hcmd_lock, flags); 3157 3158 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 
		spin_unlock_irqrestore(&il->hcmd_lock, flags);

		IL_ERR("Restarting adapter due to command queue full\n");
		queue_work(il->workqueue, &il->restart);
		return -ENOSPC;
	}

	idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&il->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to 0 */
	out_meta->flags = cmd->flags | CMD_MAPPED;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
	    cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct il_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLEGACY_DEBUG
	switch (out_cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
			  "%d bytes at %d[%d]:%d\n",
			  il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
			  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			  q->write_ptr, idx, il->cmd_queue);
		break;
	default:
		D_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
		     idx, il->cmd_queue);
	}
#endif

	phys_addr =
	    pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
			   PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	txq->need_update = 1;

	if (il->ops->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		il->ops->txq_update_byte_cnt_tbl(il, txq, 0);

	il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
				       U32_PAD(cmd->len));

	/* Increment and update queue's write idx */
	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
	il_txq_update_write_ptr(il, txq);

out:
	spin_unlock_irqrestore(&il->hcmd_lock, flags);
	return idx;
}

/**
 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' idx, all entries between old and new 'R' idx
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
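 *
 * E.g. with read_ptr == 5 and a response completing the command at idx 7,
 * the loop below walks slots 5, 6 and 7; host commands complete one at a
 * time, so every freed entry after the first raises the "HCMD skipped"
 * error and schedules an adapter restart.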
3250 */ 3251static void 3252il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx) 3253{ 3254 struct il_tx_queue *txq = &il->txq[txq_id]; 3255 struct il_queue *q = &txq->q; 3256 int nfreed = 0; 3257 3258 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { 3259 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " 3260 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, 3261 q->write_ptr, q->read_ptr); 3262 return; 3263 } 3264 3265 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 3266 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { 3267 3268 if (nfreed++ > 0) { 3269 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx, 3270 q->write_ptr, q->read_ptr); 3271 queue_work(il->workqueue, &il->restart); 3272 } 3273 3274 } 3275} 3276 3277/** 3278 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them 3279 * @rxb: Rx buffer to reclaim 3280 * 3281 * If an Rx buffer has an async callback associated with it the callback 3282 * will be executed. The attached skb (if present) will only be freed 3283 * if the callback returns 1 3284 */ 3285void 3286il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) 3287{ 3288 struct il_rx_pkt *pkt = rxb_addr(rxb); 3289 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 3290 int txq_id = SEQ_TO_QUEUE(sequence); 3291 int idx = SEQ_TO_IDX(sequence); 3292 int cmd_idx; 3293 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 3294 struct il_device_cmd *cmd; 3295 struct il_cmd_meta *meta; 3296 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3297 unsigned long flags; 3298 3299 /* If a Tx command is being handled and it isn't in the actual 3300 * command queue then there a command routing bug has been introduced 3301 * in the queue management code. */ 3302 if (WARN 3303 (txq_id != il->cmd_queue, 3304 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 3305 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, 3306 il->txq[il->cmd_queue].q.write_ptr)) { 3307 il_print_hex_error(il, pkt, 32); 3308 return; 3309 } 3310 3311 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge); 3312 cmd = txq->cmd[cmd_idx]; 3313 meta = &txq->meta[cmd_idx]; 3314 3315 txq->time_stamp = jiffies; 3316 3317 pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), 3318 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); 3319 3320 /* Input error checking is done when commands are added to queue. */ 3321 if (meta->flags & CMD_WANT_SKB) { 3322 meta->source->reply_page = (unsigned long)rxb_addr(rxb); 3323 rxb->page = NULL; 3324 } else if (meta->callback) 3325 meta->callback(il, cmd, pkt); 3326 3327 spin_lock_irqsave(&il->hcmd_lock, flags); 3328 3329 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx); 3330 3331 if (!(meta->flags & CMD_ASYNC)) { 3332 clear_bit(S_HCMD_ACTIVE, &il->status); 3333 D_INFO("Clearing HCMD_ACTIVE for command %s\n", 3334 il_get_cmd_string(cmd->hdr.cmd)); 3335 wake_up(&il->wait_command_queue); 3336 } 3337 3338 /* Mark as unmapped */ 3339 meta->flags = 0; 3340 3341 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3342} 3343EXPORT_SYMBOL(il_tx_cmd_complete); 3344 3345MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); 3346MODULE_VERSION(IWLWIFI_VERSION); 3347MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 3348MODULE_LICENSE("GPL"); 3349 3350/* 3351 * set bt_coex_active to true, uCode will do kill/defer 3352 * every time the priority line is asserted (BT is sending signals on the 3353 * priority line in the PCIx). 
3354 * set bt_coex_active to false, uCode will ignore the BT activity and 3355 * perform the normal operation 3356 * 3357 * User might experience transmit issue on some platform due to WiFi/BT 3358 * co-exist problem. The possible behaviors are: 3359 * Able to scan and finding all the available AP 3360 * Not able to associate with any AP 3361 * On those platforms, WiFi communication can be restored by set 3362 * "bt_coex_active" module parameter to "false" 3363 * 3364 * default: bt_coex_active = true (BT_COEX_ENABLE) 3365 */ 3366static bool bt_coex_active = true; 3367module_param(bt_coex_active, bool, S_IRUGO); 3368MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 3369 3370u32 il_debug_level; 3371EXPORT_SYMBOL(il_debug_level); 3372 3373const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3374EXPORT_SYMBOL(il_bcast_addr); 3375 3376#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 3377#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 3378static void 3379il_init_ht_hw_capab(const struct il_priv *il, 3380 struct ieee80211_sta_ht_cap *ht_info, 3381 enum ieee80211_band band) 3382{ 3383 u16 max_bit_rate = 0; 3384 u8 rx_chains_num = il->hw_params.rx_chains_num; 3385 u8 tx_chains_num = il->hw_params.tx_chains_num; 3386 3387 ht_info->cap = 0; 3388 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 3389 3390 ht_info->ht_supported = true; 3391 3392 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 3393 max_bit_rate = MAX_BIT_RATE_20_MHZ; 3394 if (il->hw_params.ht40_channel & BIT(band)) { 3395 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 3396 ht_info->cap |= IEEE80211_HT_CAP_SGI_40; 3397 ht_info->mcs.rx_mask[4] = 0x01; 3398 max_bit_rate = MAX_BIT_RATE_40_MHZ; 3399 } 3400 3401 if (il->cfg->mod_params->amsdu_size_8K) 3402 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 3403 3404 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 3405 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 3406 3407 ht_info->mcs.rx_mask[0] = 0xFF; 3408 if (rx_chains_num >= 2) 3409 ht_info->mcs.rx_mask[1] = 0xFF; 3410 if (rx_chains_num >= 3) 3411 ht_info->mcs.rx_mask[2] = 0xFF; 3412 3413 /* Highest supported Rx data rate */ 3414 max_bit_rate *= rx_chains_num; 3415 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); 3416 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); 3417 3418 /* Tx MCS capabilities */ 3419 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 3420 if (tx_chains_num != rx_chains_num) { 3421 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 3422 ht_info->mcs.tx_params |= 3423 ((tx_chains_num - 3424 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 3425 } 3426} 3427 3428/** 3429 * il_init_geos - Initialize mac80211's geo/channel info based from eeprom 3430 */ 3431int 3432il_init_geos(struct il_priv *il) 3433{ 3434 struct il_channel_info *ch; 3435 struct ieee80211_supported_band *sband; 3436 struct ieee80211_channel *channels; 3437 struct ieee80211_channel *geo_ch; 3438 struct ieee80211_rate *rates; 3439 int i = 0; 3440 s8 max_tx_power = 0; 3441 3442 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates || 3443 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) { 3444 D_INFO("Geography modes already initialized.\n"); 3445 set_bit(S_GEO_CONFIGURED, &il->status); 3446 return 0; 3447 } 3448 3449 channels = 3450 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count, 3451 GFP_KERNEL); 3452 if (!channels) 3453 return -ENOMEM; 3454 3455 rates = 3456 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY), 3457 GFP_KERNEL); 3458 if (!rates) { 3459 kfree(channels); 3460 return -ENOMEM; 3461 } 3462 
	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &il->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);

	sband = &il->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = RATE_COUNT_LEGACY;

	if (il->cfg->sku & IL_SKU_N)
		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);

	il->ieee_channels = channels;
	il->ieee_rates = rates;

	for (i = 0; i < il->channel_count; i++) {
		ch = &il->channel_info[i];

		if (!il_is_channel_valid(ch))
			continue;

		sband = &il->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
		    ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (il_is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IR;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_NO_IR;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
		       geo_ch->center_freq,
		       il_is_channel_a_band(ch) ? "5.2" : "2.4",
		       geo_ch->
		       flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
		       geo_ch->flags);
	}

	il->tx_power_device_lmt = max_tx_power;
	il->tx_power_user_lmt = max_tx_power;
	il->tx_power_next = max_tx_power;

	if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
	    (il->cfg->sku & IL_SKU_A)) {
		IL_INFO("Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			il->pci_dev->device, il->pci_dev->subsystem_device);
		il->cfg->sku &= ~IL_SKU_A;
	}

	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		il->bands[IEEE80211_BAND_2GHZ].n_channels,
		il->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(S_GEO_CONFIGURED, &il->status);

	return 0;
}
EXPORT_SYMBOL(il_init_geos);

/*
 * il_free_geos - undo allocations in il_init_geos
 */
void
il_free_geos(struct il_priv *il)
{
	kfree(il->ieee_channels);
	kfree(il->ieee_rates);
	clear_bit(S_GEO_CONFIGURED, &il->status);
}
EXPORT_SYMBOL(il_free_geos);

static bool
il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
			u16 channel, u8 extension_chan_offset)
{
	const struct il_channel_info *ch_info;

	ch_info = il_get_channel_info(il, band, channel);
	if (!il_is_channel_valid(ch_info))
		return false;

	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
		return !(ch_info->
			 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
		return !(ch_info->
			 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);

	return false;
}

bool
il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!il->ht.enabled || !il->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
	 * that bit will not be set in the pure 40 MHz case.
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLEGACY_DEBUGFS
	if (il->disable_ht40)
		return false;
#endif

	return il_is_channel_extension(il, il->band,
				       le16_to_cpu(il->staging.channel),
				       il->ht.extension_chan_offset);
}
EXPORT_SYMBOL(il_is_ht40_tx_allowed);

static u16
il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val;
	u16 beacon_factor;

	/*
	 * If mac80211 hasn't given us a beacon interval, program
	 * the default into the device.
	 */
	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/*
	 * If the beacon interval we obtained from the peer
	 * is too large, we'll have to wake up more often
	 * (and in the IBSS case, we'll beacon too much).
	 *
	 * For example, if max_beacon_val is 4096, and the
	 * requested beacon interval is 7000, we'll have to
	 * use 3500 to be able to wake up on the beacons.
	 *
	 * This could badly influence beacon detection stats.
	 */

	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

int
il_send_rxon_timing(struct il_priv *il)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = il->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));

	il->timing.timestamp = cpu_to_le64(il->timestamp);
	il->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
	/*
	 * TODO: For IBSS we need to get atim_win from mac80211,
	 * for now just always use 0
	 */
	il->timing.atim_win = 0;

	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	il->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(il->timing.beacon_interval),
		le32_to_cpu(il->timing.beacon_init_val),
		le16_to_cpu(il->timing.atim_win));

	return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
			       &il->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);

void
il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
{
	struct il_rxon_cmd *rxon = &il->staging;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;

}
EXPORT_SYMBOL(il_set_rxon_hwcrypto);

/* validate that the RXON structure is valid */
int
il_check_rxon_cmd(struct il_priv *il)
{
	struct il_rxon_cmd *rxon = &il->staging;
	bool error = false;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IL_WARN("check 2.4G: wrong narrow\n");
			error = true;
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IL_WARN("check 2.4G: wrong radar\n");
			error = true;
		}
	} else {
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IL_WARN("check 5.2G: not short slot!\n");
			error = true;
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IL_WARN("check 5.2G: CCK!\n");
			error = true;
		}
	}
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IL_WARN("mac/bssid mcast!\n");
		error = true;
	}

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
		IL_WARN("neither 1 nor 6 are basic\n");
		error = true;
	}

	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IL_WARN("aid > 2007\n");
		error = true;
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
	    (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IL_WARN("CCK and short slot\n");
		error = true;
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
	    (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IL_WARN("CCK and auto detect\n");
		error = true;
	}

	if ((rxon->
	     flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
	    RXON_FLG_TGG_PROTECT_MSK) {
		IL_WARN("TGg but no auto-detect\n");
		error = true;
	}

	if (error)
		IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));

	if (error) {
		IL_ERR("Invalid RXON\n");
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(il_check_rxon_cmd);

/**
 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @il: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a
new tune, 3779 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 3780 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 3781 */ 3782int 3783il_full_rxon_required(struct il_priv *il) 3784{ 3785 const struct il_rxon_cmd *staging = &il->staging; 3786 const struct il_rxon_cmd *active = &il->active; 3787 3788#define CHK(cond) \ 3789 if ((cond)) { \ 3790 D_INFO("need full RXON - " #cond "\n"); \ 3791 return 1; \ 3792 } 3793 3794#define CHK_NEQ(c1, c2) \ 3795 if ((c1) != (c2)) { \ 3796 D_INFO("need full RXON - " \ 3797 #c1 " != " #c2 " - %d != %d\n", \ 3798 (c1), (c2)); \ 3799 return 1; \ 3800 } 3801 3802 /* These items are only settable from the full RXON command */ 3803 CHK(!il_is_associated(il)); 3804 CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr)); 3805 CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr)); 3806 CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr, 3807 active->wlap_bssid_addr)); 3808 CHK_NEQ(staging->dev_type, active->dev_type); 3809 CHK_NEQ(staging->channel, active->channel); 3810 CHK_NEQ(staging->air_propagation, active->air_propagation); 3811 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, 3812 active->ofdm_ht_single_stream_basic_rates); 3813 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, 3814 active->ofdm_ht_dual_stream_basic_rates); 3815 CHK_NEQ(staging->assoc_id, active->assoc_id); 3816 3817 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can 3818 * be updated with the RXON_ASSOC command -- however only some 3819 * flag transitions are allowed using RXON_ASSOC */ 3820 3821 /* Check if we are not switching bands */ 3822 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, 3823 active->flags & RXON_FLG_BAND_24G_MSK); 3824 3825 /* Check if we are switching association toggle */ 3826 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, 3827 active->filter_flags & RXON_FILTER_ASSOC_MSK); 3828 3829#undef CHK 3830#undef CHK_NEQ 3831 3832 return 0; 3833} 3834EXPORT_SYMBOL(il_full_rxon_required); 3835 3836u8 3837il_get_lowest_plcp(struct il_priv *il) 3838{ 3839 /* 3840 * Assign the lowest rate -- should really get this from 3841 * the beacon skb from mac80211. 
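	 * For now that means RATE_1M_PLCP (1 Mbps CCK) when the 2.4 GHz
	 * band flag is set, and RATE_6M_PLCP (6 Mbps OFDM) otherwise.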
3842 */ 3843 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) 3844 return RATE_1M_PLCP; 3845 else 3846 return RATE_6M_PLCP; 3847} 3848EXPORT_SYMBOL(il_get_lowest_plcp); 3849 3850static void 3851_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3852{ 3853 struct il_rxon_cmd *rxon = &il->staging; 3854 3855 if (!il->ht.enabled) { 3856 rxon->flags &= 3857 ~(RXON_FLG_CHANNEL_MODE_MSK | 3858 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK 3859 | RXON_FLG_HT_PROT_MSK); 3860 return; 3861 } 3862 3863 rxon->flags |= 3864 cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); 3865 3866 /* Set up channel bandwidth: 3867 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ 3868 /* clear the HT channel mode before set the mode */ 3869 rxon->flags &= 3870 ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3871 if (il_is_ht40_tx_allowed(il, NULL)) { 3872 /* pure ht40 */ 3873 if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { 3874 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; 3875 /* Note: control channel is opposite of extension channel */ 3876 switch (il->ht.extension_chan_offset) { 3877 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3878 rxon->flags &= 3879 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3880 break; 3881 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3882 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3883 break; 3884 } 3885 } else { 3886 /* Note: control channel is opposite of extension channel */ 3887 switch (il->ht.extension_chan_offset) { 3888 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3889 rxon->flags &= 3890 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3891 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3892 break; 3893 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3894 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3895 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3896 break; 3897 case IEEE80211_HT_PARAM_CHA_SEC_NONE: 3898 default: 3899 /* channel location only valid if in Mixed mode */ 3900 IL_ERR("invalid extension channel offset\n"); 3901 break; 3902 } 3903 } 3904 } else { 3905 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; 3906 } 3907 3908 if (il->ops->set_rxon_chain) 3909 il->ops->set_rxon_chain(il); 3910 3911 D_ASSOC("rxon flags 0x%X operation mode :0x%X " 3912 "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), 3913 il->ht.protection, il->ht.extension_chan_offset); 3914} 3915 3916void 3917il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3918{ 3919 _il_set_rxon_ht(il, ht_conf); 3920} 3921EXPORT_SYMBOL(il_set_rxon_ht); 3922 3923/* Return valid, unused, channel for a passive scan to reset the RF */ 3924u8 3925il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band) 3926{ 3927 const struct il_channel_info *ch_info; 3928 int i; 3929 u8 channel = 0; 3930 u8 min, max; 3931 3932 if (band == IEEE80211_BAND_5GHZ) { 3933 min = 14; 3934 max = il->channel_count; 3935 } else { 3936 min = 0; 3937 max = 14; 3938 } 3939 3940 for (i = min; i < max; i++) { 3941 channel = il->channel_info[i].channel; 3942 if (channel == le16_to_cpu(il->staging.channel)) 3943 continue; 3944 3945 ch_info = il_get_channel_info(il, band, channel); 3946 if (il_is_channel_valid(ch_info)) 3947 break; 3948 } 3949 3950 return channel; 3951} 3952EXPORT_SYMBOL(il_get_single_channel_number); 3953 3954/** 3955 * il_set_rxon_channel - Set the band and channel values in staging RXON 3956 * @ch: requested channel as a pointer to struct ieee80211_channel 3957 3958 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 3959 * in the staging RXON flag 
/**
 * il_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
int
il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ch->hw_value;

	if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
		return 0;

	il->staging.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		il->staging.flags |= RXON_FLG_BAND_24G_MSK;

	il->band = band;

	D_INFO("Staging channel set to %d [%d]\n", channel, band);

	return 0;
}
EXPORT_SYMBOL(il_set_rxon_channel);

void
il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
		      struct ieee80211_vif *vif)
{
	if (band == IEEE80211_BAND_5GHZ) {
		il->staging.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
		      RXON_FLG_CCK_MSK);
		il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		/* Copied from il_post_associate() */
		if (vif && vif->bss_conf.use_short_slot)
			il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		il->staging.flags |= RXON_FLG_BAND_24G_MSK;
		il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
		il->staging.flags &= ~RXON_FLG_CCK_MSK;
	}
}
EXPORT_SYMBOL(il_set_flags_for_band);

/*
 * initialize rxon structure with default values from eeprom
 */
void
il_connection_init_rx_config(struct il_priv *il)
{
	const struct il_channel_info *ch_info;

	memset(&il->staging, 0, sizeof(il->staging));

	switch (il->iw_mode) {
	case NL80211_IFTYPE_UNSPECIFIED:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		break;
	case NL80211_IFTYPE_STATION:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	case NL80211_IFTYPE_ADHOC:
		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		il->staging.filter_flags =
		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	default:
		IL_ERR("Unsupported interface type %d\n", il->vif->type);
		return;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(il->hw)->short_preamble)
		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info =
	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));

	if (!ch_info)
		ch_info = &il->channel_info[0];

	il->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, il->band, il->vif);

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	il->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	il->staging.flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
	if (il->vif)
		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);

	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
void
il_set_rate(struct il_priv *il)
{
	const struct ieee80211_supported_band *hw = NULL;
	struct ieee80211_rate *rate;
	int i;

	hw = il_get_hw_mode(il, il->band);
	if (!hw) {
		IL_ERR("Failed to set rate: unable to get hw mode\n");
		return;
	}

	il->active_rate = 0;

	for (i = 0; i < hw->n_bitrates; i++) {
		rate = &(hw->bitrates[i]);
		if (rate->hw_value < RATE_COUNT_LEGACY)
			il->active_rate |= (1 << rate->hw_value);
	}

	D_RATE("Set active_rate = %0x\n", il->active_rate);

	il->staging.cck_basic_rates =
	    (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
}
EXPORT_SYMBOL(il_set_rate);

void
il_chswitch_done(struct il_priv *il, bool is_success)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		ieee80211_chswitch_done(il->vif, is_success);
}
EXPORT_SYMBOL(il_chswitch_done);

void
il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_csa_notification *csa = &(pkt->u.csa_notif);
	struct il_rxon_cmd *rxon = (void *)&il->active;

	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		return;

	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
		rxon->channel = csa->channel;
		il->staging.channel = csa->channel;
		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
		il_chswitch_done(il, true);
	} else {
		IL_ERR("CSA notif (fail) : channel %d\n",
		       le16_to_cpu(csa->channel));
		il_chswitch_done(il, false);
	}
}
EXPORT_SYMBOL(il_hdl_csa);

#ifdef CONFIG_IWLEGACY_DEBUG
void
il_print_rx_config_cmd(struct il_priv *il)
{
	struct il_rxon_cmd *rxon = &il->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
#endif
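
/*
 * Example (not driver code): how il_set_rate() above builds the
 * active_rate bitmap -- each supported legacy rate sets bit
 * (1 << hw_value). RATE_COUNT_LEGACY and the hw_value list below are
 * illustrative, not the driver's real rate tables.
 */
#if 0
#include <stdio.h>

#define RATE_COUNT_LEGACY 13

int main(void)
{
	/* pretend mac80211 reported these legacy rate indexes */
	const int hw_values[] = { 0, 1, 2, 3, 4, 7, 11 };
	unsigned active_rate = 0;
	unsigned i;

	for (i = 0; i < sizeof(hw_values) / sizeof(hw_values[0]); i++)
		if (hw_values[i] < RATE_COUNT_LEGACY)
			active_rate |= 1u << hw_values[i];

	printf("active_rate = 0x%x\n", active_rate);	/* 0x89f here */
	return 0;
}
#endif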
/**
 * il_irq_handle_error - called for HW or SW error interrupt from card
 */
void
il_irq_handle_error(struct il_priv *il)
{
	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);

	/* Cancel currently queued command. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);

	il->ops->dump_nic_error_log(il);
	if (il->ops->dump_fh)
		il->ops->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il);
#endif

	wake_up(&il->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);

static int
_il_apm_stop_master(struct il_priv *il)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret =
	    _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IL_WARN("Master Disable Timed Out, 100 usec\n");

	D_INFO("stop master\n");

	return ret;
}

void
_il_apm_stop(struct il_priv *il)
{
	lockdep_assert_held(&il->reg_lock);

	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	_il_apm_stop_master(il);

	/* Reset the entire device */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(_il_apm_stop);

void
il_apm_stop(struct il_priv *il)
{
	unsigned long flags;

	spin_lock_irqsave(&il->reg_lock, flags);
	_il_apm_stop(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
EXPORT_SYMBOL(il_apm_stop);
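
/*
 * Example (not driver code): the poll-with-timeout pattern used by
 * _il_poll_bit() and _il_apm_stop_master() above -- poll a status word
 * every 10 us until (reg & mask) == (bits & mask) or the timeout
 * expires. The read_reg() stand-in and its behavior are illustrative.
 */
#if 0
#include <stdio.h>
#include <errno.h>

static unsigned fake_reg;
static unsigned read_reg(void)
{
	fake_reg |= 0x100;	/* pretend the hardware sets the bit */
	return fake_reg;
}

static int poll_bit(unsigned bits, unsigned mask, int timeout_us)
{
	const int interval = 10;	/* microseconds per poll */
	int t = 0;

	do {
		if ((read_reg() & mask) == (bits & mask))
			return t;	/* elapsed time on success */
		/* udelay(interval) would go here in the driver */
		t += interval;
	} while (t < timeout_us);

	return -ETIMEDOUT;
}

int main(void)
{
	printf("poll result: %d\n", poll_bit(0x100, 0x100, 100));
	return 0;
}
#endif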
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via il_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
int
il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is no-op for 3945 (non-existent bit)
	 */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (il->cfg->set_l0s) {
		pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
		if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
			/* L1-ASPM enabled; disable(!) L0S */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (il->cfg->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG, il->cfg->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. il_wr_prph()
	 * and accesses to uCode SRAM.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (il->cfg->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);

int
il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;

	lockdep_assert_held(&il->mutex);

	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->ops->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm means 1 milliwatt */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete and commit_rxon use the tx_power_next value,
	 * so it always needs to be updated for the newest request */
	il->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(S_SCANNING, &il->status) ||
	    memcmp(&il->active, &il->staging, sizeof(il->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->ops->send_tx_power(il);

	/* if we fail to set tx_power, restore the original tx power */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);

void
il_send_bt_config(struct il_priv *il)
{
	struct il_bt_cmd bt_cmd = {
		.lead_time = BT_LEAD_TIME_DEF,
		.max_kill = BT_MAX_KILL_DEF,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	if (!bt_coex_active)
		bt_cmd.flags = BT_COEX_DISABLE;
	else
		bt_cmd.flags = BT_COEX_ENABLE;

	D_INFO("BT coex %s\n",
	       (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");

	if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
		IL_ERR("failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(il_send_bt_config);
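
/*
 * Example (not driver code): the ASPM decision in il_apm_init() above --
 * if the PCIe link control register reports L1 enabled, L0S is disabled,
 * and vice versa. PCI_EXP_LNKCTL_ASPM_L1 is the real PCI define; the
 * rest of the sketch is a stand-in.
 */
#if 0
#include <stdio.h>

#define PCI_EXP_LNKCTL_ASPM_L1 0x0002

static const char *aspm_choice(unsigned short lctl)
{
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		return "L1 enabled; disabling L0S";
	return "L1 disabled; enabling L0S";
}

int main(void)
{
	printf("%s\n", aspm_choice(0x0002));
	printf("%s\n", aspm_choice(0x0000));
	return 0;
}
#endif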
int
il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
{
	struct il_stats_cmd stats_cmd = {
		.configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
	};

	if (flags & CMD_ASYNC)
		return il_send_cmd_pdu_async(il, C_STATS,
					     sizeof(struct il_stats_cmd),
					     &stats_cmd, NULL);
	else
		return il_send_cmd_pdu(il, C_STATS,
				       sizeof(struct il_stats_cmd),
				       &stats_cmd);
}
EXPORT_SYMBOL(il_send_stats_request);

void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);

void
il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
	D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
		il_get_cmd_string(pkt->hdr.cmd));
	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(il_hdl_pm_debug_stats);

void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);

void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}

int
il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	int q;

	D_MAC80211("enter\n");

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&il->lock, flags);

	il->qos_data.def_qos_parm.ac[q].cw_min =
	    cpu_to_le16(params->cw_min);
	il->qos_data.def_qos_parm.ac[q].cw_max =
	    cpu_to_le16(params->cw_max);
	il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	il->qos_data.def_qos_parm.ac[q].edca_txop =
	    cpu_to_le16((params->txop * 32));

	il->qos_data.def_qos_parm.ac[q].reserved1 = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	D_MAC80211("leave\n");
	return 0;
}
EXPORT_SYMBOL(il_mac_conf_tx);

int
il_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	ret = (il->ibss_manager == IL_IBSS_MANAGER);

	D_MAC80211("leave ret %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);

static int
il_set_mode(struct il_priv *il)
{
	il_connection_init_rx_config(il);

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	return il_commit_rxon(il);
}
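
/*
 * Example (not driver code): the EDCA parameter mapping in
 * il_mac_conf_tx() above -- the mac80211 queue index is inverted into
 * the firmware AC slot (q = AC_NUM - 1 - queue), and txop is converted
 * from 32-usec units to microseconds by multiplying by 32. AC_NUM
 * matches the driver's value of 4; the txop value is illustrative.
 */
#if 0
#include <stdio.h>

#define AC_NUM 4

int main(void)
{
	unsigned queue, txop = 94;	/* txop in units of 32 usec */

	for (queue = 0; queue < AC_NUM; queue++)
		printf("mac80211 queue %u -> fw ac[%u]\n",
		       queue, AC_NUM - 1 - queue);

	printf("edca_txop = %u usec\n", txop * 32);	/* 3008 usec */
	return 0;
}
#endif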
int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	int err;
	bool reset;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * We do not support multiple virtual interfaces, but on hardware reset
	 * we have to add the same interface again.
	 */
	reset = (il->vif == vif);
	if (il->vif && !reset) {
		err = -EOPNOTSUPP;
		goto out;
	}

	il->vif = vif;
	il->iw_mode = vif->type;

	err = il_set_mode(il);
	if (err) {
		IL_WARN("Fail to set mode %d\n", vif->type);
		if (!reset) {
			il->vif = NULL;
			il->iw_mode = NL80211_IFTYPE_STATION;
		}
	}

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);

static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
	lockdep_assert_held(&il->mutex);

	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	il_set_mode(il);
}

void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	WARN_ON(il->vif != vif);
	il->vif = NULL;
	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
	il_teardown_interface(il, vif);
	eth_zero_addr(il->bssid);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_remove_interface);

int
il_alloc_txq_mem(struct il_priv *il)
{
	if (!il->txq)
		il->txq =
		    kzalloc(sizeof(struct il_tx_queue) *
			    il->cfg->num_of_queues, GFP_KERNEL);
	if (!il->txq) {
		IL_ERR("Not enough memory for txq\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(il_alloc_txq_mem);

void
il_free_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_free_txq_mem);
int
il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * If the request is external (e.g. from debugfs), always perform
	 * the reset regardless of the module parameter setting.
	 * If the request is internal (uCode error or driver-detected
	 * failure), the fw_restart module parameter needs to be checked
	 * before performing the firmware reload.
	 */

	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit
	 */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
EXPORT_SYMBOL(il_force_reset);

int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	int err;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
		   vif->type, vif->addr, newtype, newp2p);

	if (newp2p) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!il->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* success */
	vif->type = newtype;
	vif->p2p = false;
	il->iw_mode = newtype;
	il_teardown_interface(il, vif);
	err = 0;

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);

void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  u32 queues, bool drop)
{
	struct il_priv *il = hw->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	int i;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (il->txq == NULL)
		goto out;

	for (i = 0; i < il->hw_params.max_txq_num; i++) {
		struct il_queue *q;

		if (i == il->cmd_queue)
			continue;

		q = &il->txq[i].q;
		if (q->read_ptr == q->write_ptr)
			continue;

		if (time_after(jiffies, timeout)) {
			IL_ERR("Failed to flush queue %d\n", q->id);
			break;
		}

		msleep(20);
	}
out:
	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_flush);

/*
 * On every watchdog tick we check the (latest) time stamp. If it does not
 * change during the timeout period and the queue is not empty we reset
 * the firmware.
 */
static int
il_check_stuck_queue(struct il_priv *il, int cnt)
{
	struct il_tx_queue *txq = &il->txq[cnt];
	struct il_queue *q = &txq->q;
	unsigned long timeout;
	unsigned long now = jiffies;
	int ret;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = now;
		return 0;
	}

	timeout =
	    txq->time_stamp +
	    msecs_to_jiffies(il->cfg->wd_timeout);

	if (time_after(now, timeout)) {
		IL_ERR("Queue %d stuck for %u ms.\n", q->id,
		       jiffies_to_msecs(now - txq->time_stamp));
		ret = il_force_reset(il, false);
		return (ret == -EAGAIN) ? 0 : 1;
	}

	return 0;
}

/*
 * Making the watchdog tick a quarter of the timeout assures we will
 * discover a hung queue between timeout and 1.25*timeout
 */
#define IL_WD_TICK(timeout) ((timeout) / 4)
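
/*
 * Example (not driver code): the IL_WD_TICK() arithmetic above -- with a
 * tick of timeout/4, a queue that hangs right after a tick is observed
 * between 1.0 and 1.25 times the timeout. Pure arithmetic, no driver
 * state involved.
 */
#if 0
#include <stdio.h>

#define IL_WD_TICK(timeout) ((timeout) / 4)

int main(void)
{
	unsigned timeout_ms = 2000;
	unsigned tick = IL_WD_TICK(timeout_ms);
	unsigned elapsed, hang_at = 1;	/* queue hangs 1 ms after a tick */

	/* walk tick by tick until a check sees the queue stuck >= timeout */
	for (elapsed = tick;; elapsed += tick)
		if (elapsed - hang_at >= timeout_ms)
			break;

	printf("tick=%u ms, detected after %u ms (%.2f * timeout)\n",
	       tick, elapsed, (double)elapsed / timeout_ms);
	return 0;
}
#endif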
/*
 * Watchdog timer callback: we check each tx queue for a stuck condition;
 * if hung, we reset the firmware. If everything is fine just rearm the timer.
 */
void
il_bg_watchdog(unsigned long data)
{
	struct il_priv *il = (struct il_priv *)data;
	int cnt;
	unsigned long timeout;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	timeout = il->cfg->wd_timeout;
	if (timeout == 0)
		return;

	/* monitor and check for stuck cmd queue */
	if (il_check_stuck_queue(il, il->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		/* skip as we already checked the command queue */
		if (cnt == il->cmd_queue)
			continue;
		if (il_check_stuck_queue(il, cnt))
			return;
	}

	mod_timer(&il->watchdog,
		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(il_bg_watchdog);

void
il_setup_watchdog(struct il_priv *il)
{
	unsigned int timeout = il->cfg->wd_timeout;

	if (timeout)
		mod_timer(&il->watchdog,
			  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
	else
		del_timer(&il->watchdog);
}
EXPORT_SYMBOL(il_setup_watchdog);

/*
 * extended beacon time format
 * time in usec will be changed into a 32-bit value in extended:internal format
 * the extended part is the beacon counts
 * the internal part is the time in usec within one beacon interval
 */
u32
il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	quot =
	    (usec /
	     interval) & (il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits) >> il->
			  hw_params.beacon_time_tsf_bits);
	rem =
	    (usec % interval) & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);

	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(il_usecs_to_beacons);

/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 */
__le32
il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
		   u32 beacon_interval)
{
	u32 base_low = base & il_beacon_time_mask_low(il,
						      il->hw_params.
						      beacon_time_tsf_bits);
	u32 addon_low = addon & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits)) +
	    (addon & il_beacon_time_mask_high(il,
					      il->hw_params.
					      beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << il->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << il->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(il_add_beacon_time);
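
/*
 * Example (not driver code): the extended beacon time format used by
 * il_usecs_to_beacons() above -- the value packs "beacon count" in the
 * high bits and "usec within the current beacon interval" in the low
 * tsf_bits. TSF_BITS = 22 is an assumption here for illustration; the
 * real width comes from hw_params.beacon_time_tsf_bits.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define TSF_BITS	22
#define TIME_UNIT	1024			/* one TU in usec */
#define LOW_MASK	((1u << TSF_BITS) - 1)

static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval_tu)
{
	uint32_t interval = beacon_interval_tu * TIME_UNIT;
	uint32_t quot = usec / interval;	/* whole beacon intervals */
	uint32_t rem = (usec % interval) & LOW_MASK;

	return (quot << TSF_BITS) + rem;
}

int main(void)
{
	uint32_t v = usecs_to_beacons(250000, 100);	/* 100 TU interval */

	printf("beacons=%u, usec_in_interval=%u\n",
	       v >> TSF_BITS, v & LOW_MASK);	/* 2 and 45200 */
	return 0;
}
#endif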
#ifdef CONFIG_PM_SLEEP

static int
il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call il_mac_stop() from the mac80211 suspend
	 * function first, but since il_mac_stop() has no knowledge of who
	 * the caller is, it will not call apm_ops.stop() to stop the DMA
	 * operation. Call apm_ops.stop here to make sure we stop the DMA.
	 */
	il_apm_stop(il);

	return 0;
}

static int
il_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il_enable_interrupts(il);

	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);

	return 0;
}

SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
EXPORT_SYMBOL(il_pm_ops);

#endif /* CONFIG_PM_SLEEP */

static void
il_update_qos(struct il_priv *il)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il->qos_data.def_qos_parm.qos_flags = 0;

	if (il->qos_data.qos_active)
		il->qos_data.def_qos_parm.qos_flags |=
		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (il->ht.enabled)
		il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
	      il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);

	il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
			      &il->qos_data.def_qos_parm, NULL);
}
/**
 * il_mac_config - mac80211 config callback
 */
int
il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
		   changed);

	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed &
	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		il->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* during scanning mac80211 will delay channel setting until
	 * scan finish with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Configure HT40 channels */
		if (il->ht.enabled != conf_is_ht(conf)) {
			il->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (il->ht.enabled) {
			if (conf_is_ht40_minus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				il->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				il->ht.is_40mhz = true;
			} else {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
				il->ht.is_40mhz = false;
			}
		} else
			il->ht.is_40mhz = false;

		/*
		 * Default to no protection. Protection mode will
		 * later be set from BSS config in il_ht_conf
		 */
		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* if we are switching from ht to 2.4 clear flags
		 * from any ht related info since 2.4 does not
		 * support ht */
		if ((le16_to_cpu(il->staging.channel) != ch))
			il->staging.flags = 0;

		il_set_rxon_channel(il, channel);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, channel->band, il->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->ops->update_bcast_stations)
			ret = il->ops->update_bcast_stations(il);

set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
		il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
			   conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
		il_commit_rxon(il);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il);

out:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_config);
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	spin_lock_irqsave(&il->lock, flags);

	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));

	/* new association get rid of ibss beacon skb */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;
	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting association process */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il);

	il_set_rate(il);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_reset_tsf);
static void
il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;

	D_ASSOC("enter:\n");

	if (!il->ht.enabled)
		return;

	il->ht.protection =
	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	il->ht.non_gf_sta_present =
	    !!(bss_conf->
	       ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			maxstreams =
			    (ht_cap->mcs.
			     tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (ht_cap->mcs.rx_mask[1] == 0 &&
			    ht_cap->mcs.rx_mask[2] == 0)
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, and in that case
			 * mac80211 will soon tell us about it.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	D_ASSOC("leave\n");
}

static inline void
il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
{
	/*
	 * inform the ucode that there is no longer an
	 * association and that no more packets should be
	 * sent
	 */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il->staging.assoc_id = 0;
	il_commit_rxon(il);
}

static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("update beacon with no beaconing enabled\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->ops->post_associate(il);
}
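
/*
 * Example (not driver code): the stream-count check in il_ht_conf()
 * above -- the peer's max TX streams live in two bits of mcs.tx_params,
 * and a single-chain peer is inferred from empty rx_mask[1]/rx_mask[2].
 * The MASK/SHIFT values match mac80211's defines; the sample values are
 * illustrative.
 */
#if 0
#include <stdio.h>

#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK	0x0C
#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT	2

int main(void)
{
	unsigned char tx_params = 0x04;	/* encoded value 1 -> 2 streams */
	unsigned char rx_mask1 = 0x00, rx_mask2 = 0x00;
	int maxstreams, single_chain_sufficient = 0;

	maxstreams = (tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
	    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
	maxstreams += 1;

	if (rx_mask1 == 0 && rx_mask2 == 0)
		single_chain_sufficient = 1;	/* peer receives 1 stream */
	if (maxstreams <= 1)
		single_chain_sufficient = 1;

	printf("maxstreams=%d single_chain_sufficient=%d\n",
	       maxstreams, single_chain_sufficient);
	return 0;
}
#endif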
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: changes 0x%x\n", changes);

	if (!il_is_alive(il)) {
		D_MAC80211("leave - not alive\n");
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		il->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/* FIXME: can we remove beacon_enabled ? */
		if (vif->bss_conf.enable_beacon)
			il->beacon_enabled = true;
		else
			il->beacon_enabled = false;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * On a passive channel we wait with blocked queues to see if
		 * there is traffic on that channel. If no frame is received
		 * (which is very unlikely since the scan detected an AP on
		 * that channel, but theoretically possible), the mac80211
		 * associate procedure will time out and mac80211 will call
		 * us with a NULL bssid. We have to unblock queues in that
		 * case.
		 */
		if (is_zero_ether_addr(bss_conf->bssid))
			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);

		/*
		 * If there is currently a HW scan going on in the background,
		 * then we need to cancel it, otherwise sometimes we are not
		 * able to authenticate (FIXME: why ?)
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			D_MAC80211("leave - scan abort failed\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

		/* FIXME: currently needed in a few places */
		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			il->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from il_set_rate() and put something
		 * like this here:
		 *
		 if (A-band)
		 il->staging.ofdm_basic_rates =
		 bss_conf->basic_rates;
		 else
		 il->staging.ofdm_basic_rates =
		 bss_conf->basic_rates >> 4;
		 il->staging.cck_basic_rates =
		 bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->sync_tsf;

			if (!il_is_rfkill(il))
				il->ops->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes && il_is_associated(il) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&il->active, &il->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(il->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->ops->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = il->ops->manage_ibss_station(il, vif,
						   bss_conf->ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
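
/*
 * Example (not driver code): the rate split hinted at in the
 * BASIC_RATES XXX comment above -- on 2.4 GHz, mac80211's basic_rates
 * bitmap carries the CCK rates in the low 4 bits, with OFDM rates
 * starting at bit 4, so a driver would use (basic_rates >> 4) for OFDM
 * and (basic_rates & 0xF) for CCK. The sample bitmap is illustrative.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned basic_rates = 0x153;	/* example bitmap from mac80211 */

	printf("ofdm_basic_rates = 0x%02x\n", basic_rates >> 4);
	printf("cck_basic_rates  = 0x%x\n", basic_rates & 0xF);
	return 0;
}
#endif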
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;

	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);

/*
 * il_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
 * function.
 */
void
il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
		     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

		if (!ieee80211_is_mgmt(fc))
			return;

		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			*tx_flags |= TX_CMD_FLG_CTS_MSK;
			break;
		}
	} else if (info->control.rates[0].
		   flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
	}
}
EXPORT_SYMBOL(il_tx_cmd_protection);
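
/*
 * Example (not driver code): the protection-flag policy in
 * il_tx_cmd_protection() above -- RTS/CTS is requested in general, but
 * management frames used during connection setup (auth/assoc) are
 * downgraded to CTS-to-self. Flag values and the frame-type encoding
 * here are simplified stand-ins for the real TX_CMD_FLG_* bits.
 */
#if 0
#include <stdio.h>

#define FLG_RTS 0x1
#define FLG_CTS 0x2

enum frame { DATA, AUTH_MGMT };

static unsigned prot_flags(int want_rts, enum frame f)
{
	unsigned flags = 0;

	if (want_rts) {
		flags |= FLG_RTS;
		if (f == AUTH_MGMT) {	/* setup frames: CTS-to-self instead */
			flags &= ~FLG_RTS;
			flags |= FLG_CTS;
		}
	}
	return flags;
}

int main(void)
{
	printf("data: 0x%x, auth: 0x%x\n",
	       prot_flags(1, DATA), prot_flags(1, AUTH_MGMT));
	return 0;
}
#endif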