1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of version 2 of the GNU General Public License as
13  * published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope that it will be useful, but
16  * WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23  * USA
24  *
25  * The full GNU General Public License is included in this distribution
26  * in the file called COPYING.
27  *
28  * Contact Information:
29  *  Intel Linux Wireless <ilw@linux.intel.com>
30  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31  *
32  * BSD LICENSE
33  *
34  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  *
42  *  * Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  *  * Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in
46  *    the documentation and/or other materials provided with the
47  *    distribution.
48  *  * Neither the name Intel Corporation nor the names of its
49  *    contributors may be used to endorse or promote products derived
50  *    from this software without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63  *
64  *****************************************************************************/
65 #include <linux/kernel.h>
66 #include <linux/slab.h>
67 #include <linux/skbuff.h>
68 #include <linux/netdevice.h>
69 #include <linux/etherdevice.h>
70 #include <linux/ip.h>
71 #include <linux/if_arp.h>
72 #include <linux/devcoredump.h>
73 #include <net/mac80211.h>
74 #include <net/ieee80211_radiotap.h>
75 #include <net/tcp.h>
76 
77 #include "iwl-op-mode.h"
78 #include "iwl-io.h"
79 #include "mvm.h"
80 #include "sta.h"
81 #include "time-event.h"
82 #include "iwl-eeprom-parse.h"
83 #include "iwl-phy-db.h"
84 #include "testmode.h"
85 #include "iwl-fw-error-dump.h"
86 #include "iwl-prph.h"
87 #include "iwl-csr.h"
88 #include "iwl-nvm-parse.h"
89 
90 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
91 	{
92 		.max = 1,
93 		.types = BIT(NL80211_IFTYPE_STATION),
94 	},
95 	{
96 		.max = 1,
97 		.types = BIT(NL80211_IFTYPE_AP) |
98 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
99 			BIT(NL80211_IFTYPE_P2P_GO),
100 	},
101 	{
102 		.max = 1,
103 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
104 	},
105 };
106 
107 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
108 	{
109 		.num_different_channels = 2,
110 		.max_interfaces = 3,
111 		.limits = iwl_mvm_limits,
112 		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
113 	},
114 };
115 
116 #ifdef CONFIG_PM_SLEEP
117 static const struct nl80211_wowlan_tcp_data_token_feature
118 iwl_mvm_wowlan_tcp_token_feature = {
119 	.min_len = 0,
120 	.max_len = 255,
121 	.bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
122 };
123 
124 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
125 	.tok = &iwl_mvm_wowlan_tcp_token_feature,
126 	.data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
127 			    sizeof(struct ethhdr) -
128 			    sizeof(struct iphdr) -
129 			    sizeof(struct tcphdr),
130 	.data_interval_max = 65535, /* __le16 in API */
131 	.wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
132 			    sizeof(struct ethhdr) -
133 			    sizeof(struct iphdr) -
134 			    sizeof(struct tcphdr),
135 	.seq = true,
136 };
137 #endif
138 
139 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
140 /*
141  * Use the reserved field to indicate magic values.
142  * These values will only be used internally by the driver,
143  * and won't make it to the fw (reserved will be 0).
144  * BC_FILTER_MAGIC_IP - configure the val of this attribute to
145  *	be the vif's IP address. In case there is not a single
146  *	IP address (0, or more than 1), this attribute will
147  *	be skipped.
148  * BC_FILTER_MAGIC_MAC - set the val of this attribute to
149  *	the LSB bytes of the vif's MAC address
150  */
151 enum {
152 	BC_FILTER_MAGIC_NONE = 0,
153 	BC_FILTER_MAGIC_IP,
154 	BC_FILTER_MAGIC_MAC,
155 };
156 
157 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
158 	{
159 		/* arp */
160 		.discard = 0,
161 		.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
162 		.attrs = {
163 			{
164 				/* frame type - arp, hw type - ethernet */
165 				.offset_type =
166 					BCAST_FILTER_OFFSET_PAYLOAD_START,
167 				.offset = sizeof(rfc1042_header),
168 				.val = cpu_to_be32(0x08060001),
169 				.mask = cpu_to_be32(0xffffffff),
170 			},
171 			{
172 				/* arp dest ip */
173 				.offset_type =
174 					BCAST_FILTER_OFFSET_PAYLOAD_START,
175 				.offset = sizeof(rfc1042_header) + 2 +
176 					  sizeof(struct arphdr) +
177 					  ETH_ALEN + sizeof(__be32) +
178 					  ETH_ALEN,
179 				.mask = cpu_to_be32(0xffffffff),
180 				/* mark it as special field */
181 				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
182 			},
183 		},
184 	},
185 	{
186 		/* dhcp offer bcast */
187 		.discard = 0,
188 		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
189 		.attrs = {
190 			{
191 				/* udp dest port - 68 (bootp client) */
192 				.offset_type = BCAST_FILTER_OFFSET_IP_END,
193 				.offset = offsetof(struct udphdr, dest),
194 				.val = cpu_to_be32(0x00440000),
195 				.mask = cpu_to_be32(0xffff0000),
196 			},
197 			{
198 				/* dhcp - lsb bytes of client hw address */
199 				.offset_type = BCAST_FILTER_OFFSET_IP_END,
200 				.offset = 38,
201 				.mask = cpu_to_be32(0xffffffff),
202 				/* mark it as special field */
203 				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
204 			},
205 		},
206 	},
207 	/* last filter must be empty */
208 	{},
209 };
210 #endif
211 
212 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
213 {
214 	if (!iwl_mvm_is_d0i3_supported(mvm))
215 		return;
216 
217 	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
218 	spin_lock_bh(&mvm->refs_lock);
219 	mvm->refs[ref_type]++;
220 	spin_unlock_bh(&mvm->refs_lock);
221 	iwl_trans_ref(mvm->trans);
222 }
223 
224 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
225 {
226 	if (!iwl_mvm_is_d0i3_supported(mvm))
227 		return;
228 
229 	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
230 	spin_lock_bh(&mvm->refs_lock);
231 	WARN_ON(!mvm->refs[ref_type]--);
232 	spin_unlock_bh(&mvm->refs_lock);
233 	iwl_trans_unref(mvm->trans);
234 }
235 
236 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
237 				     enum iwl_mvm_ref_type except_ref)
238 {
239 	int i, j;
240 
241 	if (!iwl_mvm_is_d0i3_supported(mvm))
242 		return;
243 
244 	spin_lock_bh(&mvm->refs_lock);
245 	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
246 		if (except_ref == i || !mvm->refs[i])
247 			continue;
248 
249 		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
250 			      i, mvm->refs[i]);
251 		for (j = 0; j < mvm->refs[i]; j++)
252 			iwl_trans_unref(mvm->trans);
253 		mvm->refs[i] = 0;
254 	}
255 	spin_unlock_bh(&mvm->refs_lock);
256 }
257 
258 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
259 {
260 	int i;
261 	bool taken = false;
262 
263 	if (!iwl_mvm_is_d0i3_supported(mvm))
264 		return true;
265 
266 	spin_lock_bh(&mvm->refs_lock);
267 	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
268 		if (mvm->refs[i]) {
269 			taken = true;
270 			break;
271 		}
272 	}
273 	spin_unlock_bh(&mvm->refs_lock);
274 
275 	return taken;
276 }
277 
278 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
279 {
280 	iwl_mvm_ref(mvm, ref_type);
281 
282 	if (!wait_event_timeout(mvm->d0i3_exit_waitq,
283 				!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
284 				HZ)) {
285 		WARN_ON_ONCE(1);
286 		iwl_mvm_unref(mvm, ref_type);
287 		return -EIO;
288 	}
289 
290 	return 0;
291 }
292 
293 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
294 {
295 	int i;
296 
297 	memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
298 	for (i = 0; i < NUM_PHY_CTX; i++) {
299 		mvm->phy_ctxts[i].id = i;
300 		mvm->phy_ctxts[i].ref = 0;
301 	}
302 }
303 
304 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
305 						  const char *alpha2,
306 						  enum iwl_mcc_source src_id,
307 						  bool *changed)
308 {
309 	struct ieee80211_regdomain *regd = NULL;
310 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
311 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
312 	struct iwl_mcc_update_resp *resp;
313 
314 	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
315 
316 	lockdep_assert_held(&mvm->mutex);
317 
318 	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
319 	if (IS_ERR_OR_NULL(resp)) {
320 		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
321 			      PTR_ERR_OR_ZERO(resp));
322 		goto out;
323 	}
324 
325 	if (changed)
326 		*changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
327 
328 	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
329 				      __le32_to_cpu(resp->n_channels),
330 				      resp->channels,
331 				      __le16_to_cpu(resp->mcc));
332 	/* Store the returned source id */
333 	src_id = resp->source_id;
334 	kfree(resp);
335 	if (IS_ERR_OR_NULL(regd)) {
336 		IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
337 			      PTR_ERR_OR_ZERO(regd));
338 		goto out;
339 	}
340 
341 	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
342 		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
343 	mvm->lar_regdom_set = true;
344 	mvm->mcc_src = src_id;
345 
346 out:
347 	return regd;
348 }
349 
350 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
351 {
352 	bool changed;
353 	struct ieee80211_regdomain *regd;
354 
355 	if (!iwl_mvm_is_lar_supported(mvm))
356 		return;
357 
358 	regd = iwl_mvm_get_current_regdomain(mvm, &changed);
359 	if (!IS_ERR_OR_NULL(regd)) {
360 		/* only update the regulatory core if changed */
361 		if (changed)
362 			regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
363 
364 		kfree(regd);
365 	}
366 }
367 
368 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
369 							  bool *changed)
370 {
371 	return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
372 				     iwl_mvm_is_wifi_mcc_supported(mvm) ?
373 				     MCC_SOURCE_GET_CURRENT :
374 				     MCC_SOURCE_OLD_FW, changed);
375 }
376 
377 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
378 {
379 	enum iwl_mcc_source used_src;
380 	struct ieee80211_regdomain *regd;
381 	int ret;
382 	bool changed;
383 	const struct ieee80211_regdomain *r =
384 			rtnl_dereference(mvm->hw->wiphy->regd);
385 
386 	if (!r)
387 		return -ENOENT;
388 
389 	/* save the last source in case we overwrite it below */
390 	used_src = mvm->mcc_src;
391 	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
392 		/* Notify the firmware we support wifi location updates */
393 		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
394 		if (!IS_ERR_OR_NULL(regd))
395 			kfree(regd);
396 	}
397 
398 	/* Now set our last stored MCC and source */
399 	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
400 				     &changed);
401 	if (IS_ERR_OR_NULL(regd))
402 		return -EIO;
403 
404 	/* update cfg80211 if the regdomain was changed */
405 	if (changed)
406 		ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
407 	else
408 		ret = 0;
409 
410 	kfree(regd);
411 	return ret;
412 }
413 
414 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
415 {
416 	struct ieee80211_hw *hw = mvm->hw;
417 	int num_mac, ret, i;
418 	static const u32 mvm_ciphers[] = {
419 		WLAN_CIPHER_SUITE_WEP40,
420 		WLAN_CIPHER_SUITE_WEP104,
421 		WLAN_CIPHER_SUITE_TKIP,
422 		WLAN_CIPHER_SUITE_CCMP,
423 	};
424 
425 	/* Tell mac80211 our characteristics */
426 	ieee80211_hw_set(hw, SIGNAL_DBM);
427 	ieee80211_hw_set(hw, SPECTRUM_MGMT);
428 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
429 	ieee80211_hw_set(hw, QUEUE_CONTROL);
430 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
431 	ieee80211_hw_set(hw, SUPPORTS_PS);
432 	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
433 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
434 	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
435 	ieee80211_hw_set(hw, CONNECTION_MONITOR);
436 	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
437 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
438 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
439 
440 	hw->queues = mvm->first_agg_queue;
441 	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
442 	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
443 				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
444 	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
445 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
446 	hw->rate_control_algorithm = "iwl-mvm-rs";
447 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
448 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
449 
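	/* Leave room in mvm->ciphers for the optional CMAC suite and one FW cipher scheme added below */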
450 	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
451 	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
452 	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
453 	hw->wiphy->cipher_suites = mvm->ciphers;
454 
455 	/*
456 	 * Enable 11w if advertised by firmware and software crypto
457 	 * is not enabled (as the firmware will interpret some mgmt
458 	 * packets, so enabling it with software crypto isn't safe)
459 	 */
460 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
461 	    !iwlwifi_mod_params.sw_crypto) {
462 		ieee80211_hw_set(hw, MFP_CAPABLE);
463 		mvm->ciphers[hw->wiphy->n_cipher_suites] =
464 			WLAN_CIPHER_SUITE_AES_CMAC;
465 		hw->wiphy->n_cipher_suites++;
466 	}
467 
468 	/* currently FW API supports only one optional cipher scheme */
469 	if (mvm->fw->cs[0].cipher) {
470 		mvm->hw->n_cipher_schemes = 1;
471 		mvm->hw->cipher_schemes = &mvm->fw->cs[0];
472 		mvm->ciphers[hw->wiphy->n_cipher_suites] =
473 			mvm->fw->cs[0].cipher;
474 		hw->wiphy->n_cipher_suites++;
475 	}
476 
477 	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
478 	hw->wiphy->features |=
479 		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
480 		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
481 		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
482 
483 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
484 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
485 	hw->chanctx_data_size = sizeof(u16);
486 
487 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
488 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
489 		BIT(NL80211_IFTYPE_AP) |
490 		BIT(NL80211_IFTYPE_P2P_GO) |
491 		BIT(NL80211_IFTYPE_P2P_DEVICE) |
492 		BIT(NL80211_IFTYPE_ADHOC);
493 
494 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
495 	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
496 	if (iwl_mvm_is_lar_supported(mvm))
497 		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
498 	else
499 		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
500 					       REGULATORY_DISABLE_BEACON_HINTS;
501 
502 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
503 		hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
504 
505 	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
506 
507 	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
508 	hw->wiphy->n_iface_combinations =
509 		ARRAY_SIZE(iwl_mvm_iface_combinations);
510 
511 	hw->wiphy->max_remain_on_channel_duration = 10000;
512 	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
513 	/* we can compensate for an offset of up to 3 channels = 15 MHz */
514 	hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
515 
516 	/* Extract MAC address */
517 	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
518 	hw->wiphy->addresses = mvm->addresses;
519 	hw->wiphy->n_addresses = 1;
520 
521 	/* Extract additional MAC addresses if available */
522 	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
523 		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
524 
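	/* Derive each additional address from the previous one by incrementing the last octet */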
525 	for (i = 1; i < num_mac; i++) {
526 		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
527 		       ETH_ALEN);
528 		mvm->addresses[i].addr[5]++;
529 		hw->wiphy->n_addresses++;
530 	}
531 
532 	iwl_mvm_reset_phy_ctxts(mvm);
533 
534 	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
535 
536 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
537 
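	/*
	 * Sanity checks: the stopping mask must not overlap the scan mask,
	 * and the maximum number of simultaneous scans must fit in the mask.
	 */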
538 	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
539 	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
540 		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
541 
542 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
543 		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
544 	else
545 		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
546 
547 	if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
548 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
549 			&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
550 	if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
551 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
552 			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
553 
554 		if (fw_has_capa(&mvm->fw->ucode_capa,
555 				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
556 		    fw_has_api(&mvm->fw->ucode_capa,
557 			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
558 			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
559 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
560 	}
561 
562 	hw->wiphy->hw_version = mvm->trans->hw_id;
563 
564 	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
565 		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
566 	else
567 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
568 
569 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
570 	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
571 	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
572 	/* we create the 802.11 header and a zero-length SSID IE. */
573 	hw->wiphy->max_sched_scan_ie_len =
574 		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
575 	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
576 	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
577 
578 	/*
579 	 * the firmware uses a u8 for the number of iterations, but 0xff is
580 	 * reserved to mean an infinite loop, so the maximum is actually 254.
581 	 */
582 	hw->wiphy->max_sched_scan_plan_iterations = 254;
583 
584 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
585 			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
586 			       NL80211_FEATURE_P2P_GO_OPPPS |
587 			       NL80211_FEATURE_DYNAMIC_SMPS |
588 			       NL80211_FEATURE_STATIC_SMPS |
589 			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
590 
591 	if (fw_has_capa(&mvm->fw->ucode_capa,
592 			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
593 		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
594 	if (fw_has_capa(&mvm->fw->ucode_capa,
595 			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
596 		hw->wiphy->features |= NL80211_FEATURE_QUIET;
597 
598 	if (fw_has_capa(&mvm->fw->ucode_capa,
599 			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
600 		hw->wiphy->features |=
601 			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
602 
603 	if (fw_has_capa(&mvm->fw->ucode_capa,
604 			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
605 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
606 
607 	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
608 
609 #ifdef CONFIG_PM_SLEEP
610 	if (iwl_mvm_is_d0i3_supported(mvm) &&
611 	    device_can_wakeup(mvm->trans->dev)) {
612 		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
613 		hw->wiphy->wowlan = &mvm->wowlan;
614 	}
615 
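	/*
	 * Full WoWLAN support requires a WoWLAN firmware image, D3
	 * suspend/resume support in the transport and a wakeup-capable device.
	 */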
616 	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
617 	    mvm->trans->ops->d3_suspend &&
618 	    mvm->trans->ops->d3_resume &&
619 	    device_can_wakeup(mvm->trans->dev)) {
620 		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
621 				     WIPHY_WOWLAN_DISCONNECT |
622 				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
623 				     WIPHY_WOWLAN_RFKILL_RELEASE |
624 				     WIPHY_WOWLAN_NET_DETECT;
625 		if (!iwlwifi_mod_params.sw_crypto)
626 			mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
627 					     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
628 					     WIPHY_WOWLAN_4WAY_HANDSHAKE;
629 
630 		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
631 		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
632 		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
633 		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
634 		mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
635 		hw->wiphy->wowlan = &mvm->wowlan;
636 	}
637 #endif
638 
639 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
640 	/* assign default bcast filtering configuration */
641 	mvm->bcast_filters = iwl_mvm_default_bcast_filters;
642 #endif
643 
644 	ret = iwl_mvm_leds_init(mvm);
645 	if (ret)
646 		return ret;
647 
648 	if (fw_has_capa(&mvm->fw->ucode_capa,
649 			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
650 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
651 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
652 		ieee80211_hw_set(hw, TDLS_WIDER_BW);
653 	}
654 
655 	if (fw_has_capa(&mvm->fw->ucode_capa,
656 			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
657 		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
658 		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
659 	}
660 
661 	hw->netdev_features |= mvm->cfg->features;
662 	if (!iwl_mvm_is_csum_supported(mvm))
663 		hw->netdev_features &= ~NETIF_F_RXCSUM;
664 
665 	ret = ieee80211_register_hw(mvm->hw);
666 	if (ret)
667 		iwl_mvm_leds_exit(mvm);
668 
669 	return ret;
670 }
671 
672 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
673 			     struct ieee80211_sta *sta,
674 			     struct sk_buff *skb)
675 {
676 	struct iwl_mvm_sta *mvmsta;
677 	bool defer = false;
678 
679 	/*
680 	 * double check the IN_D0I3 flag both before and after
681 	 * taking the spinlock, in order to prevent taking
682 	 * the spinlock when not needed.
683 	 */
684 	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
685 		return false;
686 
687 	spin_lock(&mvm->d0i3_tx_lock);
688 	/*
689 	 * testing the flag again ensures the skb dequeue
690 	 * loop (on d0i3 exit) hasn't run yet.
691 	 */
692 	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
693 		goto out;
694 
695 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
696 	if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
697 	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
698 		goto out;
699 
700 	__skb_queue_tail(&mvm->d0i3_tx, skb);
701 	ieee80211_stop_queues(mvm->hw);
702 
703 	/* trigger wakeup */
704 	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
705 	iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
706 
707 	defer = true;
708 out:
709 	spin_unlock(&mvm->d0i3_tx_lock);
710 	return defer;
711 }
712 
713 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
714 			   struct ieee80211_tx_control *control,
715 			   struct sk_buff *skb)
716 {
717 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
718 	struct ieee80211_sta *sta = control->sta;
719 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
720 	struct ieee80211_hdr *hdr = (void *)skb->data;
721 
722 	if (iwl_mvm_is_radio_killed(mvm)) {
723 		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
724 		goto drop;
725 	}
726 
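	/* Drop off-channel frames if the remain-on-channel session has already ended */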
727 	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
728 	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
729 	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
730 		goto drop;
731 
732 	/* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
733 	if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
734 		     ieee80211_is_mgmt(hdr->frame_control) &&
735 		     !ieee80211_is_deauth(hdr->frame_control) &&
736 		     !ieee80211_is_disassoc(hdr->frame_control) &&
737 		     !ieee80211_is_action(hdr->frame_control)))
738 		sta = NULL;
739 
740 	if (sta) {
741 		if (iwl_mvm_defer_tx(mvm, sta, skb))
742 			return;
743 		if (iwl_mvm_tx_skb(mvm, skb, sta))
744 			goto drop;
745 		return;
746 	}
747 
748 	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
749 		goto drop;
750 	return;
751  drop:
752 	ieee80211_free_txskb(hw, skb);
753 }
754 
755 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
756 {
757 	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
758 		return false;
759 	return true;
760 }
761 
762 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
763 {
764 	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
765 		return false;
766 	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
767 		return true;
768 
769 	/* enabled by default */
770 	return true;
771 }
772 
773 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)	\
774 	do {							\
775 		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))	\
776 			break;					\
777 		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);	\
778 	} while (0)
779 
780 static void
781 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
782 			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
783 			    enum ieee80211_ampdu_mlme_action action)
784 {
785 	struct iwl_fw_dbg_trigger_tlv *trig;
786 	struct iwl_fw_dbg_trigger_ba *ba_trig;
787 
788 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
789 		return;
790 
791 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
792 	ba_trig = (void *)trig->data;
793 
794 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
795 		return;
796 
797 	switch (action) {
798 	case IEEE80211_AMPDU_TX_OPERATIONAL: {
799 		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
800 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
801 
802 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
803 				 "TX AGG START: MAC %pM tid %d ssn %d\n",
804 				 sta->addr, tid, tid_data->ssn);
805 		break;
806 		}
807 	case IEEE80211_AMPDU_TX_STOP_CONT:
808 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
809 				 "TX AGG STOP: MAC %pM tid %d\n",
810 				 sta->addr, tid);
811 		break;
812 	case IEEE80211_AMPDU_RX_START:
813 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
814 				 "RX AGG START: MAC %pM tid %d ssn %d\n",
815 				 sta->addr, tid, rx_ba_ssn);
816 		break;
817 	case IEEE80211_AMPDU_RX_STOP:
818 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
819 				 "RX AGG STOP: MAC %pM tid %d\n",
820 				 sta->addr, tid);
821 		break;
822 	default:
823 		break;
824 	}
825 }
826 
827 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
828 				    struct ieee80211_vif *vif,
829 				    enum ieee80211_ampdu_mlme_action action,
830 				    struct ieee80211_sta *sta, u16 tid,
831 				    u16 *ssn, u8 buf_size, bool amsdu)
832 {
833 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
834 	int ret;
835 	bool tx_agg_ref = false;
836 
837 	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
838 		     sta->addr, tid, action);
839 
840 	if (!(mvm->nvm_data->sku_cap_11n_enable))
841 		return -EACCES;
842 
843 	/* return from D0i3 before starting a new Tx aggregation */
844 	switch (action) {
845 	case IEEE80211_AMPDU_TX_START:
846 	case IEEE80211_AMPDU_TX_STOP_CONT:
847 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
848 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
849 	case IEEE80211_AMPDU_TX_OPERATIONAL:
850 		/*
851 		 * for tx start, wait synchronously until D0i3 exit to
852 		 * get the correct sequence number for the tid.
853 		 * additionally, some other ampdu actions use direct
854 		 * target access, which is not handled automatically
855 		 * by the trans layer (unlike commands), so wait for
856 		 * d0i3 exit in these cases as well.
857 		 */
858 		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
859 		if (ret)
860 			return ret;
861 
862 		tx_agg_ref = true;
863 		break;
864 	default:
865 		break;
866 	}
867 
868 	mutex_lock(&mvm->mutex);
869 
870 	switch (action) {
871 	case IEEE80211_AMPDU_RX_START:
872 		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
873 			ret = -EINVAL;
874 			break;
875 		}
876 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
877 		break;
878 	case IEEE80211_AMPDU_RX_STOP:
879 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
880 		break;
881 	case IEEE80211_AMPDU_TX_START:
882 		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
883 			ret = -EINVAL;
884 			break;
885 		}
886 		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
887 		break;
888 	case IEEE80211_AMPDU_TX_STOP_CONT:
889 		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
890 		break;
891 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
892 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
893 		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
894 		break;
895 	case IEEE80211_AMPDU_TX_OPERATIONAL:
896 		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
897 		break;
898 	default:
899 		WARN_ON_ONCE(1);
900 		ret = -EINVAL;
901 		break;
902 	}
903 
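	/* On success, let the firmware debug BA trigger logic inspect the action */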
904 	if (!ret) {
905 		u16 rx_ba_ssn = 0;
906 
907 		if (action == IEEE80211_AMPDU_RX_START)
908 			rx_ba_ssn = *ssn;
909 
910 		iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
911 					    rx_ba_ssn, action);
912 	}
913 	mutex_unlock(&mvm->mutex);
914 
915 	/*
916 	 * If the tid is marked as started, we won't use it for offloaded
917 	 * traffic on the next D0i3 entry. It's safe to unref.
918 	 */
919 	if (tx_agg_ref)
920 		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
921 
922 	return ret;
923 }
924 
925 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
926 				     struct ieee80211_vif *vif)
927 {
928 	struct iwl_mvm *mvm = data;
929 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
930 
931 	mvmvif->uploaded = false;
932 	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
933 
934 	spin_lock_bh(&mvm->time_event_lock);
935 	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
936 	spin_unlock_bh(&mvm->time_event_lock);
937 
938 	mvmvif->phy_ctxt = NULL;
939 	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
940 }
941 
942 static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
943 				     const void *data, size_t datalen)
944 {
945 	const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
946 	ssize_t bytes_read;
947 	ssize_t bytes_read_trans;
948 
949 	if (offset < dump_ptrs->op_mode_len) {
950 		bytes_read = min_t(ssize_t, count,
951 				   dump_ptrs->op_mode_len - offset);
952 		memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
953 		       bytes_read);
954 		offset += bytes_read;
955 		count -= bytes_read;
956 
957 		if (count == 0)
958 			return bytes_read;
959 	} else {
960 		bytes_read = 0;
961 	}
962 
963 	if (!dump_ptrs->trans_ptr)
964 		return bytes_read;
965 
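	/* The remainder of the dump comes from the transport; rebase the offset into its buffer */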
966 	offset -= dump_ptrs->op_mode_len;
967 	bytes_read_trans = min_t(ssize_t, count,
968 				 dump_ptrs->trans_ptr->len - offset);
969 	memcpy(buffer + bytes_read,
970 	       (u8 *)dump_ptrs->trans_ptr->data + offset,
971 	       bytes_read_trans);
972 
973 	return bytes_read + bytes_read_trans;
974 }
975 
976 static void iwl_mvm_free_coredump(const void *data)
977 {
978 	const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
979 
980 	vfree(fw_error_dump->op_mode_ptr);
981 	vfree(fw_error_dump->trans_ptr);
982 	kfree(fw_error_dump);
983 }
984 
985 static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
986 			       struct iwl_fw_error_dump_data **dump_data)
987 {
988 	struct iwl_fw_error_dump_fifo *fifo_hdr;
989 	u32 *fifo_data;
990 	u32 fifo_len;
991 	unsigned long flags;
992 	int i, j;
993 
994 	if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
995 		return;
996 
997 	/* Pull RXF data from all RXFs */
998 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
999 		/*
1000 		 * Each subsequent RXF's registers are offset from the first;
1001 		 * compute that offset for this RXF
1002 		 */
1003 		u32 offset_diff = RXF_DIFF_FROM_PREV * i;
1004 
1005 		fifo_hdr = (void *)(*dump_data)->data;
1006 		fifo_data = (void *)fifo_hdr->data;
1007 		fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
1008 
1009 		/* No need to try to read the data if the length is 0 */
1010 		if (fifo_len == 0)
1011 			continue;
1012 
1013 		/* Add a TLV for the RXF */
1014 		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
1015 		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1016 
1017 		fifo_hdr->fifo_num = cpu_to_le32(i);
1018 		fifo_hdr->available_bytes =
1019 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1020 							RXF_RD_D_SPACE +
1021 							offset_diff));
1022 		fifo_hdr->wr_ptr =
1023 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1024 							RXF_RD_WR_PTR +
1025 							offset_diff));
1026 		fifo_hdr->rd_ptr =
1027 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1028 							RXF_RD_RD_PTR +
1029 							offset_diff));
1030 		fifo_hdr->fence_ptr =
1031 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1032 							RXF_RD_FENCE_PTR +
1033 							offset_diff));
1034 		fifo_hdr->fence_mode =
1035 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1036 							RXF_SET_FENCE_MODE +
1037 							offset_diff));
1038 
1039 		/* Lock fence */
1040 		iwl_trans_write_prph(mvm->trans,
1041 				     RXF_SET_FENCE_MODE + offset_diff, 0x1);
1042 		/* Set fence pointer to the same place as the WR pointer */
1043 		iwl_trans_write_prph(mvm->trans,
1044 				     RXF_LD_WR2FENCE + offset_diff, 0x1);
1045 		/* Set fence offset */
1046 		iwl_trans_write_prph(mvm->trans,
1047 				     RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
1048 				     0x0);
1049 
1050 		/* Read FIFO */
1051 		fifo_len /= sizeof(u32); /* Size in DWORDS */
1052 		for (j = 0; j < fifo_len; j++)
1053 			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1054 							 RXF_FIFO_RD_FENCE_INC +
1055 							 offset_diff);
1056 		*dump_data = iwl_fw_error_next_data(*dump_data);
1057 	}
1058 
1059 	/* Pull TXF data from all TXFs */
1060 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
1061 		/* Mark the number of TXF we're pulling now */
1062 		iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
1063 
1064 		fifo_hdr = (void *)(*dump_data)->data;
1065 		fifo_data = (void *)fifo_hdr->data;
1066 		fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
1067 
1068 		/* No need to try to read the data if the length is 0 */
1069 		if (fifo_len == 0)
1070 			continue;
1071 
1072 		/* Add a TLV for the FIFO */
1073 		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
1074 		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1075 
1076 		fifo_hdr->fifo_num = cpu_to_le32(i);
1077 		fifo_hdr->available_bytes =
1078 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1079 							TXF_FIFO_ITEM_CNT));
1080 		fifo_hdr->wr_ptr =
1081 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1082 							TXF_WR_PTR));
1083 		fifo_hdr->rd_ptr =
1084 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1085 							TXF_RD_PTR));
1086 		fifo_hdr->fence_ptr =
1087 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1088 							TXF_FENCE_PTR));
1089 		fifo_hdr->fence_mode =
1090 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1091 							TXF_LOCK_FENCE));
1092 
1093 		/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
1094 		iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
1095 				     TXF_WR_PTR);
1096 
1097 		/* Dummy-read to advance the read pointer to the head */
1098 		iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
1099 
1100 		/* Read FIFO */
1101 		fifo_len /= sizeof(u32); /* Size in DWORDS */
1102 		for (j = 0; j < fifo_len; j++)
1103 			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1104 							  TXF_READ_MODIFY_DATA);
1105 		*dump_data = iwl_fw_error_next_data(*dump_data);
1106 	}
1107 
1108 	iwl_trans_release_nic_access(mvm->trans, &flags);
1109 }
1110 
1111 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
1112 {
1113 	if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
1114 	    !mvm->fw_dump_desc)
1115 		return;
1116 
1117 	kfree(mvm->fw_dump_desc);
1118 	mvm->fw_dump_desc = NULL;
1119 }
1120 
1121 #define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
1122 #define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */
1123 
1124 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
1125 {
1126 	struct iwl_fw_error_dump_file *dump_file;
1127 	struct iwl_fw_error_dump_data *dump_data;
1128 	struct iwl_fw_error_dump_info *dump_info;
1129 	struct iwl_fw_error_dump_mem *dump_mem;
1130 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
1131 	struct iwl_mvm_dump_ptrs *fw_error_dump;
1132 	u32 sram_len, sram_ofs;
1133 	u32 file_len, fifo_data_len = 0;
1134 	u32 smem_len = mvm->cfg->smem_len;
1135 	u32 sram2_len = mvm->cfg->dccm2_len;
1136 	bool monitor_dump_only = false;
1137 
1138 	lockdep_assert_held(&mvm->mutex);
1139 
1140 	/* there's no point in fw dump if the bus is dead */
1141 	if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
1142 		IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
1143 		return;
1144 	}
1145 
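	/*
	 * A monitor-only trigger skips the SRAM/SMEM/paging sections below
	 * and dumps only the transport (monitor) data.
	 */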
1146 	if (mvm->fw_dump_trig &&
1147 	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
1148 		monitor_dump_only = true;
1149 
1150 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
1151 	if (!fw_error_dump)
1152 		return;
1153 
1154 	/* SRAM - include stack CCM if driver knows the values for it */
1155 	if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
1156 		const struct fw_img *img;
1157 
1158 		img = &mvm->fw->img[mvm->cur_ucode];
1159 		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1160 		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
1161 	} else {
1162 		sram_ofs = mvm->cfg->dccm_offset;
1163 		sram_len = mvm->cfg->dccm_len;
1164 	}
1165 
1166 	/* reading RXF/TXF sizes */
1167 	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
1168 		struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
1169 		int i;
1170 
1171 		fifo_data_len = 0;
1172 
1173 		/* Count RXF size */
1174 		for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
1175 			if (!mem_cfg->rxfifo_size[i])
1176 				continue;
1177 
1178 			/* Add header info */
1179 			fifo_data_len += mem_cfg->rxfifo_size[i] +
1180 					 sizeof(*dump_data) +
1181 					 sizeof(struct iwl_fw_error_dump_fifo);
1182 		}
1183 
1184 		for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
1185 			if (!mem_cfg->txfifo_size[i])
1186 				continue;
1187 
1188 			/* Add header info */
1189 			fifo_data_len += mem_cfg->txfifo_size[i] +
1190 					 sizeof(*dump_data) +
1191 					 sizeof(struct iwl_fw_error_dump_fifo);
1192 		}
1193 	}
1194 
1195 	file_len = sizeof(*dump_file) +
1196 		   sizeof(*dump_data) * 2 +
1197 		   sram_len + sizeof(*dump_mem) +
1198 		   fifo_data_len +
1199 		   sizeof(*dump_info);
1200 
1201 	/* Make room for the SMEM, if it exists */
1202 	if (smem_len)
1203 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
1204 
1205 	/* Make room for the secondary SRAM, if it exists */
1206 	if (sram2_len)
1207 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
1208 
1209 	/* Make room for fw's virtual image pages, if they exist */
1210 	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
1211 		file_len += mvm->num_of_paging_blk *
1212 			(sizeof(*dump_data) +
1213 			 sizeof(struct iwl_fw_error_dump_paging) +
1214 			 PAGING_BLOCK_SIZE);
1215 
1216 	/* If we only want a monitor dump, reset the file length */
1217 	if (monitor_dump_only) {
1218 		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
1219 			   sizeof(*dump_info);
1220 	}
1221 
1222 	/*
1223 	 * In 8000 HW family B-step include the ICCM (which resides separately)
1224 	 */
1225 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1226 	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
1227 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
1228 			    IWL8260_ICCM_LEN;
1229 
1230 	if (mvm->fw_dump_desc)
1231 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
1232 			    mvm->fw_dump_desc->len;
1233 
1234 	dump_file = vzalloc(file_len);
1235 	if (!dump_file) {
1236 		kfree(fw_error_dump);
1237 		iwl_mvm_free_fw_dump_desc(mvm);
1238 		return;
1239 	}
1240 
1241 	fw_error_dump->op_mode_ptr = dump_file;
1242 
1243 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
1244 	dump_data = (void *)dump_file->data;
1245 
1246 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
1247 	dump_data->len = cpu_to_le32(sizeof(*dump_info));
1248 	dump_info = (void *) dump_data->data;
1249 	dump_info->device_family =
1250 		mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
1251 			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
1252 			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
1253 	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
1254 	memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
1255 	       sizeof(dump_info->fw_human_readable));
1256 	strncpy(dump_info->dev_human_readable, mvm->cfg->name,
1257 		sizeof(dump_info->dev_human_readable));
1258 	strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
1259 		sizeof(dump_info->bus_human_readable));
1260 
1261 	dump_data = iwl_fw_error_next_data(dump_data);
1262 	/* We only dump the FIFOs if the FW is in error state */
1263 	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
1264 		iwl_mvm_dump_fifos(mvm, &dump_data);
1265 
1266 	if (mvm->fw_dump_desc) {
1267 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
1268 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
1269 					     mvm->fw_dump_desc->len);
1270 		dump_trig = (void *)dump_data->data;
1271 		memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
1272 		       sizeof(*dump_trig) + mvm->fw_dump_desc->len);
1273 
1274 		/* now we can free this copy */
1275 		iwl_mvm_free_fw_dump_desc(mvm);
1276 		dump_data = iwl_fw_error_next_data(dump_data);
1277 	}
1278 
1279 	/* In case we only want a monitor dump, skip straight to dumping transport data */
1280 	if (monitor_dump_only)
1281 		goto dump_trans_data;
1282 
1283 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1284 	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
1285 	dump_mem = (void *)dump_data->data;
1286 	dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1287 	dump_mem->offset = cpu_to_le32(sram_ofs);
1288 	iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
1289 				 sram_len);
1290 
1291 	if (smem_len) {
1292 		dump_data = iwl_fw_error_next_data(dump_data);
1293 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1294 		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
1295 		dump_mem = (void *)dump_data->data;
1296 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
1297 		dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
1298 		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
1299 					 dump_mem->data, smem_len);
1300 	}
1301 
1302 	if (sram2_len) {
1303 		dump_data = iwl_fw_error_next_data(dump_data);
1304 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1305 		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
1306 		dump_mem = (void *)dump_data->data;
1307 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1308 		dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
1309 		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
1310 					 dump_mem->data, sram2_len);
1311 	}
1312 
1313 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1314 	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
1315 		dump_data = iwl_fw_error_next_data(dump_data);
1316 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1317 		dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
1318 					     sizeof(*dump_mem));
1319 		dump_mem = (void *)dump_data->data;
1320 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1321 		dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
1322 		iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
1323 					 dump_mem->data, IWL8260_ICCM_LEN);
1324 	}
1325 
1326 	/* Dump fw's virtual image */
1327 	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
1328 		u32 i;
1329 
1330 		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
1331 			struct iwl_fw_error_dump_paging *paging;
1332 			struct page *pages =
1333 				mvm->fw_paging_db[i].fw_paging_block;
1334 
1335 			dump_data = iwl_fw_error_next_data(dump_data);
1336 			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
1337 			dump_data->len = cpu_to_le32(sizeof(*paging) +
1338 						     PAGING_BLOCK_SIZE);
1339 			paging = (void *)dump_data->data;
1340 			paging->index = cpu_to_le32(i);
1341 			memcpy(paging->data, page_address(pages),
1342 			       PAGING_BLOCK_SIZE);
1343 		}
1344 	}
1345 
1346 dump_trans_data:
1347 	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
1348 						       mvm->fw_dump_trig);
1349 	fw_error_dump->op_mode_len = file_len;
1350 	if (fw_error_dump->trans_ptr)
1351 		file_len += fw_error_dump->trans_ptr->len;
1352 	dump_file->file_len = cpu_to_le32(file_len);
1353 
1354 	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
1355 		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
1356 
1357 	mvm->fw_dump_trig = NULL;
1358 	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
1359 }
1360 
1361 struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
1362 	.trig_desc = {
1363 		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1364 	},
1365 };
1366 
1367 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1368 {
1369 	/* clear the D3 reconfig flag; we only need it to avoid dumping a
1370 	 * firmware coredump on reconfiguration, and we shouldn't do that
1371 	 * on a D3->D0 transition
1372 	 */
1373 	if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1374 		mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1375 		iwl_mvm_fw_error_dump(mvm);
1376 	}
1377 
1378 	/* cleanup all stale references (scan, roc), but keep the
1379 	 * ucode_down ref until reconfig is complete
1380 	 */
1381 	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1382 
1383 	iwl_trans_stop_device(mvm->trans);
1384 
1385 	mvm->scan_status = 0;
1386 	mvm->ps_disabled = false;
1387 	mvm->calibrating = false;
1388 
1389 	/* just in case one was running */
1390 	ieee80211_remain_on_channel_expired(mvm->hw);
1391 
1392 	/*
1393 	 * cleanup all interfaces, even inactive ones, as some might have
1394 	 * gone down during the HW restart
1395 	 */
1396 	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1397 
1398 	mvm->p2p_device_vif = NULL;
1399 	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1400 
1401 	iwl_mvm_reset_phy_ctxts(mvm);
1402 	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1403 	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1404 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1405 	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
1406 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1407 	memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
1408 	memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
1409 	memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
1410 
1411 	ieee80211_wake_queues(mvm->hw);
1412 
1413 	/* clear any stale d0i3 state */
1414 	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1415 
1416 	mvm->vif_count = 0;
1417 	mvm->rx_ba_sessions = 0;
1418 	mvm->fw_dbg_conf = FW_DBG_INVALID;
1419 
1420 	/* keep statistics ticking */
1421 	iwl_mvm_accu_radio_stats(mvm);
1422 }
1423 
1424 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1425 {
1426 	int ret;
1427 
1428 	lockdep_assert_held(&mvm->mutex);
1429 
1430 	/* Clean up some internal and mac80211 state on restart */
1431 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1432 		iwl_mvm_restart_cleanup(mvm);
1433 
1434 	ret = iwl_mvm_up(mvm);
1435 
1436 	if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1437 		/* Something went wrong - we need to finish some cleanup
1438 		 * that normally iwl_mvm_mac_restart_complete() below
1439 		 * would do.
1440 		 */
1441 		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1442 		iwl_mvm_d0i3_enable_tx(mvm, NULL);
1443 	}
1444 
1445 	return ret;
1446 }
1447 
1448 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1449 {
1450 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1451 	int ret;
1452 
1453 	/* Some hw restart cleanups must not hold the mutex */
1454 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1455 		/*
1456 		 * Make sure we are out of d0i3. This is needed
1457 		 * to make sure the reference accounting is correct
1458 		 * (and there is no stale d0i3_exit_work).
1459 		 */
1460 		wait_event_timeout(mvm->d0i3_exit_waitq,
1461 				   !test_bit(IWL_MVM_STATUS_IN_D0I3,
1462 					     &mvm->status),
1463 				   HZ);
1464 	}
1465 
1466 	mutex_lock(&mvm->mutex);
1467 	ret = __iwl_mvm_mac_start(mvm);
1468 	mutex_unlock(&mvm->mutex);
1469 
1470 	return ret;
1471 }
1472 
1473 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1474 {
1475 	int ret;
1476 
1477 	mutex_lock(&mvm->mutex);
1478 
1479 	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1480 	iwl_mvm_d0i3_enable_tx(mvm, NULL);
1481 	ret = iwl_mvm_update_quotas(mvm, true, NULL);
1482 	if (ret)
1483 		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1484 			ret);
1485 
1486 	/* allow transport/FW low power modes */
1487 	iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1488 
1489 	/*
1490 	 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1491 	 * of packets the FW sent out, so we must reconnect.
1492 	 */
1493 	iwl_mvm_teardown_tdls_peers(mvm);
1494 
1495 	mutex_unlock(&mvm->mutex);
1496 }
1497 
1498 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1499 {
1500 	if (!iwl_mvm_is_d0i3_supported(mvm))
1501 		return;
1502 
1503 	if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1504 		if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1505 					!test_bit(IWL_MVM_STATUS_IN_D0I3,
1506 						  &mvm->status),
1507 					HZ))
1508 			WARN_ONCE(1, "D0i3 exit on resume timed out\n");
1509 }
1510 
1511 static void
1512 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1513 			      enum ieee80211_reconfig_type reconfig_type)
1514 {
1515 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1516 
1517 	switch (reconfig_type) {
1518 	case IEEE80211_RECONFIG_TYPE_RESTART:
1519 		iwl_mvm_restart_complete(mvm);
1520 		break;
1521 	case IEEE80211_RECONFIG_TYPE_SUSPEND:
1522 		iwl_mvm_resume_complete(mvm);
1523 		break;
1524 	}
1525 }
1526 
1527 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1528 {
1529 	lockdep_assert_held(&mvm->mutex);
1530 
1531 	/* firmware counters are obviously reset now, but we shouldn't
1532 	 * track them only partially, so also clear the fw_reset_accu counters.
1533 	 */
1534 	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1535 
1536 	/*
1537 	 * Disallow low power states when the FW is down by taking
1538 	 * the UCODE_DOWN ref. in case of ongoing hw restart the
1539 	 * ref is already taken, so don't take it again.
1540 	 */
1541 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1542 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1543 
1544 	/* async_handlers_wk is now blocked */
1545 
1546 	/*
1547 	 * The work item could be running or queued if the
1548 	 * ROC time event stops just as we get here.
1549 	 */
1550 	flush_work(&mvm->roc_done_wk);
1551 
1552 	iwl_trans_stop_device(mvm->trans);
1553 
1554 	iwl_mvm_async_handlers_purge(mvm);
1555 	/* async_handlers_list is empty and will stay empty: HW is stopped */
1556 
1557 	/* the fw is stopped, the aux sta is dead: clean up driver state */
1558 	iwl_mvm_del_aux_sta(mvm);
1559 
1560 	iwl_free_fw_paging(mvm);
1561 
1562 	/*
1563 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1564 	 * won't be called in this case).
1565 	 * But make sure to clean up interfaces that went down before or while
1566 	 * the HW restart was being requested.
1567 	 */
1568 	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1569 		ieee80211_iterate_interfaces(mvm->hw, 0,
1570 					     iwl_mvm_cleanup_iterator, mvm);
1571 
1572 	/* We shouldn't have any UIDs still set.  Loop over all the UIDs to
1573 	 * make sure there's nothing left there and warn if any is found.
1574 	 */
1575 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1576 		int i;
1577 
1578 		for (i = 0; i < mvm->max_scans; i++) {
1579 			if (WARN_ONCE(mvm->scan_uid_status[i],
1580 				      "UMAC scan UID %d status was not cleaned\n",
1581 				      i))
1582 				mvm->scan_uid_status[i] = 0;
1583 		}
1584 	}
1585 
1586 	mvm->ucode_loaded = false;
1587 }
1588 
1589 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1590 {
1591 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1592 
1593 	flush_work(&mvm->d0i3_exit_work);
1594 	flush_work(&mvm->async_handlers_wk);
1595 	cancel_delayed_work_sync(&mvm->fw_dump_wk);
1596 	iwl_mvm_free_fw_dump_desc(mvm);
1597 
1598 	mutex_lock(&mvm->mutex);
1599 	__iwl_mvm_mac_stop(mvm);
1600 	mutex_unlock(&mvm->mutex);
1601 
1602 	/*
1603 	 * The worker might have been waiting for the mutex, let it run and
1604 	 * discover that its list is now empty.
1605 	 */
1606 	cancel_work_sync(&mvm->async_handlers_wk);
1607 }
1608 
1609 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1610 {
1611 	u16 i;
1612 
1613 	lockdep_assert_held(&mvm->mutex);
1614 
1615 	for (i = 0; i < NUM_PHY_CTX; i++)
1616 		if (!mvm->phy_ctxts[i].ref)
1617 			return &mvm->phy_ctxts[i];
1618 
1619 	IWL_ERR(mvm, "No available PHY context\n");
1620 	return NULL;
1621 }
1622 
1623 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1624 				s16 tx_power)
1625 {
1626 	struct iwl_dev_tx_power_cmd cmd = {
1627 		.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1628 		.v2.mac_context_id =
1629 			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1630 		.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
1631 	};
1632 	int len = sizeof(cmd);
1633 
1634 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1635 		cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1636 
1637 	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1638 		len = sizeof(cmd.v2);
1639 
1640 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1641 }
1642 
1643 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1644 				     struct ieee80211_vif *vif)
1645 {
1646 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1647 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1648 	int ret;
1649 
1650 	mvmvif->mvm = mvm;
1651 
1652 	/*
1653 	 * make sure D0i3 exit is completed, otherwise a target access
1654 	 * during tx queue configuration could happen while we are still
1655 	 * in D0i3 state.
1656 	 */
1657 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1658 	if (ret)
1659 		return ret;
1660 
1661 	/*
1662 	 * Not much to do here. The stack will not allow interface
1663 	 * types or combinations that we didn't advertise, so we
1664 	 * don't really have to check the types.
1665 	 */
1666 
1667 	mutex_lock(&mvm->mutex);
1668 
1669 	/* make sure that beacon statistics don't go backwards with FW reset */
1670 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1671 		mvmvif->beacon_stats.accu_num_beacons +=
1672 			mvmvif->beacon_stats.num_beacons;
1673 
1674 	/* Allocate resources for the MAC context, and add it to the fw  */
1675 	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1676 	if (ret)
1677 		goto out_unlock;
1678 
1679 	/* Counting number of interfaces is needed for legacy PM */
1680 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1681 		mvm->vif_count++;
1682 
1683 	/*
1684 	 * The AP binding flow can be done only after the beacon
1685 	 * template is configured (which happens only in the mac80211
1686 	 * start_ap() flow), and adding the broadcast station can happen
1687 	 * only after the binding.
1688 	 * In addition, since modifying the MAC before adding a bcast
1689 	 * station is not allowed by the FW, delay the adding of MAC context to
1690 	 * the point where we can also add the bcast station.
1691 	 * In short: there's not much we can do at this point, other than
1692 	 * allocating resources :)
1693 	 */
1694 	if (vif->type == NL80211_IFTYPE_AP ||
1695 	    vif->type == NL80211_IFTYPE_ADHOC) {
1696 		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1697 		if (ret) {
1698 			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1699 			goto out_release;
1700 		}
1701 
1702 		iwl_mvm_vif_dbgfs_register(mvm, vif);
1703 		goto out_unlock;
1704 	}
1705 
1706 	mvmvif->features |= hw->netdev_features;
1707 
1708 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1709 	if (ret)
1710 		goto out_release;
1711 
1712 	ret = iwl_mvm_power_update_mac(mvm);
1713 	if (ret)
1714 		goto out_remove_mac;
1715 
1716 	/* beacon filtering */
1717 	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1718 	if (ret)
1719 		goto out_remove_mac;
1720 
1721 	if (!mvm->bf_allowed_vif &&
1722 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1723 		mvm->bf_allowed_vif = mvmvif;
1724 		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1725 				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1726 	}
1727 
1728 	/*
1729 	 * P2P_DEVICE interface does not have a channel context assigned to it,
1730 	 * so a dedicated PHY context is allocated to it and the corresponding
1731 	 * MAC context is bound to it at this stage.
1732 	 */
1733 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1734 
1735 		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1736 		if (!mvmvif->phy_ctxt) {
1737 			ret = -ENOSPC;
1738 			goto out_free_bf;
1739 		}
1740 
1741 		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1742 		ret = iwl_mvm_binding_add_vif(mvm, vif);
1743 		if (ret)
1744 			goto out_unref_phy;
1745 
1746 		ret = iwl_mvm_add_bcast_sta(mvm, vif);
1747 		if (ret)
1748 			goto out_unbind;
1749 
1750 		/* Save a pointer to p2p device vif, so it can later be used to
1751 		 * update the p2p device MAC when a GO is started/stopped */
1752 		mvm->p2p_device_vif = vif;
1753 	}
1754 
1755 	iwl_mvm_vif_dbgfs_register(mvm, vif);
1756 	goto out_unlock;
1757 
1758  out_unbind:
1759 	iwl_mvm_binding_remove_vif(mvm, vif);
1760  out_unref_phy:
1761 	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1762  out_free_bf:
1763 	if (mvm->bf_allowed_vif == mvmvif) {
1764 		mvm->bf_allowed_vif = NULL;
1765 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1766 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1767 	}
1768  out_remove_mac:
1769 	mvmvif->phy_ctxt = NULL;
1770 	iwl_mvm_mac_ctxt_remove(mvm, vif);
1771  out_release:
1772 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1773 		mvm->vif_count--;
1774 
1775 	iwl_mvm_mac_ctxt_release(mvm, vif);
1776  out_unlock:
1777 	mutex_unlock(&mvm->mutex);
1778 
1779 	iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1780 
1781 	return ret;
1782 }
1783 
1784 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1785 					struct ieee80211_vif *vif)
1786 {
1787 	u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1788 
1789 	if (tfd_msk) {
1790 		/*
1791 		 * mac80211 first removes all the stations of the vif and
1792 		 * then removes the vif. When it removes a station it also
1793 		 * flushes the AMPDU session. So by now, all the AMPDU sessions
1794 		 * of all the stations of this vif are closed, and the queues
1795 		 * of these AMPDU sessions are properly closed.
1796 		 * We still need to take care of the shared queues of the vif.
1797 		 * Flush them here.
1798 		 */
1799 		mutex_lock(&mvm->mutex);
1800 		iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1801 		mutex_unlock(&mvm->mutex);
1802 
1803 		/*
1804 		 * There are transports that buffer a few frames in the host.
1805 		 * For these, the flush above isn't enough since while we were
1806 		 * flushing, the transport might have sent more frames to the
1807 		 * device. To solve this, wait here until the transport is
1808 		 * empty. Technically, this could have replaced the flush
1809 		 * above, but flush is much faster than draining. So flush
1810 		 * first, and drain to make sure we have no frames in the
1811 		 * transport anymore.
1812 		 * If a station still had frames on the shared queues, it is
1813 		 * already marked as draining, so to complete the draining, we
1814 		 * just need to wait until the transport is empty.
1815 		 */
1816 		iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1817 	}
1818 
1819 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1820 		/*
1821 		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1822 		 * We assume here that all the packets sent to the OFFCHANNEL
1823 		 * queue are sent in a ROC session.
1824 		 */
1825 		flush_work(&mvm->roc_done_wk);
1826 	} else {
1827 		/*
1828 		 * By now, all the AC queues are empty. The AGG queues are
1829 		 * empty too. We already got all the Tx responses for all the
1830 		 * packets in the queues. The drain worker might have been
1831 		 * triggered, so flush it.
1832 		 */
1833 		flush_work(&mvm->sta_drained_wk);
1834 	}
1835 }
1836 
1837 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1838 					 struct ieee80211_vif *vif)
1839 {
1840 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1841 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1842 
1843 	iwl_mvm_prepare_mac_removal(mvm, vif);
1844 
1845 	mutex_lock(&mvm->mutex);
1846 
1847 	if (mvm->bf_allowed_vif == mvmvif) {
1848 		mvm->bf_allowed_vif = NULL;
1849 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1850 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1851 	}
1852 
1853 	iwl_mvm_vif_dbgfs_clean(mvm, vif);
1854 
1855 	/*
1856 	 * For AP/GO interface, the tear down of the resources allocated to the
1857 	 * interface is handled as part of the stop_ap flow.
1858 	 */
1859 	if (vif->type == NL80211_IFTYPE_AP ||
1860 	    vif->type == NL80211_IFTYPE_ADHOC) {
1861 #ifdef CONFIG_NL80211_TESTMODE
1862 		if (vif == mvm->noa_vif) {
1863 			mvm->noa_vif = NULL;
1864 			mvm->noa_duration = 0;
1865 		}
1866 #endif
1867 		iwl_mvm_dealloc_bcast_sta(mvm, vif);
1868 		goto out_release;
1869 	}
1870 
1871 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1872 		mvm->p2p_device_vif = NULL;
1873 		iwl_mvm_rm_bcast_sta(mvm, vif);
1874 		iwl_mvm_binding_remove_vif(mvm, vif);
1875 		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1876 		mvmvif->phy_ctxt = NULL;
1877 	}
1878 
1879 	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1880 		mvm->vif_count--;
1881 
1882 	iwl_mvm_power_update_mac(mvm);
1883 	iwl_mvm_mac_ctxt_remove(mvm, vif);
1884 
1885 out_release:
1886 	iwl_mvm_mac_ctxt_release(mvm, vif);
1887 	mutex_unlock(&mvm->mutex);
1888 }
1889 
1890 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1891 {
1892 	return 0;
1893 }
1894 
1895 struct iwl_mvm_mc_iter_data {
1896 	struct iwl_mvm *mvm;
1897 	int port_id;
1898 };
1899 
1900 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1901 				      struct ieee80211_vif *vif)
1902 {
1903 	struct iwl_mvm_mc_iter_data *data = _data;
1904 	struct iwl_mvm *mvm = data->mvm;
1905 	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1906 	int ret, len;
1907 
1908 	/* if we don't have free ports, mcast frames will be dropped */
1909 	if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1910 		return;
1911 
1912 	if (vif->type != NL80211_IFTYPE_STATION ||
1913 	    !vif->bss_conf.assoc)
1914 		return;
1915 
1916 	cmd->port_id = data->port_id++;
1917 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1918 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1919 
1920 	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1921 	if (ret)
1922 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1923 }
1924 
1925 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1926 {
1927 	struct iwl_mvm_mc_iter_data iter_data = {
1928 		.mvm = mvm,
1929 	};
1930 
1931 	lockdep_assert_held(&mvm->mutex);
1932 
1933 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1934 		return;
1935 
1936 	ieee80211_iterate_active_interfaces_atomic(
1937 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1938 		iwl_mvm_mc_iface_iterator, &iter_data);
1939 }
1940 
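/*
 * mac80211 prepare_multicast callback.  The multicast address list is
 * packed into a MCAST_FILTER_CMD allocated here; the command pointer is
 * returned to mac80211 as a u64 cookie and handed back to us in
 * iwl_mvm_configure_filter() below, which takes ownership of it.  If
 * the list is longer than MAX_MCAST_FILTERING_ADDRESSES (or pass-all is
 * configured), pass_all is set and no addresses are copied.
 */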
1941 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1942 				     struct netdev_hw_addr_list *mc_list)
1943 {
1944 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1945 	struct iwl_mcast_filter_cmd *cmd;
1946 	struct netdev_hw_addr *addr;
1947 	int addr_count;
1948 	bool pass_all;
1949 	int len;
1950 
1951 	addr_count = netdev_hw_addr_list_count(mc_list);
1952 	pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1953 		   IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1954 	if (pass_all)
1955 		addr_count = 0;
1956 
1957 	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1958 	cmd = kzalloc(len, GFP_ATOMIC);
1959 	if (!cmd)
1960 		return 0;
1961 
1962 	if (pass_all) {
1963 		cmd->pass_all = 1;
1964 		return (u64)(unsigned long)cmd;
1965 	}
1966 
1967 	netdev_hw_addr_list_for_each(addr, mc_list) {
1968 		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1969 				   cmd->count, addr->addr);
1970 		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1971 		       addr->addr, ETH_ALEN);
1972 		cmd->count++;
1973 	}
1974 
1975 	return (u64)(unsigned long)cmd;
1976 }
1977 
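/*
 * mac80211 configure_filter callback.  The u64 "multicast" cookie is
 * the MCAST_FILTER_CMD allocated in iwl_mvm_prepare_multicast(); the
 * previous command is freed, the new one is stored and then applied to
 * all associated station interfaces.  None of the FIF_* flags are
 * honoured, so *total_flags is cleared.
 */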
1978 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1979 				     unsigned int changed_flags,
1980 				     unsigned int *total_flags,
1981 				     u64 multicast)
1982 {
1983 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1984 	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1985 
1986 	mutex_lock(&mvm->mutex);
1987 
1988 	/* replace previous configuration */
1989 	kfree(mvm->mcast_filter_cmd);
1990 	mvm->mcast_filter_cmd = cmd;
1991 
1992 	if (!cmd)
1993 		goto out;
1994 
1995 	iwl_mvm_recalc_multicast(mvm);
1996 out:
1997 	mutex_unlock(&mvm->mutex);
1998 	*total_flags = 0;
1999 }
2000 
2001 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
2002 					struct ieee80211_vif *vif,
2003 					unsigned int filter_flags,
2004 					unsigned int changed_flags)
2005 {
2006 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2007 
2008 	/* We only support filtering of probe requests */
2009 	if (!(changed_flags & FIF_PROBE_REQ))
2010 		return;
2011 
2012 	/* Supported only for p2p client interfaces */
2013 	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
2014 	    !vif->p2p)
2015 		return;
2016 
2017 	mutex_lock(&mvm->mutex);
2018 	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2019 	mutex_unlock(&mvm->mutex);
2020 }
2021 
2022 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
2023 struct iwl_bcast_iter_data {
2024 	struct iwl_mvm *mvm;
2025 	struct iwl_bcast_filter_cmd *cmd;
2026 	u8 current_filter;
2027 };
2028 
2029 static void
2030 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
2031 			 const struct iwl_fw_bcast_filter *in_filter,
2032 			 struct iwl_fw_bcast_filter *out_filter)
2033 {
2034 	struct iwl_fw_bcast_filter_attr *attr;
2035 	int i;
2036 
2037 	memcpy(out_filter, in_filter, sizeof(*out_filter));
2038 
2039 	for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
2040 		attr = &out_filter->attrs[i];
2041 
2042 		if (!attr->mask)
2043 			break;
2044 
2045 		switch (attr->reserved1) {
2046 		case cpu_to_le16(BC_FILTER_MAGIC_IP):
2047 			if (vif->bss_conf.arp_addr_cnt != 1) {
2048 				attr->mask = 0;
2049 				continue;
2050 			}
2051 
2052 			attr->val = vif->bss_conf.arp_addr_list[0];
2053 			break;
2054 		case cpu_to_le16(BC_FILTER_MAGIC_MAC):
2055 			attr->val = *(__be32 *)&vif->addr[2];
2056 			break;
2057 		default:
2058 			break;
2059 		}
2060 		attr->reserved1 = 0;
2061 		out_filter->num_attrs++;
2062 	}
2063 }
2064 
2065 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
2066 					  struct ieee80211_vif *vif)
2067 {
2068 	struct iwl_bcast_iter_data *data = _data;
2069 	struct iwl_mvm *mvm = data->mvm;
2070 	struct iwl_bcast_filter_cmd *cmd = data->cmd;
2071 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2072 	struct iwl_fw_bcast_mac *bcast_mac;
2073 	int i;
2074 
2075 	if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
2076 		return;
2077 
2078 	bcast_mac = &cmd->macs[mvmvif->id];
2079 
2080 	/*
2081 	 * enable filtering only for associated stations, but not for P2P
2082 	 * Clients
2083 	 */
2084 	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
2085 	    !vif->bss_conf.assoc)
2086 		return;
2087 
2088 	bcast_mac->default_discard = 1;
2089 
2090 	/* copy all configured filters */
2091 	for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
2092 		/*
2093 		 * Make sure we don't exceed our filter limit.
2094 		 * If there is still a valid filter to be configured,
2095 		 * be on the safe side and just allow bcast for this mac.
2096 		 */
2097 		if (WARN_ON_ONCE(data->current_filter >=
2098 				 ARRAY_SIZE(cmd->filters))) {
2099 			bcast_mac->default_discard = 0;
2100 			bcast_mac->attached_filters = 0;
2101 			break;
2102 		}
2103 
2104 		iwl_mvm_set_bcast_filter(vif,
2105 					 &mvm->bcast_filters[i],
2106 					 &cmd->filters[data->current_filter]);
2107 
2108 		/* skip current filter if it contains no attributes */
2109 		if (!cmd->filters[data->current_filter].num_attrs)
2110 			continue;
2111 
2112 		/* attach the filter to current mac */
2113 		bcast_mac->attached_filters |=
2114 				cpu_to_le16(BIT(data->current_filter));
2115 
2116 		data->current_filter++;
2117 	}
2118 }
2119 
2120 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
2121 				    struct iwl_bcast_filter_cmd *cmd)
2122 {
2123 	struct iwl_bcast_iter_data iter_data = {
2124 		.mvm = mvm,
2125 		.cmd = cmd,
2126 	};
2127 
2128 	if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
2129 		return false;
2130 
2131 	memset(cmd, 0, sizeof(*cmd));
2132 	cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
2133 	cmd->max_macs = ARRAY_SIZE(cmd->macs);
2134 
2135 #ifdef CONFIG_IWLWIFI_DEBUGFS
2136 	/* use debugfs filters/macs if override is configured */
2137 	if (mvm->dbgfs_bcast_filtering.override) {
2138 		memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
2139 		       sizeof(cmd->filters));
2140 		memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
2141 		       sizeof(cmd->macs));
2142 		return true;
2143 	}
2144 #endif
2145 
2146 	/* if no filters are configured, do nothing */
2147 	if (!mvm->bcast_filters)
2148 		return false;
2149 
2150 	/* configure and attach these filters for each associated sta vif */
2151 	ieee80211_iterate_active_interfaces(
2152 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2153 		iwl_mvm_bcast_filter_iterator, &iter_data);
2154 
2155 	return true;
2156 }
2157 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2158 					  struct ieee80211_vif *vif)
2159 {
2160 	struct iwl_bcast_filter_cmd cmd;
2161 
2162 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2163 		return 0;
2164 
2165 	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2166 		return 0;
2167 
2168 	return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
2169 				    sizeof(cmd), &cmd);
2170 }
2171 #else
2172 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2173 						 struct ieee80211_vif *vif)
2174 {
2175 	return 0;
2176 }
2177 #endif
2178 
2179 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2180 					     struct ieee80211_vif *vif,
2181 					     struct ieee80211_bss_conf *bss_conf,
2182 					     u32 changes)
2183 {
2184 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2185 	int ret;
2186 
2187 	/*
2188 	 * Re-calculate the tsf id, as the master-slave relations depend on the
2189 	 * beacon interval, which was not known when the station interface was
2190 	 * added.
2191 	 */
2192 	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
2193 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2194 
2195 	/*
2196 	 * If we're not associated yet, take the (new) BSSID before associating
2197 	 * so the firmware knows. If we're already associated, then use the old
2198 	 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2199 	 * branch for disassociation below.
2200 	 */
2201 	if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2202 		memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2203 
2204 	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
2205 	if (ret)
2206 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2207 
2208 	/* after sending it once, adopt mac80211 data */
2209 	memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2210 	mvmvif->associated = bss_conf->assoc;
2211 
2212 	if (changes & BSS_CHANGED_ASSOC) {
2213 		if (bss_conf->assoc) {
2214 			/* clear statistics to get clean beacon counter */
2215 			iwl_mvm_request_statistics(mvm, true);
2216 			memset(&mvmvif->beacon_stats, 0,
2217 			       sizeof(mvmvif->beacon_stats));
2218 
2219 			/* add quota for this interface */
2220 			ret = iwl_mvm_update_quotas(mvm, true, NULL);
2221 			if (ret) {
2222 				IWL_ERR(mvm, "failed to update quotas\n");
2223 				return;
2224 			}
2225 
2226 			if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2227 				     &mvm->status)) {
2228 				/*
2229 				 * If we're restarting then the firmware will
2230 				 * obviously have lost synchronisation with
2231 				 * the AP. It will attempt to synchronise by
2232 				 * itself, but we can make it more reliable by
2233 				 * scheduling a session protection time event.
2234 				 *
2235 				 * The firmware needs to receive a beacon to
2236 				 * catch up with synchronisation, use 110% of
2237 				 * the beacon interval.
2238 				 *
2239 				 * Set a large maximum delay to allow for more
2240 				 * than a single interface.
2241 				 */
2242 				u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2243 				iwl_mvm_protect_session(mvm, vif, dur, dur,
2244 							5 * dur, false);
2245 			}
2246 
2247 			iwl_mvm_sf_update(mvm, vif, false);
2248 			iwl_mvm_power_vif_assoc(mvm, vif);
2249 			if (vif->p2p) {
2250 				iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
2251 				iwl_mvm_update_smps(mvm, vif,
2252 						    IWL_MVM_SMPS_REQ_PROT,
2253 						    IEEE80211_SMPS_DYNAMIC);
2254 			}
2255 		} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2256 			/*
2257 			 * If update fails - SF might be running in associated
2258 			 * mode while disassociated - which is forbidden.
2259 			 */
2260 			WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2261 				  "Failed to update SF upon disassociation\n");
2262 
2263 			/* remove AP station now that the MAC is unassoc */
2264 			ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
2265 			if (ret)
2266 				IWL_ERR(mvm, "failed to remove AP station\n");
2267 
2268 			if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2269 				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
2270 			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
2271 			/* remove quota for this interface */
2272 			ret = iwl_mvm_update_quotas(mvm, false, NULL);
2273 			if (ret)
2274 				IWL_ERR(mvm, "failed to update quotas\n");
2275 
2276 			if (vif->p2p)
2277 				iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
2278 
2279 			/* this will take the cleared BSSID from bss_conf */
2280 			ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2281 			if (ret)
2282 				IWL_ERR(mvm,
2283 					"failed to update MAC %pM (clear after unassoc)\n",
2284 					vif->addr);
2285 		}
2286 
2287 		iwl_mvm_recalc_multicast(mvm);
2288 		iwl_mvm_configure_bcast_filter(mvm, vif);
2289 
2290 		/* reset rssi values */
2291 		mvmvif->bf_data.ave_beacon_signal = 0;
2292 
2293 		iwl_mvm_bt_coex_vif_change(mvm);
2294 		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2295 				    IEEE80211_SMPS_AUTOMATIC);
2296 	} else if (changes & BSS_CHANGED_BEACON_INFO) {
2297 		/*
2298 		 * We received a beacon _after_ association so
2299 		 * remove the session protection.
2300 		 */
2301 		iwl_mvm_remove_time_event(mvm, mvmvif,
2302 					  &mvmvif->time_event_data);
2303 	}
2304 
2305 	if (changes & BSS_CHANGED_BEACON_INFO) {
2306 		iwl_mvm_sf_update(mvm, vif, false);
2307 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2308 	}
2309 
2310 	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
2311 		ret = iwl_mvm_power_update_mac(mvm);
2312 		if (ret)
2313 			IWL_ERR(mvm, "failed to update power mode\n");
2314 	}
2315 
2316 	if (changes & BSS_CHANGED_TXPOWER) {
2317 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2318 				bss_conf->txpower);
2319 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2320 	}
2321 
2322 	if (changes & BSS_CHANGED_CQM) {
2323 		IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2324 		/* reset cqm events tracking */
2325 		mvmvif->bf_data.last_cqm_event = 0;
2326 		if (mvmvif->bf_data.bf_enabled) {
2327 			ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2328 			if (ret)
2329 				IWL_ERR(mvm,
2330 					"failed to update CQM thresholds\n");
2331 		}
2332 	}
2333 
2334 	if (changes & BSS_CHANGED_ARP_FILTER) {
2335 		IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2336 		iwl_mvm_configure_bcast_filter(mvm, vif);
2337 	}
2338 }
2339 
2340 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2341 				 struct ieee80211_vif *vif)
2342 {
2343 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2344 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2345 	int ret;
2346 
2347 	/*
2348 	 * iwl_mvm_mac_ctxt_add() might read directly from the device
2349 	 * (the system time), so make sure it is available.
2350 	 */
2351 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2352 	if (ret)
2353 		return ret;
2354 
2355 	mutex_lock(&mvm->mutex);
2356 
2357 	/* Send the beacon template */
2358 	ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2359 	if (ret)
2360 		goto out_unlock;
2361 
2362 	/*
2363 	 * Re-calculate the tsf id, as the master-slave relations depend on the
2364 	 * beacon interval, which was not known when the AP interface was added.
2365 	 */
2366 	if (vif->type == NL80211_IFTYPE_AP)
2367 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2368 
2369 	mvmvif->ap_assoc_sta_count = 0;
2370 
2371 	/* Add the mac context */
2372 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2373 	if (ret)
2374 		goto out_unlock;
2375 
2376 	/* Perform the binding */
2377 	ret = iwl_mvm_binding_add_vif(mvm, vif);
2378 	if (ret)
2379 		goto out_remove;
2380 
2381 	/* Send the bcast station. At this stage the TBTT and DTIM time events
2382 	 * are added and applied to the scheduler */
2383 	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2384 	if (ret)
2385 		goto out_unbind;
2386 
2387 	/* must be set before quota calculations */
2388 	mvmvif->ap_ibss_active = true;
2389 
2390 	/* power update needs to be done before quotas */
2391 	iwl_mvm_power_update_mac(mvm);
2392 
2393 	ret = iwl_mvm_update_quotas(mvm, false, NULL);
2394 	if (ret)
2395 		goto out_quota_failed;
2396 
2397 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2398 	if (vif->p2p && mvm->p2p_device_vif)
2399 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2400 
2401 	iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2402 
2403 	iwl_mvm_bt_coex_vif_change(mvm);
2404 
2405 	/* we don't support TDLS during DCM */
2406 	if (iwl_mvm_phy_ctx_count(mvm) > 1)
2407 		iwl_mvm_teardown_tdls_peers(mvm);
2408 
2409 	goto out_unlock;
2410 
2411 out_quota_failed:
2412 	iwl_mvm_power_update_mac(mvm);
2413 	mvmvif->ap_ibss_active = false;
2414 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
2415 out_unbind:
2416 	iwl_mvm_binding_remove_vif(mvm, vif);
2417 out_remove:
2418 	iwl_mvm_mac_ctxt_remove(mvm, vif);
2419 out_unlock:
2420 	mutex_unlock(&mvm->mutex);
2421 	iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2422 	return ret;
2423 }
2424 
2425 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2426 				 struct ieee80211_vif *vif)
2427 {
2428 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2429 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2430 
2431 	iwl_mvm_prepare_mac_removal(mvm, vif);
2432 
2433 	mutex_lock(&mvm->mutex);
2434 
2435 	/* Handle AP stop while in CSA */
2436 	if (rcu_access_pointer(mvm->csa_vif) == vif) {
2437 		iwl_mvm_remove_time_event(mvm, mvmvif,
2438 					  &mvmvif->time_event_data);
2439 		RCU_INIT_POINTER(mvm->csa_vif, NULL);
2440 		mvmvif->csa_countdown = false;
2441 	}
2442 
2443 	if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2444 		RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2445 		mvm->csa_tx_block_bcn_timeout = 0;
2446 	}
2447 
2448 	mvmvif->ap_ibss_active = false;
2449 	mvm->ap_last_beacon_gp2 = 0;
2450 
2451 	iwl_mvm_bt_coex_vif_change(mvm);
2452 
2453 	iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2454 
2455 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2456 	if (vif->p2p && mvm->p2p_device_vif)
2457 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2458 
2459 	iwl_mvm_update_quotas(mvm, false, NULL);
2460 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
2461 	iwl_mvm_binding_remove_vif(mvm, vif);
2462 
2463 	iwl_mvm_power_update_mac(mvm);
2464 
2465 	iwl_mvm_mac_ctxt_remove(mvm, vif);
2466 
2467 	mutex_unlock(&mvm->mutex);
2468 }
2469 
2470 static void
2471 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2472 				 struct ieee80211_vif *vif,
2473 				 struct ieee80211_bss_conf *bss_conf,
2474 				 u32 changes)
2475 {
2476 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2477 
2478 	/* Changes will be applied when the AP/IBSS is started */
2479 	if (!mvmvif->ap_ibss_active)
2480 		return;
2481 
2482 	if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2483 		       BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2484 	    iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2485 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2486 
2487 	/* Need to send a new beacon template to the FW */
2488 	if (changes & BSS_CHANGED_BEACON &&
2489 	    iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2490 		IWL_WARN(mvm, "Failed updating beacon data\n");
2491 
2492 	if (changes & BSS_CHANGED_TXPOWER) {
2493 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2494 				bss_conf->txpower);
2495 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2496 	}
2497 
2498 }
2499 
2500 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2501 				     struct ieee80211_vif *vif,
2502 				     struct ieee80211_bss_conf *bss_conf,
2503 				     u32 changes)
2504 {
2505 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2506 
2507 	/*
2508 	 * iwl_mvm_bss_info_changed_station() might call
2509 	 * iwl_mvm_protect_session(), which reads directly from
2510 	 * the device (the system time), so make sure it is available.
2511 	 */
2512 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2513 		return;
2514 
2515 	mutex_lock(&mvm->mutex);
2516 
2517 	if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2518 		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2519 
2520 	switch (vif->type) {
2521 	case NL80211_IFTYPE_STATION:
2522 		iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2523 		break;
2524 	case NL80211_IFTYPE_AP:
2525 	case NL80211_IFTYPE_ADHOC:
2526 		iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2527 		break;
2528 	default:
2529 		/* shouldn't happen */
2530 		WARN_ON_ONCE(1);
2531 	}
2532 
2533 	mutex_unlock(&mvm->mutex);
2534 	iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2535 }
2536 
2537 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2538 			       struct ieee80211_vif *vif,
2539 			       struct ieee80211_scan_request *hw_req)
2540 {
2541 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2542 	int ret;
2543 
2544 	if (hw_req->req.n_channels == 0 ||
2545 	    hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2546 		return -EINVAL;
2547 
2548 	mutex_lock(&mvm->mutex);
2549 	ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2550 	mutex_unlock(&mvm->mutex);
2551 
2552 	return ret;
2553 }
2554 
2555 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2556 				       struct ieee80211_vif *vif)
2557 {
2558 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2559 
2560 	mutex_lock(&mvm->mutex);
2561 
2562 	/* Due to a race condition, it's possible that mac80211 asks
2563 	 * us to stop a hw_scan when it's already stopped.  This can
2564 	 * happen, for instance, if we stopped the scan ourselves,
2565 	 * called ieee80211_scan_completed() and the userspace called
2566 	 * cancel scan before ieee80211_scan_work() could run.
2567 	 * To handle that, simply return if the scan is not running.
2568 	 */
2569 	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2570 		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2571 
2572 	mutex_unlock(&mvm->mutex);
2573 }
2574 
2575 static void
2576 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2577 				  struct ieee80211_sta *sta, u16 tids,
2578 				  int num_frames,
2579 				  enum ieee80211_frame_release_type reason,
2580 				  bool more_data)
2581 {
2582 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2583 
2584 	/* Called when we need to transmit (a) frame(s) from mac80211 */
2585 
2586 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2587 					  tids, more_data, false);
2588 }
2589 
2590 static void
2591 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2592 				    struct ieee80211_sta *sta, u16 tids,
2593 				    int num_frames,
2594 				    enum ieee80211_frame_release_type reason,
2595 				    bool more_data)
2596 {
2597 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2598 
2599 	/* Called when we need to transmit (a) frame(s) from agg queue */
2600 
2601 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2602 					  tids, more_data, true);
2603 }
2604 
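/*
 * mac80211 sta_notify callback, used here for power-save transitions.
 * On STA_NOTIFY_SLEEP the aggregation queues of the station are marked
 * as buffered and their stuck-queue timers are frozen (the firmware
 * will fail pending frames with TX_STATUS_FAIL_DEST_PS anyway); on
 * STA_NOTIFY_AWAKE the timers are unfrozen and the firmware station is
 * marked awake again.
 */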
2605 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2606 				   struct ieee80211_vif *vif,
2607 				   enum sta_notify_cmd cmd,
2608 				   struct ieee80211_sta *sta)
2609 {
2610 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2611 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2612 	unsigned long txqs = 0, tids = 0;
2613 	int tid;
2614 
2615 	spin_lock_bh(&mvmsta->lock);
2616 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2617 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2618 
2619 		if (tid_data->state != IWL_AGG_ON &&
2620 		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2621 			continue;
2622 
2623 		__set_bit(tid_data->txq_id, &txqs);
2624 
2625 		if (iwl_mvm_tid_queued(tid_data) == 0)
2626 			continue;
2627 
2628 		__set_bit(tid, &tids);
2629 	}
2630 
2631 	switch (cmd) {
2632 	case STA_NOTIFY_SLEEP:
2633 		if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2634 			ieee80211_sta_block_awake(hw, sta, true);
2635 
2636 		for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2637 			ieee80211_sta_set_buffered(sta, tid, true);
2638 
2639 		if (txqs)
2640 			iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2641 		/*
2642 		 * The fw updates the STA to be asleep. Tx packets on the Tx
2643 		 * queues to this station will not be transmitted. The fw will
2644 		 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2645 		 */
2646 		break;
2647 	case STA_NOTIFY_AWAKE:
2648 		if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2649 			break;
2650 
2651 		if (txqs)
2652 			iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2653 		iwl_mvm_sta_modify_ps_wake(mvm, sta);
2654 		break;
2655 	default:
2656 		break;
2657 	}
2658 	spin_unlock_bh(&mvmsta->lock);
2659 }
2660 
2661 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2662 				       struct ieee80211_vif *vif,
2663 				       struct ieee80211_sta *sta)
2664 {
2665 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2666 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2667 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2668 
2669 	/*
2670 	 * This is called before mac80211 does RCU synchronisation,
2671 	 * so here we already invalidate our internal RCU-protected
2672 	 * station pointer. The rest of the code will thus no longer
2673 	 * be able to find the station this way, and we don't rely
2674 	 * on further RCU synchronisation after the sta_state()
2675 	 * callback deleted the station.
2676 	 */
2677 	mutex_lock(&mvm->mutex);
2678 	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2679 		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2680 				   ERR_PTR(-ENOENT));
2681 
2682 	if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
2683 		mvmvif->ap_assoc_sta_count--;
2684 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2685 	}
2686 
2687 	mutex_unlock(&mvm->mutex);
2688 }
2689 
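/*
 * Advertise U-APSD support on the vif only when the firmware reports
 * IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT and the uapsd_disable module
 * parameter hasn't been set.
 */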
2690 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2691 				const u8 *bssid)
2692 {
2693 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2694 		return;
2695 
2696 	if (iwlwifi_mod_params.uapsd_disable) {
2697 		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2698 		return;
2699 	}
2700 
2701 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2702 }
2703 
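/*
 * mac80211 sta_state callback.  The interesting transitions handled
 * below are:
 *   NOTEXIST -> NONE      add the station to the firmware (with sanity
 *                         checks on the beacon interval and TDLS limits)
 *   AUTH -> ASSOC         update the station and initialise rate scaling
 *   ASSOC -> AUTHORIZED   enable beacon filtering (and tear down TDLS
 *                         peers when more than one PHY context is active)
 *   AUTHORIZED -> ASSOC   disable beacon filtering
 *   NONE -> NOTEXIST      remove the station from the firmware
 * TDLS stations additionally reserve/unreserve IWL_MVM_TDLS_FW_TID
 * outside the mutex, after the state change succeeded.
 */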
2704 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2705 				 struct ieee80211_vif *vif,
2706 				 struct ieee80211_sta *sta,
2707 				 enum ieee80211_sta_state old_state,
2708 				 enum ieee80211_sta_state new_state)
2709 {
2710 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2711 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2712 	int ret;
2713 
2714 	IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2715 			   sta->addr, old_state, new_state);
2716 
2717 	/* this would be a mac80211 bug ... but don't crash */
2718 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2719 		return -EINVAL;
2720 
2721 	/* if a STA is being removed, reuse its ID */
2722 	flush_work(&mvm->sta_drained_wk);
2723 
2724 	mutex_lock(&mvm->mutex);
2725 	if (old_state == IEEE80211_STA_NOTEXIST &&
2726 	    new_state == IEEE80211_STA_NONE) {
2727 		/*
2728 		 * Firmware bug - it'll crash if the beacon interval is less
2729 		 * than 16. We can't avoid connecting at all, so refuse the
2730 		 * station state change, this will cause mac80211 to abandon
2731 		 * attempts to connect to this AP, and eventually wpa_s will
2732 		 * blacklist the AP...
2733 		 */
2734 		if (vif->type == NL80211_IFTYPE_STATION &&
2735 		    vif->bss_conf.beacon_int < 16) {
2736 			IWL_ERR(mvm,
2737 				"AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2738 				sta->addr, vif->bss_conf.beacon_int);
2739 			ret = -EINVAL;
2740 			goto out_unlock;
2741 		}
2742 
2743 		if (sta->tdls &&
2744 		    (vif->p2p ||
2745 		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
2746 						IWL_MVM_TDLS_STA_COUNT ||
2747 		     iwl_mvm_phy_ctx_count(mvm) > 1)) {
2748 			IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2749 			ret = -EBUSY;
2750 			goto out_unlock;
2751 		}
2752 
2753 		ret = iwl_mvm_add_sta(mvm, vif, sta);
2754 		if (sta->tdls && ret == 0)
2755 			iwl_mvm_recalc_tdls_state(mvm, vif, true);
2756 	} else if (old_state == IEEE80211_STA_NONE &&
2757 		   new_state == IEEE80211_STA_AUTH) {
2758 		/*
2759 		 * EBS may be disabled due to previous failures reported by FW.
2760 		 * Reset EBS status here assuming environment has been changed.
2761 		 */
2762 		mvm->last_ebs_successful = true;
2763 		iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2764 		ret = 0;
2765 	} else if (old_state == IEEE80211_STA_AUTH &&
2766 		   new_state == IEEE80211_STA_ASSOC) {
2767 		ret = iwl_mvm_update_sta(mvm, vif, sta);
2768 		if (ret == 0)
2769 			iwl_mvm_rs_rate_init(mvm, sta,
2770 					     mvmvif->phy_ctxt->channel->band,
2771 					     true);
2772 	} else if (old_state == IEEE80211_STA_ASSOC &&
2773 		   new_state == IEEE80211_STA_AUTHORIZED) {
2774 
2775 		/* we don't support TDLS during DCM */
2776 		if (iwl_mvm_phy_ctx_count(mvm) > 1)
2777 			iwl_mvm_teardown_tdls_peers(mvm);
2778 
2779 		/* enable beacon filtering */
2780 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2781 		ret = 0;
2782 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
2783 		   new_state == IEEE80211_STA_ASSOC) {
2784 		/* disable beacon filtering */
2785 		WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2786 		ret = 0;
2787 	} else if (old_state == IEEE80211_STA_ASSOC &&
2788 		   new_state == IEEE80211_STA_AUTH) {
2789 		ret = 0;
2790 	} else if (old_state == IEEE80211_STA_AUTH &&
2791 		   new_state == IEEE80211_STA_NONE) {
2792 		ret = 0;
2793 	} else if (old_state == IEEE80211_STA_NONE &&
2794 		   new_state == IEEE80211_STA_NOTEXIST) {
2795 		ret = iwl_mvm_rm_sta(mvm, vif, sta);
2796 		if (sta->tdls)
2797 			iwl_mvm_recalc_tdls_state(mvm, vif, false);
2798 	} else {
2799 		ret = -EIO;
2800 	}
2801  out_unlock:
2802 	mutex_unlock(&mvm->mutex);
2803 
2804 	if (sta->tdls && ret == 0) {
2805 		if (old_state == IEEE80211_STA_NOTEXIST &&
2806 		    new_state == IEEE80211_STA_NONE)
2807 			ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2808 		else if (old_state == IEEE80211_STA_NONE &&
2809 			 new_state == IEEE80211_STA_NOTEXIST)
2810 			ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2811 	}
2812 
2813 	return ret;
2814 }
2815 
2816 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2817 {
2818 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2819 
2820 	mvm->rts_threshold = value;
2821 
2822 	return 0;
2823 }
2824 
2825 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2826 				  struct ieee80211_vif *vif,
2827 				  struct ieee80211_sta *sta, u32 changed)
2828 {
2829 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2830 
2831 	if (vif->type == NL80211_IFTYPE_STATION &&
2832 	    changed & IEEE80211_RC_NSS_CHANGED)
2833 		iwl_mvm_sf_update(mvm, vif, false);
2834 }
2835 
2836 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2837 			       struct ieee80211_vif *vif, u16 ac,
2838 			       const struct ieee80211_tx_queue_params *params)
2839 {
2840 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2841 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2842 
2843 	mvmvif->queue_params[ac] = *params;
2844 
2845 	/*
2846 	 * No need to update right away, we'll get BSS_CHANGED_QOS.
2847 	 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2848 	 */
2849 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2850 		int ret;
2851 
2852 		mutex_lock(&mvm->mutex);
2853 		ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2854 		mutex_unlock(&mvm->mutex);
2855 		return ret;
2856 	}
2857 	return 0;
2858 }
2859 
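/*
 * Called by mac80211 before transmitting authentication/association
 * frames.  A session-protection time event sized from the beacon
 * interval (bounded by the IWL_MVM_TE_SESSION_PROTECTION_*_TIME_MS
 * defaults) is scheduled so the firmware stays on channel long enough
 * to hear the AP.
 */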
2860 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2861 				      struct ieee80211_vif *vif)
2862 {
2863 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2864 	u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
2865 			   200 + vif->bss_conf.beacon_int);
2866 	u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
2867 			       100 + vif->bss_conf.beacon_int);
2868 
2869 	if (WARN_ON_ONCE(vif->bss_conf.assoc))
2870 		return;
2871 
2872 	/*
2873 	 * iwl_mvm_protect_session() reads directly from the device
2874 	 * (the system time), so make sure it is available.
2875 	 */
2876 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2877 		return;
2878 
2879 	mutex_lock(&mvm->mutex);
2880 	/* Try really hard to protect the session and hear a beacon */
2881 	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2882 	mutex_unlock(&mvm->mutex);
2883 
2884 	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2885 }
2886 
2887 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2888 					struct ieee80211_vif *vif,
2889 					struct cfg80211_sched_scan_request *req,
2890 					struct ieee80211_scan_ies *ies)
2891 {
2892 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2893 
2894 	int ret;
2895 
2896 	mutex_lock(&mvm->mutex);
2897 
2898 	if (!vif->bss_conf.idle) {
2899 		ret = -EBUSY;
2900 		goto out;
2901 	}
2902 
2903 	ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2904 
2905 out:
2906 	mutex_unlock(&mvm->mutex);
2907 	return ret;
2908 }
2909 
2910 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2911 				       struct ieee80211_vif *vif)
2912 {
2913 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2914 	int ret;
2915 
2916 	mutex_lock(&mvm->mutex);
2917 
2918 	/* Due to a race condition, it's possible that mac80211 asks
2919 	 * us to stop a sched_scan when it's already stopped.  This
2920 	 * can happen, for instance, if we stopped the scan ourselves,
2921 	 * called ieee80211_sched_scan_stopped() and the userspace called
2922 	 * stop sched scan before ieee80211_sched_scan_stopped_work()
2923 	 * could run.  To handle this, simply return if the scan is
2924 	 * not running.
2925 	 */
2926 	if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2927 		mutex_unlock(&mvm->mutex);
2928 		return 0;
2929 	}
2930 
2931 	ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2932 	mutex_unlock(&mvm->mutex);
2933 	iwl_mvm_wait_for_async_handlers(mvm);
2934 
2935 	return ret;
2936 }
2937 
2938 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2939 			       enum set_key_cmd cmd,
2940 			       struct ieee80211_vif *vif,
2941 			       struct ieee80211_sta *sta,
2942 			       struct ieee80211_key_conf *key)
2943 {
2944 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2945 	int ret;
2946 	u8 key_offset;
2947 
2948 	if (iwlwifi_mod_params.sw_crypto) {
2949 		IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2950 		return -EOPNOTSUPP;
2951 	}
2952 
2953 	switch (key->cipher) {
2954 	case WLAN_CIPHER_SUITE_TKIP:
2955 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2956 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2957 		break;
2958 	case WLAN_CIPHER_SUITE_CCMP:
2959 		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2960 		break;
2961 	case WLAN_CIPHER_SUITE_AES_CMAC:
2962 		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2963 		break;
2964 	case WLAN_CIPHER_SUITE_WEP40:
2965 	case WLAN_CIPHER_SUITE_WEP104:
2966 		/* For non-client mode, only use WEP keys for TX as we probably
2967 		 * don't have a station yet anyway and would then have to keep
2968 		 * track of the keys, linking them to each of the clients/peers
2969 		 * as they appear. For now, don't do that, for performance WEP
2970 		 * offload doesn't really matter much, but we need it for some
2971 		 * other offload features in client mode.
2972 		 */
2973 		if (vif->type != NL80211_IFTYPE_STATION)
2974 			return 0;
2975 		break;
2976 	default:
2977 		/* currently FW supports only one optional cipher scheme */
2978 		if (hw->n_cipher_schemes &&
2979 		    hw->cipher_schemes->cipher == key->cipher)
2980 			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2981 		else
2982 			return -EOPNOTSUPP;
2983 	}
2984 
2985 	mutex_lock(&mvm->mutex);
2986 
2987 	switch (cmd) {
2988 	case SET_KEY:
2989 		if ((vif->type == NL80211_IFTYPE_ADHOC ||
2990 		     vif->type == NL80211_IFTYPE_AP) && !sta) {
2991 			/*
2992 			 * GTK on AP interface is a TX-only key, return 0;
2993 			 * on IBSS they're per-station and because we're lazy
2994 			 * we don't support them for RX, so do the same.
2995 			 */
2996 			ret = 0;
2997 			key->hw_key_idx = STA_KEY_IDX_INVALID;
2998 			break;
2999 		}
3000 
3001 		/* During FW restart, in order to restore the state as it was,
3002 		 * don't try to reprogram keys we previously failed for.
3003 		 */
3004 		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3005 		    key->hw_key_idx == STA_KEY_IDX_INVALID) {
3006 			IWL_DEBUG_MAC80211(mvm,
3007 					   "skip invalid idx key programming during restart\n");
3008 			ret = 0;
3009 			break;
3010 		}
3011 
3012 		/* in HW restart reuse the index, otherwise request a new one */
3013 		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
3014 			key_offset = key->hw_key_idx;
3015 		else
3016 			key_offset = STA_KEY_IDX_INVALID;
3017 
3018 		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
3019 		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
3020 		if (ret) {
3021 			IWL_WARN(mvm, "set key failed\n");
3022 			/*
3023 			 * can't add key for RX, but we don't need it
3024 			 * in the device for TX so still return 0
3025 			 */
3026 			key->hw_key_idx = STA_KEY_IDX_INVALID;
3027 			ret = 0;
3028 		}
3029 
3030 		break;
3031 	case DISABLE_KEY:
3032 		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
3033 			ret = 0;
3034 			break;
3035 		}
3036 
3037 		IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
3038 		ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
3039 		break;
3040 	default:
3041 		ret = -EINVAL;
3042 	}
3043 
3044 	mutex_unlock(&mvm->mutex);
3045 	return ret;
3046 }
3047 
3048 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3049 					struct ieee80211_vif *vif,
3050 					struct ieee80211_key_conf *keyconf,
3051 					struct ieee80211_sta *sta,
3052 					u32 iv32, u16 *phase1key)
3053 {
3054 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3055 
3056 	if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3057 		return;
3058 
3059 	iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3060 }
3061 
3062 
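/*
 * Notification-wait handler for the AUX ROC (hotspot) flow.  It runs in
 * the RX path and stores the time event UID reported by the firmware.
 * The general pattern used with it (see iwl_mvm_send_aux_roc_cmd()
 * below) is, roughly:
 *
 *	iwl_init_notification_wait(&mvm->notif_wait, &wait,
 *				   cmds, ARRAY_SIZE(cmds),
 *				   iwl_mvm_rx_aux_roc, te_data);
 *	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &cmd);
 *	if (ret)
 *		iwl_remove_notification(&mvm->notif_wait, &wait);
 *	else
 *		ret = iwl_wait_notification(&mvm->notif_wait, &wait, 1);
 *
 * so the response is processed before the caller is woken up.
 */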
3063 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3064 			       struct iwl_rx_packet *pkt, void *data)
3065 {
3066 	struct iwl_mvm *mvm =
3067 		container_of(notif_wait, struct iwl_mvm, notif_wait);
3068 	struct iwl_hs20_roc_res *resp;
3069 	int resp_len = iwl_rx_packet_payload_len(pkt);
3070 	struct iwl_mvm_time_event_data *te_data = data;
3071 
3072 	if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3073 		return true;
3074 
3075 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3076 		IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3077 		return true;
3078 	}
3079 
3080 	resp = (void *)pkt->data;
3081 
3082 	IWL_DEBUG_TE(mvm,
3083 		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
3084 		     resp->status, resp->event_unique_id);
3085 
3086 	te_data->uid = le32_to_cpu(resp->event_unique_id);
3087 	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3088 		     te_data->uid);
3089 
3090 	spin_lock_bh(&mvm->time_event_lock);
3091 	list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3092 	spin_unlock_bh(&mvm->time_event_lock);
3093 
3094 	return true;
3095 }
3096 
3097 #define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
3098 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3099 				    struct ieee80211_channel *channel,
3100 				    struct ieee80211_vif *vif,
3101 				    int duration)
3102 {
3103 	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3104 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3105 	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3106 	static const u16 time_event_response[] = { HOT_SPOT_CMD };
3107 	struct iwl_notification_wait wait_time_event;
3108 	struct iwl_hs20_roc_req aux_roc_req = {
3109 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3110 		.id_and_color =
3111 			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3112 		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3113 		/* Set the channel info data */
3114 		.channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
3115 			PHY_BAND_24 : PHY_BAND_5,
3116 		.channel_info.channel = channel->hw_value,
3117 		.channel_info.width = PHY_VHT_CHANNEL_MODE20,
3118 		/* Set the time and duration */
3119 		.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3120 		.apply_time_max_delay =
3121 			cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
3122 		.duration = cpu_to_le32(MSEC_TO_TU(duration)),
3123 	 };
3124 
3125 	/* Set the node address */
3126 	memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3127 
3128 	lockdep_assert_held(&mvm->mutex);
3129 
3130 	spin_lock_bh(&mvm->time_event_lock);
3131 
3132 	if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3133 		spin_unlock_bh(&mvm->time_event_lock);
3134 		return -EIO;
3135 	}
3136 
3137 	te_data->vif = vif;
3138 	te_data->duration = duration;
3139 	te_data->id = HOT_SPOT_CMD;
3140 
3141 	spin_unlock_bh(&mvm->time_event_lock);
3142 
3143 	/*
3144 	 * Use a notification wait, which really just processes the
3145 	 * command response and doesn't wait for anything, in order
3146 	 * to be able to process the response and get the UID inside
3147 	 * the RX path. Using CMD_WANT_SKB doesn't work because it
3148 	 * stores the buffer and then wakes up this thread, by which
3149 	 * time another notification (that the time event started)
3150 	 * might already be processed unsuccessfully.
3151 	 */
3152 	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3153 				   time_event_response,
3154 				   ARRAY_SIZE(time_event_response),
3155 				   iwl_mvm_rx_aux_roc, te_data);
3156 
3157 	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3158 				   &aux_roc_req);
3159 
3160 	if (res) {
3161 		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3162 		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3163 		goto out_clear_te;
3164 	}
3165 
3166 	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
3167 	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3168 	/* should never fail */
3169 	WARN_ON_ONCE(res);
3170 
3171 	if (res) {
3172  out_clear_te:
3173 		spin_lock_bh(&mvm->time_event_lock);
3174 		iwl_mvm_te_clear_data(mvm, te_data);
3175 		spin_unlock_bh(&mvm->time_event_lock);
3176 	}
3177 
3178 	return res;
3179 }
3180 
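/*
 * mac80211 remain-on-channel callback.  Station interfaces use the AUX
 * ROC (hotspot) firmware flow when IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT
 * is advertised; P2P_DEVICE interfaces either reuse an existing PHY
 * context on the requested channel or reconfigure/allocate one, rebind
 * to it and then schedule the ROC time event.
 */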
3181 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3182 		       struct ieee80211_vif *vif,
3183 		       struct ieee80211_channel *channel,
3184 		       int duration,
3185 		       enum ieee80211_roc_type type)
3186 {
3187 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3188 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3189 	struct cfg80211_chan_def chandef;
3190 	struct iwl_mvm_phy_ctxt *phy_ctxt;
3191 	int ret, i;
3192 
3193 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3194 			   duration, type);
3195 
3196 	flush_work(&mvm->roc_done_wk);
3197 
3198 	mutex_lock(&mvm->mutex);
3199 
3200 	switch (vif->type) {
3201 	case NL80211_IFTYPE_STATION:
3202 		if (fw_has_capa(&mvm->fw->ucode_capa,
3203 				IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3204 			/* Use aux roc framework (HS20) */
3205 			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3206 						       vif, duration);
3207 			goto out_unlock;
3208 		}
3209 		IWL_ERR(mvm, "hotspot not supported\n");
3210 		ret = -EINVAL;
3211 		goto out_unlock;
3212 	case NL80211_IFTYPE_P2P_DEVICE:
3213 		/* handle below */
3214 		break;
3215 	default:
3216 		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3217 		ret = -EINVAL;
3218 		goto out_unlock;
3219 	}
3220 
3221 	for (i = 0; i < NUM_PHY_CTX; i++) {
3222 		phy_ctxt = &mvm->phy_ctxts[i];
3223 		if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3224 			continue;
3225 
3226 		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3227 			/*
3228 			 * Unbind the P2P_DEVICE from the current PHY context,
3229 			 * and if the PHY context is not used remove it.
3230 			 */
3231 			ret = iwl_mvm_binding_remove_vif(mvm, vif);
3232 			if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3233 				goto out_unlock;
3234 
3235 			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3236 
3237 			/* Bind the P2P_DEVICE to the current PHY Context */
3238 			mvmvif->phy_ctxt = phy_ctxt;
3239 
3240 			ret = iwl_mvm_binding_add_vif(mvm, vif);
3241 			if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3242 				goto out_unlock;
3243 
3244 			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3245 			goto schedule_time_event;
3246 		}
3247 	}
3248 
3249 	/* Need to update the PHY context only if the ROC channel changed */
3250 	if (channel == mvmvif->phy_ctxt->channel)
3251 		goto schedule_time_event;
3252 
3253 	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3254 
3255 	/*
3256 	 * Change the PHY context configuration as it is currently referenced
3257 	 * only by the P2P Device MAC
3258 	 */
3259 	if (mvmvif->phy_ctxt->ref == 1) {
3260 		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3261 					       &chandef, 1, 1);
3262 		if (ret)
3263 			goto out_unlock;
3264 	} else {
3265 		/*
3266 		 * The PHY context is shared with other MACs. Need to remove the
3267 		 * P2P Device from the binding, allocate a new PHY context and
3268 		 * create a new binding
3269 		 */
3270 		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3271 		if (!phy_ctxt) {
3272 			ret = -ENOSPC;
3273 			goto out_unlock;
3274 		}
3275 
3276 		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3277 					       1, 1);
3278 		if (ret) {
3279 			IWL_ERR(mvm, "Failed to change PHY context\n");
3280 			goto out_unlock;
3281 		}
3282 
3283 		/* Unbind the P2P_DEVICE from the current PHY context */
3284 		ret = iwl_mvm_binding_remove_vif(mvm, vif);
3285 		if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3286 			goto out_unlock;
3287 
3288 		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3289 
3290 		/* Bind the P2P_DEVICE to the new allocated PHY context */
3291 		mvmvif->phy_ctxt = phy_ctxt;
3292 
3293 		ret = iwl_mvm_binding_add_vif(mvm, vif);
3294 		if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3295 			goto out_unlock;
3296 
3297 		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3298 	}
3299 
3300 schedule_time_event:
3301 	/* Schedule the time events */
3302 	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3303 
3304 out_unlock:
3305 	mutex_unlock(&mvm->mutex);
3306 	IWL_DEBUG_MAC80211(mvm, "leave\n");
3307 	return ret;
3308 }
3309 
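/* mac80211 cancel_remain_on_channel handler: stop any active ROC time event */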
3310 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3311 {
3312 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3313 
3314 	IWL_DEBUG_MAC80211(mvm, "enter\n");
3315 
3316 	mutex_lock(&mvm->mutex);
3317 	iwl_mvm_stop_roc(mvm);
3318 	mutex_unlock(&mvm->mutex);
3319 
3320 	IWL_DEBUG_MAC80211(mvm, "leave\n");
3321 	return 0;
3322 }
3323 
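/*
 * Allocate a free PHY context for the new channel context, program it
 * with the context's minimal chandef and store its id in ctx->drv_priv.
 */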
3324 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3325 				 struct ieee80211_chanctx_conf *ctx)
3326 {
3327 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3328 	struct iwl_mvm_phy_ctxt *phy_ctxt;
3329 	int ret;
3330 
3331 	lockdep_assert_held(&mvm->mutex);
3332 
3333 	IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3334 
3335 	phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3336 	if (!phy_ctxt) {
3337 		ret = -ENOSPC;
3338 		goto out;
3339 	}
3340 
3341 	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3342 				       ctx->rx_chains_static,
3343 				       ctx->rx_chains_dynamic);
3344 	if (ret) {
3345 		IWL_ERR(mvm, "Failed to add PHY context\n");
3346 		goto out;
3347 	}
3348 
3349 	iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3350 	*phy_ctxt_id = phy_ctxt->id;
3351 out:
3352 	return ret;
3353 }
3354 
3355 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3356 			       struct ieee80211_chanctx_conf *ctx)
3357 {
3358 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3359 	int ret;
3360 
3361 	mutex_lock(&mvm->mutex);
3362 	ret = __iwl_mvm_add_chanctx(mvm, ctx);
3363 	mutex_unlock(&mvm->mutex);
3364 
3365 	return ret;
3366 }
3367 
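/* Drop the reference taken in __iwl_mvm_add_chanctx() */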
3368 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3369 				     struct ieee80211_chanctx_conf *ctx)
3370 {
3371 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3372 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3373 
3374 	lockdep_assert_held(&mvm->mutex);
3375 
3376 	iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3377 }
3378 
3379 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3380 				   struct ieee80211_chanctx_conf *ctx)
3381 {
3382 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3383 
3384 	mutex_lock(&mvm->mutex);
3385 	__iwl_mvm_remove_chanctx(mvm, ctx);
3386 	mutex_unlock(&mvm->mutex);
3387 }
3388 
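/*
 * Update the PHY context when the channel context changes.  Only
 * width/chains/radar changes can be applied while the PHY context is
 * shared by more than one interface.
 */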
3389 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3390 				   struct ieee80211_chanctx_conf *ctx,
3391 				   u32 changed)
3392 {
3393 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3394 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3395 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3396 
3397 	if (WARN_ONCE((phy_ctxt->ref > 1) &&
3398 		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3399 				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3400 				   IEEE80211_CHANCTX_CHANGE_RADAR |
3401 				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3402 		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
3403 		      phy_ctxt->ref, changed))
3404 		return;
3405 
3406 	mutex_lock(&mvm->mutex);
3407 	iwl_mvm_bt_coex_vif_change(mvm);
3408 	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3409 				 ctx->rx_chains_static,
3410 				 ctx->rx_chains_dynamic);
3411 	mutex_unlock(&mvm->mutex);
3412 }
3413 
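/*
 * Bind the interface to the PHY context backing this channel context
 * and update power and quota accordingly.  switching_chanctx is true
 * when called from the channel-switch (CSA) flow.
 */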
3414 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3415 					struct ieee80211_vif *vif,
3416 					struct ieee80211_chanctx_conf *ctx,
3417 					bool switching_chanctx)
3418 {
3419 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3420 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3421 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3422 	int ret;
3423 
3424 	lockdep_assert_held(&mvm->mutex);
3425 
3426 	mvmvif->phy_ctxt = phy_ctxt;
3427 
3428 	switch (vif->type) {
3429 	case NL80211_IFTYPE_AP:
3430 		/* only needed if we're switching chanctx (i.e. during CSA) */
3431 		if (switching_chanctx) {
3432 			mvmvif->ap_ibss_active = true;
3433 			break;
3434 		}
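		/* fall through */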
3435 	case NL80211_IFTYPE_ADHOC:
3436 		/*
3437 		 * The AP binding flow is handled as part of the start_ap flow
3438 		 * (in bss_info_changed), similarly for IBSS.
3439 		 */
3440 		ret = 0;
3441 		goto out;
3442 	case NL80211_IFTYPE_STATION:
3443 		break;
3444 	case NL80211_IFTYPE_MONITOR:
3445 		/* always disable PS when a monitor interface is active */
3446 		mvmvif->ps_disabled = true;
3447 		break;
3448 	default:
3449 		ret = -EINVAL;
3450 		goto out;
3451 	}
3452 
3453 	ret = iwl_mvm_binding_add_vif(mvm, vif);
3454 	if (ret)
3455 		goto out;
3456 
3457 	/*
3458 	 * Power state must be updated before quotas,
3459 	 * otherwise fw will complain.
3460 	 */
3461 	iwl_mvm_power_update_mac(mvm);
3462 
3463 	/* Setting the quota at this stage is only required for monitor
3464 	 * interfaces. For the other types, the bss_info changed flow
3465 	 * will handle quota settings.
3466 	 */
3467 	if (vif->type == NL80211_IFTYPE_MONITOR) {
3468 		mvmvif->monitor_active = true;
3469 		ret = iwl_mvm_update_quotas(mvm, false, NULL);
3470 		if (ret)
3471 			goto out_remove_binding;
3472 	}
3473 
3474 	/* Handle binding during CSA */
3475 	if (vif->type == NL80211_IFTYPE_AP) {
3476 		iwl_mvm_update_quotas(mvm, false, NULL);
3477 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3478 	}
3479 
3480 	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3481 		u32 duration = 2 * vif->bss_conf.beacon_int;
3482 
3483 		/* iwl_mvm_protect_session() reads directly from the
3484 		 * device (the system time), so make sure it is
3485 		 * available.
3486 		 */
3487 		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3488 		if (ret)
3489 			goto out_remove_binding;
3490 
3491 		/* Protect the session to make sure we hear the first
3492 		 * beacon on the new channel.
3493 		 */
3494 		iwl_mvm_protect_session(mvm, vif, duration, duration,
3495 					vif->bss_conf.beacon_int / 2,
3496 					true);
3497 
3498 		iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3499 
3500 		iwl_mvm_update_quotas(mvm, false, NULL);
3501 	}
3502 
3503 	goto out;
3504 
3505 out_remove_binding:
3506 	iwl_mvm_binding_remove_vif(mvm, vif);
3507 	iwl_mvm_power_update_mac(mvm);
3508 out:
3509 	if (ret)
3510 		mvmvif->phy_ctxt = NULL;
3511 	return ret;
3512 }
3513 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3514 				      struct ieee80211_vif *vif,
3515 				      struct ieee80211_chanctx_conf *ctx)
3516 {
3517 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3518 	int ret;
3519 
3520 	mutex_lock(&mvm->mutex);
3521 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3522 	mutex_unlock(&mvm->mutex);
3523 
3524 	return ret;
3525 }
3526 
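/*
 * Undo __iwl_mvm_assign_vif_chanctx(): remove the time event and the
 * binding, then update quota and power.  Handles the CSA specifics for
 * AP and station interfaces when switching_chanctx is true.
 */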
3527 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3528 					   struct ieee80211_vif *vif,
3529 					   struct ieee80211_chanctx_conf *ctx,
3530 					   bool switching_chanctx)
3531 {
3532 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3533 	struct ieee80211_vif *disabled_vif = NULL;
3534 
3535 	lockdep_assert_held(&mvm->mutex);
3536 
3537 	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3538 
3539 	switch (vif->type) {
3540 	case NL80211_IFTYPE_ADHOC:
3541 		goto out;
3542 	case NL80211_IFTYPE_MONITOR:
3543 		mvmvif->monitor_active = false;
3544 		mvmvif->ps_disabled = false;
3545 		break;
3546 	case NL80211_IFTYPE_AP:
3547 		/* This part is triggered only during CSA */
3548 		if (!switching_chanctx || !mvmvif->ap_ibss_active)
3549 			goto out;
3550 
3551 		mvmvif->csa_countdown = false;
3552 
3553 		/* Set CS bit on all the stations */
3554 		iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3555 
3556 		/* Save blocked iface, the timeout is set on the next beacon */
3557 		rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3558 
3559 		mvmvif->ap_ibss_active = false;
3560 		break;
3561 	case NL80211_IFTYPE_STATION:
3562 		if (!switching_chanctx)
3563 			break;
3564 
3565 		disabled_vif = vif;
3566 
3567 		iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3568 		break;
3569 	default:
3570 		break;
3571 	}
3572 
3573 	iwl_mvm_update_quotas(mvm, false, disabled_vif);
3574 	iwl_mvm_binding_remove_vif(mvm, vif);
3575 
3576 out:
3577 	mvmvif->phy_ctxt = NULL;
3578 	iwl_mvm_power_update_mac(mvm);
3579 }
3580 
3581 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3582 					 struct ieee80211_vif *vif,
3583 					 struct ieee80211_chanctx_conf *ctx)
3584 {
3585 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3586 
3587 	mutex_lock(&mvm->mutex);
3588 	__iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3589 	mutex_unlock(&mvm->mutex);
3590 }
3591 
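/*
 * CHANCTX_SWMODE_SWAP_CONTEXTS: unassign the vif from the old context,
 * remove it, add the new one and assign the vif to it.  On failure try
 * to restore the old context; if that fails too, restart the hardware.
 */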
3592 static int
3593 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3594 				struct ieee80211_vif_chanctx_switch *vifs)
3595 {
3596 	int ret;
3597 
3598 	mutex_lock(&mvm->mutex);
3599 	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3600 	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3601 
3602 	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3603 	if (ret) {
3604 		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3605 		goto out_reassign;
3606 	}
3607 
3608 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3609 					   true);
3610 	if (ret) {
3611 		IWL_ERR(mvm,
3612 			"failed to assign new_ctx during channel switch\n");
3613 		goto out_remove;
3614 	}
3615 
3616 	/* we don't support TDLS during DCM - can be caused by channel switch */
3617 	if (iwl_mvm_phy_ctx_count(mvm) > 1)
3618 		iwl_mvm_teardown_tdls_peers(mvm);
3619 
3620 	goto out;
3621 
3622 out_remove:
3623 	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3624 
3625 out_reassign:
3626 	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3627 		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3628 		goto out_restart;
3629 	}
3630 
3631 	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3632 					 true)) {
3633 		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3634 		goto out_restart;
3635 	}
3636 
3637 	goto out;
3638 
3639 out_restart:
3640 	/* things keep failing, better restart the hw */
3641 	iwl_mvm_nic_restart(mvm, false);
3642 
3643 out:
3644 	mutex_unlock(&mvm->mutex);
3645 
3646 	return ret;
3647 }
3648 
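/*
 * CHANCTX_SWMODE_REASSIGN_VIF: move the vif from the old channel
 * context to the new one, both of which already exist.
 */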
3649 static int
3650 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3651 				    struct ieee80211_vif_chanctx_switch *vifs)
3652 {
3653 	int ret;
3654 
3655 	mutex_lock(&mvm->mutex);
3656 	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3657 
3658 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3659 					   true);
3660 	if (ret) {
3661 		IWL_ERR(mvm,
3662 			"failed to assign new_ctx during channel switch\n");
3663 		goto out_reassign;
3664 	}
3665 
3666 	goto out;
3667 
3668 out_reassign:
3669 	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3670 					 true)) {
3671 		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3672 		goto out_restart;
3673 	}
3674 
3675 	goto out;
3676 
3677 out_restart:
3678 	/* things keep failing, better restart the hw */
3679 	iwl_mvm_nic_restart(mvm, false);
3680 
3681 out:
3682 	mutex_unlock(&mvm->mutex);
3683 
3684 	return ret;
3685 }
3686 
3687 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3688 				      struct ieee80211_vif_chanctx_switch *vifs,
3689 				      int n_vifs,
3690 				      enum ieee80211_chanctx_switch_mode mode)
3691 {
3692 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3693 	int ret;
3694 
3695 	/* we only support a single vif right now */
3696 	if (n_vifs > 1)
3697 		return -EOPNOTSUPP;
3698 
3699 	switch (mode) {
3700 	case CHANCTX_SWMODE_SWAP_CONTEXTS:
3701 		ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3702 		break;
3703 	case CHANCTX_SWMODE_REASSIGN_VIF:
3704 		ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3705 		break;
3706 	default:
3707 		ret = -EOPNOTSUPP;
3708 		break;
3709 	}
3710 
3711 	return ret;
3712 }
3713 
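/*
 * mac80211 set_tim handler: a TIM change means the AP beacon changed,
 * so re-send the beacon template to the firmware.
 */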
3714 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3715 			   struct ieee80211_sta *sta,
3716 			   bool set)
3717 {
3718 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3719 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3720 
3721 	if (!mvm_sta || !mvm_sta->vif) {
3722 		IWL_ERR(mvm, "Station is not associated to a vif\n");
3723 		return -EINVAL;
3724 	}
3725 
3726 	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3727 }
3728 
3729 #ifdef CONFIG_NL80211_TESTMODE
3730 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3731 	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3732 	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3733 	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3734 };
3735 
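/*
 * nl80211 testmode commands: inject a NoA duration on a P2P GO or
 * toggle beacon filtering on an associated client interface.
 */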
3736 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3737 				      struct ieee80211_vif *vif,
3738 				      void *data, int len)
3739 {
3740 	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3741 	int err;
3742 	u32 noa_duration;
3743 
3744 	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3745 	if (err)
3746 		return err;
3747 
3748 	if (!tb[IWL_MVM_TM_ATTR_CMD])
3749 		return -EINVAL;
3750 
3751 	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3752 	case IWL_MVM_TM_CMD_SET_NOA:
3753 		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3754 		    !vif->bss_conf.enable_beacon ||
3755 		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3756 			return -EINVAL;
3757 
3758 		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3759 		if (noa_duration >= vif->bss_conf.beacon_int)
3760 			return -EINVAL;
3761 
3762 		mvm->noa_duration = noa_duration;
3763 		mvm->noa_vif = vif;
3764 
3765 		return iwl_mvm_update_quotas(mvm, false, NULL);
3766 	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3767 		/* must be associated client vif - ignore authorized */
3768 		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3769 		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3770 		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3771 			return -EINVAL;
3772 
3773 		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3774 			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3775 		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3776 	}
3777 
3778 	return -EOPNOTSUPP;
3779 }
3780 
3781 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3782 				    struct ieee80211_vif *vif,
3783 				    void *data, int len)
3784 {
3785 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3786 	int err;
3787 
3788 	mutex_lock(&mvm->mutex);
3789 	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3790 	mutex_unlock(&mvm->mutex);
3791 
3792 	return err;
3793 }
3794 #endif
3795 
3796 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3797 				   struct ieee80211_vif *vif,
3798 				   struct ieee80211_channel_switch *chsw)
3799 {
3800 	/* By implementing this operation, we prevent mac80211 from
3801 	 * starting its own channel switch timer, so that we can call
3802 	 * ieee80211_chswitch_done() ourselves at the right time
3803 	 * (which is when the absence time event starts).
3804 	 */
3805 
3806 	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3807 			   "dummy channel switch op\n");
3808 }
3809 
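/*
 * Prepare for a channel switch: an AP/GO tracks the CSA vif, a client
 * schedules an absence time event around the switch and disables
 * beacon filtering; power save is disabled for the duration.
 */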
3810 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3811 				      struct ieee80211_vif *vif,
3812 				      struct ieee80211_channel_switch *chsw)
3813 {
3814 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3815 	struct ieee80211_vif *csa_vif;
3816 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3817 	u32 apply_time;
3818 	int ret;
3819 
3820 	mutex_lock(&mvm->mutex);
3821 
3822 	mvmvif->csa_failed = false;
3823 
3824 	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3825 			   chsw->chandef.center_freq1);
3826 
3827 	iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3828 
3829 	switch (vif->type) {
3830 	case NL80211_IFTYPE_AP:
3831 		csa_vif =
3832 			rcu_dereference_protected(mvm->csa_vif,
3833 						  lockdep_is_held(&mvm->mutex));
3834 		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3835 			      "Another CSA is already in progress")) {
3836 			ret = -EBUSY;
3837 			goto out_unlock;
3838 		}
3839 
3840 		rcu_assign_pointer(mvm->csa_vif, vif);
3841 
3842 		if (WARN_ONCE(mvmvif->csa_countdown,
3843 			      "Previous CSA countdown didn't complete")) {
3844 			ret = -EBUSY;
3845 			goto out_unlock;
3846 		}
3847 
3848 		break;
3849 	case NL80211_IFTYPE_STATION:
3850 		/* Schedule the time event to a bit before beacon 1,
3851 		 * to make sure we're in the new channel when the
3852 		 * GO/AP arrives.
3853 		 */
3854 		apply_time = chsw->device_timestamp +
3855 			((vif->bss_conf.beacon_int * (chsw->count - 1) -
3856 			  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
3857 
3858 		if (chsw->block_tx)
3859 			iwl_mvm_csa_client_absent(mvm, vif);
3860 
3861 		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3862 					    apply_time);
3863 		if (mvmvif->bf_data.bf_enabled) {
3864 			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3865 			if (ret)
3866 				goto out_unlock;
3867 		}
3868 
3869 		break;
3870 	default:
3871 		break;
3872 	}
3873 
3874 	mvmvif->ps_disabled = true;
3875 
3876 	ret = iwl_mvm_power_update_ps(mvm);
3877 	if (ret)
3878 		goto out_unlock;
3879 
3880 	/* we won't be on this channel any longer */
3881 	iwl_mvm_teardown_tdls_peers(mvm);
3882 
3883 out_unlock:
3884 	mutex_unlock(&mvm->mutex);
3885 
3886 	return ret;
3887 }
3888 
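/*
 * Finish a channel switch: for a client, re-enable TX to the AP
 * station, re-enable beacon filtering and stop the session protection;
 * power save is restored in all cases.
 */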
3889 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3890 				       struct ieee80211_vif *vif)
3891 {
3892 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3893 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3894 	int ret;
3895 
3896 	mutex_lock(&mvm->mutex);
3897 
3898 	if (mvmvif->csa_failed) {
3899 		mvmvif->csa_failed = false;
3900 		ret = -EIO;
3901 		goto out_unlock;
3902 	}
3903 
3904 	if (vif->type == NL80211_IFTYPE_STATION) {
3905 		struct iwl_mvm_sta *mvmsta;
3906 
3907 		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3908 							  mvmvif->ap_sta_id);
3909 
3910 		if (WARN_ON(!mvmsta)) {
3911 			ret = -EIO;
3912 			goto out_unlock;
3913 		}
3914 
3915 		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3916 
3917 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3918 
3919 		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3920 		if (ret)
3921 			goto out_unlock;
3922 
3923 		iwl_mvm_stop_session_protection(mvm, vif);
3924 	}
3925 
3926 	mvmvif->ps_disabled = false;
3927 
3928 	ret = iwl_mvm_power_update_ps(mvm);
3929 
3930 out_unlock:
3931 	mutex_unlock(&mvm->mutex);
3932 
3933 	return ret;
3934 }
3935 
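/*
 * mac80211 flush handler: flush (or wait for) the TX queues of the AP
 * station and all TDLS peers belonging to a station interface.
 */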
3936 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3937 			      struct ieee80211_vif *vif, u32 queues, bool drop)
3938 {
3939 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3940 	struct iwl_mvm_vif *mvmvif;
3941 	struct iwl_mvm_sta *mvmsta;
3942 	struct ieee80211_sta *sta;
3943 	int i;
3944 	u32 msk = 0;
3945 
3946 	if (!vif || vif->type != NL80211_IFTYPE_STATION)
3947 		return;
3948 
3949 	mutex_lock(&mvm->mutex);
3950 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
3951 
3952 	/* flush the AP-station and all TDLS peers */
3953 	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3954 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3955 						lockdep_is_held(&mvm->mutex));
3956 		if (IS_ERR_OR_NULL(sta))
3957 			continue;
3958 
3959 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
3960 		if (mvmsta->vif != vif)
3961 			continue;
3962 
3963 		/* make sure only TDLS peers or the AP are flushed */
3964 		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3965 
3966 		msk |= mvmsta->tfd_queue_msk;
3967 	}
3968 
3969 	if (drop) {
3970 		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
3971 			IWL_ERR(mvm, "flush request fail\n");
3972 		mutex_unlock(&mvm->mutex);
3973 	} else {
3974 		mutex_unlock(&mvm->mutex);
3975 
3976 		/* this can take a while, and we may need/want other operations
3977 		 * to succeed while doing this, so do it without the mutex held
3978 		 */
3979 		iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3980 	}
3981 }
3982 
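/*
 * Report accumulated radio-on/RX/TX/scan time (in ms) as the single
 * global survey entry.
 */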
3983 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3984 				  struct survey_info *survey)
3985 {
3986 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3987 	int ret;
3988 
3989 	memset(survey, 0, sizeof(*survey));
3990 
3991 	/* only support global statistics right now */
3992 	if (idx != 0)
3993 		return -ENOENT;
3994 
3995 	if (fw_has_capa(&mvm->fw->ucode_capa,
3996 			IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3997 		return -ENOENT;
3998 
3999 	mutex_lock(&mvm->mutex);
4000 
4001 	if (mvm->ucode_loaded) {
4002 		ret = iwl_mvm_request_statistics(mvm, false);
4003 		if (ret)
4004 			goto out;
4005 	}
4006 
4007 	survey->filled = SURVEY_INFO_TIME |
4008 			 SURVEY_INFO_TIME_RX |
4009 			 SURVEY_INFO_TIME_TX |
4010 			 SURVEY_INFO_TIME_SCAN;
4011 	survey->time = mvm->accu_radio_stats.on_time_rf +
4012 		       mvm->radio_stats.on_time_rf;
4013 	do_div(survey->time, USEC_PER_MSEC);
4014 
4015 	survey->time_rx = mvm->accu_radio_stats.rx_time +
4016 			  mvm->radio_stats.rx_time;
4017 	do_div(survey->time_rx, USEC_PER_MSEC);
4018 
4019 	survey->time_tx = mvm->accu_radio_stats.tx_time +
4020 			  mvm->radio_stats.tx_time;
4021 	do_div(survey->time_tx, USEC_PER_MSEC);
4022 
4023 	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4024 			    mvm->radio_stats.on_time_scan;
4025 	do_div(survey->time_scan, USEC_PER_MSEC);
4026 
4027 	ret = 0;
4028  out:
4029 	mutex_unlock(&mvm->mutex);
4030 	return ret;
4031 }
4032 
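/*
 * Fill beacon RX statistics for the AP station when the driver (not
 * mac80211) is doing the beacon filtering.
 */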
4033 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4034 				       struct ieee80211_vif *vif,
4035 				       struct ieee80211_sta *sta,
4036 				       struct station_info *sinfo)
4037 {
4038 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4039 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4040 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4041 
4042 	if (fw_has_capa(&mvm->fw->ucode_capa,
4043 			IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4044 		return;
4045 
4046 	/* if beacon filtering isn't on, mac80211 does it anyway */
4047 	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4048 		return;
4049 
4050 	if (!vif->bss_conf.assoc)
4051 		return;
4052 
4053 	mutex_lock(&mvm->mutex);
4054 
4055 	if (mvmvif->ap_sta_id != mvmsta->sta_id)
4056 		goto unlock;
4057 
4058 	if (iwl_mvm_request_statistics(mvm, false))
4059 		goto unlock;
4060 
4061 	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4062 			   mvmvif->beacon_stats.accu_num_beacons;
4063 	sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
4064 	if (mvmvif->beacon_stats.avg_signal) {
4065 		/* firmware only reports a value after RXing a few beacons */
4066 		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4067 		sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4068 	}
4069  unlock:
4070 	mutex_unlock(&mvm->mutex);
4071 }
4072 
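/*
 * Fire firmware debug collection triggers on MLME events (denied or
 * timed-out auth/assoc, deauth RX/TX).
 */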
4073 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4074 					struct ieee80211_vif *vif,
4075 					const struct ieee80211_event *event)
4076 {
4077 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)	\
4078 	do {							\
4079 		if ((_cnt) && --(_cnt))				\
4080 			break;					\
4081 		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
4082 	} while (0)
4083 
4084 	struct iwl_fw_dbg_trigger_tlv *trig;
4085 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4086 
4087 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4088 		return;
4089 
4090 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4091 	trig_mlme = (void *)trig->data;
4092 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4093 		return;
4094 
4095 	if (event->u.mlme.data == ASSOC_EVENT) {
4096 		if (event->u.mlme.status == MLME_DENIED)
4097 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4098 					   trig_mlme->stop_assoc_denied,
4099 					   "DENIED ASSOC: reason %d",
4100 					    event->u.mlme.reason);
4101 		else if (event->u.mlme.status == MLME_TIMEOUT)
4102 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4103 					   trig_mlme->stop_assoc_timeout,
4104 					   "ASSOC TIMEOUT");
4105 	} else if (event->u.mlme.data == AUTH_EVENT) {
4106 		if (event->u.mlme.status == MLME_DENIED)
4107 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4108 					   trig_mlme->stop_auth_denied,
4109 					   "DENIED AUTH: reason %d",
4110 					   event->u.mlme.reason);
4111 		else if (event->u.mlme.status == MLME_TIMEOUT)
4112 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4113 					   trig_mlme->stop_auth_timeout,
4114 					   "AUTH TIMEOUT");
4115 	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4116 		CHECK_MLME_TRIGGER(mvm, trig, buf,
4117 				   trig_mlme->stop_rx_deauth,
4118 				   "DEAUTH RX %d", event->u.mlme.reason);
4119 	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4120 		CHECK_MLME_TRIGGER(mvm, trig, buf,
4121 				   trig_mlme->stop_tx_deauth,
4122 				   "DEAUTH TX %d", event->u.mlme.reason);
4123 	}
4124 #undef CHECK_MLME_TRIGGER
4125 }
4126 
4127 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4128 					  struct ieee80211_vif *vif,
4129 					  const struct ieee80211_event *event)
4130 {
4131 	struct iwl_fw_dbg_trigger_tlv *trig;
4132 	struct iwl_fw_dbg_trigger_ba *ba_trig;
4133 
4134 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4135 		return;
4136 
4137 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4138 	ba_trig = (void *)trig->data;
4139 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4140 		return;
4141 
4142 	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4143 		return;
4144 
4145 	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4146 				    "BAR received from %pM, tid %d, ssn %d",
4147 				    event->u.ba.sta->addr, event->u.ba.tid,
4148 				    event->u.ba.ssn);
4149 }
4150 
4151 static void
4152 iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4153 				     struct ieee80211_vif *vif,
4154 				     const struct ieee80211_event *event)
4155 {
4156 	struct iwl_fw_dbg_trigger_tlv *trig;
4157 	struct iwl_fw_dbg_trigger_ba *ba_trig;
4158 
4159 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4160 		return;
4161 
4162 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4163 	ba_trig = (void *)trig->data;
4164 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4165 		return;
4166 
4167 	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4168 		return;
4169 
4170 	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4171 				    "Frame from %pM timed out, tid %d",
4172 				    event->u.ba.sta->addr, event->u.ba.tid);
4173 }
4174 
4175 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4176 				       struct ieee80211_vif *vif,
4177 				       const struct ieee80211_event *event)
4178 {
4179 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4180 
4181 	switch (event->type) {
4182 	case MLME_EVENT:
4183 		iwl_mvm_event_mlme_callback(mvm, vif, event);
4184 		break;
4185 	case BAR_RX_EVENT:
4186 		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4187 		break;
4188 	case BA_FRAME_TIMEOUT:
4189 		iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4190 		break;
4191 	default:
4192 		break;
4193 	}
4194 }
4195 
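/* mac80211 callbacks exported by the MVM op-mode */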
4196 const struct ieee80211_ops iwl_mvm_hw_ops = {
4197 	.tx = iwl_mvm_mac_tx,
4198 	.ampdu_action = iwl_mvm_mac_ampdu_action,
4199 	.start = iwl_mvm_mac_start,
4200 	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
4201 	.stop = iwl_mvm_mac_stop,
4202 	.add_interface = iwl_mvm_mac_add_interface,
4203 	.remove_interface = iwl_mvm_mac_remove_interface,
4204 	.config = iwl_mvm_mac_config,
4205 	.prepare_multicast = iwl_mvm_prepare_multicast,
4206 	.configure_filter = iwl_mvm_configure_filter,
4207 	.config_iface_filter = iwl_mvm_config_iface_filter,
4208 	.bss_info_changed = iwl_mvm_bss_info_changed,
4209 	.hw_scan = iwl_mvm_mac_hw_scan,
4210 	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4211 	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4212 	.sta_state = iwl_mvm_mac_sta_state,
4213 	.sta_notify = iwl_mvm_mac_sta_notify,
4214 	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4215 	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4216 	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4217 	.sta_rc_update = iwl_mvm_sta_rc_update,
4218 	.conf_tx = iwl_mvm_mac_conf_tx,
4219 	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4220 	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4221 	.flush = iwl_mvm_mac_flush,
4222 	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
4223 	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4224 	.set_key = iwl_mvm_mac_set_key,
4225 	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
4226 	.remain_on_channel = iwl_mvm_roc,
4227 	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
4228 	.add_chanctx = iwl_mvm_add_chanctx,
4229 	.remove_chanctx = iwl_mvm_remove_chanctx,
4230 	.change_chanctx = iwl_mvm_change_chanctx,
4231 	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4232 	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4233 	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4234 
4235 	.start_ap = iwl_mvm_start_ap_ibss,
4236 	.stop_ap = iwl_mvm_stop_ap_ibss,
4237 	.join_ibss = iwl_mvm_start_ap_ibss,
4238 	.leave_ibss = iwl_mvm_stop_ap_ibss,
4239 
4240 	.set_tim = iwl_mvm_set_tim,
4241 
4242 	.channel_switch = iwl_mvm_channel_switch,
4243 	.pre_channel_switch = iwl_mvm_pre_channel_switch,
4244 	.post_channel_switch = iwl_mvm_post_channel_switch,
4245 
4246 	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4247 	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4248 	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4249 
4250 	.event_callback = iwl_mvm_mac_event_callback,
4251 
4252 	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4253 
4254 #ifdef CONFIG_PM_SLEEP
4255 	/* look at d3.c */
4256 	.suspend = iwl_mvm_suspend,
4257 	.resume = iwl_mvm_resume,
4258 	.set_wakeup = iwl_mvm_set_wakeup,
4259 	.set_rekey_data = iwl_mvm_set_rekey_data,
4260 #if IS_ENABLED(CONFIG_IPV6)
4261 	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4262 #endif
4263 	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4264 #endif
4265 	.get_survey = iwl_mvm_mac_get_survey,
4266 	.sta_statistics = iwl_mvm_mac_sta_statistics,
4267 };
4268