1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of version 2 of the GNU General Public License as
13  * published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope that it will be useful, but
16  * WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23  * USA
24  *
25  * The full GNU General Public License is included in this distribution
26  * in the file called COPYING.
27  *
28  * Contact Information:
29  *  Intel Linux Wireless <ilw@linux.intel.com>
30  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31  *
32  * BSD LICENSE
33  *
34  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  *
42  *  * Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  *  * Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in
46  *    the documentation and/or other materials provided with the
47  *    distribution.
48  *  * Neither the name Intel Corporation nor the names of its
49  *    contributors may be used to endorse or promote products derived
50  *    from this software without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63  *
64  *****************************************************************************/
65 #include <linux/kernel.h>
66 #include <linux/slab.h>
67 #include <linux/skbuff.h>
68 #include <linux/netdevice.h>
69 #include <linux/etherdevice.h>
70 #include <linux/ip.h>
71 #include <linux/if_arp.h>
72 #include <linux/devcoredump.h>
73 #include <net/mac80211.h>
74 #include <net/ieee80211_radiotap.h>
75 #include <net/tcp.h>
76 
77 #include "iwl-op-mode.h"
78 #include "iwl-io.h"
79 #include "mvm.h"
80 #include "sta.h"
81 #include "time-event.h"
82 #include "iwl-eeprom-parse.h"
83 #include "fw-api-scan.h"
84 #include "iwl-phy-db.h"
85 #include "testmode.h"
86 #include "iwl-fw-error-dump.h"
87 #include "iwl-prph.h"
88 #include "iwl-csr.h"
89 #include "iwl-nvm-parse.h"
90 
91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
92 	{
93 		.max = 1,
94 		.types = BIT(NL80211_IFTYPE_STATION),
95 	},
96 	{
97 		.max = 1,
98 		.types = BIT(NL80211_IFTYPE_AP) |
99 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
100 			BIT(NL80211_IFTYPE_P2P_GO),
101 	},
102 	{
103 		.max = 1,
104 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
105 	},
106 };
107 
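/*
 * Only one combination is advertised below: up to three interfaces in
 * total, drawn from the limits above (one station, one AP/P2P role and
 * one P2P device), on at most two different channels.
 */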
108 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
109 	{
110 		.num_different_channels = 2,
111 		.max_interfaces = 3,
112 		.limits = iwl_mvm_limits,
113 		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
114 	},
115 };
116 
117 #ifdef CONFIG_PM_SLEEP
118 static const struct nl80211_wowlan_tcp_data_token_feature
119 iwl_mvm_wowlan_tcp_token_feature = {
120 	.min_len = 0,
121 	.max_len = 255,
122 	.bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
123 };
124 
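/*
 * The TCP wowlan payload limits below are simply the firmware packet
 * size budget (IWL_WOWLAN_TCP_MAX_PACKET_LEN /
 * IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN) minus the Ethernet, IP and
 * TCP headers that are built around the payload.
 */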
125 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
126 	.tok = &iwl_mvm_wowlan_tcp_token_feature,
127 	.data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
128 			    sizeof(struct ethhdr) -
129 			    sizeof(struct iphdr) -
130 			    sizeof(struct tcphdr),
131 	.data_interval_max = 65535, /* __le16 in API */
132 	.wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
133 			    sizeof(struct ethhdr) -
134 			    sizeof(struct iphdr) -
135 			    sizeof(struct tcphdr),
136 	.seq = true,
137 };
138 #endif
139 
140 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
141 /*
142  * Use the reserved field to indicate magic values.
143  * These values will only be used internally by the driver,
144  * and won't make it to the fw (reserved will be 0).
145  * BC_FILTER_MAGIC_IP - configure the val of this attribute to
146  *	be the vif's IP address. In case there is not a single
147  *	IP address (0, or more than 1), this attribute will
148  *	be skipped.
149  * BC_FILTER_MAGIC_MAC - set the val of this attribute to
150  *	the LSB bytes of the vif's MAC address.
151  */
152 enum {
153 	BC_FILTER_MAGIC_NONE = 0,
154 	BC_FILTER_MAGIC_IP,
155 	BC_FILTER_MAGIC_MAC,
156 };
157 
158 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
159 	{
160 		/* arp */
161 		.discard = 0,
162 		.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
163 		.attrs = {
164 			{
165 				/* frame type - arp, hw type - ethernet */
166 				.offset_type =
167 					BCAST_FILTER_OFFSET_PAYLOAD_START,
168 				.offset = sizeof(rfc1042_header),
169 				.val = cpu_to_be32(0x08060001),
170 				.mask = cpu_to_be32(0xffffffff),
171 			},
172 			{
173 				/* arp dest ip */
174 				.offset_type =
175 					BCAST_FILTER_OFFSET_PAYLOAD_START,
176 				.offset = sizeof(rfc1042_header) + 2 +
177 					  sizeof(struct arphdr) +
178 					  ETH_ALEN + sizeof(__be32) +
179 					  ETH_ALEN,
180 				.mask = cpu_to_be32(0xffffffff),
181 				/* mark it as special field */
182 				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
183 			},
184 		},
185 	},
186 	{
187 		/* dhcp offer bcast */
188 		.discard = 0,
189 		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
190 		.attrs = {
191 			{
192 				/* udp dest port - 68 (bootp client)*/
193 				.offset_type = BCAST_FILTER_OFFSET_IP_END,
194 				.offset = offsetof(struct udphdr, dest),
195 				.val = cpu_to_be32(0x00440000),
196 				.mask = cpu_to_be32(0xffff0000),
197 			},
198 			{
199 				/* dhcp - lsb bytes of client hw address */
200 				.offset_type = BCAST_FILTER_OFFSET_IP_END,
201 				.offset = 38,
202 				.mask = cpu_to_be32(0xffffffff),
203 				/* mark it as special field */
204 				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
205 			},
206 		},
207 	},
208 	/* last filter must be empty */
209 	{},
210 };
211 #endif
212 
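/*
 * d0i3 reference accounting: each reference type taken below keeps the
 * transport awake (out of D0i3). Counts are kept per type under
 * refs_lock and propagated to the transport via iwl_trans_ref() /
 * iwl_trans_unref().
 */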
213 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
214 {
215 	if (!iwl_mvm_is_d0i3_supported(mvm))
216 		return;
217 
218 	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
219 	spin_lock_bh(&mvm->refs_lock);
220 	mvm->refs[ref_type]++;
221 	spin_unlock_bh(&mvm->refs_lock);
222 	iwl_trans_ref(mvm->trans);
223 }
224 
225 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
226 {
227 	if (!iwl_mvm_is_d0i3_supported(mvm))
228 		return;
229 
230 	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
231 	spin_lock_bh(&mvm->refs_lock);
232 	WARN_ON(!mvm->refs[ref_type]--);
233 	spin_unlock_bh(&mvm->refs_lock);
234 	iwl_trans_unref(mvm->trans);
235 }
236 
237 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
238 				     enum iwl_mvm_ref_type except_ref)
239 {
240 	int i, j;
241 
242 	if (!iwl_mvm_is_d0i3_supported(mvm))
243 		return;
244 
245 	spin_lock_bh(&mvm->refs_lock);
246 	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
247 		if (except_ref == i || !mvm->refs[i])
248 			continue;
249 
250 		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
251 			      i, mvm->refs[i]);
252 		for (j = 0; j < mvm->refs[i]; j++)
253 			iwl_trans_unref(mvm->trans);
254 		mvm->refs[i] = 0;
255 	}
256 	spin_unlock_bh(&mvm->refs_lock);
257 }
258 
259 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
260 {
261 	int i;
262 	bool taken = false;
263 
264 	if (!iwl_mvm_is_d0i3_supported(mvm))
265 		return true;
266 
267 	spin_lock_bh(&mvm->refs_lock);
268 	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
269 		if (mvm->refs[i]) {
270 			taken = true;
271 			break;
272 		}
273 	}
274 	spin_unlock_bh(&mvm->refs_lock);
275 
276 	return taken;
277 }
278 
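/*
 * Take a reference and synchronously wait (up to a second) for the
 * device to leave D0i3, so that direct target accesses that follow are
 * safe. On timeout the reference is dropped and -EIO is returned.
 */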
279 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
280 {
281 	iwl_mvm_ref(mvm, ref_type);
282 
283 	if (!wait_event_timeout(mvm->d0i3_exit_waitq,
284 				!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
285 				HZ)) {
286 		WARN_ON_ONCE(1);
287 		iwl_mvm_unref(mvm, ref_type);
288 		return -EIO;
289 	}
290 
291 	return 0;
292 }
293 
294 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
295 {
296 	int i;
297 
298 	memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
299 	for (i = 0; i < NUM_PHY_CTX; i++) {
300 		mvm->phy_ctxts[i].id = i;
301 		mvm->phy_ctxts[i].ref = 0;
302 	}
303 }
304 
305 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
306 						  const char *alpha2,
307 						  enum iwl_mcc_source src_id,
308 						  bool *changed)
309 {
310 	struct ieee80211_regdomain *regd = NULL;
311 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
312 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
313 	struct iwl_mcc_update_resp *resp;
314 
315 	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
316 
317 	lockdep_assert_held(&mvm->mutex);
318 
319 	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
320 	if (IS_ERR_OR_NULL(resp)) {
321 		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
322 			      PTR_RET(resp));
323 		goto out;
324 	}
325 
326 	if (changed)
327 		*changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
328 
329 	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
330 				      __le32_to_cpu(resp->n_channels),
331 				      resp->channels,
332 				      __le16_to_cpu(resp->mcc));
333 	/* Store the returned source id */
334 	src_id = resp->source_id;
335 	kfree(resp);
336 	if (IS_ERR_OR_NULL(regd)) {
337 		IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
338 			      PTR_RET(regd));
339 		goto out;
340 	}
341 
342 	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
343 		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
344 	mvm->lar_regdom_set = true;
345 	mvm->mcc_src = src_id;
346 
347 out:
348 	return regd;
349 }
350 
351 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
352 {
353 	bool changed;
354 	struct ieee80211_regdomain *regd;
355 
356 	if (!iwl_mvm_is_lar_supported(mvm))
357 		return;
358 
359 	regd = iwl_mvm_get_current_regdomain(mvm, &changed);
360 	if (!IS_ERR_OR_NULL(regd)) {
361 		/* only update the regulatory core if changed */
362 		if (changed)
363 			regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
364 
365 		kfree(regd);
366 	}
367 }
368 
369 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
370 							  bool *changed)
371 {
372 	return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
373 				     iwl_mvm_is_wifi_mcc_supported(mvm) ?
374 				     MCC_SOURCE_GET_CURRENT :
375 				     MCC_SOURCE_OLD_FW, changed);
376 }
377 
378 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
379 {
380 	enum iwl_mcc_source used_src;
381 	struct ieee80211_regdomain *regd;
382 	int ret;
383 	bool changed;
384 	const struct ieee80211_regdomain *r =
385 			rtnl_dereference(mvm->hw->wiphy->regd);
386 
387 	if (!r)
388 		return -ENOENT;
389 
390 	/* save the last source in case we overwrite it below */
391 	used_src = mvm->mcc_src;
392 	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
393 		/* Notify the firmware we support wifi location updates */
394 		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
395 		if (!IS_ERR_OR_NULL(regd))
396 			kfree(regd);
397 	}
398 
399 	/* Now set our last stored MCC and source */
400 	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
401 				     &changed);
402 	if (IS_ERR_OR_NULL(regd))
403 		return -EIO;
404 
405 	/* update cfg80211 if the regdomain was changed */
406 	if (changed)
407 		ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
408 	else
409 		ret = 0;
410 
411 	kfree(regd);
412 	return ret;
413 }
414 
415 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
416 {
417 	struct ieee80211_hw *hw = mvm->hw;
418 	int num_mac, ret, i;
419 
420 	/* Tell mac80211 our characteristics */
421 	hw->flags = IEEE80211_HW_SIGNAL_DBM |
422 		    IEEE80211_HW_SPECTRUM_MGMT |
423 		    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
424 		    IEEE80211_HW_QUEUE_CONTROL |
425 		    IEEE80211_HW_WANT_MONITOR_VIF |
426 		    IEEE80211_HW_SUPPORTS_PS |
427 		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
428 		    IEEE80211_HW_AMPDU_AGGREGATION |
429 		    IEEE80211_HW_TIMING_BEACON_ONLY |
430 		    IEEE80211_HW_CONNECTION_MONITOR |
431 		    IEEE80211_HW_CHANCTX_STA_CSA |
432 		    IEEE80211_HW_SUPPORTS_CLONED_SKBS;
433 
434 	hw->queues = mvm->first_agg_queue;
435 	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
436 	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
437 				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
438 	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
439 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
440 	hw->rate_control_algorithm = "iwl-mvm-rs";
441 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
442 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
443 
444 	/*
445 	 * Enable 11w if advertised by firmware and software crypto
446 	 * is not enabled (as the firmware will interpret some mgmt
447 	 * packets, so enabling it with software crypto isn't safe)
448 	 */
449 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
450 	    !iwlwifi_mod_params.sw_crypto)
451 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
452 
453 	hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
454 	hw->wiphy->features |=
455 		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
456 		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
457 
458 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
459 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
460 	hw->chanctx_data_size = sizeof(u16);
461 
462 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
463 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
464 		BIT(NL80211_IFTYPE_AP) |
465 		BIT(NL80211_IFTYPE_P2P_GO) |
466 		BIT(NL80211_IFTYPE_P2P_DEVICE) |
467 		BIT(NL80211_IFTYPE_ADHOC);
468 
469 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
470 	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
471 	if (iwl_mvm_is_lar_supported(mvm))
472 		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
473 	else
474 		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
475 					       REGULATORY_DISABLE_BEACON_HINTS;
476 
477 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
478 		hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
479 
480 	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
481 
482 	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
483 	hw->wiphy->n_iface_combinations =
484 		ARRAY_SIZE(iwl_mvm_iface_combinations);
485 
486 	hw->wiphy->max_remain_on_channel_duration = 10000;
487 	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
488 	/* we can compensate an offset of up to 3 channels = 15 MHz */
489 	hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
490 
491 	/* Extract MAC address */
492 	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
493 	hw->wiphy->addresses = mvm->addresses;
494 	hw->wiphy->n_addresses = 1;
495 
496 	/* Extract additional MAC addresses if available */
497 	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
498 		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
499 
500 	for (i = 1; i < num_mac; i++) {
501 		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
502 		       ETH_ALEN);
503 		mvm->addresses[i].addr[5]++;
504 		hw->wiphy->n_addresses++;
505 	}
506 
507 	iwl_mvm_reset_phy_ctxts(mvm);
508 
509 	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm, false);
510 
511 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
512 
513 	if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
514 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
515 			&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
516 	if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
517 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
518 			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
519 
520 		if ((mvm->fw->ucode_capa.capa[0] &
521 		     IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
522 		    (mvm->fw->ucode_capa.api[0] &
523 		     IWL_UCODE_TLV_API_LQ_SS_PARAMS))
524 			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
525 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
526 	}
527 
528 	hw->wiphy->hw_version = mvm->trans->hw_id;
529 
530 	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
531 		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
532 	else
533 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
534 
535 	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
536 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
537 		hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
538 		hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
539 		/* we create the 802.11 header and zero length SSID IE. */
540 		hw->wiphy->max_sched_scan_ie_len =
541 			SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
542 	}
543 
544 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
545 			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
546 			       NL80211_FEATURE_P2P_GO_OPPPS |
547 			       NL80211_FEATURE_DYNAMIC_SMPS |
548 			       NL80211_FEATURE_STATIC_SMPS |
549 			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
550 
551 	if (mvm->fw->ucode_capa.capa[0] &
552 	    IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
553 		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
554 	if (mvm->fw->ucode_capa.capa[0] &
555 	    IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)
556 		hw->wiphy->features |= NL80211_FEATURE_QUIET;
557 
558 	if (mvm->fw->ucode_capa.capa[0] &
559 	    IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)
560 		hw->wiphy->features |=
561 			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
562 
563 	if (mvm->fw->ucode_capa.capa[0] &
564 	    IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)
565 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
566 
567 	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
568 
569 	/* currently FW API supports only one optional cipher scheme */
570 	if (mvm->fw->cs[0].cipher) {
571 		mvm->hw->n_cipher_schemes = 1;
572 		mvm->hw->cipher_schemes = &mvm->fw->cs[0];
573 	}
574 
575 #ifdef CONFIG_PM_SLEEP
576 	if (iwl_mvm_is_d0i3_supported(mvm) &&
577 	    device_can_wakeup(mvm->trans->dev)) {
578 		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
579 		hw->wiphy->wowlan = &mvm->wowlan;
580 	}
581 
582 	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
583 	    mvm->trans->ops->d3_suspend &&
584 	    mvm->trans->ops->d3_resume &&
585 	    device_can_wakeup(mvm->trans->dev)) {
586 		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
587 				     WIPHY_WOWLAN_DISCONNECT |
588 				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
589 				     WIPHY_WOWLAN_RFKILL_RELEASE |
590 				     WIPHY_WOWLAN_NET_DETECT;
591 		if (!iwlwifi_mod_params.sw_crypto)
592 			mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
593 					     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
594 					     WIPHY_WOWLAN_4WAY_HANDSHAKE;
595 
596 		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
597 		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
598 		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
599 		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
600 		mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
601 		hw->wiphy->wowlan = &mvm->wowlan;
602 	}
603 #endif
604 
605 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
606 	/* assign default bcast filtering configuration */
607 	mvm->bcast_filters = iwl_mvm_default_bcast_filters;
608 #endif
609 
610 	ret = iwl_mvm_leds_init(mvm);
611 	if (ret)
612 		return ret;
613 
614 	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
615 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
616 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
617 	}
618 
619 	if (mvm->fw->ucode_capa.capa[0] &
620 	    IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
621 		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
622 		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
623 	}
624 
625 	ret = ieee80211_register_hw(mvm->hw);
626 	if (ret)
627 		iwl_mvm_leds_exit(mvm);
628 
629 	return ret;
630 }
631 
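/*
 * While the device is still in D0i3, Tx frames are parked on the
 * d0i3_tx queue and the mac80211 queues are stopped; the ref/unref pair
 * below only serves to trigger the D0i3 exit, after which the deferred
 * frames are flushed out.
 */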
632 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
633 			     struct ieee80211_sta *sta,
634 			     struct sk_buff *skb)
635 {
636 	struct iwl_mvm_sta *mvmsta;
637 	bool defer = false;
638 
639 	/*
640 	 * double check the IN_D0I3 flag both before and after
641 	 * taking the spinlock, in order to prevent taking
642 	 * the spinlock when not needed.
643 	 */
644 	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
645 		return false;
646 
647 	spin_lock(&mvm->d0i3_tx_lock);
648 	/*
649 	 * testing the flag again ensures the skb dequeue
650 	 * loop (on d0i3 exit) hasn't run yet.
651 	 */
652 	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
653 		goto out;
654 
655 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
656 	if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
657 	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
658 		goto out;
659 
660 	__skb_queue_tail(&mvm->d0i3_tx, skb);
661 	ieee80211_stop_queues(mvm->hw);
662 
663 	/* trigger wakeup */
664 	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
665 	iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
666 
667 	defer = true;
668 out:
669 	spin_unlock(&mvm->d0i3_tx_lock);
670 	return defer;
671 }
672 
673 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
674 			   struct ieee80211_tx_control *control,
675 			   struct sk_buff *skb)
676 {
677 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
678 	struct ieee80211_sta *sta = control->sta;
679 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
680 	struct ieee80211_hdr *hdr = (void *)skb->data;
681 
682 	if (iwl_mvm_is_radio_killed(mvm)) {
683 		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
684 		goto drop;
685 	}
686 
687 	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
688 	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
689 	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
690 		goto drop;
691 
692 	/* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
693 	if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
694 		     ieee80211_is_mgmt(hdr->frame_control) &&
695 		     !ieee80211_is_deauth(hdr->frame_control) &&
696 		     !ieee80211_is_disassoc(hdr->frame_control) &&
697 		     !ieee80211_is_action(hdr->frame_control)))
698 		sta = NULL;
699 
700 	if (sta) {
701 		if (iwl_mvm_defer_tx(mvm, sta, skb))
702 			return;
703 		if (iwl_mvm_tx_skb(mvm, skb, sta))
704 			goto drop;
705 		return;
706 	}
707 
708 	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
709 		goto drop;
710 	return;
711  drop:
712 	ieee80211_free_txskb(hw, skb);
713 }
714 
715 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
716 {
717 	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
718 		return false;
719 	return true;
720 }
721 
722 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
723 {
724 	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
725 		return false;
726 	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
727 		return true;
728 
729 	/* enabled by default */
730 	return true;
731 }
732 
733 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
734 				    struct ieee80211_vif *vif,
735 				    enum ieee80211_ampdu_mlme_action action,
736 				    struct ieee80211_sta *sta, u16 tid,
737 				    u16 *ssn, u8 buf_size)
738 {
739 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
740 	int ret;
741 	bool tx_agg_ref = false;
742 
743 	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
744 		     sta->addr, tid, action);
745 
746 	if (!(mvm->nvm_data->sku_cap_11n_enable))
747 		return -EACCES;
748 
749 	/* return from D0i3 before starting a new Tx aggregation */
750 	switch (action) {
751 	case IEEE80211_AMPDU_TX_START:
752 	case IEEE80211_AMPDU_TX_STOP_CONT:
753 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
754 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
755 	case IEEE80211_AMPDU_TX_OPERATIONAL:
756 		/*
757 		 * for tx start, wait synchronously until D0i3 exit to
758 		 * get the correct sequence number for the tid.
759 		 * additionally, some other ampdu actions use direct
760 		 * target access, which is not handled automatically
761 		 * by the trans layer (unlike commands), so wait for
762 		 * d0i3 exit in these cases as well.
763 		 */
764 		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
765 		if (ret)
766 			return ret;
767 
768 		tx_agg_ref = true;
769 		break;
770 	default:
771 		break;
772 	}
773 
774 	mutex_lock(&mvm->mutex);
775 
776 	switch (action) {
777 	case IEEE80211_AMPDU_RX_START:
778 		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
779 			ret = -EINVAL;
780 			break;
781 		}
782 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
783 		break;
784 	case IEEE80211_AMPDU_RX_STOP:
785 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
786 		break;
787 	case IEEE80211_AMPDU_TX_START:
788 		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
789 			ret = -EINVAL;
790 			break;
791 		}
792 		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
793 		break;
794 	case IEEE80211_AMPDU_TX_STOP_CONT:
795 		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
796 		break;
797 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
798 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
799 		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
800 		break;
801 	case IEEE80211_AMPDU_TX_OPERATIONAL:
802 		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
803 		break;
804 	default:
805 		WARN_ON_ONCE(1);
806 		ret = -EINVAL;
807 		break;
808 	}
809 	mutex_unlock(&mvm->mutex);
810 
811 	/*
812 	 * If the tid is marked as started, we won't use it for offloaded
813 	 * traffic on the next D0i3 entry. It's safe to unref.
814 	 */
815 	if (tx_agg_ref)
816 		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
817 
818 	return ret;
819 }
820 
821 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
822 				     struct ieee80211_vif *vif)
823 {
824 	struct iwl_mvm *mvm = data;
825 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
826 
827 	mvmvif->uploaded = false;
828 	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
829 
830 	spin_lock_bh(&mvm->time_event_lock);
831 	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
832 	spin_unlock_bh(&mvm->time_event_lock);
833 
834 	mvmvif->phy_ctxt = NULL;
835 	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
836 }
837 
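/*
 * devcoredump read callback: the dump consists of the op-mode (mvm)
 * buffer followed by the optional transport buffer, so a read that
 * crosses the boundary is stitched together from both parts.
 */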
838 static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
839 				     const void *data, size_t datalen)
840 {
841 	const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
842 	ssize_t bytes_read;
843 	ssize_t bytes_read_trans;
844 
845 	if (offset < dump_ptrs->op_mode_len) {
846 		bytes_read = min_t(ssize_t, count,
847 				   dump_ptrs->op_mode_len - offset);
848 		memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
849 		       bytes_read);
850 		offset += bytes_read;
851 		count -= bytes_read;
852 
853 		if (count == 0)
854 			return bytes_read;
855 	} else {
856 		bytes_read = 0;
857 	}
858 
859 	if (!dump_ptrs->trans_ptr)
860 		return bytes_read;
861 
862 	offset -= dump_ptrs->op_mode_len;
863 	bytes_read_trans = min_t(ssize_t, count,
864 				 dump_ptrs->trans_ptr->len - offset);
865 	memcpy(buffer + bytes_read,
866 	       (u8 *)dump_ptrs->trans_ptr->data + offset,
867 	       bytes_read_trans);
868 
869 	return bytes_read + bytes_read_trans;
870 }
871 
872 static void iwl_mvm_free_coredump(const void *data)
873 {
874 	const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
875 
876 	vfree(fw_error_dump->op_mode_ptr);
877 	vfree(fw_error_dump->trans_ptr);
878 	kfree(fw_error_dump);
879 }
880 
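/*
 * Collect the RX and TX FIFO contents into the error-dump TLV stream.
 * NIC access is grabbed for the duration, and each FIFO is drained word
 * by word through the fence / read-modify registers.
 */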
881 static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
882 			       struct iwl_fw_error_dump_data **dump_data)
883 {
884 	struct iwl_fw_error_dump_fifo *fifo_hdr;
885 	u32 *fifo_data;
886 	u32 fifo_len;
887 	unsigned long flags;
888 	int i, j;
889 
890 	if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
891 		return;
892 
893 	/* Pull RXF data from all RXFs */
894 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
895 		/*
896 		 * The additional register offset needed for this RXF (each
897 		 * RXF's registers are spaced RXF_DIFF_FROM_PREV apart)
898 		 */
899 		u32 offset_diff = RXF_DIFF_FROM_PREV * i;
900 
901 		fifo_hdr = (void *)(*dump_data)->data;
902 		fifo_data = (void *)fifo_hdr->data;
903 		fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
904 
905 		/* No need to try to read the data if the length is 0 */
906 		if (fifo_len == 0)
907 			continue;
908 
909 		/* Add a TLV for the RXF */
910 		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
911 		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
912 
913 		fifo_hdr->fifo_num = cpu_to_le32(i);
914 		fifo_hdr->available_bytes =
915 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
916 							RXF_RD_D_SPACE +
917 							offset_diff));
918 		fifo_hdr->wr_ptr =
919 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
920 							RXF_RD_WR_PTR +
921 							offset_diff));
922 		fifo_hdr->rd_ptr =
923 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
924 							RXF_RD_RD_PTR +
925 							offset_diff));
926 		fifo_hdr->fence_ptr =
927 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
928 							RXF_RD_FENCE_PTR +
929 							offset_diff));
930 		fifo_hdr->fence_mode =
931 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
932 							RXF_SET_FENCE_MODE +
933 							offset_diff));
934 
935 		/* Lock fence */
936 		iwl_trans_write_prph(mvm->trans,
937 				     RXF_SET_FENCE_MODE + offset_diff, 0x1);
938 		/* Set the fence pointer to the same place as the WR pointer */
939 		iwl_trans_write_prph(mvm->trans,
940 				     RXF_LD_WR2FENCE + offset_diff, 0x1);
941 		/* Set fence offset */
942 		iwl_trans_write_prph(mvm->trans,
943 				     RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
944 				     0x0);
945 
946 		/* Read FIFO */
947 		fifo_len /= sizeof(u32); /* Size in DWORDS */
948 		for (j = 0; j < fifo_len; j++)
949 			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
950 							 RXF_FIFO_RD_FENCE_INC +
951 							 offset_diff);
952 		*dump_data = iwl_fw_error_next_data(*dump_data);
953 	}
954 
955 	/* Pull TXF data from all TXFs */
956 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
957 		/* Mark the number of TXF we're pulling now */
958 		iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
959 
960 		fifo_hdr = (void *)(*dump_data)->data;
961 		fifo_data = (void *)fifo_hdr->data;
962 		fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
963 
964 		/* No need to try to read the data if the length is 0 */
965 		if (fifo_len == 0)
966 			continue;
967 
968 		/* Add a TLV for the FIFO */
969 		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
970 		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
971 
972 		fifo_hdr->fifo_num = cpu_to_le32(i);
973 		fifo_hdr->available_bytes =
974 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
975 							TXF_FIFO_ITEM_CNT));
976 		fifo_hdr->wr_ptr =
977 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
978 							TXF_WR_PTR));
979 		fifo_hdr->rd_ptr =
980 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
981 							TXF_RD_PTR));
982 		fifo_hdr->fence_ptr =
983 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
984 							TXF_FENCE_PTR));
985 		fifo_hdr->fence_mode =
986 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
987 							TXF_LOCK_FENCE));
988 
989 		/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
990 		iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
991 				     TXF_WR_PTR);
992 
993 		/* Dummy-read to advance the read pointer to the head */
994 		iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
995 
996 		/* Read FIFO */
997 		fifo_len /= sizeof(u32); /* Size in DWORDS */
998 		for (j = 0; j < fifo_len; j++)
999 			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1000 							  TXF_READ_MODIFY_DATA);
1001 		*dump_data = iwl_fw_error_next_data(*dump_data);
1002 	}
1003 
1004 	iwl_trans_release_nic_access(mvm->trans, &flags);
1005 }
1006 
1007 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
1008 {
1009 	if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
1010 	    !mvm->fw_dump_desc)
1011 		return;
1012 
1013 	kfree(mvm->fw_dump_desc);
1014 	mvm->fw_dump_desc = NULL;
1015 }
1016 
1017 #define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
1018 #define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */
1019 
1020 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
1021 {
1022 	struct iwl_fw_error_dump_file *dump_file;
1023 	struct iwl_fw_error_dump_data *dump_data;
1024 	struct iwl_fw_error_dump_info *dump_info;
1025 	struct iwl_fw_error_dump_mem *dump_mem;
1026 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
1027 	struct iwl_mvm_dump_ptrs *fw_error_dump;
1028 	u32 sram_len, sram_ofs;
1029 	u32 file_len, fifo_data_len = 0;
1030 	u32 smem_len = mvm->cfg->smem_len;
1031 	u32 sram2_len = mvm->cfg->dccm2_len;
1032 
1033 	lockdep_assert_held(&mvm->mutex);
1034 
1035 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
1036 	if (!fw_error_dump)
1037 		return;
1038 
1039 	/* SRAM - include stack CCM if driver knows the values for it */
1040 	if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
1041 		const struct fw_img *img;
1042 
1043 		img = &mvm->fw->img[mvm->cur_ucode];
1044 		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1045 		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
1046 	} else {
1047 		sram_ofs = mvm->cfg->dccm_offset;
1048 		sram_len = mvm->cfg->dccm_len;
1049 	}
1050 
1051 	/* reading RXF/TXF sizes */
1052 	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
1053 		struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
1054 		int i;
1055 
1056 		fifo_data_len = 0;
1057 
1058 		/* Count RXF size */
1059 		for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
1060 			if (!mem_cfg->rxfifo_size[i])
1061 				continue;
1062 
1063 			/* Add header info */
1064 			fifo_data_len += mem_cfg->rxfifo_size[i] +
1065 					 sizeof(*dump_data) +
1066 					 sizeof(struct iwl_fw_error_dump_fifo);
1067 		}
1068 
1069 		for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
1070 			if (!mem_cfg->txfifo_size[i])
1071 				continue;
1072 
1073 			/* Add header info */
1074 			fifo_data_len += mem_cfg->txfifo_size[i] +
1075 					 sizeof(*dump_data) +
1076 					 sizeof(struct iwl_fw_error_dump_fifo);
1077 		}
1078 	}
1079 
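	/*
	 * Base file length: the file header, the info TLV, the primary
	 * SRAM memory TLV and any FIFO data counted above. Optional
	 * regions (ICCM, SMEM, secondary SRAM, trigger description) are
	 * added below when present.
	 */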
1080 	file_len = sizeof(*dump_file) +
1081 		   sizeof(*dump_data) * 2 +
1082 		   sram_len + sizeof(*dump_mem) +
1083 		   fifo_data_len +
1084 		   sizeof(*dump_info);
1085 
1086 	/*
1087 	 * In 8000 HW family B-step include the ICCM (which resides separately)
1088 	 */
1089 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1090 	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
1091 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
1092 			    IWL8260_ICCM_LEN;
1093 
1094 	if (mvm->fw_dump_desc)
1095 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
1096 			    mvm->fw_dump_desc->len;
1097 
1098 	/* Make room for the SMEM, if it exists */
1099 	if (smem_len)
1100 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
1101 
1102 	/* Make room for the secondary SRAM, if it exists */
1103 	if (sram2_len)
1104 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
1105 
1106 	dump_file = vzalloc(file_len);
1107 	if (!dump_file) {
1108 		kfree(fw_error_dump);
1109 		iwl_mvm_free_fw_dump_desc(mvm);
1110 		return;
1111 	}
1112 
1113 	fw_error_dump->op_mode_ptr = dump_file;
1114 
1115 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
1116 	dump_data = (void *)dump_file->data;
1117 
1118 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
1119 	dump_data->len = cpu_to_le32(sizeof(*dump_info));
1120 	dump_info = (void *) dump_data->data;
1121 	dump_info->device_family =
1122 		mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
1123 			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
1124 			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
1125 	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
1126 	memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
1127 	       sizeof(dump_info->fw_human_readable));
1128 	strncpy(dump_info->dev_human_readable, mvm->cfg->name,
1129 		sizeof(dump_info->dev_human_readable));
1130 	strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
1131 		sizeof(dump_info->bus_human_readable));
1132 
1133 	dump_data = iwl_fw_error_next_data(dump_data);
1134 	/* We only dump the FIFOs if the FW is in error state */
1135 	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
1136 		iwl_mvm_dump_fifos(mvm, &dump_data);
1137 
1138 	if (mvm->fw_dump_desc) {
1139 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
1140 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
1141 					     mvm->fw_dump_desc->len);
1142 		dump_trig = (void *)dump_data->data;
1143 		memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
1144 		       sizeof(*dump_trig) + mvm->fw_dump_desc->len);
1145 
1146 		/* now we can free this copy */
1147 		iwl_mvm_free_fw_dump_desc(mvm);
1148 		dump_data = iwl_fw_error_next_data(dump_data);
1149 	}
1150 
1151 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1152 	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
1153 	dump_mem = (void *)dump_data->data;
1154 	dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1155 	dump_mem->offset = cpu_to_le32(sram_ofs);
1156 	iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
1157 				 sram_len);
1158 
1159 	if (smem_len) {
1160 		dump_data = iwl_fw_error_next_data(dump_data);
1161 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1162 		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
1163 		dump_mem = (void *)dump_data->data;
1164 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
1165 		dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
1166 		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
1167 					 dump_mem->data, smem_len);
1168 	}
1169 
1170 	if (sram2_len) {
1171 		dump_data = iwl_fw_error_next_data(dump_data);
1172 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1173 		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
1174 		dump_mem = (void *)dump_data->data;
1175 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1176 		dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
1177 		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
1178 					 dump_mem->data, sram2_len);
1179 	}
1180 
1181 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1182 	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
1183 		dump_data = iwl_fw_error_next_data(dump_data);
1184 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1185 		dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
1186 					     sizeof(*dump_mem));
1187 		dump_mem = (void *)dump_data->data;
1188 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1189 		dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
1190 		iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
1191 					 dump_mem->data, IWL8260_ICCM_LEN);
1192 	}
1193 
1194 	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
1195 	fw_error_dump->op_mode_len = file_len;
1196 	if (fw_error_dump->trans_ptr)
1197 		file_len += fw_error_dump->trans_ptr->len;
1198 	dump_file->file_len = cpu_to_le32(file_len);
1199 
1200 	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
1201 		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
1202 
1203 	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
1204 }
1205 
1206 struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
1207 	.trig_desc = {
1208 		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1209 	},
1210 };
1211 
1212 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1213 {
1214 	/* clear the D3 reconfig flag; we only need it to avoid dumping a
1215 	 * firmware coredump on reconfiguration, and we shouldn't do that
1216 	 * on a D3->D0 transition
1217 	 */
1218 	if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1219 		mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1220 		iwl_mvm_fw_error_dump(mvm);
1221 	}
1222 
1223 	/* cleanup all stale references (scan, roc), but keep the
1224 	 * ucode_down ref until reconfig is complete
1225 	 */
1226 	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1227 
1228 	iwl_trans_stop_device(mvm->trans);
1229 
1230 	mvm->scan_status = IWL_MVM_SCAN_NONE;
1231 	mvm->ps_disabled = false;
1232 	mvm->calibrating = false;
1233 
1234 	/* just in case one was running */
1235 	ieee80211_remain_on_channel_expired(mvm->hw);
1236 
1237 	ieee80211_iterate_active_interfaces_atomic(
1238 		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
1239 		iwl_mvm_cleanup_iterator, mvm);
1240 
1241 	mvm->p2p_device_vif = NULL;
1242 	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1243 
1244 	iwl_mvm_reset_phy_ctxts(mvm);
1245 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1246 	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1247 	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1248 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1249 	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
1250 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1251 	memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
1252 	memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
1253 	memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
1254 
1255 	ieee80211_wake_queues(mvm->hw);
1256 
1257 	/* clear any stale d0i3 state */
1258 	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1259 
1260 	mvm->vif_count = 0;
1261 	mvm->rx_ba_sessions = 0;
1262 	mvm->fw_dbg_conf = FW_DBG_INVALID;
1263 
1264 	/* keep statistics ticking */
1265 	iwl_mvm_accu_radio_stats(mvm);
1266 }
1267 
1268 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1269 {
1270 	int ret;
1271 
1272 	lockdep_assert_held(&mvm->mutex);
1273 
1274 	/* Clean up some internal and mac80211 state on restart */
1275 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1276 		iwl_mvm_restart_cleanup(mvm);
1277 
1278 	ret = iwl_mvm_up(mvm);
1279 
1280 	if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1281 		/* Something went wrong - we need to finish some cleanup
1282 		 * that normally iwl_mvm_mac_restart_complete() below
1283 		 * would do.
1284 		 */
1285 		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1286 		iwl_mvm_d0i3_enable_tx(mvm, NULL);
1287 	}
1288 
1289 	return ret;
1290 }
1291 
1292 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1293 {
1294 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1295 	int ret;
1296 
1297 	/* Some hw restart cleanups must not hold the mutex */
1298 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1299 		/*
1300 		 * Make sure we are out of d0i3. This is needed
1301 		 * to make sure the reference accounting is correct
1302 		 * (and there is no stale d0i3_exit_work).
1303 		 */
1304 		wait_event_timeout(mvm->d0i3_exit_waitq,
1305 				   !test_bit(IWL_MVM_STATUS_IN_D0I3,
1306 					     &mvm->status),
1307 				   HZ);
1308 	}
1309 
1310 	mutex_lock(&mvm->mutex);
1311 	ret = __iwl_mvm_mac_start(mvm);
1312 	mutex_unlock(&mvm->mutex);
1313 
1314 	return ret;
1315 }
1316 
1317 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1318 {
1319 	int ret;
1320 
1321 	mutex_lock(&mvm->mutex);
1322 
1323 	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1324 	iwl_mvm_d0i3_enable_tx(mvm, NULL);
1325 	ret = iwl_mvm_update_quotas(mvm, true, NULL);
1326 	if (ret)
1327 		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1328 			ret);
1329 
1330 	/* allow transport/FW low power modes */
1331 	iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1332 
1333 	/*
1334 	 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1335 	 * of packets the FW sent out, so we must reconnect.
1336 	 */
1337 	iwl_mvm_teardown_tdls_peers(mvm);
1338 
1339 	mutex_unlock(&mvm->mutex);
1340 }
1341 
1342 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1343 {
1344 	bool exit_now;
1345 
1346 	if (!iwl_mvm_is_d0i3_supported(mvm))
1347 		return;
1348 
1349 	mutex_lock(&mvm->d0i3_suspend_mutex);
1350 	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1351 	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
1352 					&mvm->d0i3_suspend_flags);
1353 	mutex_unlock(&mvm->d0i3_suspend_mutex);
1354 
1355 	if (exit_now) {
1356 		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
1357 		_iwl_mvm_exit_d0i3(mvm);
1358 	}
1359 
1360 	if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1361 		if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1362 					!test_bit(IWL_MVM_STATUS_IN_D0I3,
1363 						  &mvm->status),
1364 					HZ))
1365 			WARN_ONCE(1, "D0i3 exit on resume timed out\n");
1366 }
1367 
1368 static void
1369 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1370 			      enum ieee80211_reconfig_type reconfig_type)
1371 {
1372 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1373 
1374 	switch (reconfig_type) {
1375 	case IEEE80211_RECONFIG_TYPE_RESTART:
1376 		iwl_mvm_restart_complete(mvm);
1377 		break;
1378 	case IEEE80211_RECONFIG_TYPE_SUSPEND:
1379 		iwl_mvm_resume_complete(mvm);
1380 		break;
1381 	}
1382 }
1383 
1384 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1385 {
1386 	lockdep_assert_held(&mvm->mutex);
1387 
1388 	/* firmware counters are obviously reset now, but we shouldn't
1389 	 * track them only partially, so also clear the fw_reset_accu counters.
1390 	 */
1391 	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1392 
1393 	/*
1394 	 * Disallow low power states when the FW is down by taking
1395 	 * the UCODE_DOWN ref. in case of ongoing hw restart the
1396 	 * ref is already taken, so don't take it again.
1397 	 */
1398 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1399 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1400 
1401 	/* async_handlers_wk is now blocked */
1402 
1403 	/*
1404 	 * The work item could be running or queued if the
1405 	 * ROC time event stops just as we get here.
1406 	 */
1407 	flush_work(&mvm->roc_done_wk);
1408 
1409 	iwl_trans_stop_device(mvm->trans);
1410 
1411 	iwl_mvm_async_handlers_purge(mvm);
1412 	/* async_handlers_list is empty and will stay empty: HW is stopped */
1413 
1414 	/* the fw is stopped, the aux sta is dead: clean up driver state */
1415 	iwl_mvm_del_aux_sta(mvm);
1416 
1417 	/*
1418 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1419 	 * won't be called in this case).
1420 	 */
1421 	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1422 
1423 	/* We shouldn't have any UIDs still set.  Loop over all the UIDs to
1424 	 * make sure there's nothing left there and warn if any is found.
1425 	 */
1426 	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
1427 		int i;
1428 
1429 		for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
1430 			if (WARN_ONCE(mvm->scan_uid[i],
1431 				      "UMAC scan UID %d was not cleaned\n",
1432 				      mvm->scan_uid[i]))
1433 				mvm->scan_uid[i] = 0;
1434 		}
1435 	}
1436 
1437 	mvm->ucode_loaded = false;
1438 }
1439 
1440 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1441 {
1442 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1443 
1444 	flush_work(&mvm->d0i3_exit_work);
1445 	flush_work(&mvm->async_handlers_wk);
1446 	cancel_delayed_work_sync(&mvm->fw_dump_wk);
1447 	iwl_mvm_free_fw_dump_desc(mvm);
1448 
1449 	mutex_lock(&mvm->mutex);
1450 	__iwl_mvm_mac_stop(mvm);
1451 	mutex_unlock(&mvm->mutex);
1452 
1453 	/*
1454 	 * The worker might have been waiting for the mutex, let it run and
1455 	 * discover that its list is now empty.
1456 	 */
1457 	cancel_work_sync(&mvm->async_handlers_wk);
1458 }
1459 
1460 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1461 {
1462 	u16 i;
1463 
1464 	lockdep_assert_held(&mvm->mutex);
1465 
1466 	for (i = 0; i < NUM_PHY_CTX; i++)
1467 		if (!mvm->phy_ctxts[i].ref)
1468 			return &mvm->phy_ctxts[i];
1469 
1470 	IWL_ERR(mvm, "No available PHY context\n");
1471 	return NULL;
1472 }
1473 
1474 static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm,
1475 				    struct ieee80211_vif *vif, s8 tx_power)
1476 {
1477 	/* FW is in charge of regulatory enforcement */
1478 	struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
1479 		.mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
1480 		.pwr_restriction = cpu_to_le16(tx_power),
1481 	};
1482 
1483 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
1484 				    sizeof(reduce_txpwr_cmd),
1485 				    &reduce_txpwr_cmd);
1486 }
1487 
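/*
 * Note: the newer DEV_TX_POWER API appears to take the limit in 1/8 dBm
 * steps (hence the 8 * tx_power below), while the older REDUCE_TX_POWER
 * command takes whole dBm and is used as a fallback.
 */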
1488 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1489 				s16 tx_power)
1490 {
1491 	struct iwl_dev_tx_power_cmd cmd = {
1492 		.set_mode = 0,
1493 		.mac_context_id =
1494 			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1495 		.pwr_restriction = cpu_to_le16(8 * tx_power),
1496 	};
1497 
1498 	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
1499 		return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
1500 
1501 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1502 		cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1503 
1504 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
1505 				    sizeof(cmd), &cmd);
1506 }
1507 
1508 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1509 				     struct ieee80211_vif *vif)
1510 {
1511 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1512 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1513 	int ret;
1514 
1515 	mvmvif->mvm = mvm;
1516 
1517 	/*
1518 	 * make sure D0i3 exit is completed, otherwise a target access
1519 	 * during tx queue configuration could be done when still in
1520 	 * D0i3 state.
1521 	 */
1522 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1523 	if (ret)
1524 		return ret;
1525 
1526 	/*
1527 	 * Not much to do here. The stack will not allow interface
1528 	 * types or combinations that we didn't advertise, so we
1529 	 * don't really have to check the types.
1530 	 */
1531 
1532 	mutex_lock(&mvm->mutex);
1533 
1534 	/* make sure that beacon statistics don't go backwards with FW reset */
1535 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1536 		mvmvif->beacon_stats.accu_num_beacons +=
1537 			mvmvif->beacon_stats.num_beacons;
1538 
1539 	/* Allocate resources for the MAC context, and add it to the fw  */
1540 	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1541 	if (ret)
1542 		goto out_unlock;
1543 
1544 	/* Counting number of interfaces is needed for legacy PM */
1545 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1546 		mvm->vif_count++;
1547 
1548 	/*
1549 	 * The AP binding flow can be done only after the beacon
1550 	 * template is configured (which happens only in the mac80211
1551 	 * start_ap() flow), and adding the broadcast station can happen
1552 	 * only after the binding.
1553 	 * In addition, since modifying the MAC before adding a bcast
1554 	 * station is not allowed by the FW, delay the adding of MAC context to
1555 	 * the point where we can also add the bcast station.
1556 	 * In short: there's not much we can do at this point, other than
1557 	 * allocating resources :)
1558 	 */
1559 	if (vif->type == NL80211_IFTYPE_AP ||
1560 	    vif->type == NL80211_IFTYPE_ADHOC) {
1561 		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1562 		if (ret) {
1563 			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1564 			goto out_release;
1565 		}
1566 
1567 		iwl_mvm_vif_dbgfs_register(mvm, vif);
1568 		goto out_unlock;
1569 	}
1570 
1571 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1572 	if (ret)
1573 		goto out_release;
1574 
1575 	ret = iwl_mvm_power_update_mac(mvm);
1576 	if (ret)
1577 		goto out_remove_mac;
1578 
1579 	/* beacon filtering */
1580 	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1581 	if (ret)
1582 		goto out_remove_mac;
1583 
1584 	if (!mvm->bf_allowed_vif &&
1585 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1586 		mvm->bf_allowed_vif = mvmvif;
1587 		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1588 				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1589 	}
1590 
1591 	/*
1592 	 * P2P_DEVICE interface does not have a channel context assigned to it,
1593 	 * so a dedicated PHY context is allocated to it and the corresponding
1594 	 * MAC context is bound to it at this stage.
1595 	 */
1596 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1597 
1598 		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1599 		if (!mvmvif->phy_ctxt) {
1600 			ret = -ENOSPC;
1601 			goto out_free_bf;
1602 		}
1603 
1604 		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1605 		ret = iwl_mvm_binding_add_vif(mvm, vif);
1606 		if (ret)
1607 			goto out_unref_phy;
1608 
1609 		ret = iwl_mvm_add_bcast_sta(mvm, vif);
1610 		if (ret)
1611 			goto out_unbind;
1612 
1613 		/* Save a pointer to p2p device vif, so it can later be used to
1614 		 * update the p2p device MAC when a GO is started/stopped */
1615 		mvm->p2p_device_vif = vif;
1616 	}
1617 
1618 	iwl_mvm_vif_dbgfs_register(mvm, vif);
1619 	goto out_unlock;
1620 
1621  out_unbind:
1622 	iwl_mvm_binding_remove_vif(mvm, vif);
1623  out_unref_phy:
1624 	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1625  out_free_bf:
1626 	if (mvm->bf_allowed_vif == mvmvif) {
1627 		mvm->bf_allowed_vif = NULL;
1628 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1629 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1630 	}
1631  out_remove_mac:
1632 	mvmvif->phy_ctxt = NULL;
1633 	iwl_mvm_mac_ctxt_remove(mvm, vif);
1634  out_release:
1635 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1636 		mvm->vif_count--;
1637 
1638 	iwl_mvm_mac_ctxt_release(mvm, vif);
1639  out_unlock:
1640 	mutex_unlock(&mvm->mutex);
1641 
1642 	iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1643 
1644 	return ret;
1645 }
1646 
1647 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1648 					struct ieee80211_vif *vif)
1649 {
1650 	u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1651 
1652 	if (tfd_msk) {
1653 		/*
1654 		 * mac80211 first removes all the stations of the vif and
1655 		 * then removes the vif. When it removes a station it also
1656 		 * flushes the AMPDU session. So by now, all the AMPDU sessions
1657 		 * of all the stations of this vif are closed, and the queues
1658 		 * of these AMPDU sessions are properly closed.
1659 		 * We still need to take care of the shared queues of the vif.
1660 		 * Flush them here.
1661 		 */
1662 		mutex_lock(&mvm->mutex);
1663 		iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
1664 		mutex_unlock(&mvm->mutex);
1665 
1666 		/*
1667 		 * There are transports that buffer a few frames in the host.
1668 		 * For these, the flush above isn't enough since while we were
1669 		 * flushing, the transport might have sent more frames to the
1670 		 * device. To solve this, wait here until the transport is
1671 		 * empty. Technically, this could have replaced the flush
1672 		 * above, but flush is much faster than draining. So flush
1673 		 * first, and drain to make sure we have no frames in the
1674 		 * transport anymore.
1675 		 * If a station still had frames on the shared queues, it is
1676 		 * already marked as draining, so to complete the draining, we
1677 		 * just need to wait until the transport is empty.
1678 		 */
1679 		iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1680 	}
1681 
1682 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1683 		/*
1684 		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1685 		 * We assume here that all the packets sent to the OFFCHANNEL
1686 		 * queue are sent in ROC session.
1687 		 */
1688 		flush_work(&mvm->roc_done_wk);
1689 	} else {
1690 		/*
1691 		 * By now, all the AC queues are empty. The AGG queues are
1692 		 * empty too. We already got all the Tx responses for all the
1693 		 * packets in the queues. The drain work might have been
1694 		 * triggered. Flush it.
1695 		 */
1696 		flush_work(&mvm->sta_drained_wk);
1697 	}
1698 }
1699 
1700 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1701 					 struct ieee80211_vif *vif)
1702 {
1703 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1704 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1705 
1706 	iwl_mvm_prepare_mac_removal(mvm, vif);
1707 
1708 	mutex_lock(&mvm->mutex);
1709 
1710 	if (mvm->bf_allowed_vif == mvmvif) {
1711 		mvm->bf_allowed_vif = NULL;
1712 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1713 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1714 	}
1715 
1716 	iwl_mvm_vif_dbgfs_clean(mvm, vif);
1717 
1718 	/*
1719 	 * For AP/GO interfaces, the teardown of the resources allocated to the
1720 	 * interface is handled as part of the stop_ap flow.
1721 	 */
1722 	if (vif->type == NL80211_IFTYPE_AP ||
1723 	    vif->type == NL80211_IFTYPE_ADHOC) {
1724 #ifdef CONFIG_NL80211_TESTMODE
1725 		if (vif == mvm->noa_vif) {
1726 			mvm->noa_vif = NULL;
1727 			mvm->noa_duration = 0;
1728 		}
1729 #endif
1730 		iwl_mvm_dealloc_bcast_sta(mvm, vif);
1731 		goto out_release;
1732 	}
1733 
1734 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1735 		mvm->p2p_device_vif = NULL;
1736 		iwl_mvm_rm_bcast_sta(mvm, vif);
1737 		iwl_mvm_binding_remove_vif(mvm, vif);
1738 		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1739 		mvmvif->phy_ctxt = NULL;
1740 	}
1741 
1742 	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1743 		mvm->vif_count--;
1744 
1745 	iwl_mvm_power_update_mac(mvm);
1746 	iwl_mvm_mac_ctxt_remove(mvm, vif);
1747 
1748 out_release:
1749 	iwl_mvm_mac_ctxt_release(mvm, vif);
1750 	mutex_unlock(&mvm->mutex);
1751 }
1752 
1753 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1754 {
1755 	return 0;
1756 }
1757 
1758 struct iwl_mvm_mc_iter_data {
1759 	struct iwl_mvm *mvm;
1760 	int port_id;
1761 };
1762 
1763 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1764 				      struct ieee80211_vif *vif)
1765 {
1766 	struct iwl_mvm_mc_iter_data *data = _data;
1767 	struct iwl_mvm *mvm = data->mvm;
1768 	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1769 	int ret, len;
1770 
1771 	/* if we don't have free ports, mcast frames will be dropped */
1772 	if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1773 		return;
1774 
1775 	if (vif->type != NL80211_IFTYPE_STATION ||
1776 	    !vif->bss_conf.assoc)
1777 		return;
1778 
1779 	cmd->port_id = data->port_id++;
1780 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
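	/* round the variable-length command up to a 4-byte (dword) boundary */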
1781 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1782 
1783 	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1784 	if (ret)
1785 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1786 }
1787 
1788 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1789 {
1790 	struct iwl_mvm_mc_iter_data iter_data = {
1791 		.mvm = mvm,
1792 	};
1793 
1794 	lockdep_assert_held(&mvm->mutex);
1795 
1796 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1797 		return;
1798 
1799 	ieee80211_iterate_active_interfaces_atomic(
1800 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1801 		iwl_mvm_mc_iface_iterator, &iter_data);
1802 }
1803 
1804 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1805 				     struct netdev_hw_addr_list *mc_list)
1806 {
1807 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1808 	struct iwl_mcast_filter_cmd *cmd;
1809 	struct netdev_hw_addr *addr;
1810 	int addr_count;
1811 	bool pass_all;
1812 	int len;
1813 
1814 	addr_count = netdev_hw_addr_list_count(mc_list);
1815 	pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1816 		   IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1817 	if (pass_all)
1818 		addr_count = 0;
1819 
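	/*
	 * This callback must be atomic, hence GFP_ATOMIC. The returned value
	 * is handed back to us as the 'multicast' argument of the
	 * configure_filter() callback.
	 */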
1820 	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1821 	cmd = kzalloc(len, GFP_ATOMIC);
1822 	if (!cmd)
1823 		return 0;
1824 
1825 	if (pass_all) {
1826 		cmd->pass_all = 1;
1827 		return (u64)(unsigned long)cmd;
1828 	}
1829 
1830 	netdev_hw_addr_list_for_each(addr, mc_list) {
1831 		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1832 				   cmd->count, addr->addr);
1833 		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1834 		       addr->addr, ETH_ALEN);
1835 		cmd->count++;
1836 	}
1837 
1838 	return (u64)(unsigned long)cmd;
1839 }
1840 
1841 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1842 				     unsigned int changed_flags,
1843 				     unsigned int *total_flags,
1844 				     u64 multicast)
1845 {
1846 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1847 	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1848 
1849 	mutex_lock(&mvm->mutex);
1850 
1851 	/* replace previous configuration */
1852 	kfree(mvm->mcast_filter_cmd);
1853 	mvm->mcast_filter_cmd = cmd;
1854 
1855 	if (!cmd)
1856 		goto out;
1857 
1858 	iwl_mvm_recalc_multicast(mvm);
1859 out:
1860 	mutex_unlock(&mvm->mutex);
1861 	*total_flags = 0;
1862 }
1863 
1864 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1865 struct iwl_bcast_iter_data {
1866 	struct iwl_mvm *mvm;
1867 	struct iwl_bcast_filter_cmd *cmd;
1868 	u8 current_filter;
1869 };
1870 
1871 static void
1872 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1873 			 const struct iwl_fw_bcast_filter *in_filter,
1874 			 struct iwl_fw_bcast_filter *out_filter)
1875 {
1876 	struct iwl_fw_bcast_filter_attr *attr;
1877 	int i;
1878 
1879 	memcpy(out_filter, in_filter, sizeof(*out_filter));
1880 
1881 	for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1882 		attr = &out_filter->attrs[i];
1883 
1884 		if (!attr->mask)
1885 			break;
1886 
1887 		switch (attr->reserved1) {
1888 		case cpu_to_le16(BC_FILTER_MAGIC_IP):
1889 			if (vif->bss_conf.arp_addr_cnt != 1) {
1890 				attr->mask = 0;
1891 				continue;
1892 			}
1893 
1894 			attr->val = vif->bss_conf.arp_addr_list[0];
1895 			break;
1896 		case cpu_to_le16(BC_FILTER_MAGIC_MAC):
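			/* match on the last four bytes of the vif MAC address */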
1897 			attr->val = *(__be32 *)&vif->addr[2];
1898 			break;
1899 		default:
1900 			break;
1901 		}
1902 		attr->reserved1 = 0;
1903 		out_filter->num_attrs++;
1904 	}
1905 }
1906 
1907 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1908 					  struct ieee80211_vif *vif)
1909 {
1910 	struct iwl_bcast_iter_data *data = _data;
1911 	struct iwl_mvm *mvm = data->mvm;
1912 	struct iwl_bcast_filter_cmd *cmd = data->cmd;
1913 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1914 	struct iwl_fw_bcast_mac *bcast_mac;
1915 	int i;
1916 
1917 	if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1918 		return;
1919 
1920 	bcast_mac = &cmd->macs[mvmvif->id];
1921 
1922 	/*
1923 	 * enable filtering only for associated stations, but not for P2P
1924 	 * Clients
1925 	 */
1926 	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1927 	    !vif->bss_conf.assoc)
1928 		return;
1929 
1930 	bcast_mac->default_discard = 1;
1931 
1932 	/* copy all configured filters */
1933 	for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1934 		/*
1935 		 * Make sure we don't exceed our filters limit.
1936 		 * if there is still a valid filter to be configured,
1937 		 * be on the safe side and just allow bcast for this mac.
1938 		 */
1939 		if (WARN_ON_ONCE(data->current_filter >=
1940 				 ARRAY_SIZE(cmd->filters))) {
1941 			bcast_mac->default_discard = 0;
1942 			bcast_mac->attached_filters = 0;
1943 			break;
1944 		}
1945 
1946 		iwl_mvm_set_bcast_filter(vif,
1947 					 &mvm->bcast_filters[i],
1948 					 &cmd->filters[data->current_filter]);
1949 
1950 		/* skip current filter if it contains no attributes */
1951 		if (!cmd->filters[data->current_filter].num_attrs)
1952 			continue;
1953 
1954 		/* attach the filter to current mac */
1955 		bcast_mac->attached_filters |=
1956 				cpu_to_le16(BIT(data->current_filter));
1957 
1958 		data->current_filter++;
1959 	}
1960 }
1961 
1962 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1963 				    struct iwl_bcast_filter_cmd *cmd)
1964 {
1965 	struct iwl_bcast_iter_data iter_data = {
1966 		.mvm = mvm,
1967 		.cmd = cmd,
1968 	};
1969 
1970 	if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1971 		return false;
1972 
1973 	memset(cmd, 0, sizeof(*cmd));
1974 	cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1975 	cmd->max_macs = ARRAY_SIZE(cmd->macs);
1976 
1977 #ifdef CONFIG_IWLWIFI_DEBUGFS
1978 	/* use debugfs filters/macs if override is configured */
1979 	if (mvm->dbgfs_bcast_filtering.override) {
1980 		memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1981 		       sizeof(cmd->filters));
1982 		memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1983 		       sizeof(cmd->macs));
1984 		return true;
1985 	}
1986 #endif
1987 
1988 	/* if no filters are configured, do nothing */
1989 	if (!mvm->bcast_filters)
1990 		return false;
1991 
1992 	/* configure and attach these filters for each associated sta vif */
1993 	ieee80211_iterate_active_interfaces(
1994 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1995 		iwl_mvm_bcast_filter_iterator, &iter_data);
1996 
1997 	return true;
1998 }
1999 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2000 					  struct ieee80211_vif *vif)
2001 {
2002 	struct iwl_bcast_filter_cmd cmd;
2003 
2004 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2005 		return 0;
2006 
2007 	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2008 		return 0;
2009 
2010 	return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
2011 				    sizeof(cmd), &cmd);
2012 }
2013 #else
2014 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2015 						 struct ieee80211_vif *vif)
2016 {
2017 	return 0;
2018 }
2019 #endif
2020 
2021 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2022 					     struct ieee80211_vif *vif,
2023 					     struct ieee80211_bss_conf *bss_conf,
2024 					     u32 changes)
2025 {
2026 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2027 	int ret;
2028 
2029 	/*
2030 	 * Re-calculate the tsf id, as the master-slave relations depend on the
2031 	 * beacon interval, which was not known when the station interface was
2032 	 * added.
2033 	 */
2034 	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
2035 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2036 
2037 	/*
2038 	 * If we're not associated yet, take the (new) BSSID before associating
2039 	 * so the firmware knows. If we're already associated, then use the old
2040 	 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2041 	 * branch for disassociation below.
2042 	 */
2043 	if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2044 		memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2045 
2046 	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
2047 	if (ret)
2048 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2049 
2050 	/* after sending it once, adopt mac80211 data */
2051 	memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2052 	mvmvif->associated = bss_conf->assoc;
2053 
2054 	if (changes & BSS_CHANGED_ASSOC) {
2055 		if (bss_conf->assoc) {
2056 			/* clear statistics to get clean beacon counter */
2057 			iwl_mvm_request_statistics(mvm, true);
2058 			memset(&mvmvif->beacon_stats, 0,
2059 			       sizeof(mvmvif->beacon_stats));
2060 
2061 			/* add quota for this interface */
2062 			ret = iwl_mvm_update_quotas(mvm, true, NULL);
2063 			if (ret) {
2064 				IWL_ERR(mvm, "failed to update quotas\n");
2065 				return;
2066 			}
2067 
2068 			if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2069 				     &mvm->status)) {
2070 				/*
2071 				 * If we're restarting then the firmware will
2072 				 * obviously have lost synchronisation with
2073 				 * the AP. It will attempt to synchronise by
2074 				 * itself, but we can make it more reliable by
2075 				 * scheduling a session protection time event.
2076 				 *
2077 				 * The firmware needs to receive a beacon to
2078 				 * catch up with synchronisation, use 110% of
2079 				 * the beacon interval.
2080 				 *
2081 				 * Set a large maximum delay to allow for more
2082 				 * than a single interface.
2083 				 */
2084 				u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2085 				iwl_mvm_protect_session(mvm, vif, dur, dur,
2086 							5 * dur, false);
2087 			}
2088 
2089 			iwl_mvm_sf_update(mvm, vif, false);
2090 			iwl_mvm_power_vif_assoc(mvm, vif);
2091 			if (vif->p2p) {
2092 				iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
2093 				iwl_mvm_update_smps(mvm, vif,
2094 						    IWL_MVM_SMPS_REQ_PROT,
2095 						    IEEE80211_SMPS_DYNAMIC);
2096 			}
2097 		} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2098 			/*
2099 			 * If update fails - SF might be running in associated
2100 			 * mode while disassociated - which is forbidden.
2101 			 */
2102 			WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2103 				  "Failed to update SF upon disassociation\n");
2104 
2105 			/* remove AP station now that the MAC is unassoc */
2106 			ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
2107 			if (ret)
2108 				IWL_ERR(mvm, "failed to remove AP station\n");
2109 
2110 			if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2111 				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
2112 			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
2113 			/* remove quota for this interface */
2114 			ret = iwl_mvm_update_quotas(mvm, false, NULL);
2115 			if (ret)
2116 				IWL_ERR(mvm, "failed to update quotas\n");
2117 
2118 			if (vif->p2p)
2119 				iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
2120 
2121 			/* this will take the cleared BSSID from bss_conf */
2122 			ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2123 			if (ret)
2124 				IWL_ERR(mvm,
2125 					"failed to update MAC %pM (clear after unassoc)\n",
2126 					vif->addr);
2127 		}
2128 
2129 		iwl_mvm_recalc_multicast(mvm);
2130 		iwl_mvm_configure_bcast_filter(mvm, vif);
2131 
2132 		/* reset rssi values */
2133 		mvmvif->bf_data.ave_beacon_signal = 0;
2134 
2135 		iwl_mvm_bt_coex_vif_change(mvm);
2136 		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2137 				    IEEE80211_SMPS_AUTOMATIC);
2138 	} else if (changes & BSS_CHANGED_BEACON_INFO) {
2139 		/*
2140 		 * We received a beacon _after_ association so
2141 		 * remove the session protection.
2142 		 */
2143 		iwl_mvm_remove_time_event(mvm, mvmvif,
2144 					  &mvmvif->time_event_data);
2145 	}
2146 
2147 	if (changes & BSS_CHANGED_BEACON_INFO) {
2148 		iwl_mvm_sf_update(mvm, vif, false);
2149 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2150 	}
2151 
2152 	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
2153 		ret = iwl_mvm_power_update_mac(mvm);
2154 		if (ret)
2155 			IWL_ERR(mvm, "failed to update power mode\n");
2156 	}
2157 
2158 	if (changes & BSS_CHANGED_TXPOWER) {
2159 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2160 				bss_conf->txpower);
2161 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2162 	}
2163 
2164 	if (changes & BSS_CHANGED_CQM) {
2165 		IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2166 		/* reset cqm events tracking */
2167 		mvmvif->bf_data.last_cqm_event = 0;
2168 		if (mvmvif->bf_data.bf_enabled) {
2169 			ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2170 			if (ret)
2171 				IWL_ERR(mvm,
2172 					"failed to update CQM thresholds\n");
2173 		}
2174 	}
2175 
2176 	if (changes & BSS_CHANGED_ARP_FILTER) {
2177 		IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2178 		iwl_mvm_configure_bcast_filter(mvm, vif);
2179 	}
2180 }
2181 
2182 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2183 				 struct ieee80211_vif *vif)
2184 {
2185 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2186 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2187 	int ret;
2188 
2189 	/*
2190 	 * iwl_mvm_mac_ctxt_add() might read directly from the device
2191 	 * (the system time), so make sure it is available.
2192 	 */
2193 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2194 	if (ret)
2195 		return ret;
2196 
2197 	mutex_lock(&mvm->mutex);
2198 
2199 	/* Send the beacon template */
2200 	ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2201 	if (ret)
2202 		goto out_unlock;
2203 
2204 	/*
2205 	 * Re-calculate the tsf id, as the master-slave relations depend on the
2206 	 * beacon interval, which was not known when the AP interface was added.
2207 	 */
2208 	if (vif->type == NL80211_IFTYPE_AP)
2209 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2210 
2211 	/* Add the mac context */
2212 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2213 	if (ret)
2214 		goto out_unlock;
2215 
2216 	/* Perform the binding */
2217 	ret = iwl_mvm_binding_add_vif(mvm, vif);
2218 	if (ret)
2219 		goto out_remove;
2220 
2221 	/* Send the bcast station. At this stage the TBTT and DTIM time events
2222 	 * are added and applied to the scheduler */
2223 	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2224 	if (ret)
2225 		goto out_unbind;
2226 
2227 	/* must be set before quota calculations */
2228 	mvmvif->ap_ibss_active = true;
2229 
2230 	/* power update needs to be done before quotas */
2231 	iwl_mvm_power_update_mac(mvm);
2232 
2233 	ret = iwl_mvm_update_quotas(mvm, false, NULL);
2234 	if (ret)
2235 		goto out_quota_failed;
2236 
2237 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2238 	if (vif->p2p && mvm->p2p_device_vif)
2239 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2240 
2241 	iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2242 
2243 	iwl_mvm_bt_coex_vif_change(mvm);
2244 
2245 	/* we don't support TDLS during DCM */
2246 	if (iwl_mvm_phy_ctx_count(mvm) > 1)
2247 		iwl_mvm_teardown_tdls_peers(mvm);
2248 
2249 	goto out_unlock;
2250 
2251 out_quota_failed:
2252 	iwl_mvm_power_update_mac(mvm);
2253 	mvmvif->ap_ibss_active = false;
2254 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
2255 out_unbind:
2256 	iwl_mvm_binding_remove_vif(mvm, vif);
2257 out_remove:
2258 	iwl_mvm_mac_ctxt_remove(mvm, vif);
2259 out_unlock:
2260 	mutex_unlock(&mvm->mutex);
2261 	iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2262 	return ret;
2263 }
2264 
2265 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2266 				 struct ieee80211_vif *vif)
2267 {
2268 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2269 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2270 
2271 	iwl_mvm_prepare_mac_removal(mvm, vif);
2272 
2273 	mutex_lock(&mvm->mutex);
2274 
2275 	/* Handle AP stop while in CSA */
2276 	if (rcu_access_pointer(mvm->csa_vif) == vif) {
2277 		iwl_mvm_remove_time_event(mvm, mvmvif,
2278 					  &mvmvif->time_event_data);
2279 		RCU_INIT_POINTER(mvm->csa_vif, NULL);
2280 		mvmvif->csa_countdown = false;
2281 	}
2282 
2283 	if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2284 		RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2285 		mvm->csa_tx_block_bcn_timeout = 0;
2286 	}
2287 
2288 	mvmvif->ap_ibss_active = false;
2289 	mvm->ap_last_beacon_gp2 = 0;
2290 
2291 	iwl_mvm_bt_coex_vif_change(mvm);
2292 
2293 	iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2294 
2295 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2296 	if (vif->p2p && mvm->p2p_device_vif)
2297 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2298 
2299 	iwl_mvm_update_quotas(mvm, false, NULL);
2300 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
2301 	iwl_mvm_binding_remove_vif(mvm, vif);
2302 
2303 	iwl_mvm_power_update_mac(mvm);
2304 
2305 	iwl_mvm_mac_ctxt_remove(mvm, vif);
2306 
2307 	mutex_unlock(&mvm->mutex);
2308 }
2309 
2310 static void
2311 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2312 				 struct ieee80211_vif *vif,
2313 				 struct ieee80211_bss_conf *bss_conf,
2314 				 u32 changes)
2315 {
2316 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2317 
2318 	/* Changes will be applied when the AP/IBSS is started */
2319 	if (!mvmvif->ap_ibss_active)
2320 		return;
2321 
2322 	if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2323 		       BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2324 	    iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2325 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2326 
2327 	/* Need to send a new beacon template to the FW */
2328 	if (changes & BSS_CHANGED_BEACON &&
2329 	    iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2330 		IWL_WARN(mvm, "Failed updating beacon data\n");
2331 
2332 	if (changes & BSS_CHANGED_TXPOWER) {
2333 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2334 				bss_conf->txpower);
2335 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2336 	}
2337 
2338 }
2339 
2340 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2341 				     struct ieee80211_vif *vif,
2342 				     struct ieee80211_bss_conf *bss_conf,
2343 				     u32 changes)
2344 {
2345 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2346 
2347 	/*
2348 	 * iwl_mvm_bss_info_changed_station() might call
2349 	 * iwl_mvm_protect_session(), which reads directly from
2350 	 * the device (the system time), so make sure it is available.
2351 	 */
2352 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2353 		return;
2354 
2355 	mutex_lock(&mvm->mutex);
2356 
2357 	if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2358 		iwl_mvm_scan_offload_stop(mvm, true);
2359 
2360 	switch (vif->type) {
2361 	case NL80211_IFTYPE_STATION:
2362 		iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2363 		break;
2364 	case NL80211_IFTYPE_AP:
2365 	case NL80211_IFTYPE_ADHOC:
2366 		iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2367 		break;
2368 	default:
2369 		/* shouldn't happen */
2370 		WARN_ON_ONCE(1);
2371 	}
2372 
2373 	mutex_unlock(&mvm->mutex);
2374 	iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2375 }
2376 
2377 static int iwl_mvm_cancel_scan_wait_notif(struct iwl_mvm *mvm,
2378 					  enum iwl_scan_status scan_type)
2379 {
2380 	int ret;
2381 	bool wait_for_handlers = false;
2382 
2383 	mutex_lock(&mvm->mutex);
2384 
2385 	if (mvm->scan_status != scan_type) {
2386 		ret = 0;
2387 		/* make sure there are no pending notifications */
2388 		wait_for_handlers = true;
2389 		goto out;
2390 	}
2391 
2392 	switch (scan_type) {
2393 	case IWL_MVM_SCAN_SCHED:
2394 		ret = iwl_mvm_scan_offload_stop(mvm, true);
2395 		break;
2396 	case IWL_MVM_SCAN_OS:
2397 		ret = iwl_mvm_cancel_scan(mvm);
2398 		break;
2399 	case IWL_MVM_SCAN_NONE:
2400 	default:
2401 		WARN_ON_ONCE(1);
2402 		ret = -EINVAL;
2403 		break;
2404 	}
2405 	if (ret)
2406 		goto out;
2407 
2408 	wait_for_handlers = true;
2409 out:
2410 	mutex_unlock(&mvm->mutex);
2411 
2412 	/* make sure we consume the completion notification */
2413 	if (wait_for_handlers)
2414 		iwl_mvm_wait_for_async_handlers(mvm);
2415 
2416 	return ret;
2417 }
2418 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2419 			       struct ieee80211_vif *vif,
2420 			       struct ieee80211_scan_request *hw_req)
2421 {
2422 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2423 	struct cfg80211_scan_request *req = &hw_req->req;
2424 	int ret;
2425 
2426 	if (req->n_channels == 0 ||
2427 	    req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
2428 		return -EINVAL;
2429 
2430 	if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2431 		ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
2432 		if (ret)
2433 			return ret;
2434 	}
2435 
2436 	mutex_lock(&mvm->mutex);
2437 
2438 	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2439 		IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
2440 		ret = -EBUSY;
2441 		goto out;
2442 	}
2443 
2444 	if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
2445 		ret = -EBUSY;
2446 		goto out;
2447 	}
2448 
2449 	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
2450 
2451 	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
2452 		ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
2453 	else
2454 		ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
2455 
2456 	if (ret)
2457 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
2458 out:
2459 	mutex_unlock(&mvm->mutex);
2460 	return ret;
2461 }
2462 
2463 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2464 				       struct ieee80211_vif *vif)
2465 {
2466 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2467 
2468 	mutex_lock(&mvm->mutex);
2469 
2470 	/* Due to a race condition, it's possible that mac80211 asks
2471 	 * us to stop a hw_scan when it's already stopped.  This can
2472 	 * happen, for instance, if we stopped the scan ourselves,
2473 	 * called ieee80211_scan_completed() and the userspace called
2474 	 * cancel scan before ieee80211_scan_work() could run.
2475 	 * To handle that, simply return if the scan is not running.
2476 	*/
2477 	/* FIXME: for now, we ignore this race for UMAC scans, since
2478 	 * they don't set the scan_status.
2479 	 */
2480 	if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
2481 	    (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2482 		iwl_mvm_cancel_scan(mvm);
2483 
2484 	mutex_unlock(&mvm->mutex);
2485 }
2486 
2487 static void
2488 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2489 				  struct ieee80211_sta *sta, u16 tids,
2490 				  int num_frames,
2491 				  enum ieee80211_frame_release_type reason,
2492 				  bool more_data)
2493 {
2494 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2495 
2496 	/* Called when we need to transmit (a) frame(s) from mac80211 */
2497 
2498 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2499 					  tids, more_data, false);
2500 }
2501 
2502 static void
2503 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2504 				    struct ieee80211_sta *sta, u16 tids,
2505 				    int num_frames,
2506 				    enum ieee80211_frame_release_type reason,
2507 				    bool more_data)
2508 {
2509 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2510 
2511 	/* Called when we need to transmit (a) frame(s) from agg queue */
2512 
2513 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2514 					  tids, more_data, true);
2515 }
2516 
2517 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2518 				   struct ieee80211_vif *vif,
2519 				   enum sta_notify_cmd cmd,
2520 				   struct ieee80211_sta *sta)
2521 {
2522 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2523 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2524 	unsigned long txqs = 0, tids = 0;
2525 	int tid;
2526 
2527 	spin_lock_bh(&mvmsta->lock);
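	/*
	 * Collect the TX queues carrying aggregation traffic for this station
	 * and the TIDs that still have frames queued, so their queue timers
	 * can be frozen/unfrozen and the TIDs marked as buffered below.
	 */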
2528 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2529 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2530 
2531 		if (tid_data->state != IWL_AGG_ON &&
2532 		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2533 			continue;
2534 
2535 		__set_bit(tid_data->txq_id, &txqs);
2536 
2537 		if (iwl_mvm_tid_queued(tid_data) == 0)
2538 			continue;
2539 
2540 		__set_bit(tid, &tids);
2541 	}
2542 
2543 	switch (cmd) {
2544 	case STA_NOTIFY_SLEEP:
2545 		if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2546 			ieee80211_sta_block_awake(hw, sta, true);
2547 
2548 		for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2549 			ieee80211_sta_set_buffered(sta, tid, true);
2550 
2551 		if (txqs)
2552 			iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2553 		/*
2554 		 * The fw updates the STA to be asleep. Tx packets on the Tx
2555 		 * queues to this station will not be transmitted. The fw will
2556 		 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2557 		 */
2558 		break;
2559 	case STA_NOTIFY_AWAKE:
2560 		if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2561 			break;
2562 
2563 		if (txqs)
2564 			iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2565 		iwl_mvm_sta_modify_ps_wake(mvm, sta);
2566 		break;
2567 	default:
2568 		break;
2569 	}
2570 	spin_unlock_bh(&mvmsta->lock);
2571 }
2572 
2573 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2574 				       struct ieee80211_vif *vif,
2575 				       struct ieee80211_sta *sta)
2576 {
2577 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2578 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2579 
2580 	/*
2581 	 * This is called before mac80211 does RCU synchronisation,
2582 	 * so here we already invalidate our internal RCU-protected
2583 	 * station pointer. The rest of the code will thus no longer
2584 	 * be able to find the station this way, and we don't rely
2585 	 * on further RCU synchronisation after the sta_state()
2586 	 * callback deleted the station.
2587 	 */
2588 	mutex_lock(&mvm->mutex);
2589 	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2590 		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2591 				   ERR_PTR(-ENOENT));
2592 	mutex_unlock(&mvm->mutex);
2593 }
2594 
2595 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2596 				const u8 *bssid)
2597 {
2598 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2599 		return;
2600 
2601 	if (iwlwifi_mod_params.uapsd_disable) {
2602 		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2603 		return;
2604 	}
2605 
2606 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2607 }
2608 
2609 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2610 				 struct ieee80211_vif *vif,
2611 				 struct ieee80211_sta *sta,
2612 				 enum ieee80211_sta_state old_state,
2613 				 enum ieee80211_sta_state new_state)
2614 {
2615 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2616 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2617 	int ret;
2618 
2619 	IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2620 			   sta->addr, old_state, new_state);
2621 
2622 	/* this would be a mac80211 bug ... but don't crash */
2623 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2624 		return -EINVAL;
2625 
2626 	/* if a STA is being removed, reuse its ID */
2627 	flush_work(&mvm->sta_drained_wk);
2628 
2629 	mutex_lock(&mvm->mutex);
2630 	if (old_state == IEEE80211_STA_NOTEXIST &&
2631 	    new_state == IEEE80211_STA_NONE) {
2632 		/*
2633 		 * Firmware bug - it'll crash if the beacon interval is less
2634 		 * than 16. We can't avoid connecting at all, so refuse the
2635 		 * station state change, this will cause mac80211 to abandon
2636 		 * attempts to connect to this AP, and eventually wpa_s will
2637 		 * blacklist the AP...
2638 		 */
2639 		if (vif->type == NL80211_IFTYPE_STATION &&
2640 		    vif->bss_conf.beacon_int < 16) {
2641 			IWL_ERR(mvm,
2642 				"AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2643 				sta->addr, vif->bss_conf.beacon_int);
2644 			ret = -EINVAL;
2645 			goto out_unlock;
2646 		}
2647 
2648 		if (sta->tdls &&
2649 		    (vif->p2p ||
2650 		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
2651 						IWL_MVM_TDLS_STA_COUNT ||
2652 		     iwl_mvm_phy_ctx_count(mvm) > 1)) {
2653 			IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2654 			ret = -EBUSY;
2655 			goto out_unlock;
2656 		}
2657 
2658 		ret = iwl_mvm_add_sta(mvm, vif, sta);
2659 		if (sta->tdls && ret == 0)
2660 			iwl_mvm_recalc_tdls_state(mvm, vif, true);
2661 	} else if (old_state == IEEE80211_STA_NONE &&
2662 		   new_state == IEEE80211_STA_AUTH) {
2663 		/*
2664 		 * EBS may be disabled due to previous failures reported by FW.
2665 		 * Reset EBS status here assuming environment has been changed.
2666 		 */
2667 		mvm->last_ebs_successful = true;
2668 		iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2669 		ret = 0;
2670 	} else if (old_state == IEEE80211_STA_AUTH &&
2671 		   new_state == IEEE80211_STA_ASSOC) {
2672 		ret = iwl_mvm_update_sta(mvm, vif, sta);
2673 		if (ret == 0)
2674 			iwl_mvm_rs_rate_init(mvm, sta,
2675 					     mvmvif->phy_ctxt->channel->band,
2676 					     true);
2677 	} else if (old_state == IEEE80211_STA_ASSOC &&
2678 		   new_state == IEEE80211_STA_AUTHORIZED) {
2679 
2680 		/* we don't support TDLS during DCM */
2681 		if (iwl_mvm_phy_ctx_count(mvm) > 1)
2682 			iwl_mvm_teardown_tdls_peers(mvm);
2683 
2684 		/* enable beacon filtering */
2685 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2686 		ret = 0;
2687 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
2688 		   new_state == IEEE80211_STA_ASSOC) {
2689 		/* disable beacon filtering */
2690 		WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2691 		ret = 0;
2692 	} else if (old_state == IEEE80211_STA_ASSOC &&
2693 		   new_state == IEEE80211_STA_AUTH) {
2694 		ret = 0;
2695 	} else if (old_state == IEEE80211_STA_AUTH &&
2696 		   new_state == IEEE80211_STA_NONE) {
2697 		ret = 0;
2698 	} else if (old_state == IEEE80211_STA_NONE &&
2699 		   new_state == IEEE80211_STA_NOTEXIST) {
2700 		ret = iwl_mvm_rm_sta(mvm, vif, sta);
2701 		if (sta->tdls)
2702 			iwl_mvm_recalc_tdls_state(mvm, vif, false);
2703 	} else {
2704 		ret = -EIO;
2705 	}
2706  out_unlock:
2707 	mutex_unlock(&mvm->mutex);
2708 
2709 	if (sta->tdls && ret == 0) {
2710 		if (old_state == IEEE80211_STA_NOTEXIST &&
2711 		    new_state == IEEE80211_STA_NONE)
2712 			ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2713 		else if (old_state == IEEE80211_STA_NONE &&
2714 			 new_state == IEEE80211_STA_NOTEXIST)
2715 			ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2716 	}
2717 
2718 	return ret;
2719 }
2720 
2721 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2722 {
2723 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2724 
2725 	mvm->rts_threshold = value;
2726 
2727 	return 0;
2728 }
2729 
2730 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2731 				  struct ieee80211_vif *vif,
2732 				  struct ieee80211_sta *sta, u32 changed)
2733 {
2734 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2735 
2736 	if (vif->type == NL80211_IFTYPE_STATION &&
2737 	    changed & IEEE80211_RC_NSS_CHANGED)
2738 		iwl_mvm_sf_update(mvm, vif, false);
2739 }
2740 
2741 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2742 			       struct ieee80211_vif *vif, u16 ac,
2743 			       const struct ieee80211_tx_queue_params *params)
2744 {
2745 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2746 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2747 
2748 	mvmvif->queue_params[ac] = *params;
2749 
2750 	/*
2751 	 * No need to update right away, we'll get BSS_CHANGED_QOS later.
2752 	 * The exception is P2P_DEVICE, which needs an immediate update.
2753 	 */
2754 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2755 		int ret;
2756 
2757 		mutex_lock(&mvm->mutex);
2758 		ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2759 		mutex_unlock(&mvm->mutex);
2760 		return ret;
2761 	}
2762 	return 0;
2763 }
2764 
2765 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2766 				      struct ieee80211_vif *vif)
2767 {
2768 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2769 	u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
2770 			   200 + vif->bss_conf.beacon_int);
2771 	u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
2772 			       100 + vif->bss_conf.beacon_int);
2773 
2774 	if (WARN_ON_ONCE(vif->bss_conf.assoc))
2775 		return;
2776 
2777 	/*
2778 	 * iwl_mvm_protect_session() reads directly from the device
2779 	 * (the system time), so make sure it is available.
2780 	 */
2781 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2782 		return;
2783 
2784 	mutex_lock(&mvm->mutex);
2785 	/* Try really hard to protect the session and hear a beacon */
2786 	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2787 	mutex_unlock(&mvm->mutex);
2788 
2789 	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2790 }
2791 
2792 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2793 					struct ieee80211_vif *vif,
2794 					struct cfg80211_sched_scan_request *req,
2795 					struct ieee80211_scan_ies *ies)
2796 {
2797 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2798 	int ret;
2799 
2800 	/* we don't support "match all" in the firmware */
2801 	if (!req->n_match_sets)
2802 		return -EOPNOTSUPP;
2803 
2804 	if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2805 		ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
2806 		if (ret)
2807 			return ret;
2808 	}
2809 
2810 	mutex_lock(&mvm->mutex);
2811 
2812 	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2813 		IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
2814 		ret = -EBUSY;
2815 		goto out;
2816 	}
2817 
2818 	if (!vif->bss_conf.idle) {
2819 		ret = -EBUSY;
2820 		goto out;
2821 	}
2822 
2823 	if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
2824 		ret = -EBUSY;
2825 		goto out;
2826 	}
2827 
2828 	ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
2829 	if (ret)
2830 		mvm->scan_status = IWL_MVM_SCAN_NONE;
2831 
2832 out:
2833 	mutex_unlock(&mvm->mutex);
2834 	return ret;
2835 }
2836 
2837 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2838 				       struct ieee80211_vif *vif)
2839 {
2840 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2841 	int ret;
2842 
2843 	mutex_lock(&mvm->mutex);
2844 
2845 	/* Due to a race condition, it's possible that mac80211 asks
2846 	 * us to stop a sched_scan when it's already stopped.  This
2847 	 * can happen, for instance, if we stopped the scan ourselves,
2848 	 * called ieee80211_sched_scan_stopped() and the userspace called
2849 	 * stop sched scan before ieee80211_sched_scan_stopped_work()
2850 	 * could run.  To handle this, simply return if the scan is
2851 	 * not running.
2852 	*/
2853 	/* FIXME: for now, we ignore this race for UMAC scans, since
2854 	 * they don't set the scan_status.
2855 	 */
2856 	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
2857 	    !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2858 		mutex_unlock(&mvm->mutex);
2859 		return 0;
2860 	}
2861 
2862 	ret = iwl_mvm_scan_offload_stop(mvm, false);
2863 	mutex_unlock(&mvm->mutex);
2864 	iwl_mvm_wait_for_async_handlers(mvm);
2865 
2866 	return ret;
2867 }
2868 
2869 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2870 			       enum set_key_cmd cmd,
2871 			       struct ieee80211_vif *vif,
2872 			       struct ieee80211_sta *sta,
2873 			       struct ieee80211_key_conf *key)
2874 {
2875 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2876 	int ret;
2877 
2878 	if (iwlwifi_mod_params.sw_crypto) {
2879 		IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2880 		return -EOPNOTSUPP;
2881 	}
2882 
2883 	switch (key->cipher) {
2884 	case WLAN_CIPHER_SUITE_TKIP:
2885 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2886 		/* fall-through */
2887 	case WLAN_CIPHER_SUITE_CCMP:
2888 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2889 		break;
2890 	case WLAN_CIPHER_SUITE_AES_CMAC:
2891 		WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE));
2892 		break;
2893 	case WLAN_CIPHER_SUITE_WEP40:
2894 	case WLAN_CIPHER_SUITE_WEP104:
2895 		/* For non-client mode, only use WEP keys for TX as we probably
2896 		 * don't have a station yet anyway and would then have to keep
2897 		 * track of the keys, linking them to each of the clients/peers
2898 		 * as they appear. For now, don't do that, for performance WEP
2899 		 * offload doesn't really matter much, but we need it for some
2900 		 * other offload features in client mode.
2901 		 */
2902 		if (vif->type != NL80211_IFTYPE_STATION)
2903 			return 0;
2904 		break;
2905 	default:
2906 		/* currently FW supports only one optional cipher scheme */
2907 		if (hw->n_cipher_schemes &&
2908 		    hw->cipher_schemes->cipher == key->cipher)
2909 			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2910 		else
2911 			return -EOPNOTSUPP;
2912 	}
2913 
2914 	mutex_lock(&mvm->mutex);
2915 
2916 	switch (cmd) {
2917 	case SET_KEY:
2918 		if ((vif->type == NL80211_IFTYPE_ADHOC ||
2919 		     vif->type == NL80211_IFTYPE_AP) && !sta) {
2920 			/*
2921 			 * GTK on AP interface is a TX-only key, return 0;
2922 			 * on IBSS they're per-station and because we're lazy
2923 			 * we don't support them for RX, so do the same.
2924 			 */
2925 			ret = 0;
2926 			key->hw_key_idx = STA_KEY_IDX_INVALID;
2927 			break;
2928 		}
2929 
2930 		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
2931 		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
2932 		if (ret) {
2933 			IWL_WARN(mvm, "set key failed\n");
2934 			/*
2935 			 * can't add key for RX, but we don't need it
2936 			 * in the device for TX so still return 0
2937 			 */
2938 			key->hw_key_idx = STA_KEY_IDX_INVALID;
2939 			ret = 0;
2940 		}
2941 
2942 		break;
2943 	case DISABLE_KEY:
2944 		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
2945 			ret = 0;
2946 			break;
2947 		}
2948 
2949 		IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
2950 		ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
2951 		break;
2952 	default:
2953 		ret = -EINVAL;
2954 	}
2955 
2956 	mutex_unlock(&mvm->mutex);
2957 	return ret;
2958 }
2959 
2960 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
2961 					struct ieee80211_vif *vif,
2962 					struct ieee80211_key_conf *keyconf,
2963 					struct ieee80211_sta *sta,
2964 					u32 iv32, u16 *phase1key)
2965 {
2966 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2967 
2968 	if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
2969 		return;
2970 
2971 	iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
2972 }
2973 
2974 
2975 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
2976 			       struct iwl_rx_packet *pkt, void *data)
2977 {
2978 	struct iwl_mvm *mvm =
2979 		container_of(notif_wait, struct iwl_mvm, notif_wait);
2980 	struct iwl_hs20_roc_res *resp;
2981 	int resp_len = iwl_rx_packet_payload_len(pkt);
2982 	struct iwl_mvm_time_event_data *te_data = data;
2983 
2984 	if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
2985 		return true;
2986 
2987 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
2988 		IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
2989 		return true;
2990 	}
2991 
2992 	resp = (void *)pkt->data;
2993 
2994 	IWL_DEBUG_TE(mvm,
2995 		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
2996 		     resp->status, resp->event_unique_id);
2997 
2998 	te_data->uid = le32_to_cpu(resp->event_unique_id);
2999 	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3000 		     te_data->uid);
3001 
3002 	spin_lock_bh(&mvm->time_event_lock);
3003 	list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3004 	spin_unlock_bh(&mvm->time_event_lock);
3005 
3006 	return true;
3007 }
3008 
3009 #define AUX_ROC_MAX_DELAY_ON_CHANNEL 5000
3010 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3011 				    struct ieee80211_channel *channel,
3012 				    struct ieee80211_vif *vif,
3013 				    int duration)
3014 {
3015 	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3016 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3017 	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3018 	static const u8 time_event_response[] = { HOT_SPOT_CMD };
3019 	struct iwl_notification_wait wait_time_event;
3020 	struct iwl_hs20_roc_req aux_roc_req = {
3021 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3022 		.id_and_color =
3023 			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3024 		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3025 		/* Set the channel info data */
3026 		.channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
3027 			PHY_BAND_24 : PHY_BAND_5,
3028 		.channel_info.channel = channel->hw_value,
3029 		.channel_info.width = PHY_VHT_CHANNEL_MODE20,
3030 		/* Set the time and duration */
3031 		.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3032 		.apply_time_max_delay =
3033 			cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
3034 		.duration = cpu_to_le32(MSEC_TO_TU(duration)),
3035 	 };
3036 
3037 	/* Set the node address */
3038 	memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3039 
3040 	lockdep_assert_held(&mvm->mutex);
3041 
3042 	spin_lock_bh(&mvm->time_event_lock);
3043 
3044 	if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3045 		spin_unlock_bh(&mvm->time_event_lock);
3046 		return -EIO;
3047 	}
3048 
3049 	te_data->vif = vif;
3050 	te_data->duration = duration;
3051 	te_data->id = HOT_SPOT_CMD;
3052 
3053 	spin_unlock_bh(&mvm->time_event_lock);
3054 
3055 	/*
3056 	 * Use a notification wait, which really just processes the
3057 	 * command response and doesn't wait for anything, in order
3058 	 * to be able to process the response and get the UID inside
3059 	 * the RX path. Using CMD_WANT_SKB doesn't work because it
3060 	 * stores the buffer and then wakes up this thread, by which
3061 	 * time another notification (that the time event started)
3062 	 * might already be processed unsuccessfully.
3063 	 */
3064 	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3065 				   time_event_response,
3066 				   ARRAY_SIZE(time_event_response),
3067 				   iwl_mvm_rx_aux_roc, te_data);
3068 
3069 	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3070 				   &aux_roc_req);
3071 
3072 	if (res) {
3073 		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3074 		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3075 		goto out_clear_te;
3076 	}
3077 
3078 	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
3079 	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3080 	/* should never fail */
3081 	WARN_ON_ONCE(res);
3082 
3083 	if (res) {
3084  out_clear_te:
3085 		spin_lock_bh(&mvm->time_event_lock);
3086 		iwl_mvm_te_clear_data(mvm, te_data);
3087 		spin_unlock_bh(&mvm->time_event_lock);
3088 	}
3089 
3090 	return res;
3091 }
3092 
3093 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3094 		       struct ieee80211_vif *vif,
3095 		       struct ieee80211_channel *channel,
3096 		       int duration,
3097 		       enum ieee80211_roc_type type)
3098 {
3099 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3100 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3101 	struct cfg80211_chan_def chandef;
3102 	struct iwl_mvm_phy_ctxt *phy_ctxt;
3103 	int ret, i;
3104 
3105 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3106 			   duration, type);
3107 
3108 	flush_work(&mvm->roc_done_wk);
3109 
3110 	mutex_lock(&mvm->mutex);
3111 
3112 	switch (vif->type) {
3113 	case NL80211_IFTYPE_STATION:
3114 		if (mvm->fw->ucode_capa.capa[0] &
3115 		    IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
3116 			/* Use aux roc framework (HS20) */
3117 			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3118 						       vif, duration);
3119 			goto out_unlock;
3120 		}
3121 		IWL_ERR(mvm, "hotspot not supported\n");
3122 		ret = -EINVAL;
3123 		goto out_unlock;
3124 	case NL80211_IFTYPE_P2P_DEVICE:
3125 		/* handle below */
3126 		break;
3127 	default:
3128 		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3129 		ret = -EINVAL;
3130 		goto out_unlock;
3131 	}
3132 
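	/*
	 * Look for an existing PHY context that is already tuned to the
	 * requested channel, and rebind the P2P_DEVICE to it instead of
	 * reconfiguring the context it is currently bound to.
	 */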
3133 	for (i = 0; i < NUM_PHY_CTX; i++) {
3134 		phy_ctxt = &mvm->phy_ctxts[i];
3135 		if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3136 			continue;
3137 
3138 		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3139 			/*
3140 			 * Unbind the P2P_DEVICE from the current PHY context,
3141 			 * and if the PHY context is not used remove it.
3142 			 */
3143 			ret = iwl_mvm_binding_remove_vif(mvm, vif);
3144 			if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3145 				goto out_unlock;
3146 
3147 			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3148 
3149 			/* Bind the P2P_DEVICE to the current PHY Context */
3150 			mvmvif->phy_ctxt = phy_ctxt;
3151 
3152 			ret = iwl_mvm_binding_add_vif(mvm, vif);
3153 			if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3154 				goto out_unlock;
3155 
3156 			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3157 			goto schedule_time_event;
3158 		}
3159 	}
3160 
3161 	/* Need to update the PHY context only if the ROC channel changed */
3162 	if (channel == mvmvif->phy_ctxt->channel)
3163 		goto schedule_time_event;
3164 
3165 	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3166 
3167 	/*
3168 	 * Change the PHY context configuration as it is currently referenced
3169 	 * only by the P2P Device MAC
3170 	 */
3171 	if (mvmvif->phy_ctxt->ref == 1) {
3172 		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3173 					       &chandef, 1, 1);
3174 		if (ret)
3175 			goto out_unlock;
3176 	} else {
3177 		/*
3178 		 * The PHY context is shared with other MACs. Need to remove the
3179 		 * P2P Device from the binding, allocate a new PHY context and
3180 		 * create a new binding
3181 		 */
3182 		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3183 		if (!phy_ctxt) {
3184 			ret = -ENOSPC;
3185 			goto out_unlock;
3186 		}
3187 
3188 		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3189 					       1, 1);
3190 		if (ret) {
3191 			IWL_ERR(mvm, "Failed to change PHY context\n");
3192 			goto out_unlock;
3193 		}
3194 
3195 		/* Unbind the P2P_DEVICE from the current PHY context */
3196 		ret = iwl_mvm_binding_remove_vif(mvm, vif);
3197 		if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3198 			goto out_unlock;
3199 
3200 		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3201 
3202 		/* Bind the P2P_DEVICE to the new allocated PHY context */
3203 		mvmvif->phy_ctxt = phy_ctxt;
3204 
3205 		ret = iwl_mvm_binding_add_vif(mvm, vif);
3206 		if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3207 			goto out_unlock;
3208 
3209 		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3210 	}
3211 
3212 schedule_time_event:
3213 	/* Schedule the time events */
3214 	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3215 
3216 out_unlock:
3217 	mutex_unlock(&mvm->mutex);
3218 	IWL_DEBUG_MAC80211(mvm, "leave\n");
3219 	return ret;
3220 }
3221 
3222 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3223 {
3224 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3225 
3226 	IWL_DEBUG_MAC80211(mvm, "enter\n");
3227 
3228 	mutex_lock(&mvm->mutex);
3229 	iwl_mvm_stop_roc(mvm);
3230 	mutex_unlock(&mvm->mutex);
3231 
3232 	IWL_DEBUG_MAC80211(mvm, "leave\n");
3233 	return 0;
3234 }
3235 
3236 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3237 				 struct ieee80211_chanctx_conf *ctx)
3238 {
3239 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3240 	struct iwl_mvm_phy_ctxt *phy_ctxt;
3241 	int ret;
3242 
3243 	lockdep_assert_held(&mvm->mutex);
3244 
3245 	IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3246 
3247 	phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3248 	if (!phy_ctxt) {
3249 		ret = -ENOSPC;
3250 		goto out;
3251 	}
3252 
3253 	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3254 				       ctx->rx_chains_static,
3255 				       ctx->rx_chains_dynamic);
3256 	if (ret) {
3257 		IWL_ERR(mvm, "Failed to add PHY context\n");
3258 		goto out;
3259 	}
3260 
3261 	iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3262 	*phy_ctxt_id = phy_ctxt->id;
3263 out:
3264 	return ret;
3265 }
3266 
3267 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3268 			       struct ieee80211_chanctx_conf *ctx)
3269 {
3270 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3271 	int ret;
3272 
3273 	mutex_lock(&mvm->mutex);
3274 	ret = __iwl_mvm_add_chanctx(mvm, ctx);
3275 	mutex_unlock(&mvm->mutex);
3276 
3277 	return ret;
3278 }
3279 
3280 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3281 				     struct ieee80211_chanctx_conf *ctx)
3282 {
3283 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3284 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3285 
3286 	lockdep_assert_held(&mvm->mutex);
3287 
3288 	iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3289 }
3290 
3291 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3292 				   struct ieee80211_chanctx_conf *ctx)
3293 {
3294 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3295 
3296 	mutex_lock(&mvm->mutex);
3297 	__iwl_mvm_remove_chanctx(mvm, ctx);
3298 	mutex_unlock(&mvm->mutex);
3299 }
3300 
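/*
 * A PHY context that is shared by more than one MAC (ref > 1) can only
 * absorb width, RX-chain, radar and min-width updates; any other change
 * would disturb the other users, hence the WARN below.
 */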
3301 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3302 				   struct ieee80211_chanctx_conf *ctx,
3303 				   u32 changed)
3304 {
3305 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3306 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3307 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3308 
3309 	if (WARN_ONCE((phy_ctxt->ref > 1) &&
3310 		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3311 				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3312 				   IEEE80211_CHANCTX_CHANGE_RADAR |
3313 				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3314 		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
3315 		      phy_ctxt->ref, changed))
3316 		return;
3317 
3318 	mutex_lock(&mvm->mutex);
3319 	iwl_mvm_bt_coex_vif_change(mvm);
3320 	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3321 				 ctx->rx_chains_static,
3322 				 ctx->rx_chains_dynamic);
3323 	mutex_unlock(&mvm->mutex);
3324 }
3325 
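/*
 * Assigning a vif to a channel context is mostly about creating the
 * MAC<->PHY binding in the firmware.  AP and IBSS interfaces do their
 * binding in the start_ap/join_ibss flow, monitor interfaces disable
 * powersave and need an explicit quota update here, and a station that
 * is switching channel contexts (CSA) additionally gets a session
 * protection time event so the first beacon on the new channel is heard.
 */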
3326 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3327 					struct ieee80211_vif *vif,
3328 					struct ieee80211_chanctx_conf *ctx,
3329 					bool switching_chanctx)
3330 {
3331 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3332 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3333 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3334 	int ret;
3335 
3336 	lockdep_assert_held(&mvm->mutex);
3337 
3338 	mvmvif->phy_ctxt = phy_ctxt;
3339 
3340 	switch (vif->type) {
3341 	case NL80211_IFTYPE_AP:
3342 		/* only needed if we're switching chanctx (i.e. during CSA) */
3343 		if (switching_chanctx) {
3344 			mvmvif->ap_ibss_active = true;
3345 			break;
3346 		}
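		/* fall through - otherwise handled like ADHOC below */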
3347 	case NL80211_IFTYPE_ADHOC:
3348 		/*
3349 		 * The AP binding flow is handled as part of the start_ap flow
3350 		 * (in bss_info_changed), similarly for IBSS.
3351 		 */
3352 		ret = 0;
3353 		goto out;
3354 	case NL80211_IFTYPE_STATION:
3355 		break;
3356 	case NL80211_IFTYPE_MONITOR:
3357 		/* always disable PS when a monitor interface is active */
3358 		mvmvif->ps_disabled = true;
3359 		break;
3360 	default:
3361 		ret = -EINVAL;
3362 		goto out;
3363 	}
3364 
3365 	ret = iwl_mvm_binding_add_vif(mvm, vif);
3366 	if (ret)
3367 		goto out;
3368 
3369 	/*
3370 	 * Power state must be updated before quotas,
3371 	 * otherwise fw will complain.
3372 	 */
3373 	iwl_mvm_power_update_mac(mvm);
3374 
3375 	/* Setting the quota at this stage is only required for monitor
3376 	 * interfaces. For the other types, the bss_info changed flow
3377 	 * will handle quota settings.
3378 	 */
3379 	if (vif->type == NL80211_IFTYPE_MONITOR) {
3380 		mvmvif->monitor_active = true;
3381 		ret = iwl_mvm_update_quotas(mvm, false, NULL);
3382 		if (ret)
3383 			goto out_remove_binding;
3384 	}
3385 
3386 	/* Handle binding during CSA */
3387 	if (vif->type == NL80211_IFTYPE_AP) {
3388 		iwl_mvm_update_quotas(mvm, false, NULL);
3389 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3390 	}
3391 
3392 	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3393 		u32 duration = 2 * vif->bss_conf.beacon_int;
3394 
3395 		/* iwl_mvm_protect_session() reads directly from the
3396 		 * device (the system time), so make sure it is
3397 		 * available.
3398 		 */
3399 		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3400 		if (ret)
3401 			goto out_remove_binding;
3402 
3403 		/* Protect the session to make sure we hear the first
3404 		 * beacon on the new channel.
3405 		 */
3406 		iwl_mvm_protect_session(mvm, vif, duration, duration,
3407 					vif->bss_conf.beacon_int / 2,
3408 					true);
3409 
3410 		iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3411 
3412 		iwl_mvm_update_quotas(mvm, false, NULL);
3413 	}
3414 
3415 	goto out;
3416 
3417 out_remove_binding:
3418 	iwl_mvm_binding_remove_vif(mvm, vif);
3419 	iwl_mvm_power_update_mac(mvm);
3420 out:
3421 	if (ret)
3422 		mvmvif->phy_ctxt = NULL;
3423 	return ret;
3424 }
3425 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3426 				      struct ieee80211_vif *vif,
3427 				      struct ieee80211_chanctx_conf *ctx)
3428 {
3429 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3430 	int ret;
3431 
3432 	mutex_lock(&mvm->mutex);
3433 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3434 	mutex_unlock(&mvm->mutex);
3435 
3436 	return ret;
3437 }
3438 
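/*
 * Unassign mirrors the assign flow: cancel any pending time event, do the
 * per-interface-type cleanup, update the quotas (marking a station in CSA
 * as disabled), remove the MAC<->PHY binding and finally let the power
 * code recompute its state.
 */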
3439 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3440 					   struct ieee80211_vif *vif,
3441 					   struct ieee80211_chanctx_conf *ctx,
3442 					   bool switching_chanctx)
3443 {
3444 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3445 	struct ieee80211_vif *disabled_vif = NULL;
3446 
3447 	lockdep_assert_held(&mvm->mutex);
3448 
3449 	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3450 
3451 	switch (vif->type) {
3452 	case NL80211_IFTYPE_ADHOC:
3453 		goto out;
3454 	case NL80211_IFTYPE_MONITOR:
3455 		mvmvif->monitor_active = false;
3456 		mvmvif->ps_disabled = false;
3457 		break;
3458 	case NL80211_IFTYPE_AP:
3459 		/* This part is triggered only during CSA */
3460 		if (!switching_chanctx || !mvmvif->ap_ibss_active)
3461 			goto out;
3462 
3463 		mvmvif->csa_countdown = false;
3464 
3465 		/* Set CS bit on all the stations */
3466 		iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3467 
3468 		/* Save blocked iface, the timeout is set on the next beacon */
3469 		rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3470 
3471 		mvmvif->ap_ibss_active = false;
3472 		break;
3473 	case NL80211_IFTYPE_STATION:
3474 		if (!switching_chanctx)
3475 			break;
3476 
3477 		disabled_vif = vif;
3478 
3479 		iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3480 		break;
3481 	default:
3482 		break;
3483 	}
3484 
3485 	iwl_mvm_update_quotas(mvm, false, disabled_vif);
3486 	iwl_mvm_binding_remove_vif(mvm, vif);
3487 
3488 out:
3489 	mvmvif->phy_ctxt = NULL;
3490 	iwl_mvm_power_update_mac(mvm);
3491 }
3492 
3493 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3494 					 struct ieee80211_vif *vif,
3495 					 struct ieee80211_chanctx_conf *ctx)
3496 {
3497 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3498 
3499 	mutex_lock(&mvm->mutex);
3500 	__iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3501 	mutex_unlock(&mvm->mutex);
3502 }
3503 
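/*
 * Swap-mode channel switch: tear the vif off the old context, drop the
 * old context, then add and assign the new one.  Every failure step tries
 * to put the old context back; if even that fails, the only safe option
 * left is a full firmware restart.
 */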
3504 static int
3505 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3506 				struct ieee80211_vif_chanctx_switch *vifs)
3507 {
3508 	int ret;
3509 
3510 	mutex_lock(&mvm->mutex);
3511 	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3512 	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3513 
3514 	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3515 	if (ret) {
3516 		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3517 		goto out_reassign;
3518 	}
3519 
3520 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3521 					   true);
3522 	if (ret) {
3523 		IWL_ERR(mvm,
3524 			"failed to assign new_ctx during channel switch\n");
3525 		goto out_remove;
3526 	}
3527 
3528 	/* we don't support TDLS during DCM - can be caused by channel switch */
3529 	if (iwl_mvm_phy_ctx_count(mvm) > 1)
3530 		iwl_mvm_teardown_tdls_peers(mvm);
3531 
3532 	goto out;
3533 
3534 out_remove:
3535 	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3536 
3537 out_reassign:
3538 	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3539 		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3540 		goto out_restart;
3541 	}
3542 
3543 	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3544 					 true)) {
3545 		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3546 		goto out_restart;
3547 	}
3548 
3549 	goto out;
3550 
3551 out_restart:
3552 	/* things keep failing, better restart the hw */
3553 	iwl_mvm_nic_restart(mvm, false);
3554 
3555 out:
3556 	mutex_unlock(&mvm->mutex);
3557 
3558 	return ret;
3559 }
3560 
3561 static int
3562 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3563 				    struct ieee80211_vif_chanctx_switch *vifs)
3564 {
3565 	int ret;
3566 
3567 	mutex_lock(&mvm->mutex);
3568 	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3569 
3570 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3571 					   true);
3572 	if (ret) {
3573 		IWL_ERR(mvm,
3574 			"failed to assign new_ctx during channel switch\n");
3575 		goto out_reassign;
3576 	}
3577 
3578 	goto out;
3579 
3580 out_reassign:
3581 	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3582 					 true)) {
3583 		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3584 		goto out_restart;
3585 	}
3586 
3587 	goto out;
3588 
3589 out_restart:
3590 	/* things keep failing, better restart the hw */
3591 	iwl_mvm_nic_restart(mvm, false);
3592 
3593 out:
3594 	mutex_unlock(&mvm->mutex);
3595 
3596 	return ret;
3597 }
3598 
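/*
 * mac80211 entry point for vif channel-context switching.  Only a single
 * vif per switch is supported; SWAP_CONTEXTS replaces the context itself,
 * while REASSIGN_VIF keeps both contexts alive and only moves the vif.
 */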
3599 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3600 				      struct ieee80211_vif_chanctx_switch *vifs,
3601 				      int n_vifs,
3602 				      enum ieee80211_chanctx_switch_mode mode)
3603 {
3604 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3605 	int ret;
3606 
3607 	/* we only support a single vif right now */
3608 	if (n_vifs > 1)
3609 		return -EOPNOTSUPP;
3610 
3611 	switch (mode) {
3612 	case CHANCTX_SWMODE_SWAP_CONTEXTS:
3613 		ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3614 		break;
3615 	case CHANCTX_SWMODE_REASSIGN_VIF:
3616 		ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3617 		break;
3618 	default:
3619 		ret = -EOPNOTSUPP;
3620 		break;
3621 	}
3622 
3623 	return ret;
3624 }
3625 
3626 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3627 			   struct ieee80211_sta *sta,
3628 			   bool set)
3629 {
3630 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3631 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3632 
3633 	if (!mvm_sta || !mvm_sta->vif) {
3634 		IWL_ERR(mvm, "Station is not associated with a vif\n");
3635 		return -EINVAL;
3636 	}
3637 
3638 	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3639 }
3640 
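/*
 * nl80211 testmode hooks (compiled in only with CONFIG_NL80211_TESTMODE):
 * the blob from userspace is parsed against iwl_mvm_tm_policy and
 * currently supports injecting a P2P GO NoA duration and toggling the
 * beacon filter on an associated client vif.
 */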
3641 #ifdef CONFIG_NL80211_TESTMODE
3642 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3643 	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3644 	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3645 	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3646 };
3647 
3648 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3649 				      struct ieee80211_vif *vif,
3650 				      void *data, int len)
3651 {
3652 	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3653 	int err;
3654 	u32 noa_duration;
3655 
3656 	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3657 	if (err)
3658 		return err;
3659 
3660 	if (!tb[IWL_MVM_TM_ATTR_CMD])
3661 		return -EINVAL;
3662 
3663 	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3664 	case IWL_MVM_TM_CMD_SET_NOA:
3665 		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3666 		    !vif->bss_conf.enable_beacon ||
3667 		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3668 			return -EINVAL;
3669 
3670 		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3671 		if (noa_duration >= vif->bss_conf.beacon_int)
3672 			return -EINVAL;
3673 
3674 		mvm->noa_duration = noa_duration;
3675 		mvm->noa_vif = vif;
3676 
3677 		return iwl_mvm_update_quotas(mvm, false, NULL);
3678 	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3679 		/* must be associated client vif - ignore authorized */
3680 		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3681 		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3682 		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3683 			return -EINVAL;
3684 
3685 		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3686 			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3687 		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3688 	}
3689 
3690 	return -EOPNOTSUPP;
3691 }
3692 
3693 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3694 				    struct ieee80211_vif *vif,
3695 				    void *data, int len)
3696 {
3697 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3698 	int err;
3699 
3700 	mutex_lock(&mvm->mutex);
3701 	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3702 	mutex_unlock(&mvm->mutex);
3703 
3704 	return err;
3705 }
3706 #endif
3707 
3708 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3709 				   struct ieee80211_vif *vif,
3710 				   struct ieee80211_channel_switch *chsw)
3711 {
3712 	/* By implementing this operation, we prevent mac80211 from
3713 	 * starting its own channel switch timer, so that we can call
3714 	 * ieee80211_chswitch_done() ourselves at the right time
3715 	 * (which is when the absence time event starts).
3716 	 */
3717 
3718 	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3719 			   "dummy channel switch op\n");
3720 }
3721 
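/*
 * pre_channel_switch: on the AP side only one CSA may be active at a
 * time, so the vif is recorded in mvm->csa_vif and concurrent attempts
 * are rejected.  On the client side an absence time event is scheduled
 * so the device is on the new channel slightly before beacon 1: the
 * apply time is device_timestamp plus
 * (beacon_int * (count - 1) - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) TUs,
 * the * 1024 converting TUs to microseconds.  Beacon filtering and
 * powersave are disabled for the duration of the switch.
 */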
3722 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3723 				      struct ieee80211_vif *vif,
3724 				      struct ieee80211_channel_switch *chsw)
3725 {
3726 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3727 	struct ieee80211_vif *csa_vif;
3728 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3729 	u32 apply_time;
3730 	int ret;
3731 
3732 	mutex_lock(&mvm->mutex);
3733 
3734 	mvmvif->csa_failed = false;
3735 
3736 	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3737 			   chsw->chandef.center_freq1);
3738 
3739 	iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3740 
3741 	switch (vif->type) {
3742 	case NL80211_IFTYPE_AP:
3743 		csa_vif =
3744 			rcu_dereference_protected(mvm->csa_vif,
3745 						  lockdep_is_held(&mvm->mutex));
3746 		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3747 			      "Another CSA is already in progress")) {
3748 			ret = -EBUSY;
3749 			goto out_unlock;
3750 		}
3751 
3752 		rcu_assign_pointer(mvm->csa_vif, vif);
3753 
3754 		if (WARN_ONCE(mvmvif->csa_countdown,
3755 			      "Previous CSA countdown didn't complete")) {
3756 			ret = -EBUSY;
3757 			goto out_unlock;
3758 		}
3759 
3760 		break;
3761 	case NL80211_IFTYPE_STATION:
3762 		/* Schedule the time event to a bit before beacon 1,
3763 		 * to make sure we're in the new channel when the
3764 		 * GO/AP arrives.
3765 		 */
3766 		apply_time = chsw->device_timestamp +
3767 			((vif->bss_conf.beacon_int * (chsw->count - 1) -
3768 			  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
3769 
3770 		if (chsw->block_tx)
3771 			iwl_mvm_csa_client_absent(mvm, vif);
3772 
3773 		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3774 					    apply_time);
3775 		if (mvmvif->bf_data.bf_enabled) {
3776 			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3777 			if (ret)
3778 				goto out_unlock;
3779 		}
3780 
3781 		break;
3782 	default:
3783 		break;
3784 	}
3785 
3786 	mvmvif->ps_disabled = true;
3787 
3788 	ret = iwl_mvm_power_update_ps(mvm);
3789 	if (ret)
3790 		goto out_unlock;
3791 
3792 	/* we won't be on this channel any longer */
3793 	iwl_mvm_teardown_tdls_peers(mvm);
3794 
3795 out_unlock:
3796 	mutex_unlock(&mvm->mutex);
3797 
3798 	return ret;
3799 }
3800 
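/*
 * post_channel_switch: once the switch has completed, re-enable TX to the
 * AP station, refresh the MAC context, turn beacon filtering back on,
 * drop the session protection and restore powersave.
 */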
3801 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3802 				       struct ieee80211_vif *vif)
3803 {
3804 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3805 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3806 	int ret;
3807 
3808 	mutex_lock(&mvm->mutex);
3809 
3810 	if (mvmvif->csa_failed) {
3811 		mvmvif->csa_failed = false;
3812 		ret = -EIO;
3813 		goto out_unlock;
3814 	}
3815 
3816 	if (vif->type == NL80211_IFTYPE_STATION) {
3817 		struct iwl_mvm_sta *mvmsta;
3818 
3819 		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3820 							  mvmvif->ap_sta_id);
3821 
3822 		if (WARN_ON(!mvmsta)) {
3823 			ret = -EIO;
3824 			goto out_unlock;
3825 		}
3826 
3827 		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3828 
3829 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3830 
3831 		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3832 		if (ret)
3833 			goto out_unlock;
3834 
3835 		iwl_mvm_stop_session_protection(mvm, vif);
3836 	}
3837 
3838 	mvmvif->ps_disabled = false;
3839 
3840 	ret = iwl_mvm_power_update_ps(mvm);
3841 
3842 out_unlock:
3843 	mutex_unlock(&mvm->mutex);
3844 
3845 	return ret;
3846 }
3847 
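/*
 * Flush builds a TFD queue mask covering the AP station and any TDLS
 * peers that belong to this vif.  With drop=true the firmware discards
 * the pending frames; otherwise we just wait for the queues to drain,
 * which is done without holding the mutex since it can take a while.
 */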
3848 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3849 			      struct ieee80211_vif *vif, u32 queues, bool drop)
3850 {
3851 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3852 	struct iwl_mvm_vif *mvmvif;
3853 	struct iwl_mvm_sta *mvmsta;
3854 	struct ieee80211_sta *sta;
3855 	int i;
3856 	u32 msk = 0;
3857 
3858 	if (!vif || vif->type != NL80211_IFTYPE_STATION)
3859 		return;
3860 
3861 	mutex_lock(&mvm->mutex);
3862 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
3863 
3864 	/* flush the AP-station and all TDLS peers */
3865 	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3866 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3867 						lockdep_is_held(&mvm->mutex));
3868 		if (IS_ERR_OR_NULL(sta))
3869 			continue;
3870 
3871 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
3872 		if (mvmsta->vif != vif)
3873 			continue;
3874 
3875 		/* make sure only TDLS peers or the AP are flushed */
3876 		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3877 
3878 		msk |= mvmsta->tfd_queue_msk;
3879 	}
3880 
3881 	if (drop) {
3882 		if (iwl_mvm_flush_tx_path(mvm, msk, true))
3883 			IWL_ERR(mvm, "flush request failed\n");
3884 		mutex_unlock(&mvm->mutex);
3885 	} else {
3886 		mutex_unlock(&mvm->mutex);
3887 
3888 		/* this can take a while, and we may need/want other operations
3889 		 * to succeed while doing this, so do it without the mutex held
3890 		 */
3891 		iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3892 	}
3893 }
3894 
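/*
 * The survey only exposes global radio statistics (idx 0) and requires a
 * firmware with the radio/beacon statistics capability.  Accumulated and
 * current counters are summed and converted from microseconds to
 * milliseconds before being reported.
 */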
3895 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3896 				  struct survey_info *survey)
3897 {
3898 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3899 	int ret;
3900 
3901 	memset(survey, 0, sizeof(*survey));
3902 
3903 	/* only support global statistics right now */
3904 	if (idx != 0)
3905 		return -ENOENT;
3906 
3907 	if (!(mvm->fw->ucode_capa.capa[0] &
3908 			IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3909 		return -ENOENT;
3910 
3911 	mutex_lock(&mvm->mutex);
3912 
3913 	if (mvm->ucode_loaded) {
3914 		ret = iwl_mvm_request_statistics(mvm, false);
3915 		if (ret)
3916 			goto out;
3917 	}
3918 
3919 	survey->filled = SURVEY_INFO_TIME |
3920 			 SURVEY_INFO_TIME_RX |
3921 			 SURVEY_INFO_TIME_TX |
3922 			 SURVEY_INFO_TIME_SCAN;
3923 	survey->time = mvm->accu_radio_stats.on_time_rf +
3924 		       mvm->radio_stats.on_time_rf;
3925 	do_div(survey->time, USEC_PER_MSEC);
3926 
3927 	survey->time_rx = mvm->accu_radio_stats.rx_time +
3928 			  mvm->radio_stats.rx_time;
3929 	do_div(survey->time_rx, USEC_PER_MSEC);
3930 
3931 	survey->time_tx = mvm->accu_radio_stats.tx_time +
3932 			  mvm->radio_stats.tx_time;
3933 	do_div(survey->time_tx, USEC_PER_MSEC);
3934 
3935 	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
3936 			    mvm->radio_stats.on_time_scan;
3937 	do_div(survey->time_scan, USEC_PER_MSEC);
3938 
3939 	ret = 0;
3940  out:
3941 	mutex_unlock(&mvm->mutex);
3942 	return ret;
3943 }
3944 
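/*
 * Beacon statistics are only reported when the firmware supports them and
 * beacon filtering is active (otherwise mac80211 counts beacons itself),
 * and only for the station the vif is actually associated with.
 */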
3945 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
3946 				       struct ieee80211_vif *vif,
3947 				       struct ieee80211_sta *sta,
3948 				       struct station_info *sinfo)
3949 {
3950 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3951 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3952 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3953 
3954 	if (!(mvm->fw->ucode_capa.capa[0] &
3955 				IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3956 		return;
3957 
3958 	/* if beacon filtering isn't on, mac80211 does it anyway */
3959 	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
3960 		return;
3961 
3962 	if (!vif->bss_conf.assoc)
3963 		return;
3964 
3965 	mutex_lock(&mvm->mutex);
3966 
3967 	if (mvmvif->ap_sta_id != mvmsta->sta_id)
3968 		goto unlock;
3969 
3970 	if (iwl_mvm_request_statistics(mvm, false))
3971 		goto unlock;
3972 
3973 	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
3974 			   mvmvif->beacon_stats.accu_num_beacons;
3975 	sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
3976 	if (mvmvif->beacon_stats.avg_signal) {
3977 		/* firmware only reports a value after RXing a few beacons */
3978 		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
3979 		sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
3980 	}
3981  unlock:
3982 	mutex_unlock(&mvm->mutex);
3983 }
3984 
3985 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
3986 				       struct ieee80211_vif *vif,
3987 				       const struct ieee80211_event *event)
3988 {
3989 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)	\
3990 	do {							\
3991 		if ((_cnt) && --(_cnt))				\
3992 			break;					\
3993 		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
3994 	} while (0)
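	/*
	 * Each MLME event type has its own occurrence counter in the
	 * trigger TLV; the macro above decrements it and only collects
	 * firmware debug data once it hits zero (or was never set).
	 */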
3995 
3996 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3997 	struct iwl_fw_dbg_trigger_tlv *trig;
3998 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
3999 
4000 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4001 		return;
4002 
4003 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4004 	trig_mlme = (void *)trig->data;
4005 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4006 		return;
4007 
4008 	if (event->u.mlme.data == ASSOC_EVENT) {
4009 		if (event->u.mlme.status == MLME_DENIED)
4010 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4011 					   trig_mlme->stop_assoc_denied,
4012 					   "DENIED ASSOC: reason %d",
4013 					    event->u.mlme.reason);
4014 		else if (event->u.mlme.status == MLME_TIMEOUT)
4015 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4016 					   trig_mlme->stop_assoc_timeout,
4017 					   "ASSOC TIMEOUT");
4018 	} else if (event->u.mlme.data == AUTH_EVENT) {
4019 		if (event->u.mlme.status == MLME_DENIED)
4020 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4021 					   trig_mlme->stop_auth_denied,
4022 					   "DENIED AUTH: reason %d",
4023 					   event->u.mlme.reason);
4024 		else if (event->u.mlme.status == MLME_TIMEOUT)
4025 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4026 					   trig_mlme->stop_auth_timeout,
4027 					   "AUTH TIMEOUT");
4028 	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4029 		CHECK_MLME_TRIGGER(mvm, trig, buf,
4030 				   trig_mlme->stop_rx_deauth,
4031 				   "DEAUTH RX %d", event->u.mlme.reason);
4032 	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4033 		CHECK_MLME_TRIGGER(mvm, trig, buf,
4034 				   trig_mlme->stop_tx_deauth,
4035 				   "DEAUTH TX %d", event->u.mlme.reason);
4036 	}
4037 #undef CHECK_MLME_TRIGGER
4038 }
4039 
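/*
 * This table ties all the callbacks in this file into mac80211.  As a
 * minimal sketch (the real allocation/registration code lives elsewhere
 * in the driver and may differ in detail), an ops table like this is
 * typically registered as follows:
 *
 *	struct ieee80211_hw *hw;
 *
 *	hw = ieee80211_alloc_hw(priv_size, &iwl_mvm_hw_ops);
 *	if (!hw)
 *		return -ENOMEM;
 *	... set hw capabilities, bands, etc. ...
 *	err = ieee80211_register_hw(hw);
 *
 * where priv_size is the driver-private area mac80211 allocates next to
 * struct ieee80211_hw.
 */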
4040 const struct ieee80211_ops iwl_mvm_hw_ops = {
4041 	.tx = iwl_mvm_mac_tx,
4042 	.ampdu_action = iwl_mvm_mac_ampdu_action,
4043 	.start = iwl_mvm_mac_start,
4044 	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
4045 	.stop = iwl_mvm_mac_stop,
4046 	.add_interface = iwl_mvm_mac_add_interface,
4047 	.remove_interface = iwl_mvm_mac_remove_interface,
4048 	.config = iwl_mvm_mac_config,
4049 	.prepare_multicast = iwl_mvm_prepare_multicast,
4050 	.configure_filter = iwl_mvm_configure_filter,
4051 	.bss_info_changed = iwl_mvm_bss_info_changed,
4052 	.hw_scan = iwl_mvm_mac_hw_scan,
4053 	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4054 	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4055 	.sta_state = iwl_mvm_mac_sta_state,
4056 	.sta_notify = iwl_mvm_mac_sta_notify,
4057 	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4058 	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4059 	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4060 	.sta_rc_update = iwl_mvm_sta_rc_update,
4061 	.conf_tx = iwl_mvm_mac_conf_tx,
4062 	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4063 	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4064 	.flush = iwl_mvm_mac_flush,
4065 	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
4066 	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4067 	.set_key = iwl_mvm_mac_set_key,
4068 	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
4069 	.remain_on_channel = iwl_mvm_roc,
4070 	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
4071 	.add_chanctx = iwl_mvm_add_chanctx,
4072 	.remove_chanctx = iwl_mvm_remove_chanctx,
4073 	.change_chanctx = iwl_mvm_change_chanctx,
4074 	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4075 	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4076 	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4077 
4078 	.start_ap = iwl_mvm_start_ap_ibss,
4079 	.stop_ap = iwl_mvm_stop_ap_ibss,
4080 	.join_ibss = iwl_mvm_start_ap_ibss,
4081 	.leave_ibss = iwl_mvm_stop_ap_ibss,
4082 
4083 	.set_tim = iwl_mvm_set_tim,
4084 
4085 	.channel_switch = iwl_mvm_channel_switch,
4086 	.pre_channel_switch = iwl_mvm_pre_channel_switch,
4087 	.post_channel_switch = iwl_mvm_post_channel_switch,
4088 
4089 	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4090 	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4091 	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4092 
4093 	.event_callback = iwl_mvm_mac_event_callback,
4094 
4095 	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4096 
4097 #ifdef CONFIG_PM_SLEEP
4098 	/* look at d3.c */
4099 	.suspend = iwl_mvm_suspend,
4100 	.resume = iwl_mvm_resume,
4101 	.set_wakeup = iwl_mvm_set_wakeup,
4102 	.set_rekey_data = iwl_mvm_set_rekey_data,
4103 #if IS_ENABLED(CONFIG_IPV6)
4104 	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4105 #endif
4106 	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4107 #endif
4108 	.get_survey = iwl_mvm_mac_get_survey,
4109 	.sta_statistics = iwl_mvm_mac_sta_statistics,
4110 };
4111