1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license.  When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 *  Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 *  * Redistributions of source code must retain the above copyright
43 *    notice, this list of conditions and the following disclaimer.
44 *  * Redistributions in binary form must reproduce the above copyright
45 *    notice, this list of conditions and the following disclaimer in
46 *    the documentation and/or other materials provided with the
47 *    distribution.
48 *  * Neither the name Intel Corporation nor the names of its
49 *    contributors may be used to endorse or promote products derived
50 *    from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/etherdevice.h>
67#include <net/mac80211.h>
68
69#include "mvm.h"
70#include "fw-api-scan.h"
71
72#define IWL_DENSE_EBS_SCAN_RATIO 5
73#define IWL_SPARSE_EBS_SCAN_RATIO 1
74
75enum iwl_mvm_scan_type {
76	IWL_SCAN_TYPE_UNASSOC,
77	IWL_SCAN_TYPE_WILD,
78	IWL_SCAN_TYPE_MILD,
79	IWL_SCAN_TYPE_FRAGMENTED,
80};
81
82enum iwl_mvm_traffic_load {
83	IWL_MVM_TRAFFIC_LOW,
84	IWL_MVM_TRAFFIC_MEDIUM,
85	IWL_MVM_TRAFFIC_HIGH,
86};
87
88struct iwl_mvm_scan_timing_params {
89	u32 dwell_active;
90	u32 dwell_passive;
91	u32 dwell_fragmented;
92	u32 suspend_time;
93	u32 max_out_time;
94};
95
96static struct iwl_mvm_scan_timing_params scan_timing[] = {
97	[IWL_SCAN_TYPE_UNASSOC] = {
98		.dwell_active = 10,
99		.dwell_passive = 110,
100		.dwell_fragmented = 44,
101		.suspend_time = 0,
102		.max_out_time = 0,
103	},
104	[IWL_SCAN_TYPE_WILD] = {
105		.dwell_active = 10,
106		.dwell_passive = 110,
107		.dwell_fragmented = 44,
108		.suspend_time = 30,
109		.max_out_time = 120,
110	},
111	[IWL_SCAN_TYPE_MILD] = {
112		.dwell_active = 10,
113		.dwell_passive = 110,
114		.dwell_fragmented = 44,
115		.suspend_time = 120,
116		.max_out_time = 120,
117	},
118	[IWL_SCAN_TYPE_FRAGMENTED] = {
119		.dwell_active = 10,
120		.dwell_passive = 110,
121		.dwell_fragmented = 44,
122		.suspend_time = 95,
123		.max_out_time = 44,
124	},
125};
126
127struct iwl_mvm_scan_params {
128	enum iwl_mvm_scan_type type;
129	u32 n_channels;
130	u16 delay;
131	int n_ssids;
132	struct cfg80211_ssid *ssids;
133	struct ieee80211_channel **channels;
134	u32 flags;
135	u8 *mac_addr;
136	u8 *mac_addr_mask;
137	bool no_cck;
138	bool pass_all;
139	int n_match_sets;
140	struct iwl_scan_probe_req preq;
141	struct cfg80211_match_set *match_sets;
142	int n_scan_plans;
143	struct cfg80211_sched_scan_plan *scan_plans;
144};
145
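/* Return the RX antenna mask to use for scanning: the configured scan
 * antenna override, if set, otherwise all valid RX antennas.
 */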
146static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
147{
148	if (mvm->scan_rx_ant != ANT_NONE)
149		return mvm->scan_rx_ant;
150	return iwl_mvm_get_valid_rx_ant(mvm);
151}
152
153static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
154{
155	u16 rx_chain;
156	u8 rx_ant;
157
158	rx_ant = iwl_mvm_scan_rx_ant(mvm);
159	rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
160	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
161	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
162	rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
163	return cpu_to_le16(rx_chain);
164}
165
166static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
167{
168	if (band == IEEE80211_BAND_2GHZ)
169		return cpu_to_le32(PHY_BAND_24);
170	else
171		return cpu_to_le32(PHY_BAND_5);
172}
173
174static inline __le32
175iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
176			  bool no_cck)
177{
178	u32 tx_ant;
179
180	mvm->scan_last_antenna_idx =
181		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
182				     mvm->scan_last_antenna_idx);
183	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
184
185	if (band == IEEE80211_BAND_2GHZ && !no_cck)
186		return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
187				   tx_ant);
188	else
189		return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
190}
191
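/* Interface iterator: count interfaces (other than the P2P Device) that
 * are bound to a valid PHY context.
 */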
192static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
193					    struct ieee80211_vif *vif)
194{
195	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
196	int *global_cnt = data;
197
198	if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
199	    mvmvif->phy_ctxt->id < MAX_PHYS)
200		*global_cnt += 1;
201}
202
203static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
204{
205	return IWL_MVM_TRAFFIC_LOW;
206}
207
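/* Pick the scan timing profile: UNASSOC when no interface is bound to a
 * PHY context, FRAGMENTED under high load or low latency when the FW
 * supports it (and it's not a P2P Device scan), MILD for medium load or
 * low latency, and WILD otherwise.
 */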
208static enum
209iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
210					struct ieee80211_vif *vif,
211					struct iwl_mvm_scan_params *params)
212{
213	int global_cnt = 0;
214	enum iwl_mvm_traffic_load load;
215	bool low_latency;
216
217	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
218					    IEEE80211_IFACE_ITER_NORMAL,
219					    iwl_mvm_scan_condition_iterator,
220					    &global_cnt);
221	if (!global_cnt)
222		return IWL_SCAN_TYPE_UNASSOC;
223
224	load = iwl_mvm_get_traffic_load(mvm);
225	low_latency = iwl_mvm_low_latency(mvm);
226
227	if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
228	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
229	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
230		return IWL_SCAN_TYPE_FRAGMENTED;
231
232	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
233		return IWL_SCAN_TYPE_MILD;
234
235	return IWL_SCAN_TYPE_WILD;
236}
237
238static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
239{
240	/* require rrm scan whenever the fw supports it */
241	return fw_has_capa(&mvm->fw->ucode_capa,
242			   IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
243}
244
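/* Room left for user-supplied IEs in the FW probe request buffer, after
 * the 802.11 header and SSID element built by the driver and, if an RRM
 * scan is needed, the DS Parameter Set element.
 */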
245static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
246{
247	int max_probe_len;
248
249	max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
250
251	/* we create the 802.11 header and SSID element */
252	max_probe_len -= 24 + 2;
253
254	/* DS parameter set element is added on 2.4GHZ band if required */
255	if (iwl_mvm_rrm_scan_needed(mvm))
256		max_probe_len -= 3;
257
258	return max_probe_len;
259}
260
261int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
262{
263	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
264
265	/* TODO: [BUG] This function should return the maximum allowed size of
266	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
267	 * in the same command. So the correct implementation of this function
268	 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
269	 * command has only 512 bytes and it would leave us with about 240
270	 * bytes for scan IEs, which is clearly not enough. So meanwhile
	 * we will report an incorrect value. This may cause a scan request in
	 * iwl_mvm_reg_scan_start() or iwl_mvm_sched_scan_start() to fail with
	 * -ENOBUFS if a large enough probe request is provided.
	 */
275	return max_ie_len;
276}
277
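/* Format the channel numbers from the scan results notification into buf;
 * used only for debug prints.
 */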
278static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
279				     int num_res, u8 *buf, size_t buf_size)
280{
281	int i;
282	u8 *pos = buf, *end = buf + buf_size;
283
284	for (i = 0; pos < end && i < num_res; i++)
285		pos += snprintf(pos, end - pos, " %u", res[i].channel);
286
287	/* terminate the string in case the buffer was too short */
288	*(buf + buf_size - 1) = '\0';
289
290	return buf;
291}
292
293void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
294					      struct iwl_rx_cmd_buffer *rxb)
295{
296	struct iwl_rx_packet *pkt = rxb_addr(rxb);
297	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
298	u8 buf[256];
299
300	IWL_DEBUG_SCAN(mvm,
301		       "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
302		       notif->status, notif->scanned_channels,
303		       iwl_mvm_dump_channel_list(notif->results,
304						 notif->scanned_channels, buf,
305						 sizeof(buf)));
306}
307
308void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
309				 struct iwl_rx_cmd_buffer *rxb)
310{
311	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
312	ieee80211_sched_scan_results(mvm->hw);
313}
314
315static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
316{
317	switch (status) {
318	case IWL_SCAN_EBS_SUCCESS:
319		return "successful";
320	case IWL_SCAN_EBS_INACTIVE:
321		return "inactive";
322	case IWL_SCAN_EBS_FAILED:
323	case IWL_SCAN_EBS_CHAN_NOT_FOUND:
324	default:
325		return "failed";
326	}
327}
328
329void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
330					 struct iwl_rx_cmd_buffer *rxb)
331{
332	struct iwl_rx_packet *pkt = rxb_addr(rxb);
333	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
334	bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
335
336	/* scan status must be locked for proper checking */
337	lockdep_assert_held(&mvm->mutex);
338
339	/* We first check if we were stopping a scan, in which case we
340	 * just clear the stopping flag.  Then we check if it was a
341	 * firmware initiated stop, in which case we need to inform
342	 * mac80211.
343	 * Note that we can have a stopping and a running scan
344	 * simultaneously, but we can't have two different types of
345	 * scans stopping or running at the same time (since LMAC
346	 * doesn't support it).
347	 */
348
349	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
350		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
351
352		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
353			       aborted ? "aborted" : "completed",
354			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
355		IWL_DEBUG_SCAN(mvm,
356			       "Last line %d, Last iteration %d, Time after last iteration %d\n",
357			       scan_notif->last_schedule_line,
358			       scan_notif->last_schedule_iteration,
359			       __le32_to_cpu(scan_notif->time_after_last_iter));
360
361		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
362	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
363		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
364			       aborted ? "aborted" : "completed",
365			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
366
367		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
368	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
369		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
370
371		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
372			       aborted ? "aborted" : "completed",
373			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
374		IWL_DEBUG_SCAN(mvm,
375			       "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
376			       scan_notif->last_schedule_line,
377			       scan_notif->last_schedule_iteration,
378			       __le32_to_cpu(scan_notif->time_after_last_iter));
379
380		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
381		ieee80211_sched_scan_stopped(mvm->hw);
382	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
383		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
384			       aborted ? "aborted" : "completed",
385			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
386
387		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
388		ieee80211_scan_completed(mvm->hw,
389				scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
390		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
391	}
392
393	mvm->last_ebs_successful =
394			scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
395			scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
396}
397
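/* Return the index of the given SSID in ssid_list, or -1 if it isn't
 * there. The list is terminated by the first zero-length entry.
 */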
398static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
399{
400	int i;
401
402	for (i = 0; i < PROBE_OPTION_MAX; i++) {
403		if (!ssid_list[i].len)
404			break;
405		if (ssid_list[i].len == ssid_len &&
		    !memcmp(ssid_list[i].ssid, ssid, ssid_len))
407			return i;
408	}
409	return -1;
410}
411
/* We insert the SSIDs in inverted order, because the FW will
 * invert them back.
 */
415static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
416				 struct iwl_ssid_ie *ssids,
417				 u32 *ssid_bitmap)
418{
419	int i, j;
420	int index;
421
422	/*
423	 * copy SSIDs from match list.
424	 * iwl_config_sched_scan_profiles() uses the order of these ssids to
425	 * config match list.
426	 */
427	for (i = 0, j = params->n_match_sets - 1;
428	     j >= 0 && i < PROBE_OPTION_MAX;
429	     i++, j--) {
430		/* skip empty SSID matchsets */
431		if (!params->match_sets[j].ssid.ssid_len)
432			continue;
433		ssids[i].id = WLAN_EID_SSID;
434		ssids[i].len = params->match_sets[j].ssid.ssid_len;
435		memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
436		       ssids[i].len);
437	}
438
439	/* add SSIDs from scan SSID list */
440	*ssid_bitmap = 0;
441	for (j = params->n_ssids - 1;
442	     j >= 0 && i < PROBE_OPTION_MAX;
443	     i++, j--) {
444		index = iwl_ssid_exist(params->ssids[j].ssid,
445				       params->ssids[j].ssid_len,
446				       ssids);
447		if (index < 0) {
448			ssids[i].id = WLAN_EID_SSID;
449			ssids[i].len = params->ssids[j].ssid_len;
450			memcpy(ssids[i].ssid, params->ssids[j].ssid,
451			       ssids[i].len);
452			*ssid_bitmap |= BIT(i);
453		} else {
454			*ssid_bitmap |= BIT(index);
455		}
456	}
457}
458
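/* Send the scheduled scan match-set profiles to the firmware together with
 * an (empty) blacklist. Every profile matches any cipher and auth algorithm
 * and notifies the sched scan client.
 */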
459static int
460iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
461				   struct cfg80211_sched_scan_request *req)
462{
463	struct iwl_scan_offload_profile *profile;
464	struct iwl_scan_offload_profile_cfg *profile_cfg;
465	struct iwl_scan_offload_blacklist *blacklist;
466	struct iwl_host_cmd cmd = {
467		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
468		.len[1] = sizeof(*profile_cfg),
469		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
470		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
471	};
472	int blacklist_len;
473	int i;
474	int ret;
475
476	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
477		return -EIO;
478
479	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
480		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
481	else
482		blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
483
484	blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
485	if (!blacklist)
486		return -ENOMEM;
487
488	profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
489	if (!profile_cfg) {
490		ret = -ENOMEM;
491		goto free_blacklist;
492	}
493
494	cmd.data[0] = blacklist;
495	cmd.len[0] = sizeof(*blacklist) * blacklist_len;
496	cmd.data[1] = profile_cfg;
497
498	/* No blacklist configuration */
499
500	profile_cfg->num_profiles = req->n_match_sets;
501	profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
502	profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
503	profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
504	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
505		profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
506
507	for (i = 0; i < req->n_match_sets; i++) {
508		profile = &profile_cfg->profiles[i];
509		profile->ssid_index = i;
510		/* Support any cipher and auth algorithm */
511		profile->unicast_cipher = 0xff;
512		profile->auth_alg = 0xff;
513		profile->network_type = IWL_NETWORK_TYPE_ANY;
514		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
515		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
516	}
517
518	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
519
520	ret = iwl_mvm_send_cmd(mvm, &cmd);
521	kfree(profile_cfg);
522free_blacklist:
523	kfree(blacklist);
524
525	return ret;
526}
527
528static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
529				  struct cfg80211_sched_scan_request *req)
530{
531	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
532		IWL_DEBUG_SCAN(mvm,
533			       "Sending scheduled scan with filtering, n_match_sets %d\n",
534			       req->n_match_sets);
535		return false;
536	}
537
538	IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
539	return true;
540}
541
542static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
543{
544	int ret;
545	struct iwl_host_cmd cmd = {
546		.id = SCAN_OFFLOAD_ABORT_CMD,
547	};
548	u32 status;
549
550	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
551	if (ret)
552		return ret;
553
554	if (status != CAN_ABORT_STATUS) {
555		/*
556		 * The scan abort will return 1 for success or
557		 * 2 for "failure".  A failure condition can be
558		 * due to simply not being in an active scan which
559		 * can occur if we send the scan abort before the
560		 * microcode has notified us that a scan is completed.
561		 */
562		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
563		ret = -ENOENT;
564	}
565
566	return ret;
567}
568
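/* Fill the per-band TX commands used for probe requests, sent from the aux
 * station: 1 Mbps CCK on 2.4 GHz (6 Mbps OFDM if CCK is disallowed) and
 * 6 Mbps OFDM on 5 GHz.
 */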
569static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
570				     struct iwl_scan_req_tx_cmd *tx_cmd,
571				     bool no_cck)
572{
573	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
574					 TX_CMD_FLG_BT_DIS);
575	tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
576							   IEEE80211_BAND_2GHZ,
577							   no_cck);
578	tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
579
580	tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
581					 TX_CMD_FLG_BT_DIS);
582	tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
583							   IEEE80211_BAND_5GHZ,
584							   no_cck);
585	tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
586}
587
588static void
589iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
590			       struct ieee80211_channel **channels,
591			       int n_channels, u32 ssid_bitmap,
592			       struct iwl_scan_req_lmac *cmd)
593{
594	struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
595	int i;
596
597	for (i = 0; i < n_channels; i++) {
598		channel_cfg[i].channel_num =
599			cpu_to_le16(channels[i]->hw_value);
600		channel_cfg[i].iter_count = cpu_to_le16(1);
601		channel_cfg[i].iter_interval = 0;
602		channel_cfg[i].flags =
603			cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
604				    ssid_bitmap);
605	}
606}
607
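/* Copy the given IEs into pos; when an RRM scan is needed, a zeroed DS
 * Parameter Set element is inserted at the position required by the IE
 * ordering rules. Returns a pointer just past the copied data.
 */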
608static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
609					   size_t len, u8 *const pos)
610{
611	static const u8 before_ds_params[] = {
612			WLAN_EID_SSID,
613			WLAN_EID_SUPP_RATES,
614			WLAN_EID_REQUEST,
615			WLAN_EID_EXT_SUPP_RATES,
616	};
617	size_t offs;
618	u8 *newpos = pos;
619
620	if (!iwl_mvm_rrm_scan_needed(mvm)) {
621		memcpy(newpos, ies, len);
622		return newpos + len;
623	}
624
625	offs = ieee80211_ie_split(ies, len,
626				  before_ds_params,
627				  ARRAY_SIZE(before_ds_params),
628				  0);
629
630	memcpy(newpos, ies, offs);
631	newpos += offs;
632
633	/* Add a placeholder for DS Parameter Set element */
634	*newpos++ = WLAN_EID_DS_PARAMS;
635	*newpos++ = 1;
636	*newpos++ = 0;
637
638	memcpy(newpos, ies + offs, len - offs);
639	newpos += len - offs;
640
641	return newpos;
642}
643
644static void
645iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
646			 struct ieee80211_scan_ies *ies,
647			 struct iwl_mvm_scan_params *params)
648{
649	struct ieee80211_mgmt *frame = (void *)params->preq.buf;
650	u8 *pos, *newpos;
651	const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
652		params->mac_addr : NULL;
653
	/*
	 * Unfortunately, right now the offload scan doesn't support randomising
	 * within the firmware, so until the firmware API is ready we implement
	 * it in the driver. This means the address isn't re-randomised between
	 * scan iterations, only when the scan is restarted, but at least that
	 * helps a bit.
	 */
660	if (mac_addr)
661		get_random_mask_addr(frame->sa, mac_addr,
662				     params->mac_addr_mask);
663	else
664		memcpy(frame->sa, vif->addr, ETH_ALEN);
665
666	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
667	eth_broadcast_addr(frame->da);
668	eth_broadcast_addr(frame->bssid);
669	frame->seq_ctrl = 0;
670
671	pos = frame->u.probe_req.variable;
672	*pos++ = WLAN_EID_SSID;
673	*pos++ = 0;
674
675	params->preq.mac_header.offset = 0;
676	params->preq.mac_header.len = cpu_to_le16(24 + 2);
677
678	/* Insert ds parameter set element on 2.4 GHz band */
679	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
680						 ies->ies[IEEE80211_BAND_2GHZ],
681						 ies->len[IEEE80211_BAND_2GHZ],
682						 pos);
683	params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
684	params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
685	pos = newpos;
686
687	memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
688	       ies->len[IEEE80211_BAND_5GHZ]);
689	params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
690	params->preq.band_data[1].len =
691		cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
692	pos += ies->len[IEEE80211_BAND_5GHZ];
693
694	memcpy(pos, ies->common_ies, ies->common_ie_len);
695	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
696	params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
697}
698
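/* Translate an extended scan priority for the firmware: pass it through
 * when the extended priority API is supported, otherwise collapse it to
 * the legacy LOW/MEDIUM/HIGH values.
 */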
699static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
700				    enum iwl_scan_priority_ext prio)
701{
702	if (fw_has_api(&mvm->fw->ucode_capa,
703		       IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
704		return cpu_to_le32(prio);
705
706	if (prio <= IWL_SCAN_PRIORITY_EXT_2)
707		return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
708
709	if (prio <= IWL_SCAN_PRIORITY_EXT_4)
710		return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
711
712	return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
713}
714
715static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
716				    struct iwl_scan_req_lmac *cmd,
717				    struct iwl_mvm_scan_params *params)
718{
719	cmd->active_dwell = scan_timing[params->type].dwell_active;
720	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
721	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
722	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
723	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
724	cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
725}
726
727static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
728				     struct ieee80211_scan_ies *ies,
729				     int n_channels)
730{
	return ((n_ssids <= PROBE_OPTION_MAX) &&
		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
		(ies->common_ie_len +
		 ies->len[IEEE80211_BAND_2GHZ] +
		 ies->len[IEEE80211_BAND_5GHZ] <=
		 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
737}
738
739static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
740					struct ieee80211_vif *vif)
741{
742	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
743
744	/* We can only use EBS if:
745	 *	1. the feature is supported;
746	 *	2. the last EBS was successful;
747	 *	3. if only single scan, the single scan EBS API is supported;
748	 *	4. it's not a p2p find operation.
749	 */
750	return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
751		mvm->last_ebs_successful &&
752		vif->type != NL80211_IFTYPE_P2P_DEVICE);
753}
754
755static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
756				   struct iwl_mvm_scan_params *params)
757{
758	int flags = 0;
759
760	if (params->n_ssids == 0)
761		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
762
763	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
764		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
765
766	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
767		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
768
769	if (iwl_mvm_rrm_scan_needed(mvm))
770		flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
771
772	if (params->pass_all)
773		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
774	else
775		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
776
777#ifdef CONFIG_IWLWIFI_DEBUGFS
778	if (mvm->scan_iter_notif_enabled)
779		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
780#endif
781
782	return flags;
783}
784
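/* Build an LMAC scan request in mvm->scan_cmd: dwell times, RX chain,
 * scan flags, per-band TX commands, direct-scan SSIDs, the schedule,
 * optional EBS channel flags, the channel list and the probe request.
 */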
785static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
786			     struct iwl_mvm_scan_params *params)
787{
788	struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
789	struct iwl_scan_probe_req *preq =
790		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
791			 mvm->fw->ucode_capa.n_scan_channels);
792	u32 ssid_bitmap = 0;
793	int i;
794
795	lockdep_assert_held(&mvm->mutex);
796
797	memset(cmd, 0, ksize(cmd));
798
799	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
800		return -EINVAL;
801
802	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
803
804	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
805	cmd->iter_num = cpu_to_le32(1);
806	cmd->n_channels = (u8)params->n_channels;
807
808	cmd->delay = cpu_to_le32(params->delay);
809
810	cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params));
811
812	cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
813	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
814					MAC_FILTER_IN_BEACON);
815	iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
816	iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
817
818	/* this API uses bits 1-20 instead of 0-19 */
819	ssid_bitmap <<= 1;
820
821	for (i = 0; i < params->n_scan_plans; i++) {
822		struct cfg80211_sched_scan_plan *scan_plan =
823			&params->scan_plans[i];
824
825		cmd->schedule[i].delay =
826			cpu_to_le16(scan_plan->interval);
827		cmd->schedule[i].iterations = scan_plan->iterations;
828		cmd->schedule[i].full_scan_mul = 1;
829	}
830
831	/*
832	 * If the number of iterations of the last scan plan is set to
833	 * zero, it should run infinitely. However, this is not always the case.
834	 * For example, when regular scan is requested the driver sets one scan
835	 * plan with one iteration.
836	 */
837	if (!cmd->schedule[i - 1].iterations)
838		cmd->schedule[i - 1].iterations = 0xff;
839
840	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
841		cmd->channel_opt[0].flags =
842			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
843				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
844				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
845		cmd->channel_opt[0].non_ebs_ratio =
846			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
847		cmd->channel_opt[1].flags =
848			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
849				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
850				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
851		cmd->channel_opt[1].non_ebs_ratio =
852			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
853	}
854
855	iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
856				       params->n_channels, ssid_bitmap, cmd);
857
858	*preq = params->preq;
859
860	return 0;
861}
862
863static int rate_to_scan_rate_flag(unsigned int rate)
864{
865	static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
866		[IWL_RATE_1M_INDEX]	= SCAN_CONFIG_RATE_1M,
867		[IWL_RATE_2M_INDEX]	= SCAN_CONFIG_RATE_2M,
868		[IWL_RATE_5M_INDEX]	= SCAN_CONFIG_RATE_5M,
869		[IWL_RATE_11M_INDEX]	= SCAN_CONFIG_RATE_11M,
870		[IWL_RATE_6M_INDEX]	= SCAN_CONFIG_RATE_6M,
871		[IWL_RATE_9M_INDEX]	= SCAN_CONFIG_RATE_9M,
872		[IWL_RATE_12M_INDEX]	= SCAN_CONFIG_RATE_12M,
873		[IWL_RATE_18M_INDEX]	= SCAN_CONFIG_RATE_18M,
874		[IWL_RATE_24M_INDEX]	= SCAN_CONFIG_RATE_24M,
875		[IWL_RATE_36M_INDEX]	= SCAN_CONFIG_RATE_36M,
876		[IWL_RATE_48M_INDEX]	= SCAN_CONFIG_RATE_48M,
877		[IWL_RATE_54M_INDEX]	= SCAN_CONFIG_RATE_54M,
878	};
879
880	return rate_to_scan_rate[rate];
881}
882
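/* Collect the legacy rates of both bands into one bitmap and mark them
 * all as supported rates for the scan config command.
 */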
883static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
884{
885	struct ieee80211_supported_band *band;
886	unsigned int rates = 0;
887	int i;
888
889	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
890	for (i = 0; i < band->n_bitrates; i++)
891		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
892	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
893	for (i = 0; i < band->n_bitrates; i++)
894		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
895
896	/* Set both basic rates and supported rates */
897	rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
898
899	return cpu_to_le32(rates);
900}
901
902int iwl_mvm_config_scan(struct iwl_mvm *mvm)
903{
904	struct iwl_scan_config *scan_config;
905	struct ieee80211_supported_band *band;
906	int num_channels =
907		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
908		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
909	int ret, i, j = 0, cmd_size;
910	struct iwl_host_cmd cmd = {
911		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
912	};
913
914	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
915		return -ENOBUFS;
916
917	cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
918
919	scan_config = kzalloc(cmd_size, GFP_KERNEL);
920	if (!scan_config)
921		return -ENOMEM;
922
923	scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
924					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
925					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
926					 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
927					 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
928					 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
929					 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
					 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
931					 SCAN_CONFIG_N_CHANNELS(num_channels));
932	scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
933	scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
934	scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
935	scan_config->out_of_channel_time = cpu_to_le32(170);
936	scan_config->suspend_time = cpu_to_le32(30);
937	scan_config->dwell_active = 20;
938	scan_config->dwell_passive = 110;
939	scan_config->dwell_fragmented = 20;
940
941	memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
942
943	scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
944	scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
945				     IWL_CHANNEL_FLAG_ACCURATE_EBS |
946				     IWL_CHANNEL_FLAG_EBS_ADD |
947				     IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
948
949	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
950	for (i = 0; i < band->n_channels; i++, j++)
951		scan_config->channel_array[j] = band->channels[i].hw_value;
952	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
953	for (i = 0; i < band->n_channels; i++, j++)
954		scan_config->channel_array[j] = band->channels[i].hw_value;
955
956	cmd.data[0] = scan_config;
957	cmd.len[0] = cmd_size;
958	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
959
960	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
961
962	ret = iwl_mvm_send_cmd(mvm, &cmd);
963
964	kfree(scan_config);
965	return ret;
966}
967
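/* Find a scan UID slot with the given status (0 means a free slot).
 * Returns -ENOENT if none is found.
 */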
968static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
969{
970	int i;
971
972	for (i = 0; i < mvm->max_scans; i++)
973		if (mvm->scan_uid_status[i] == status)
974			return i;
975
976	return -ENOENT;
977}
978
979static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
980{
981	return params->n_scan_plans == 1 &&
982		params->scan_plans[0].iterations == 1;
983}
984
985static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
986				    struct iwl_scan_req_umac *cmd,
987				    struct iwl_mvm_scan_params *params)
988{
989	cmd->active_dwell = scan_timing[params->type].dwell_active;
990	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
991	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
992	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
993	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
994	cmd->scan_priority =
995		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
996
997	if (iwl_mvm_is_regular_scan(params))
998		cmd->ooc_priority =
999			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1000	else
1001		cmd->ooc_priority =
1002			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
1003}
1004
1005static void
1006iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1007			       struct ieee80211_channel **channels,
1008			       int n_channels, u32 ssid_bitmap,
1009			       struct iwl_scan_req_umac *cmd)
1010{
1011	struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
1012	int i;
1013
1014	for (i = 0; i < n_channels; i++) {
1015		channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
1016		channel_cfg[i].channel_num = channels[i]->hw_value;
1017		channel_cfg[i].iter_count = 1;
1018		channel_cfg[i].iter_interval = 0;
1019	}
1020}
1021
1022static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1023				   struct iwl_mvm_scan_params *params)
1024{
1025	int flags = 0;
1026
1027	if (params->n_ssids == 0)
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
1029
1030	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
1031		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1032
1033	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
1034		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1035
1036	if (iwl_mvm_rrm_scan_needed(mvm))
1037		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
1038
1039	if (params->pass_all)
1040		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1041	else
1042		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
1043
1044	if (!iwl_mvm_is_regular_scan(params))
1045		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
1046
1047#ifdef CONFIG_IWLWIFI_DEBUGFS
1048	if (mvm->scan_iter_notif_enabled)
1049		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1050#endif
1051	return flags;
1052}
1053
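/* Build a UMAC scan request in mvm->scan_cmd: allocate a free UID and fill
 * dwell times, general and channel flags, the channel list and, in the
 * tail section, the schedule and probe request.
 */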
1054static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1055			     struct iwl_mvm_scan_params *params,
1056			     int type)
1057{
1058	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
1059	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
1060		sizeof(struct iwl_scan_channel_cfg_umac) *
1061			mvm->fw->ucode_capa.n_scan_channels;
1062	int uid, i;
1063	u32 ssid_bitmap = 0;
1064
1065	lockdep_assert_held(&mvm->mutex);
1066
1067	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
1068		return -EINVAL;
1069
1070	uid = iwl_mvm_scan_uid_by_status(mvm, 0);
1071	if (uid < 0)
1072		return uid;
1073
1074	memset(cmd, 0, ksize(cmd));
1075
1076	iwl_mvm_scan_umac_dwell(mvm, cmd, params);
1077
1078	mvm->scan_uid_status[uid] = type;
1079
1080	cmd->uid = cpu_to_le32(uid);
1081	cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
1082
1083	if (type == IWL_MVM_SCAN_SCHED)
1084		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1085
1086	if (iwl_mvm_scan_use_ebs(mvm, vif))
1087		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1088				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1089				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1090
1091	cmd->n_channels = params->n_channels;
1092
1093	iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
1094
1095	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
1096				       params->n_channels, ssid_bitmap, cmd);
1097
1098	for (i = 0; i < params->n_scan_plans; i++) {
1099		struct cfg80211_sched_scan_plan *scan_plan =
1100			&params->scan_plans[i];
1101
1102		sec_part->schedule[i].iter_count = scan_plan->iterations;
1103		sec_part->schedule[i].interval =
1104			cpu_to_le16(scan_plan->interval);
1105	}
1106
1107	/*
1108	 * If the number of iterations of the last scan plan is set to
1109	 * zero, it should run infinitely. However, this is not always the case.
1110	 * For example, when regular scan is requested the driver sets one scan
1111	 * plan with one iteration.
1112	 */
1113	if (!sec_part->schedule[i - 1].iter_count)
1114		sec_part->schedule[i - 1].iter_count = 0xff;
1115
1116	sec_part->delay = cpu_to_le16(params->delay);
1117	sec_part->preq = params->preq;
1118
1119	return 0;
1120}
1121
1122static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
1123{
1124	return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
1125}
1126
1127static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1128{
1129	/* This looks a bit arbitrary, but the idea is that if we run
1130	 * out of possible simultaneous scans and the userspace is
1131	 * trying to run a scan type that is already running, we
1132	 * return -EBUSY.  But if the userspace wants to start a
1133	 * different type of scan, we stop the opposite type to make
1134	 * space for the new request.  The reason is backwards
1135	 * compatibility with old wpa_supplicant that wouldn't stop a
1136	 * scheduled scan before starting a normal scan.
1137	 */
1138
1139	if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
1140		return 0;
1141
	/* Use a switch, even though this is a bitmask, so that more
	 * than one bit set will fall into default and we will warn.
	 */
1145	switch (type) {
1146	case IWL_MVM_SCAN_REGULAR:
1147		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1148			return -EBUSY;
1149		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1150	case IWL_MVM_SCAN_SCHED:
1151		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1152			return -EBUSY;
		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
1154	case IWL_MVM_SCAN_NETDETECT:
1155		/* No need to stop anything for net-detect since the
1156		 * firmware is restarted anyway.  This way, any sched
1157		 * scans that were running will be restarted when we
1158		 * resume.
		 */
1160		return 0;
1161	default:
1162		WARN_ON(1);
1163		break;
1164	}
1165
1166	return -EIO;
1167}
1168
1169int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1170			   struct cfg80211_scan_request *req,
1171			   struct ieee80211_scan_ies *ies)
1172{
1173	struct iwl_host_cmd hcmd = {
1174		.len = { iwl_mvm_scan_size(mvm), },
1175		.data = { mvm->scan_cmd, },
1176		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
1177	};
1178	struct iwl_mvm_scan_params params = {};
1179	int ret;
1180	struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
1181
1182	lockdep_assert_held(&mvm->mutex);
1183
1184	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
1185		IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
1186		return -EBUSY;
1187	}
1188
1189	ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
1190	if (ret)
1191		return ret;
1192
1193	/* we should have failed registration if scan_cmd was NULL */
1194	if (WARN_ON(!mvm->scan_cmd))
1195		return -ENOMEM;
1196
1197	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
1198		return -ENOBUFS;
1199
1200	params.n_ssids = req->n_ssids;
1201	params.flags = req->flags;
1202	params.n_channels = req->n_channels;
1203	params.delay = 0;
1204	params.ssids = req->ssids;
1205	params.channels = req->channels;
1206	params.mac_addr = req->mac_addr;
1207	params.mac_addr_mask = req->mac_addr_mask;
1208	params.no_cck = req->no_cck;
1209	params.pass_all = true;
1210	params.n_match_sets = 0;
1211	params.match_sets = NULL;
1212
1213	params.scan_plans = &scan_plan;
1214	params.n_scan_plans = 1;
1215
1216	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
1217
1218	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
1219
1220	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1221		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
1222		ret = iwl_mvm_scan_umac(mvm, vif, &params,
1223					IWL_MVM_SCAN_REGULAR);
1224	} else {
1225		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
1226		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
1227	}
1228
1229	if (ret)
1230		return ret;
1231
1232	ret = iwl_mvm_send_cmd(mvm, &hcmd);
1233	if (ret) {
1234		/* If the scan failed, it usually means that the FW was unable
1235		 * to allocate the time events. Warn on it, but maybe we
1236		 * should try to send the command again with different params.
1237		 */
1238		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
1239		return ret;
1240	}
1241
1242	IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
1243	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
1244	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
1245
1246	return 0;
1247}
1248
1249int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1250			     struct ieee80211_vif *vif,
1251			     struct cfg80211_sched_scan_request *req,
1252			     struct ieee80211_scan_ies *ies,
1253			     int type)
1254{
1255	struct iwl_host_cmd hcmd = {
1256		.len = { iwl_mvm_scan_size(mvm), },
1257		.data = { mvm->scan_cmd, },
1258		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
1259	};
1260	struct iwl_mvm_scan_params params = {};
1261	int ret;
1262
1263	lockdep_assert_held(&mvm->mutex);
1264
1265	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
1266		IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
1267		return -EBUSY;
1268	}
1269
1270	/* we don't support "match all" in the firmware */
1271	if (!req->n_match_sets)
1272		return -EOPNOTSUPP;
1273
1274	ret = iwl_mvm_check_running_scans(mvm, type);
1275	if (ret)
1276		return ret;
1277
1278	/* we should have failed registration if scan_cmd was NULL */
1279	if (WARN_ON(!mvm->scan_cmd))
1280		return -ENOMEM;
1281
1282	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
1283		return -ENOBUFS;
1284
1285	params.n_ssids = req->n_ssids;
1286	params.flags = req->flags;
1287	params.n_channels = req->n_channels;
1288	params.ssids = req->ssids;
1289	params.channels = req->channels;
1290	params.mac_addr = req->mac_addr;
1291	params.mac_addr_mask = req->mac_addr_mask;
1292	params.no_cck = false;
	params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
1294	params.n_match_sets = req->n_match_sets;
1295	params.match_sets = req->match_sets;
1296	if (!req->n_scan_plans)
1297		return -EINVAL;
1298
1299	params.n_scan_plans = req->n_scan_plans;
1300	params.scan_plans = req->scan_plans;
1301
1302	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
1303
1304	/* In theory, LMAC scans can handle a 32-bit delay, but since
1305	 * waiting for over 18 hours to start the scan is a bit silly
1306	 * and to keep it aligned with UMAC scans (which only support
1307	 * 16-bit delays), trim it down to 16-bits.
1308	 */
1309	if (req->delay > U16_MAX) {
1310		IWL_DEBUG_SCAN(mvm,
1311			       "delay value is > 16-bits, set to max possible\n");
1312		params.delay = U16_MAX;
1313	} else {
1314		params.delay = req->delay;
1315	}
1316
1317	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
1318	if (ret)
1319		return ret;
1320
1321	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
1322
1323	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1324		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
1325		ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
1326	} else {
1327		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
1328		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
1329	}
1330
1331	if (ret)
1332		return ret;
1333
1334	ret = iwl_mvm_send_cmd(mvm, &hcmd);
1335	if (!ret) {
1336		IWL_DEBUG_SCAN(mvm,
1337			       "Sched scan request was sent successfully\n");
1338		mvm->scan_status |= type;
1339	} else {
1340		/* If the scan failed, it usually means that the FW was unable
1341		 * to allocate the time events. Warn on it, but maybe we
1342		 * should try to send the command again with different params.
1343		 */
1344		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
1345	}
1346
1347	return ret;
1348}
1349
1350void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1351					 struct iwl_rx_cmd_buffer *rxb)
1352{
1353	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1354	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
1355	u32 uid = __le32_to_cpu(notif->uid);
1356	bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
1357
1358	if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
1359		return;
1360
1361	/* if the scan is already stopping, we don't need to notify mac80211 */
1362	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
1363		ieee80211_scan_completed(mvm->hw, aborted);
1364		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1365	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
1366		ieee80211_sched_scan_stopped(mvm->hw);
1367	}
1368
1369	mvm->scan_status &= ~mvm->scan_uid_status[uid];
1370	IWL_DEBUG_SCAN(mvm,
1371		       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
1372		       uid, mvm->scan_uid_status[uid],
1373		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
1374				"completed" : "aborted",
1375		       iwl_mvm_ebs_status_str(notif->ebs_status));
1376	IWL_DEBUG_SCAN(mvm,
1377		       "Last line %d, Last iteration %d, Time from last iteration %d\n",
1378		       notif->last_schedule, notif->last_iter,
1379		       __le32_to_cpu(notif->time_from_last_iter));
1380
1381	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
1382	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
1383		mvm->last_ebs_successful = false;
1384
1385	mvm->scan_uid_status[uid] = 0;
1386}
1387
1388void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1389					      struct iwl_rx_cmd_buffer *rxb)
1390{
1391	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1392	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
1393	u8 buf[256];
1394
1395	IWL_DEBUG_SCAN(mvm,
1396		       "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
1397		       notif->status, notif->scanned_channels,
1398		       iwl_mvm_dump_channel_list(notif->results,
1399						 notif->scanned_channels, buf,
1400						 sizeof(buf)));
1401}
1402
1403static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
1404{
1405	struct iwl_umac_scan_abort cmd = {};
1406	int uid, ret;
1407
1408	lockdep_assert_held(&mvm->mutex);
1409
1410	/* We should always get a valid index here, because we already
1411	 * checked that this type of scan was running in the generic
1412	 * code.
1413	 */
1414	uid = iwl_mvm_scan_uid_by_status(mvm, type);
1415	if (WARN_ON_ONCE(uid < 0))
1416		return uid;
1417
1418	cmd.uid = cpu_to_le32(uid);
1419
1420	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
1421
1422	ret = iwl_mvm_send_cmd_pdu(mvm,
1423				   iwl_cmd_id(SCAN_ABORT_UMAC,
1424					      IWL_ALWAYS_LONG_GROUP, 0),
1425				   0, sizeof(cmd), &cmd);
1426	if (!ret)
1427		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
1428
1429	return ret;
1430}
1431
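/* Send the appropriate (UMAC or LMAC) scan abort command and wait up to
 * one second for the corresponding scan complete notification.
 */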
1432static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1433{
1434	struct iwl_notification_wait wait_scan_done;
1435	static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
1436					      SCAN_OFFLOAD_COMPLETE, };
1437	int ret;
1438
1439	lockdep_assert_held(&mvm->mutex);
1440
1441	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
1442				   scan_done_notif,
1443				   ARRAY_SIZE(scan_done_notif),
1444				   NULL, NULL);
1445
1446	IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
1447
1448	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1449		ret = iwl_mvm_umac_scan_abort(mvm, type);
1450	else
1451		ret = iwl_mvm_lmac_scan_abort(mvm);
1452
1453	if (ret) {
1454		IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
1455		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
1456		return ret;
1457	}
1458
1459	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
1460
1461	return ret;
1462}
1463
1464int iwl_mvm_scan_size(struct iwl_mvm *mvm)
1465{
1466	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1467		return sizeof(struct iwl_scan_req_umac) +
1468			sizeof(struct iwl_scan_channel_cfg_umac) *
1469				mvm->fw->ucode_capa.n_scan_channels +
1470			sizeof(struct iwl_scan_req_umac_tail);
1471
1472	return sizeof(struct iwl_scan_req_lmac) +
1473		sizeof(struct iwl_scan_channel_cfg_lmac) *
1474		mvm->fw->ucode_capa.n_scan_channels +
1475		sizeof(struct iwl_scan_probe_req);
1476}
1477
/*
 * This function is used in the nic restart flow, to inform mac80211 about
 * scans that were aborted by the restart flow or by an assert.
 */
1482void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
1483{
1484	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1485		int uid, i;
1486
1487		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
1488		if (uid >= 0) {
1489			ieee80211_scan_completed(mvm->hw, true);
1490			mvm->scan_uid_status[uid] = 0;
1491		}
1492		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
1493		if (uid >= 0 && !mvm->restart_fw) {
1494			ieee80211_sched_scan_stopped(mvm->hw);
1495			mvm->scan_uid_status[uid] = 0;
1496		}
1497
1498		/* We shouldn't have any UIDs still set.  Loop over all the
1499		 * UIDs to make sure there's nothing left there and warn if
1500		 * any is found.
1501		 */
1502		for (i = 0; i < mvm->max_scans; i++) {
1503			if (WARN_ONCE(mvm->scan_uid_status[i],
1504				      "UMAC scan UID %d status was not cleaned\n",
1505				      i))
1506				mvm->scan_uid_status[i] = 0;
1507		}
1508	} else {
1509		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
1510			ieee80211_scan_completed(mvm->hw, true);
1511
1512		/* Sched scan will be restarted by mac80211 in
1513		 * restart_hw, so do not report if FW is about to be
1514		 * restarted.
1515		 */
1516		if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
1517			ieee80211_sched_scan_stopped(mvm->hw);
1518	}
1519}
1520
1521int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
1522{
1523	int ret;
1524
1525	if (!(mvm->scan_status & type))
1526		return 0;
1527
1528	if (iwl_mvm_is_radio_killed(mvm)) {
1529		ret = 0;
1530		goto out;
1531	}
1532
1533	ret = iwl_mvm_scan_stop_wait(mvm, type);
1534	if (!ret)
1535		mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
1536out:
1537	/* Clear the scan status so the next scan requests will
1538	 * succeed and mark the scan as stopping, so that the Rx
1539	 * handler doesn't do anything, as the scan was stopped from
1540	 * above.
1541	 */
1542	mvm->scan_status &= ~type;
1543
1544	if (type == IWL_MVM_SCAN_REGULAR) {
1545		/* Since the rx handler won't do anything now, we have
1546		 * to release the scan reference here.
1547		 */
1548		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1549		if (notify)
1550			ieee80211_scan_completed(mvm->hw, true);
1551	} else if (notify) {
1552		ieee80211_sched_scan_stopped(mvm->hw);
1553	}
1554
1555	return ret;
1556}
1557