/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

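/*
 * Request a teardown of all current TDLS links. mac80211 is asked to send
 * the teardown frames to the peers; the caller must hold mvm->mutex.
 */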
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
				NL80211_TDLS_TEARDOWN,
				WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
				GFP_KERNEL);
	}
}

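/*
 * Count the TDLS stations in the firmware station table. If @vif is
 * non-NULL, only peers belonging to that interface are counted.
 */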
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}

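/*
 * Send the current list of TDLS peers to the firmware with TDLS_CONFIG_CMD.
 * The response is only sanity-checked for size; its contents are not used
 * for anything yet.
 */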
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
			pkt->hdr.flags);
		goto exit;
	}

	if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
		goto exit;

	/* we don't really care about the response at this point */

exit:
	iwl_free_resp(&cmd);
}

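/*
 * Re-send the TDLS peer configuration to the firmware and update the MAC
 * power settings when the first peer is added or the last peer is removed.
 */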
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/* configure the FW with TDLS peer info */
	iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}

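/*
 * mac80211 callback: protect the session for two DTIM intervals so the TDLS
 * setup/discovery response can be received on the base channel. A device
 * reference is taken because session protection reads the system time
 * directly from the device.
 */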
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/*
	 * iwl_mvm_protect_session() reads directly from the device
	 * (the system time), so make sure it is available.
	 */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
		return;

	mutex_lock(&mvm->mutex);
	/* Protect the session to hear the TDLS setup response on the channel */
	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}

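/*
 * Move the TDLS channel-switch state machine to @state. Record the request
 * timestamp when a switch request is sent, and forget the current switching
 * peer when returning to idle.
 */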
static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp =
			iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}

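/*
 * Handle a TDLS channel-switch notification from the firmware. A zero
 * status returns the state machine to idle; on success the switch becomes
 * active and the delayed work is re-armed to switch again after roughly one
 * DTIM interval.
 */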
int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		goto out;
	}

	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		goto out;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);

out:
	return 0;
}

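/*
 * Validate a requested channel-switch action against the current state of
 * the state machine and the currently switching peer. Returns 0 if the
 * action is allowed, -EBUSY or -EINVAL otherwise.
 */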
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}

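/*
 * Build and send a TDLS_CHANNEL_SWITCH_CMD to the firmware, carrying the
 * switch timing, the target channel and the template frame to transmit to
 * the peer. On success the state machine advances; on failure it is
 * returned to idle.
 */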
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
	cmd.timing.switch_time = cpu_to_le32(switch_time);
	cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				ret = -EINVAL;
				goto out;
			}
			chandef = &chanctx->def;
		}
	}

	if (chandef) {
		cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
			       PHY_BAND_24 : PHY_BAND_5);
		cmd.ci.channel = chandef->chan->hw_value;
		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
		cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
	}

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	cmd.timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);

	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
			   mvmsta->sta_id);

	hdr = (void *)skb->data;
	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(cmd.frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
				   sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}

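/*
 * Delayed work, run after an active channel switch has finished or timed
 * out. Return the state machine to idle and, if a switching peer is still
 * configured, request another switch, retrying after a DTIM interval on
 * failure.
 */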
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			   msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}

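/*
 * mac80211 callback: start a TDLS channel switch with @sta. Only a single
 * switching peer is supported. The template frame and switch parameters are
 * saved so the periodic work can repeat the switch later.
 */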
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

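/*
 * mac80211 callback: stop channel switching with @sta. If a switch with
 * this peer is still in progress, wait roughly a DTIM interval for the PHY
 * to return to the base channel before flushing the channel-switch work.
 */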
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

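/*
 * mac80211 callback: a TDLS channel-switch request/response frame was
 * received from a peer. A non-zero response status from the peer we were
 * switching to returns the state machine to idle; otherwise the firmware is
 * configured to respond and/or move channel. A timeout is always armed in
 * case the switch does not complete.
 */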
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
		1024 / 1000;
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}