/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "iwl-notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/*
 * For the high priority TE, use a time event type that has a priority
 * similar to the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC

void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (!te_data->vif)
		return;

	list_del(&te_data->list);
	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;
	te_data->vif = NULL;
}

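/*
 * Work item scheduled via iwl_mvm_roc_finished() when a remain-on-channel
 * time event ends or is cut short: clear the ROC status bits, synchronize
 * the TX path so no new offchannel frames slip through, and flush whatever
 * is still queued on the offchannel/aux queues.
 */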
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
	u32 queues = 0;

	/*
	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
	}
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
		queues |= BIT(mvm->aux_queue);
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
	}

	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
}

static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}

static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}

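/*
 * Decide whether a failed/expired time event means we lost the connection:
 * for a station vif that is not yet associated (or has no DTIM period yet),
 * log errmsg and report a connection loss to mac80211. Returns true if a
 * connection loss was reported.
 */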
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	if (vif->type != NL80211_IFTYPE_STATION)
		return false;
	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);

	iwl_mvm_connection_loss(mvm, vif, errmsg);
	return true;
}

static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		ieee80211_chswitch_done(te_data->vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}

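/*
 * Match this time event notification against the firmware debug triggers
 * configured for FW_DBG_TRIGGER_TIME_EVENT and, on a match of id, action
 * and status, collect firmware debug data.
 */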
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
	te_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig))
		return;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
					    "Time event %d Action 0x%x received status: %d",
					    te_data->id,
					    le32_to_cpu(notif->action),
					    le32_to_cpu(notif->status));
		break;
	}
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases where the scheduler cannot
	 * find a schedule that can handle the event (for example requesting
	 * P2P Device discoverability while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		const char *msg;

		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
			msg = "Time Event start notification failure";
		else
			msg = "Time Event end notification failure";

		IWL_DEBUG_TE(mvm, "%s\n", msg);

		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		switch (te_data->vif->type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_roc_finished(mvm);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
				"No association and the time event is over already...");
			break;
		default:
			break;
		}

		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}

/*
 * Handle an Aux ROC time event
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
					   struct iwl_time_event_notif *notif)
{
	struct iwl_mvm_time_event_data *te_data, *tmp;
	bool aux_roc_te = false;

	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
			aux_roc_te = true;
			break;
		}
	}
	if (!aux_roc_te) /* Not an Aux ROC time event */
		return -EINVAL;

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	if (!le32_to_cpu(notif->status)) {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Aux ROC Time Event %s notification failure\n",
			     (le32_to_cpu(notif->action) &
			      TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end");
		return -EINVAL;
	}

	IWL_DEBUG_TE(mvm,
		     "Aux ROC time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		iwl_mvm_roc_finished(mvm); /* flush aux queue */
		list_del(&te_data->list); /* remove from list */
		te_data->running = false;
		te_data->vif = NULL;
		te_data->uid = 0;
		te_data->id = TE_MAX;
	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
		te_data->running = true;
		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	} else {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
			     le32_to_cpu(notif->action));
		return -EINVAL;
	}

	return 0;
}

/*
 * The Rx handler for time event notifications
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);
	/* This time event is triggered for Aux ROC request */
	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
		goto unlock;

	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
unlock:
	spin_unlock_bh(&mvm->time_event_lock);
}

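/*
 * Notification-wait handler for TIME_EVENT_NOTIFICATION: stop waiting
 * (return true) once the notification carrying our time event's UID has
 * arrived.
 */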
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* te_data->uid is already set in the TIME_EVENT_CMD response */
	if (le32_to_cpu(resp->unique_id) != te_data->uid)
		return false;

	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
		     te_data->uid);
	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}

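/*
 * Notification-wait handler for the TIME_EVENT_CMD response: store the
 * unique ID the firmware assigned to the time event we just added.
 */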
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}

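/*
 * Add a time event in the firmware: attach te_data to the vif, send
 * TIME_EVENT_CMD and pick up the firmware-assigned UID from the command
 * response (via the notification wait explained in the body below).
 */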
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}

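/*
 * Schedule (or keep) a session-protection time event for a station vif,
 * e.g. around association, so the firmware keeps us on the channel.
 * Illustrative call (the values are made up, not taken from a real caller):
 *
 *	iwl_mvm_protect_session(mvm, vif, 500, 400, 500, false);
 *
 * requests a 500 TU event, keeps an already-running event if it still has
 * at least 400 TU left, allows the firmware to delay the start by up to
 * 500 TU, and returns without waiting for the start notification.
 */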
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * We don't have enough time left, so cancel the current TE
		 * and issue a new one. Of course it would be better to remove
		 * the old one only when the new one is added, but we don't
		 * care if we are off channel for a bit. All we need to do is
		 * not return before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to set interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}

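/*
 * Detach a time event from the driver's bookkeeping under time_event_lock,
 * saving its UID in *uid first. Returns true if a removal command still has
 * to be sent to the firmware, false if the event had already ended and was
 * cleaned up.
 */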
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
					struct iwl_mvm_time_event_data *te_data,
					u32 *uid)
{
	u32 id;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	*uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already removed
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * It is possible that by the time we try to remove it, the time event
	 * has already ended and been removed. In such a case there is no need
	 * to send a removal command.
	 */
	if (id == TE_MAX) {
		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
		return false;
	}

	return true;
}

/*
 * Explicit request to remove an aux ROC time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
				      struct iwl_mvm_vif *mvmvif,
				      struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_hs20_roc_req aux_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	aux_cmd.event_unique_id = cpu_to_le32(uid);
	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	aux_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
		     le32_to_cpu(aux_cmd.event_unique_id));
	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
				   sizeof(aux_cmd), &aux_cmd);

	if (WARN_ON(ret))
		return;
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd time_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(time_cmd), &time_cmd);
	if (WARN_ON(ret))
		return;
}

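/*
 * Cancel the session-protection time event of the given vif, if any. This is
 * the counterpart of iwl_mvm_protect_session() and, like it, must be called
 * with the mvm mutex held.
 */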
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;

	lockdep_assert_held(&mvm->mutex);
	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

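/*
 * Schedule a remain-on-channel time event on a P2P_DEVICE interface. The
 * duration is given in milliseconds and converted to TU for the firmware;
 * the event is allowed to be fragmented and delayed to improve its chances
 * of being scheduled next to higher priority events.
 */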
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	/*
	 * Flush the done work, just in case it's still pending, so that
	 * the work it does can complete and we can accept new frames.
	 */
	flush_work(&mvm->roc_done_wk);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus they might not
	 * be scheduled. To improve the chances of them being scheduled, allow
	 * them to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

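/*
 * Stop an ongoing remain-on-channel session: find the P2P_DEVICE or aux ROC
 * time event, remove it in the firmware and kick the ROC-done work to flush
 * any frames that were queued for it.
 */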
void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
	struct iwl_mvm_vif *mvmvif = NULL;
	struct iwl_mvm_time_event_data *te_data;
	bool is_p2p = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and that this time event corresponds to a ROC
	 * request.
	 */
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
			is_p2p = true;
			goto remove_te;
		}
	}

	/* There can only be at most one AUX ROC time event, we just use the
	 * list to simplify/unify code. Remove it if it exists.
	 */
	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
					   struct iwl_mvm_time_event_data,
					   list);
	if (te_data)
		mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

remove_te:
	spin_unlock_bh(&mvm->time_event_lock);

	if (!mvmvif) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	if (is_p2p)
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	else
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);

	iwl_mvm_roc_finished(mvm);
}

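/*
 * Schedule a channel-switch absence period: add a TE_CHANNEL_SWITCH_PERIOD
 * time event starting at apply_time during which the firmware treats us as
 * absent from the current channel (TE_V2_ABSENCE policy).
 */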
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif,
				u32 duration, u32 apply_time)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
		return -EBUSY;
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
	time_cmd.apply_time = cpu_to_le32(apply_time);
	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_ABSENCE);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}