This source file includes the following definitions.
- brcmf_pno_store_request
- brcmf_pno_remove_request
- brcmf_pno_channel_config
- brcmf_pno_config
- brcmf_pno_set_random
- brcmf_pno_add_ssid
- brcmf_pno_add_bssid
- brcmf_is_ssid_active
- brcmf_pno_clean
- brcmf_pno_get_bucket_channels
- brcmf_pno_prep_fwconfig
- brcmf_pno_config_networks
- brcmf_pno_config_sched_scans
- brcmf_pno_start_sched_scan
- brcmf_pno_stop_sched_scan
- brcmf_pno_attach
- brcmf_pno_detach
- brcmf_pno_wiphy_params
- brcmf_pno_find_reqid_by_bucket
- brcmf_pno_get_bucket_map
1
2
3
4
5 #include <linux/netdevice.h>
6 #include <linux/gcd.h>
7 #include <net/cfg80211.h>
8
9 #include "core.h"
10 #include "debug.h"
11 #include "fwil.h"
12 #include "fwil_types.h"
13 #include "cfg80211.h"
14 #include "pno.h"
15
16 #define BRCMF_PNO_VERSION 2
17 #define BRCMF_PNO_REPEAT 4
18 #define BRCMF_PNO_FREQ_EXPO_MAX 3
19 #define BRCMF_PNO_IMMEDIATE_SCAN_BIT 3
20 #define BRCMF_PNO_ENABLE_BD_SCAN_BIT 5
21 #define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
22 #define BRCMF_PNO_REPORT_SEPARATELY_BIT 11
23 #define BRCMF_PNO_SCAN_INCOMPLETE 0
24 #define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
25 #define BRCMF_PNO_HIDDEN_BIT 2
26 #define BRCMF_PNO_SCHED_SCAN_PERIOD 30
27
28 #define BRCMF_PNO_MAX_BUCKETS 16
29 #define GSCAN_BATCH_NO_THR_SET 101
30 #define GSCAN_RETRY_THRESHOLD 3
31
/**
 * struct brcmf_pno_info - info related to the pno firmware feature.
 *
 * @n_reqs: number of scheduled scan requests currently stored.
 * @reqs: array of stored scheduled scan requests; one firmware gscan
 *	bucket is created per request, hence the BRCMF_PNO_MAX_BUCKETS bound.
 * @req_lock: protects @n_reqs and @reqs against concurrent access.
 */
struct brcmf_pno_info {
	int n_reqs;
	struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
	struct mutex req_lock;
};
37
38 #define ifp_to_pno(_ifp) ((_ifp)->drvr->config->pno)
39
40 static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
41 struct cfg80211_sched_scan_request *req)
42 {
43 if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
44 "pno request storage full\n"))
45 return -ENOSPC;
46
47 brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
48 mutex_lock(&pi->req_lock);
49 pi->reqs[pi->n_reqs++] = req;
50 mutex_unlock(&pi->req_lock);
51 return 0;
52 }
53
54 static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
55 {
56 int i, err = 0;
57
58 mutex_lock(&pi->req_lock);
59
60
61 for (i = 0; i < pi->n_reqs; i++) {
62 if (pi->reqs[i]->reqid == reqid)
63 break;
64 }
65
66 if (WARN(i == pi->n_reqs, "reqid not found\n")) {
67 err = -ENOENT;
68 goto done;
69 }
70
71 brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
72 pi->n_reqs--;
73
74
75 if (!pi->n_reqs || i == pi->n_reqs)
76 goto done;
77
78
79 while (i <= pi->n_reqs - 1) {
80 pi->reqs[i] = pi->reqs[i + 1];
81 i++;
82 }
83
84 done:
85 mutex_unlock(&pi->req_lock);
86 return err;
87 }
88
89 static int brcmf_pno_channel_config(struct brcmf_if *ifp,
90 struct brcmf_pno_config_le *cfg)
91 {
92 cfg->reporttype = 0;
93 cfg->flags = 0;
94
95 return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
96 }
97
98 static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
99 u32 mscan, u32 bestn)
100 {
101 struct brcmf_pub *drvr = ifp->drvr;
102 struct brcmf_pno_param_le pfn_param;
103 u16 flags;
104 u32 pfnmem;
105 s32 err;
106
107 memset(&pfn_param, 0, sizeof(pfn_param));
108 pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
109
110
111 flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
112 BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
113 pfn_param.repeat = BRCMF_PNO_REPEAT;
114 pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
115
116
117 pfn_param.scan_freq = cpu_to_le32(scan_freq);
118
119 if (mscan) {
120 pfnmem = bestn;
121
122
123 err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
124 if (err < 0) {
125 bphy_err(drvr, "failed to set pfnmem\n");
126 goto exit;
127 }
128
129 err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
130 if (err < 0) {
131 bphy_err(drvr, "failed to get pfnmem\n");
132 goto exit;
133 }
134 mscan = min_t(u32, mscan, pfnmem);
135 pfn_param.mscan = mscan;
136 pfn_param.bestn = bestn;
137 flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
138 brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
139 }
140
141 pfn_param.flags = cpu_to_le16(flags);
142 err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
143 sizeof(pfn_param));
144 if (err)
145 bphy_err(drvr, "pfn_set failed, err=%d\n", err);
146
147 exit:
148 return err;
149 }
150
151 static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
152 {
153 struct brcmf_pub *drvr = ifp->drvr;
154 struct brcmf_pno_macaddr_le pfn_mac;
155 u8 *mac_addr = NULL;
156 u8 *mac_mask = NULL;
157 int err, i;
158
159 for (i = 0; i < pi->n_reqs; i++)
160 if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
161 mac_addr = pi->reqs[i]->mac_addr;
162 mac_mask = pi->reqs[i]->mac_addr_mask;
163 break;
164 }
165
166
167 if (!mac_addr)
168 return 0;
169
170 pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
171 pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
172
173 memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
174 for (i = 0; i < ETH_ALEN; i++) {
175 pfn_mac.mac[i] &= mac_mask[i];
176 pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
177 }
178
179 pfn_mac.mac[0] &= 0xFE;
180
181 pfn_mac.mac[0] |= 0x02;
182
183 brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
184 pi->reqs[i]->reqid, pfn_mac.mac);
185 err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
186 sizeof(pfn_mac));
187 if (err)
188 bphy_err(drvr, "pfn_macaddr failed, err=%d\n", err);
189
190 return err;
191 }
192
193 static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
194 bool active)
195 {
196 struct brcmf_pub *drvr = ifp->drvr;
197 struct brcmf_pno_net_param_le pfn;
198 int err;
199
200 pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
201 pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
202 pfn.wsec = cpu_to_le32(0);
203 pfn.infra = cpu_to_le32(1);
204 pfn.flags = 0;
205 if (active)
206 pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
207 pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
208 memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
209
210 brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
211 err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
212 if (err < 0)
213 bphy_err(drvr, "adding failed: err=%d\n", err);
214 return err;
215 }
216
217 static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
218 {
219 struct brcmf_pub *drvr = ifp->drvr;
220 struct brcmf_pno_bssid_le bssid_cfg;
221 int err;
222
223 memcpy(bssid_cfg.bssid, bssid, ETH_ALEN);
224 bssid_cfg.flags = 0;
225
226 brcmf_dbg(SCAN, "adding bssid=%pM\n", bssid);
227 err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
228 sizeof(bssid_cfg));
229 if (err < 0)
230 bphy_err(drvr, "adding failed: err=%d\n", err);
231 return err;
232 }
233
234 static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
235 struct cfg80211_sched_scan_request *req)
236 {
237 int i;
238
239 if (!ssid || !req->ssids || !req->n_ssids)
240 return false;
241
242 for (i = 0; i < req->n_ssids; i++) {
243 if (ssid->ssid_len == req->ssids[i].ssid_len) {
244 if (!strncmp(ssid->ssid, req->ssids[i].ssid,
245 ssid->ssid_len))
246 return true;
247 }
248 }
249 return false;
250 }
251
252 static int brcmf_pno_clean(struct brcmf_if *ifp)
253 {
254 struct brcmf_pub *drvr = ifp->drvr;
255 int ret;
256
257
258 ret = brcmf_fil_iovar_int_set(ifp, "pfn", 0);
259 if (ret == 0) {
260
261 ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
262 }
263 if (ret < 0)
264 bphy_err(drvr, "failed code %d\n", ret);
265
266 return ret;
267 }
268
269 static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
270 struct brcmf_pno_config_le *pno_cfg)
271 {
272 u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
273 u16 chan;
274 int i, err = 0;
275
276 for (i = 0; i < r->n_channels; i++) {
277 if (n_chan >= BRCMF_NUMCHANNELS) {
278 err = -ENOSPC;
279 goto done;
280 }
281 chan = r->channels[i]->hw_value;
282 brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
283 pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
284 }
285
286 err = n_chan;
287 done:
288 pno_cfg->channel_num = cpu_to_le32(n_chan);
289 return err;
290 }
291
/**
 * brcmf_pno_prep_fwconfig() - derive firmware gscan configuration from
 *	the stored scheduled scan requests.
 *
 * @pi: pno instance holding the stored requests.
 * @pno_cfg: filled with the merged channel list of all requests.
 * @buckets: on success set to a kcalloc'ed array with one bucket per
 *	request; caller owns and must kfree it. Set to NULL on entry.
 * @scan_freq: filled with the firmware base scan period.
 *
 * Return: number of buckets (>0) on success, negative error code otherwise.
 */
static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
				   struct brcmf_pno_config_le *pno_cfg,
				   struct brcmf_gscan_bucket_config **buckets,
				   u32 *scan_freq)
{
	struct cfg80211_sched_scan_request *sr;
	struct brcmf_gscan_bucket_config *fw_buckets;
	int i, err, chidx;

	brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);
	if (WARN_ON(!pi->n_reqs))
		return -ENODATA;

	/* The base period is the gcd of all requested intervals so every
	 * request's interval is an integer multiple of it; each bucket
	 * then fires every (interval / base) base periods.
	 */
	*scan_freq = pi->reqs[0]->scan_plans[0].interval;
	for (i = 1; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		*scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
	}
	if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
		brcmf_dbg(SCAN, "scan period too small, using minimum\n");
		*scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
	}

	*buckets = NULL;
	fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
	if (!fw_buckets)
		return -ENOMEM;

	/* merge every request's channels into one list; each bucket keeps
	 * the index of its last channel within that list.
	 */
	memset(pno_cfg, 0, sizeof(*pno_cfg));
	for (i = 0; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
		if (chidx < 0) {
			err = chidx;
			goto fail;
		}
		fw_buckets[i].bucket_end_index = chidx - 1;
		fw_buckets[i].bucket_freq_multiple =
			sr->scan_plans[0].interval / *scan_freq;
		/* never let a bucket be idle: fire at least every base period */
		if (!fw_buckets[i].bucket_freq_multiple)
			fw_buckets[i].bucket_freq_multiple = 1;
		fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
	}

	/* dump the resulting bucket layout when scan debugging is enabled */
	if (BRCMF_SCAN_ON()) {
		brcmf_err("base period=%u\n", *scan_freq);
		for (i = 0; i < pi->n_reqs; i++) {
			brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
				  i, fw_buckets[i].bucket_freq_multiple,
				  le16_to_cpu(fw_buckets[i].max_freq_multiple),
				  fw_buckets[i].repeat, fw_buckets[i].flag,
				  fw_buckets[i].bucket_end_index);
		}
	}
	*buckets = fw_buckets;
	return pi->n_reqs;

fail:
	kfree(fw_buckets);
	return err;
}
358
359 static int brcmf_pno_config_networks(struct brcmf_if *ifp,
360 struct brcmf_pno_info *pi)
361 {
362 struct cfg80211_sched_scan_request *r;
363 struct cfg80211_match_set *ms;
364 bool active;
365 int i, j, err = 0;
366
367 for (i = 0; i < pi->n_reqs; i++) {
368 r = pi->reqs[i];
369
370 for (j = 0; j < r->n_match_sets; j++) {
371 ms = &r->match_sets[j];
372 if (ms->ssid.ssid_len) {
373 active = brcmf_is_ssid_active(&ms->ssid, r);
374 err = brcmf_pno_add_ssid(ifp, &ms->ssid,
375 active);
376 }
377 if (!err && is_valid_ether_addr(ms->bssid))
378 err = brcmf_pno_add_bssid(ifp, ms->bssid);
379
380 if (err < 0)
381 return err;
382 }
383 }
384 return 0;
385 }
386
/**
 * brcmf_pno_config_sched_scans() - (re)program firmware for all stored
 *	scheduled scan requests.
 *
 * @ifp: interface to program.
 *
 * Builds the gscan bucket configuration from the stored requests, wipes
 * any previous pfn state in firmware and programs channels, buckets,
 * randomized mac and networks before enabling pfn. On any failure after
 * the initial clean, firmware pfn state is cleaned again so it is never
 * left half-programmed.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_info *pi;
	struct brcmf_gscan_config *gscan_cfg;
	struct brcmf_gscan_bucket_config *buckets;
	struct brcmf_pno_config_le pno_cfg;
	size_t gsz;
	u32 scan_freq;
	int err, n_buckets;

	pi = ifp_to_pno(ifp);
	n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
					    &scan_freq);
	if (n_buckets < 0)
		return n_buckets;

	/* gscan_cfg already contains one bucket, hence (n_buckets - 1) */
	gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
	gscan_cfg = kzalloc(gsz, GFP_KERNEL);
	if (!gscan_cfg) {
		err = -ENOMEM;
		goto free_buckets;
	}

	/* clear any existing pfn state in firmware before reprogramming */
	err = brcmf_pno_clean(ifp);
	if (err < 0) {
		bphy_err(drvr, "failed error=%d\n", err);
		goto free_gscan;
	}

	/* configure the base scan period; no batching (mscan/bestn = 0) */
	err = brcmf_pno_config(ifp, scan_freq, 0, 0);
	if (err < 0)
		goto free_gscan;

	err = brcmf_pno_channel_config(ifp, &pno_cfg);
	if (err < 0)
		goto clean;

	gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
	gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
	gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
	gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;

	gscan_cfg->count_of_channel_buckets = n_buckets;
	memcpy(&gscan_cfg->bucket[0], buckets,
	       n_buckets * sizeof(*buckets));

	err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);

	if (err < 0)
		goto clean;

	/* configure random mac, if any request asked for it */
	err = brcmf_pno_set_random(ifp, pi);
	if (err < 0)
		goto clean;

	err = brcmf_pno_config_networks(ifp, pi);
	if (err < 0)
		goto clean;

	/* everything is programmed: enable pfn in firmware */
	err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);

clean:
	/* on failure, never leave firmware half-programmed */
	if (err < 0)
		brcmf_pno_clean(ifp);
free_gscan:
	kfree(gscan_cfg);
free_buckets:
	kfree(buckets);
	return err;
}
462
463 int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
464 struct cfg80211_sched_scan_request *req)
465 {
466 struct brcmf_pno_info *pi;
467 int ret;
468
469 brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);
470
471 pi = ifp_to_pno(ifp);
472 ret = brcmf_pno_store_request(pi, req);
473 if (ret < 0)
474 return ret;
475
476 ret = brcmf_pno_config_sched_scans(ifp);
477 if (ret < 0) {
478 brcmf_pno_remove_request(pi, req->reqid);
479 if (pi->n_reqs)
480 (void)brcmf_pno_config_sched_scans(ifp);
481 return ret;
482 }
483 return 0;
484 }
485
486 int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
487 {
488 struct brcmf_pno_info *pi;
489 int err;
490
491 brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
492
493 pi = ifp_to_pno(ifp);
494
495
496 if (!pi->n_reqs)
497 return 0;
498
499 err = brcmf_pno_remove_request(pi, reqid);
500 if (err)
501 return err;
502
503 brcmf_pno_clean(ifp);
504
505 if (pi->n_reqs)
506 (void)brcmf_pno_config_sched_scans(ifp);
507
508 return 0;
509 }
510
511 int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
512 {
513 struct brcmf_pno_info *pi;
514
515 brcmf_dbg(TRACE, "enter\n");
516 pi = kzalloc(sizeof(*pi), GFP_KERNEL);
517 if (!pi)
518 return -ENOMEM;
519
520 cfg->pno = pi;
521 mutex_init(&pi->req_lock);
522 return 0;
523 }
524
525 void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
526 {
527 struct brcmf_pno_info *pi;
528
529 brcmf_dbg(TRACE, "enter\n");
530 pi = cfg->pno;
531 cfg->pno = NULL;
532
533 WARN_ON(pi->n_reqs);
534 mutex_destroy(&pi->req_lock);
535 kfree(pi);
536 }
537
538 void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
539 {
540
541 wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
542 wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
543 wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
544 wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
545 wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
546 }
547
548 u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
549 {
550 u64 reqid = 0;
551
552 mutex_lock(&pi->req_lock);
553
554 if (bucket < pi->n_reqs)
555 reqid = pi->reqs[bucket]->reqid;
556
557 mutex_unlock(&pi->req_lock);
558 return reqid;
559 }
560
561 u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
562 struct brcmf_pno_net_info_le *ni)
563 {
564 struct cfg80211_sched_scan_request *req;
565 struct cfg80211_match_set *ms;
566 u32 bucket_map = 0;
567 int i, j;
568
569 mutex_lock(&pi->req_lock);
570 for (i = 0; i < pi->n_reqs; i++) {
571 req = pi->reqs[i];
572
573 if (!req->n_match_sets)
574 continue;
575 for (j = 0; j < req->n_match_sets; j++) {
576 ms = &req->match_sets[j];
577 if (ms->ssid.ssid_len == ni->SSID_len &&
578 !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
579 bucket_map |= BIT(i);
580 break;
581 }
582 if (is_valid_ether_addr(ms->bssid) &&
583 !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
584 bucket_map |= BIT(i);
585 break;
586 }
587 }
588 }
589 mutex_unlock(&pi->req_lock);
590 return bucket_map;
591 }