This source file includes the following definitions:
- rtw_phy_cck_pd_init
- rtw_phy_init
- rtw_phy_dig_write
- rtw_phy_stat_false_alarm
- rtw_phy_get_rssi_level
- rtw_phy_stat_rssi_iter
- rtw_phy_stat_rssi
- rtw_phy_statistics
- rtw_phy_dig_check_damping
- rtw_phy_dig_get_boundary
- rtw_phy_dig_get_threshold
- rtw_phy_dig_recorder
- rtw_phy_dig
- rtw_phy_ra_info_update_iter
- rtw_phy_ra_info_update
- rtw_phy_dpk_track
- rtw_phy_cck_pd_lv_unlink
- rtw_phy_cck_pd_lv_link
- rtw_phy_cck_pd_lv
- rtw_phy_cck_pd
- rtw_phy_dynamic_mechanism
- rtw_phy_power_2_db
- rtw_phy_db_2_linear
- rtw_phy_linear_2_db
- rtw_phy_rf_power_2_rssi
- rtw_phy_read_rf
- rtw_phy_write_rf_reg_sipi
- rtw_phy_write_rf_reg
- rtw_phy_write_rf_reg_mix
- rtw_phy_setup_phy_cond
- check_positive
- rtw_parse_tbl_phy_cond
- tbl_to_dec_pwr_by_rate
- rtw_phy_get_rate_values_of_txpwr_by_rate
- rtw_phy_store_tx_power_by_rate
- rtw_parse_tbl_bb_pg
- rtw_channel_to_idx
- rtw_phy_set_tx_power_limit
- rtw_xref_5g_txpwr_lmt
- rtw_xref_txpwr_lmt_by_rs
- rtw_xref_5g_txpwr_lmt_by_ch
- rtw_xref_txpwr_lmt_by_bw
- rtw_xref_txpwr_lmt
- rtw_parse_tbl_txpwr_lmt
- rtw_phy_cfg_mac
- rtw_phy_cfg_agc
- rtw_phy_cfg_bb
- rtw_phy_cfg_rf
- rtw_load_rfk_table
- rtw_phy_load_tables
- rtw_get_channel_group
- rtw_phy_get_dis_dpd_by_rate_diff
- rtw_phy_get_2g_tx_power_index
- rtw_phy_get_5g_tx_power_index
- rtw_phy_get_tx_power_limit
- rtw_get_tx_power_params
- rtw_phy_get_tx_power_index
- rtw_phy_set_tx_power_index_by_rs
- rtw_phy_set_tx_power_level_by_path
- rtw_phy_set_tx_power_level
- rtw_phy_tx_power_by_rate_config_by_path
- rtw_phy_tx_power_by_rate_config
- __rtw_phy_tx_power_limit_config
- rtw_phy_tx_power_limit_config
- rtw_phy_init_tx_power_limit
- rtw_phy_init_tx_power
1
2
3
4
5 #include <linux/bcd.h>
6
7 #include "main.h"
8 #include "reg.h"
9 #include "fw.h"
10 #include "phy.h"
11 #include "debug.h"
12
13 struct phy_cfg_pair {
14 u32 addr;
15 u32 data;
16 };
17
18 union phy_table_tile {
19 struct rtw_phy_cond cond;
20 struct phy_cfg_pair cfg;
21 };
22
23 struct phy_pg_cfg_pair {
24 u32 band;
25 u32 rf_path;
26 u32 tx_num;
27 u32 addr;
28 u32 bitmask;
29 u32 data;
30 };
31
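/* db_invert_table[i][j] ~= 10^(dB / 10) for dB = 8 * i + j + 1, i.e.
 * 1 dB .. 96 dB.  The first three rows are pre-scaled by 2^FRAC_BITS (8)
 * to keep fractional precision for small values; rtw_phy_db_2_linear()
 * applies the same shift to the remaining rows when they are read, so
 * every looked-up value ends up scaled by 2^FRAC_BITS.  For example,
 * entry [8][5] = 10000000 corresponds to 70 dB.
 */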
32 static const u32 db_invert_table[12][8] = {
33 {10, 13, 16, 20,
34 25, 32, 40, 50},
35 {64, 80, 101, 128,
36 160, 201, 256, 318},
37 {401, 505, 635, 800,
38 1007, 1268, 1596, 2010},
39 {316, 398, 501, 631,
40 794, 1000, 1259, 1585},
41 {1995, 2512, 3162, 3981,
42 5012, 6310, 7943, 10000},
43 {12589, 15849, 19953, 25119,
44 31623, 39811, 50119, 63098},
45 {79433, 100000, 125893, 158489,
46 199526, 251189, 316228, 398107},
47 {501187, 630957, 794328, 1000000,
48 1258925, 1584893, 1995262, 2511886},
49 {3162278, 3981072, 5011872, 6309573,
50 7943282, 10000000, 12589254, 15848932},
51 {19952623, 25118864, 31622777, 39810717,
52 50118723, 63095734, 79432823, 100000000},
53 {125892541, 158489319, 199526232, 251188643,
54 316227766, 398107171, 501187234, 630957345},
55 {794328235, 1000000000, 1258925412, 1584893192,
56 1995262315, 2511886432U, 3162277660U, 3981071706U}
57 };
58
59 u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
60 u8 rtw_ofdm_rates[] = {
61 DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
62 DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
63 DESC_RATE48M, DESC_RATE54M
64 };
65 u8 rtw_ht_1s_rates[] = {
66 DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
67 DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
68 DESC_RATEMCS6, DESC_RATEMCS7
69 };
70 u8 rtw_ht_2s_rates[] = {
71 DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
72 DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
73 DESC_RATEMCS14, DESC_RATEMCS15
74 };
75 u8 rtw_vht_1s_rates[] = {
76 DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
77 DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
78 DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
79 DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
80 DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
81 };
82 u8 rtw_vht_2s_rates[] = {
83 DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
84 DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
85 DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
86 DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
87 DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
88 };
89 u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
90 rtw_cck_rates, rtw_ofdm_rates,
91 rtw_ht_1s_rates, rtw_ht_2s_rates,
92 rtw_vht_1s_rates, rtw_vht_2s_rates
93 };
94 u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
95 ARRAY_SIZE(rtw_cck_rates),
96 ARRAY_SIZE(rtw_ofdm_rates),
97 ARRAY_SIZE(rtw_ht_1s_rates),
98 ARRAY_SIZE(rtw_ht_2s_rates),
99 ARRAY_SIZE(rtw_vht_1s_rates),
100 ARRAY_SIZE(rtw_vht_2s_rates)
101 };
102 static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
103 static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
104 static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
105 static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
106 static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
107 static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
108
109 enum rtw_phy_band_type {
110 PHY_BAND_2G = 0,
111 PHY_BAND_5G = 1,
112 };
113
114 static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
115 {
116 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
117 u8 i, j;
118
119 for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) {
120 for (j = 0; j < RTW_RF_PATH_MAX; j++)
121 dm_info->cck_pd_lv[i][j] = CCK_PD_LV0;
122 }
123
124 dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
125 }
126
127 void rtw_phy_init(struct rtw_dev *rtwdev)
128 {
129 struct rtw_chip_info *chip = rtwdev->chip;
130 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
131 u32 addr, mask;
132
133 dm_info->fa_history[3] = 0;
134 dm_info->fa_history[2] = 0;
135 dm_info->fa_history[1] = 0;
136 dm_info->fa_history[0] = 0;
137 dm_info->igi_bitmap = 0;
138 dm_info->igi_history[3] = 0;
139 dm_info->igi_history[2] = 0;
140 dm_info->igi_history[1] = 0;
141
142 addr = chip->dig[0].addr;
143 mask = chip->dig[0].mask;
144 dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
145 rtw_phy_cck_pd_init(rtwdev);
146 }
147
148 void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
149 {
150 struct rtw_chip_info *chip = rtwdev->chip;
151 struct rtw_hal *hal = &rtwdev->hal;
152 u32 addr, mask;
153 u8 path;
154
155 for (path = 0; path < hal->rf_path_num; path++) {
156 addr = chip->dig[path].addr;
157 mask = chip->dig[path].mask;
158 rtw_write32_mask(rtwdev, addr, mask, igi);
159 }
160 }
161
162 static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
163 {
164 struct rtw_chip_info *chip = rtwdev->chip;
165
166 chip->ops->false_alarm_statistics(rtwdev);
167 }
168
169 #define RA_FLOOR_TABLE_SIZE 7
170 #define RA_FLOOR_UP_GAP 3
171
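/* Map an RSSI value to a rate-adaptation level using the floor table
 * below.  The boundaries at or above the current level are raised by
 * RA_FLOOR_UP_GAP, so the level only climbs once the RSSI clearly
 * clears the boundary (hysteresis against bouncing between levels).
 */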
172 static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
173 {
174 u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
175 u8 new_level = 0;
176 int i;
177
178 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
179 if (i >= old_level)
180 table[i] += RA_FLOOR_UP_GAP;
181
182 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
183 if (rssi < table[i]) {
184 new_level = i;
185 break;
186 }
187 }
188
189 return new_level;
190 }
191
192 struct rtw_phy_stat_iter_data {
193 struct rtw_dev *rtwdev;
194 u8 min_rssi;
195 };
196
197 static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
198 {
199 struct rtw_phy_stat_iter_data *iter_data = data;
200 struct rtw_dev *rtwdev = iter_data->rtwdev;
201 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
202 u8 rssi;
203
204 rssi = ewma_rssi_read(&si->avg_rssi);
205 si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);
206
207 rtw_fw_send_rssi_info(rtwdev, si);
208
209 iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
210 }
211
212 static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
213 {
214 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
215 struct rtw_phy_stat_iter_data data = {};
216
217 data.rtwdev = rtwdev;
218 data.min_rssi = U8_MAX;
219 rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);
220
221 dm_info->pre_min_rssi = dm_info->min_rssi;
222 dm_info->min_rssi = data.min_rssi;
223 }
224
225 static void rtw_phy_statistics(struct rtw_dev *rtwdev)
226 {
227 rtw_phy_stat_rssi(rtwdev);
228 rtw_phy_stat_false_alarm(rtwdev);
229 }
230
231 #define DIG_PERF_FA_TH_LOW 250
232 #define DIG_PERF_FA_TH_HIGH 500
233 #define DIG_PERF_FA_TH_EXTRA_HIGH 750
234 #define DIG_PERF_MAX 0x5a
235 #define DIG_PERF_MID 0x40
236 #define DIG_CVRG_FA_TH_LOW 2000
237 #define DIG_CVRG_FA_TH_HIGH 4000
238 #define DIG_CVRG_FA_TH_EXTRA_HIGH 5000
239 #define DIG_CVRG_MAX 0x2a
240 #define DIG_CVRG_MID 0x26
241 #define DIG_CVRG_MIN 0x1c
242 #define DIG_RSSI_GAIN_OFFSET 15
243
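/* Detect "damping": the DIG loop oscillating between two IGI values.
 * If the last four recorded IGI adjustments follow one of two
 * oscillating patterns (the igi_bitmap cases below) while the false
 * alarm count keeps crossing the high/low thresholds, further IGI
 * updates are frozen until the minimum RSSI moves by more than 3 or
 * 20 rounds have passed.
 */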
244 static bool
245 rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
246 {
247 u16 fa_lo = DIG_PERF_FA_TH_LOW;
248 u16 fa_hi = DIG_PERF_FA_TH_HIGH;
249 u16 *fa_history;
250 u8 *igi_history;
251 u8 damping_rssi;
252 u8 min_rssi;
253 u8 diff;
254 u8 igi_bitmap;
255 bool damping = false;
256
257 min_rssi = dm_info->min_rssi;
258 if (dm_info->damping) {
259 damping_rssi = dm_info->damping_rssi;
260 diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
261 damping_rssi - min_rssi;
262 if (diff > 3 || dm_info->damping_cnt++ > 20) {
263 dm_info->damping = false;
264 return false;
265 }
266
267 return true;
268 }
269
270 igi_history = dm_info->igi_history;
271 fa_history = dm_info->fa_history;
272 igi_bitmap = dm_info->igi_bitmap & 0xf;
273 switch (igi_bitmap) {
274 case 5:
275 /* IGI went down -> up -> down -> up over the last four updates */
276 if (igi_history[0] > igi_history[1] &&
277 igi_history[2] > igi_history[3] &&
278 igi_history[0] - igi_history[1] >= 2 &&
279 igi_history[2] - igi_history[3] >= 2 &&
280 fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
281 fa_history[2] > fa_hi && fa_history[3] < fa_lo)
282 damping = true;
283 break;
284 case 9:
285 /* IGI went up -> down -> down -> up over the last four updates */
286 if (igi_history[0] > igi_history[1] &&
287 igi_history[3] > igi_history[2] &&
288 igi_history[0] - igi_history[1] >= 4 &&
289 igi_history[3] - igi_history[2] >= 2 &&
290 fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
291 fa_history[2] < fa_lo && fa_history[3] > fa_hi)
292 damping = true;
293 break;
294 default:
295 return false;
296 }
297
298 if (damping) {
299 dm_info->damping = true;
300 dm_info->damping_cnt = 0;
301 dm_info->damping_rssi = min_rssi;
302 }
303
304 return damping;
305 }
306
307 static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
308 u8 *upper, u8 *lower, bool linked)
309 {
310 u8 dig_max, dig_min, dig_mid;
311 u8 min_rssi;
312
313 if (linked) {
314 dig_max = DIG_PERF_MAX;
315 dig_mid = DIG_PERF_MID;
316
317 dig_min = 0x1c;
318 min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
319 } else {
320 dig_max = DIG_CVRG_MAX;
321 dig_mid = DIG_CVRG_MID;
322 dig_min = DIG_CVRG_MIN;
323 min_rssi = dig_min;
324 }
325
326 /* never raise the gain ceiling more than DIG_RSSI_GAIN_OFFSET above the minimum RSSI */
327 dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);
328
329 *lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
330 *upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
331 }
332
333 static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
334 u16 *fa_th, u8 *step, bool linked)
335 {
336 u8 min_rssi, pre_min_rssi;
337
338 min_rssi = dm_info->min_rssi;
339 pre_min_rssi = dm_info->pre_min_rssi;
340 step[0] = 4;
341 step[1] = 3;
342 step[2] = 2;
343
344 if (linked) {
345 fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
346 fa_th[1] = DIG_PERF_FA_TH_HIGH;
347 fa_th[2] = DIG_PERF_FA_TH_LOW;
348 if (pre_min_rssi > min_rssi) {
349 step[0] = 6;
350 step[1] = 4;
351 step[2] = 2;
352 }
353 } else {
354 fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
355 fa_th[1] = DIG_CVRG_FA_TH_HIGH;
356 fa_th[2] = DIG_CVRG_FA_TH_LOW;
357 }
358 }
359
360 static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
361 {
362 u8 *igi_history;
363 u16 *fa_history;
364 u8 igi_bitmap;
365 bool up;
366
367 igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
368 igi_history = dm_info->igi_history;
369 fa_history = dm_info->fa_history;
370
371 up = igi > igi_history[0];
372 igi_bitmap |= up;
373
374 igi_history[3] = igi_history[2];
375 igi_history[2] = igi_history[1];
376 igi_history[1] = igi_history[0];
377 igi_history[0] = igi;
378
379 fa_history[3] = fa_history[2];
380 fa_history[2] = fa_history[1];
381 fa_history[1] = fa_history[0];
382 fa_history[0] = fa;
383
384 dm_info->igi_bitmap = igi_bitmap;
385 }
386
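/* Dynamic Initial Gain (DIG): each time rtw_phy_dynamic_mechanism() runs,
 * adjust the IGI based on the false alarm statistics.  A high false alarm
 * count raises the IGI (a less sensitive receiver); otherwise the IGI
 * decays by 2.  The result is clamped to boundaries derived from the
 * minimum RSSI of the connected stations.
 */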
387 static void rtw_phy_dig(struct rtw_dev *rtwdev)
388 {
389 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
390 u8 upper_bound, lower_bound;
391 u8 pre_igi, cur_igi;
392 u16 fa_th[3], fa_cnt;
393 u8 level;
394 u8 step[3];
395 bool linked;
396
397 if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
398 return;
399
400 if (rtw_phy_dig_check_damping(dm_info))
401 return;
402
403 linked = !!rtwdev->sta_cnt;
404
405 fa_cnt = dm_info->total_fa_cnt;
406 pre_igi = dm_info->igi_history[0];
407
408 rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);
409
410 /* test the false alarm count against the thresholds from the highest
411  * level down and raise the IGI by the matching step size.
412  *
413  * the step is offset by -2 below, so the IGI decreases by 2 when no
414  * threshold is exceeded */
415 cur_igi = pre_igi;
416 for (level = 0; level < 3; level++) {
417 if (fa_cnt > fa_th[level]) {
418 cur_igi += step[level];
419 break;
420 }
421 }
422 cur_igi -= 2;
423
424 /* compute the upper/lower IGI boundaries from the minimum RSSI of the
425  * connected stations (or fixed coverage values when not linked) and
426  * clamp the new IGI into that range
427  */
428 rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
429 cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);
430
431 /* record the new IGI and the false alarm count so the next round can
432  * track the IGI trend and detect damping
433  */
434 rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);
435
436 if (cur_igi != pre_igi)
437 rtw_phy_dig_write(rtwdev, cur_igi);
438 }
439
440 static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
441 {
442 struct rtw_dev *rtwdev = data;
443 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
444
445 rtw_update_sta_info(rtwdev, si);
446 }
447
448 static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
449 {
450 if (rtwdev->watch_dog_cnt & 0x3)
451 return;
452
453 rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
454 }
455
456 static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
457 {
458 struct rtw_chip_info *chip = rtwdev->chip;
459
460 if (chip->ops->dpk_track)
461 chip->ops->dpk_track(rtwdev);
462 }
463
464 #define CCK_PD_FA_LV1_MIN 1000
465 #define CCK_PD_FA_LV0_MAX 500
466
467 static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev)
468 {
469 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
470 u32 cck_fa_avg = dm_info->cck_fa_avg;
471
472 if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
473 return CCK_PD_LV1;
474
475 if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
476 return CCK_PD_LV0;
477
478 return CCK_PD_LV_MAX;
479 }
480
481 #define CCK_PD_IGI_LV4_VAL 0x38
482 #define CCK_PD_IGI_LV3_VAL 0x2a
483 #define CCK_PD_IGI_LV2_VAL 0x24
484 #define CCK_PD_RSSI_LV4_VAL 32
485 #define CCK_PD_RSSI_LV3_VAL 32
486 #define CCK_PD_RSSI_LV2_VAL 24
487
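/* Pick the CCK power-detection level while associated: strong signals
 * (high IGI and high minimum RSSI) select the higher LV2..LV4 settings,
 * otherwise fall back to the CCK false alarm average as in the unlinked
 * case.  CCK_PD_LV_MAX means "keep the current level".
 */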
488 static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev)
489 {
490 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
491 u8 igi = dm_info->igi_history[0];
492 u8 rssi = dm_info->min_rssi;
493 u32 cck_fa_avg = dm_info->cck_fa_avg;
494
495 if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL)
496 return CCK_PD_LV4;
497 if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL)
498 return CCK_PD_LV3;
499 if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL)
500 return CCK_PD_LV2;
501 if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
502 return CCK_PD_LV1;
503 if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
504 return CCK_PD_LV0;
505
506 return CCK_PD_LV_MAX;
507 }
508
509 static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev)
510 {
511 if (!rtw_is_assoc(rtwdev))
512 return rtw_phy_cck_pd_lv_unlink(rtwdev);
513 else
514 return rtw_phy_cck_pd_lv_link(rtwdev);
515 }
516
517 static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
518 {
519 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
520 struct rtw_chip_info *chip = rtwdev->chip;
521 u32 cck_fa = dm_info->cck_fa_cnt;
522 u8 level;
523
524 if (rtwdev->hal.current_band_type != RTW_BAND_2G)
525 return;
526
527 if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET)
528 dm_info->cck_fa_avg = cck_fa;
529 else
530 dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2;
531
532 level = rtw_phy_cck_pd_lv(rtwdev);
533
534 if (level >= CCK_PD_LV_MAX)
535 return;
536
537 if (chip->ops->cck_pd_set)
538 chip->ops->cck_pd_set(rtwdev, level);
539 }
540
541 void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
542 {
543 /* gather RSSI and false alarm statistics first; the mechanisms below use them */
544 rtw_phy_statistics(rtwdev);
545 rtw_phy_dig(rtwdev);
546 rtw_phy_cck_pd(rtwdev);
547 rtw_phy_ra_info_update(rtwdev);
548 rtw_phy_dpk_track(rtwdev);
549 }
550
551 #define FRAC_BITS 3
552
553 static u8 rtw_phy_power_2_db(s8 power)
554 {
555 if (power <= -100 || power >= 20)
556 return 0;
557 else if (power >= 0)
558 return 100;
559 else
560 return 100 + power;
561 }
562
563 static u64 rtw_phy_db_2_linear(u8 power_db)
564 {
565 u8 i, j;
566 u64 linear;
567
568 if (power_db > 96)
569 power_db = 96;
570 else if (power_db < 1)
571 return 1;
572
573 /* each row of db_invert_table covers 8 dB, starting from 1 dB */
574 i = (power_db - 1) >> 3;
575 j = (power_db - 1) - (i << 3);
576
577 linear = db_invert_table[i][j];
578 linear = i > 2 ? linear << FRAC_BITS : linear;
579
580 return linear;
581 }
582
583 static u8 rtw_phy_linear_2_db(u64 linear)
584 {
585 u8 i;
586 u8 j;
587 u32 dB;
588
589 if (linear >= db_invert_table[11][7])
590 return 96;
591
592 for (i = 0; i < 12; i++) {
593 if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
594 break;
595 else if (i > 2 && linear <= db_invert_table[i][7])
596 break;
597 }
598
599 for (j = 0; j < 8; j++) {
600 if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
601 break;
602 else if (i > 2 && linear <= db_invert_table[i][j])
603 break;
604 }
605
606 if (j == 0 && i == 0)
607 goto end;
608
609 if (j == 0) {
610 if (i != 3) {
611 if (db_invert_table[i][0] - linear >
612 linear - db_invert_table[i - 1][7]) {
613 i = i - 1;
614 j = 7;
615 }
616 } else {
617 if (db_invert_table[3][0] - linear >
618 linear - db_invert_table[2][7]) {
619 i = 2;
620 j = 7;
621 }
622 }
623 } else {
624 if (db_invert_table[i][j] - linear >
625 linear - db_invert_table[i][j - 1]) {
626 j = j - 1;
627 }
628 }
629 end:
630 dB = (i << 3) + j + 1;
631
632 return dB;
633 }
634
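/* Combine the per-path signal powers into one RSSI value: map each
 * path's power into a 0..100 dB range (rtw_phy_power_2_db), convert to
 * the linear domain, average over the number of paths (the 3-path case
 * approximates the division with shifts), and convert the average back
 * to dB.
 */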
635 u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
636 {
637 s8 power;
638 u8 power_db;
639 u64 linear;
640 u64 sum = 0;
641 u8 path;
642
643 for (path = 0; path < path_num; path++) {
644 power = rf_power[path];
645 power_db = rtw_phy_power_2_db(power);
646 linear = rtw_phy_db_2_linear(power_db);
647 sum += linear;
648 }
649
650 sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
651 switch (path_num) {
652 case 2:
653 sum >>= 1;
654 break;
655 case 3:
656 sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
657 break;
658 case 4:
659 sum >>= 2;
660 break;
661 default:
662 break;
663 }
664
665 return rtw_phy_linear_2_db(sum);
666 }
667
668 u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
669 u32 addr, u32 mask)
670 {
671 struct rtw_hal *hal = &rtwdev->hal;
672 struct rtw_chip_info *chip = rtwdev->chip;
673 const u32 *base_addr = chip->rf_base_addr;
674 u32 val, direct_addr;
675
676 if (rf_path >= hal->rf_path_num) {
677 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
678 return INV_RF_DATA;
679 }
680
681 addr &= 0xff;
682 direct_addr = base_addr[rf_path] + (addr << 2);
683 mask &= RFREG_MASK;
684
685 val = rtw_read32_mask(rtwdev, direct_addr, mask);
686
687 return val;
688 }
689
690 bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
691 u32 addr, u32 mask, u32 data)
692 {
693 struct rtw_hal *hal = &rtwdev->hal;
694 struct rtw_chip_info *chip = rtwdev->chip;
695 u32 *sipi_addr = chip->rf_sipi_addr;
696 u32 data_and_addr;
697 u32 old_data = 0;
698 u32 shift;
699
700 if (rf_path >= hal->rf_path_num) {
701 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
702 return false;
703 }
704
705 addr &= 0xff;
706 mask &= RFREG_MASK;
707
708 if (mask != RFREG_MASK) {
709 old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);
710
711 if (old_data == INV_RF_DATA) {
712 rtw_err(rtwdev, "Write fail, rf is disabled\n");
713 return false;
714 }
715
716 shift = __ffs(mask);
717 data = ((old_data) & (~mask)) | (data << shift);
718 }
719
720 data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;
721
722 rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);
723
724 udelay(13);
725
726 return true;
727 }
728
729 bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
730 u32 addr, u32 mask, u32 data)
731 {
732 struct rtw_hal *hal = &rtwdev->hal;
733 struct rtw_chip_info *chip = rtwdev->chip;
734 const u32 *base_addr = chip->rf_base_addr;
735 u32 direct_addr;
736
737 if (rf_path >= hal->rf_path_num) {
738 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
739 return false;
740 }
741
742 addr &= 0xff;
743 direct_addr = base_addr[rf_path] + (addr << 2);
744 mask &= RFREG_MASK;
745
746 if (addr == RF_CFGCH) {
747 rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
748 rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
749 }
750
751 rtw_write32_mask(rtwdev, direct_addr, mask, data);
752
753 udelay(1);
754
755 if (addr == RF_CFGCH) {
756 rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
757 rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
758 }
759
760 return true;
761 }
762
763 bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
764 u32 addr, u32 mask, u32 data)
765 {
766 if (addr != 0x00)
767 return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);
768
769 return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
770 }
771
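/* Conditional parameter tables: each row is either a register write or a
 * condition word describing which chip cut, package, interface and RFE
 * option it applies to.  rtw_phy_setup_phy_cond() records the running
 * device's condition, check_positive() matches a table condition against
 * it, and rtw_parse_tbl_phy_cond() walks the IF/ELIF/ELSE/ENDIF structure
 * applying only the writes of the matching branch.
 */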
772 void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
773 {
774 struct rtw_hal *hal = &rtwdev->hal;
775 struct rtw_efuse *efuse = &rtwdev->efuse;
776 struct rtw_phy_cond cond = {0};
777
778 cond.cut = hal->cut_version ? hal->cut_version : 15;
779 cond.pkg = pkg ? pkg : 15;
780 cond.plat = 0x04;
781 cond.rfe = efuse->rfe_option;
782
783 switch (rtw_hci_type(rtwdev)) {
784 case RTW_HCI_TYPE_USB:
785 cond.intf = INTF_USB;
786 break;
787 case RTW_HCI_TYPE_SDIO:
788 cond.intf = INTF_SDIO;
789 break;
790 case RTW_HCI_TYPE_PCIE:
791 default:
792 cond.intf = INTF_PCIE;
793 break;
794 }
795
796 hal->phy_cond = cond;
797
798 rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
799 }
800
801 static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
802 {
803 struct rtw_hal *hal = &rtwdev->hal;
804 struct rtw_phy_cond drv_cond = hal->phy_cond;
805
806 if (cond.cut && cond.cut != drv_cond.cut)
807 return false;
808
809 if (cond.pkg && cond.pkg != drv_cond.pkg)
810 return false;
811
812 if (cond.intf && cond.intf != drv_cond.intf)
813 return false;
814
815 if (cond.rfe != drv_cond.rfe)
816 return false;
817
818 return true;
819 }
820
821 void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
822 {
823 const union phy_table_tile *p = tbl->data;
824 const union phy_table_tile *end = p + tbl->size / 2;
825 struct rtw_phy_cond pos_cond = {0};
826 bool is_matched = true, is_skipped = false;
827
828 BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));
829
830 for (; p < end; p++) {
831 if (p->cond.pos) {
832 switch (p->cond.branch) {
833 case BRANCH_ENDIF:
834 is_matched = true;
835 is_skipped = false;
836 break;
837 case BRANCH_ELSE:
838 is_matched = is_skipped ? false : true;
839 break;
840 case BRANCH_IF:
841 case BRANCH_ELIF:
842 default:
843 pos_cond = p->cond;
844 break;
845 }
846 } else if (p->cond.neg) {
847 if (!is_skipped) {
848 if (check_positive(rtwdev, pos_cond)) {
849 is_matched = true;
850 is_skipped = true;
851 } else {
852 is_matched = false;
853 is_skipped = false;
854 }
855 } else {
856 is_matched = false;
857 }
858 } else if (is_matched) {
859 (*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
860 }
861 }
862 }
863
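/* Power-by-rate tables pack up to four one-byte power values into each
 * 32-bit word.  Depending on chip->is_pwr_by_rate_dec, each byte is
 * either BCD coded or a plain binary value.
 */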
864 #define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
865
866 static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
867 {
868 if (rtwdev->chip->is_pwr_by_rate_dec)
869 return bcd_to_dec_pwr_by_rate(hex, i);
870
871 return (hex >> (i * 8)) & 0xFF;
872 }
873
874 static void
875 rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
876 u32 addr, u32 mask, u32 val, u8 *rate,
877 u8 *pwr_by_rate, u8 *rate_num)
878 {
879 int i;
880
881 switch (addr) {
882 case 0xE00:
883 case 0x830:
884 rate[0] = DESC_RATE6M;
885 rate[1] = DESC_RATE9M;
886 rate[2] = DESC_RATE12M;
887 rate[3] = DESC_RATE18M;
888 for (i = 0; i < 4; ++i)
889 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
890 *rate_num = 4;
891 break;
892 case 0xE04:
893 case 0x834:
894 rate[0] = DESC_RATE24M;
895 rate[1] = DESC_RATE36M;
896 rate[2] = DESC_RATE48M;
897 rate[3] = DESC_RATE54M;
898 for (i = 0; i < 4; ++i)
899 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
900 *rate_num = 4;
901 break;
902 case 0xE08:
903 rate[0] = DESC_RATE1M;
904 pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
905 *rate_num = 1;
906 break;
907 case 0x86C:
908 if (mask == 0xffffff00) {
909 rate[0] = DESC_RATE2M;
910 rate[1] = DESC_RATE5_5M;
911 rate[2] = DESC_RATE11M;
912 for (i = 1; i < 4; ++i)
913 pwr_by_rate[i - 1] =
914 tbl_to_dec_pwr_by_rate(rtwdev, val, i);
915 *rate_num = 3;
916 } else if (mask == 0x000000ff) {
917 rate[0] = DESC_RATE11M;
918 pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
919 *rate_num = 1;
920 }
921 break;
922 case 0xE10:
923 case 0x83C:
924 rate[0] = DESC_RATEMCS0;
925 rate[1] = DESC_RATEMCS1;
926 rate[2] = DESC_RATEMCS2;
927 rate[3] = DESC_RATEMCS3;
928 for (i = 0; i < 4; ++i)
929 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
930 *rate_num = 4;
931 break;
932 case 0xE14:
933 case 0x848:
934 rate[0] = DESC_RATEMCS4;
935 rate[1] = DESC_RATEMCS5;
936 rate[2] = DESC_RATEMCS6;
937 rate[3] = DESC_RATEMCS7;
938 for (i = 0; i < 4; ++i)
939 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
940 *rate_num = 4;
941 break;
942 case 0xE18:
943 case 0x84C:
944 rate[0] = DESC_RATEMCS8;
945 rate[1] = DESC_RATEMCS9;
946 rate[2] = DESC_RATEMCS10;
947 rate[3] = DESC_RATEMCS11;
948 for (i = 0; i < 4; ++i)
949 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
950 *rate_num = 4;
951 break;
952 case 0xE1C:
953 case 0x868:
954 rate[0] = DESC_RATEMCS12;
955 rate[1] = DESC_RATEMCS13;
956 rate[2] = DESC_RATEMCS14;
957 rate[3] = DESC_RATEMCS15;
958 for (i = 0; i < 4; ++i)
959 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
960 *rate_num = 4;
961 break;
962 case 0x838:
963 rate[0] = DESC_RATE1M;
964 rate[1] = DESC_RATE2M;
965 rate[2] = DESC_RATE5_5M;
966 for (i = 1; i < 4; ++i)
967 pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
968 val, i);
969 *rate_num = 3;
970 break;
971 case 0xC20:
972 case 0xE20:
973 case 0x1820:
974 case 0x1A20:
975 rate[0] = DESC_RATE1M;
976 rate[1] = DESC_RATE2M;
977 rate[2] = DESC_RATE5_5M;
978 rate[3] = DESC_RATE11M;
979 for (i = 0; i < 4; ++i)
980 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
981 *rate_num = 4;
982 break;
983 case 0xC24:
984 case 0xE24:
985 case 0x1824:
986 case 0x1A24:
987 rate[0] = DESC_RATE6M;
988 rate[1] = DESC_RATE9M;
989 rate[2] = DESC_RATE12M;
990 rate[3] = DESC_RATE18M;
991 for (i = 0; i < 4; ++i)
992 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
993 *rate_num = 4;
994 break;
995 case 0xC28:
996 case 0xE28:
997 case 0x1828:
998 case 0x1A28:
999 rate[0] = DESC_RATE24M;
1000 rate[1] = DESC_RATE36M;
1001 rate[2] = DESC_RATE48M;
1002 rate[3] = DESC_RATE54M;
1003 for (i = 0; i < 4; ++i)
1004 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1005 *rate_num = 4;
1006 break;
1007 case 0xC2C:
1008 case 0xE2C:
1009 case 0x182C:
1010 case 0x1A2C:
1011 rate[0] = DESC_RATEMCS0;
1012 rate[1] = DESC_RATEMCS1;
1013 rate[2] = DESC_RATEMCS2;
1014 rate[3] = DESC_RATEMCS3;
1015 for (i = 0; i < 4; ++i)
1016 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1017 *rate_num = 4;
1018 break;
1019 case 0xC30:
1020 case 0xE30:
1021 case 0x1830:
1022 case 0x1A30:
1023 rate[0] = DESC_RATEMCS4;
1024 rate[1] = DESC_RATEMCS5;
1025 rate[2] = DESC_RATEMCS6;
1026 rate[3] = DESC_RATEMCS7;
1027 for (i = 0; i < 4; ++i)
1028 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1029 *rate_num = 4;
1030 break;
1031 case 0xC34:
1032 case 0xE34:
1033 case 0x1834:
1034 case 0x1A34:
1035 rate[0] = DESC_RATEMCS8;
1036 rate[1] = DESC_RATEMCS9;
1037 rate[2] = DESC_RATEMCS10;
1038 rate[3] = DESC_RATEMCS11;
1039 for (i = 0; i < 4; ++i)
1040 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1041 *rate_num = 4;
1042 break;
1043 case 0xC38:
1044 case 0xE38:
1045 case 0x1838:
1046 case 0x1A38:
1047 rate[0] = DESC_RATEMCS12;
1048 rate[1] = DESC_RATEMCS13;
1049 rate[2] = DESC_RATEMCS14;
1050 rate[3] = DESC_RATEMCS15;
1051 for (i = 0; i < 4; ++i)
1052 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1053 *rate_num = 4;
1054 break;
1055 case 0xC3C:
1056 case 0xE3C:
1057 case 0x183C:
1058 case 0x1A3C:
1059 rate[0] = DESC_RATEVHT1SS_MCS0;
1060 rate[1] = DESC_RATEVHT1SS_MCS1;
1061 rate[2] = DESC_RATEVHT1SS_MCS2;
1062 rate[3] = DESC_RATEVHT1SS_MCS3;
1063 for (i = 0; i < 4; ++i)
1064 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1065 *rate_num = 4;
1066 break;
1067 case 0xC40:
1068 case 0xE40:
1069 case 0x1840:
1070 case 0x1A40:
1071 rate[0] = DESC_RATEVHT1SS_MCS4;
1072 rate[1] = DESC_RATEVHT1SS_MCS5;
1073 rate[2] = DESC_RATEVHT1SS_MCS6;
1074 rate[3] = DESC_RATEVHT1SS_MCS7;
1075 for (i = 0; i < 4; ++i)
1076 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1077 *rate_num = 4;
1078 break;
1079 case 0xC44:
1080 case 0xE44:
1081 case 0x1844:
1082 case 0x1A44:
1083 rate[0] = DESC_RATEVHT1SS_MCS8;
1084 rate[1] = DESC_RATEVHT1SS_MCS9;
1085 rate[2] = DESC_RATEVHT2SS_MCS0;
1086 rate[3] = DESC_RATEVHT2SS_MCS1;
1087 for (i = 0; i < 4; ++i)
1088 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1089 *rate_num = 4;
1090 break;
1091 case 0xC48:
1092 case 0xE48:
1093 case 0x1848:
1094 case 0x1A48:
1095 rate[0] = DESC_RATEVHT2SS_MCS2;
1096 rate[1] = DESC_RATEVHT2SS_MCS3;
1097 rate[2] = DESC_RATEVHT2SS_MCS4;
1098 rate[3] = DESC_RATEVHT2SS_MCS5;
1099 for (i = 0; i < 4; ++i)
1100 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1101 *rate_num = 4;
1102 break;
1103 case 0xC4C:
1104 case 0xE4C:
1105 case 0x184C:
1106 case 0x1A4C:
1107 rate[0] = DESC_RATEVHT2SS_MCS6;
1108 rate[1] = DESC_RATEVHT2SS_MCS7;
1109 rate[2] = DESC_RATEVHT2SS_MCS8;
1110 rate[3] = DESC_RATEVHT2SS_MCS9;
1111 for (i = 0; i < 4; ++i)
1112 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1113 *rate_num = 4;
1114 break;
1115 case 0xCD8:
1116 case 0xED8:
1117 case 0x18D8:
1118 case 0x1AD8:
1119 rate[0] = DESC_RATEMCS16;
1120 rate[1] = DESC_RATEMCS17;
1121 rate[2] = DESC_RATEMCS18;
1122 rate[3] = DESC_RATEMCS19;
1123 for (i = 0; i < 4; ++i)
1124 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1125 *rate_num = 4;
1126 break;
1127 case 0xCDC:
1128 case 0xEDC:
1129 case 0x18DC:
1130 case 0x1ADC:
1131 rate[0] = DESC_RATEMCS20;
1132 rate[1] = DESC_RATEMCS21;
1133 rate[2] = DESC_RATEMCS22;
1134 rate[3] = DESC_RATEMCS23;
1135 for (i = 0; i < 4; ++i)
1136 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1137 *rate_num = 4;
1138 break;
1139 case 0xCE0:
1140 case 0xEE0:
1141 case 0x18E0:
1142 case 0x1AE0:
1143 rate[0] = DESC_RATEVHT3SS_MCS0;
1144 rate[1] = DESC_RATEVHT3SS_MCS1;
1145 rate[2] = DESC_RATEVHT3SS_MCS2;
1146 rate[3] = DESC_RATEVHT3SS_MCS3;
1147 for (i = 0; i < 4; ++i)
1148 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1149 *rate_num = 4;
1150 break;
1151 case 0xCE4:
1152 case 0xEE4:
1153 case 0x18E4:
1154 case 0x1AE4:
1155 rate[0] = DESC_RATEVHT3SS_MCS4;
1156 rate[1] = DESC_RATEVHT3SS_MCS5;
1157 rate[2] = DESC_RATEVHT3SS_MCS6;
1158 rate[3] = DESC_RATEVHT3SS_MCS7;
1159 for (i = 0; i < 4; ++i)
1160 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1161 *rate_num = 4;
1162 break;
1163 case 0xCE8:
1164 case 0xEE8:
1165 case 0x18E8:
1166 case 0x1AE8:
1167 rate[0] = DESC_RATEVHT3SS_MCS8;
1168 rate[1] = DESC_RATEVHT3SS_MCS9;
1169 for (i = 0; i < 2; ++i)
1170 pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1171 *rate_num = 2;
1172 break;
1173 default:
1174 rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
1175 break;
1176 }
1177 }
1178
1179 static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
1180 u32 band, u32 rfpath, u32 txnum,
1181 u32 regaddr, u32 bitmask, u32 data)
1182 {
1183 struct rtw_hal *hal = &rtwdev->hal;
1184 u8 rate_num = 0;
1185 u8 rate;
1186 u8 rates[RTW_RF_PATH_MAX] = {0};
1187 s8 offset;
1188 s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
1189 int i;
1190
1191 rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
1192 rates, pwr_by_rate, &rate_num);
1193
1194 if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
1195 (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
1196 rate_num > RTW_RF_PATH_MAX))
1197 return;
1198
1199 for (i = 0; i < rate_num; i++) {
1200 offset = pwr_by_rate[i];
1201 rate = rates[i];
1202 if (band == PHY_BAND_2G)
1203 hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
1204 else if (band == PHY_BAND_5G)
1205 hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
1206 else
1207 continue;
1208 }
1209 }
1210
1211 void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
1212 {
1213 const struct phy_pg_cfg_pair *p = tbl->data;
1214 const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
1215
1216 BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);
1217
1218 for (; p < end; p++) {
1219 if (p->addr == 0xfe || p->addr == 0xffe) {
1220 msleep(50);
1221 continue;
1222 }
1223 rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
1224 p->tx_num, p->addr, p->bitmask,
1225 p->data);
1226 }
1227 }
1228
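/* Power limit tables are indexed by a compact channel index rather than
 * the channel number itself: 5 GHz channels use their position in
 * rtw_channel_idx_5g below, 2.4 GHz channels simply use channel - 1.
 */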
1229 static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
1230 36, 38, 40, 42, 44, 46, 48,
1231 52, 54, 56, 58, 60, 62, 64,
1232 100, 102, 104, 106, 108, 110, 112,
1233 116, 118, 120, 122, 124, 126, 128,
1234 132, 134, 136, 138, 140, 142, 144,
1235 149, 151, 153, 155, 157, 159, 161,
1236 165, 167, 169, 171, 173, 175, 177};
1237
1238 static int rtw_channel_to_idx(u8 band, u8 channel)
1239 {
1240 int ch_idx;
1241 u8 n_channel;
1242
1243 if (band == PHY_BAND_2G) {
1244 ch_idx = channel - 1;
1245 n_channel = RTW_MAX_CHANNEL_NUM_2G;
1246 } else if (band == PHY_BAND_5G) {
1247 n_channel = RTW_MAX_CHANNEL_NUM_5G;
1248 for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
1249 if (rtw_channel_idx_5g[ch_idx] == channel)
1250 break;
1251 } else {
1252 return -1;
1253 }
1254
1255 if (ch_idx >= n_channel)
1256 return -1;
1257
1258 return ch_idx;
1259 }
1260
1261 static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
1262 u8 bw, u8 rs, u8 ch, s8 pwr_limit)
1263 {
1264 struct rtw_hal *hal = &rtwdev->hal;
1265 u8 max_power_index = rtwdev->chip->max_power_index;
1266 s8 ww;
1267 int ch_idx;
1268
1269 pwr_limit = clamp_t(s8, pwr_limit,
1270 -max_power_index, max_power_index);
1271 ch_idx = rtw_channel_to_idx(band, ch);
1272
1273 if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
1274 rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
1275 WARN(1,
1276 "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
1277 regd, band, bw, rs, ch_idx, pwr_limit);
1278 return;
1279 }
1280
1281 if (band == PHY_BAND_2G) {
1282 hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
1283 ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx];
1284 ww = min_t(s8, ww, pwr_limit);
1285 hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
1286 } else if (band == PHY_BAND_5G) {
1287 hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
1288 ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx];
1289 ww = min_t(s8, ww, pwr_limit);
1290 hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
1291 }
1292 }
1293
1294 /* fill in a missing HT or VHT 5G limit from its counterpart: a limit still at max_power_index is treated as unset and inherits the other section's value */
1295 static void
1296 rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd,
1297 u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht)
1298 {
1299 struct rtw_hal *hal = &rtwdev->hal;
1300 u8 max_power_index = rtwdev->chip->max_power_index;
1301 s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx];
1302 s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx];
1303
1304 if (lmt_ht == lmt_vht)
1305 return;
1306
1307 if (lmt_ht == max_power_index)
1308 hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht;
1309
1310 else if (lmt_vht == max_power_index)
1311 hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht;
1312 }
1313
1314
1315 static void
1316 rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx)
1317 {
1318 u8 rs_idx, rs_ht, rs_vht;
1319 u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
1320 {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} };
1321
1322 for (rs_idx = 0; rs_idx < 2; rs_idx++) {
1323 rs_ht = rs_cmp[rs_idx][0];
1324 rs_vht = rs_cmp[rs_idx][1];
1325
1326 rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht);
1327 }
1328 }
1329
1330
1331 static void
1332 rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw)
1333 {
1334 u8 ch_idx;
1335
1336 for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++)
1337 rtw_xref_txpwr_lmt_by_rs(rtwdev, regd, bw, ch_idx);
1338 }
1339
1340
1341 static void
1342 rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd)
1343 {
1344 u8 bw;
1345
1346 for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++)
1347 rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw);
1348 }
1349
1350
1351 static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
1352 {
1353 u8 regd;
1354
1355 for (regd = 0; regd < RTW_REGD_MAX; regd++)
1356 rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
1357 }
1358
1359 void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
1360 const struct rtw_table *tbl)
1361 {
1362 const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
1363 const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
1364
1365 for (; p < end; p++) {
1366 rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
1367 p->bw, p->rs, p->ch, p->txpwr_lmt);
1368 }
1369
1370 rtw_xref_txpwr_lmt(rtwdev);
1371 }
1372
1373 void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
1374 u32 addr, u32 data)
1375 {
1376 rtw_write8(rtwdev, addr, data);
1377 }
1378
1379 void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
1380 u32 addr, u32 data)
1381 {
1382 rtw_write32(rtwdev, addr, data);
1383 }
1384
1385 void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
1386 u32 addr, u32 data)
1387 {
1388 if (addr == 0xfe)
1389 msleep(50);
1390 else if (addr == 0xfd)
1391 mdelay(5);
1392 else if (addr == 0xfc)
1393 mdelay(1);
1394 else if (addr == 0xfb)
1395 usleep_range(50, 60);
1396 else if (addr == 0xfa)
1397 udelay(5);
1398 else if (addr == 0xf9)
1399 udelay(1);
1400 else
1401 rtw_write32(rtwdev, addr, data);
1402 }
1403
1404 void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
1405 u32 addr, u32 data)
1406 {
1407 if (addr == 0xffe) {
1408 msleep(50);
1409 } else if (addr == 0xfe) {
1410 usleep_range(100, 110);
1411 } else {
1412 rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
1413 udelay(1);
1414 }
1415 }
1416
1417 static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
1418 {
1419 struct rtw_chip_info *chip = rtwdev->chip;
1420 struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
1421
1422 if (!chip->rfk_init_tbl)
1423 return;
1424
1425 rtw_write32_mask(rtwdev, 0x1e24, BIT(17), 0x1);
1426 rtw_write32_mask(rtwdev, 0x1cd0, BIT(28), 0x1);
1427 rtw_write32_mask(rtwdev, 0x1cd0, BIT(29), 0x1);
1428 rtw_write32_mask(rtwdev, 0x1cd0, BIT(30), 0x1);
1429 rtw_write32_mask(rtwdev, 0x1cd0, BIT(31), 0x0);
1430
1431 rtw_load_table(rtwdev, chip->rfk_init_tbl);
1432
1433 dpk_info->is_dpk_pwr_on = 1;
1434 }
1435
1436 void rtw_phy_load_tables(struct rtw_dev *rtwdev)
1437 {
1438 struct rtw_chip_info *chip = rtwdev->chip;
1439 u8 rf_path;
1440
1441 rtw_load_table(rtwdev, chip->mac_tbl);
1442 rtw_load_table(rtwdev, chip->bb_tbl);
1443 rtw_load_table(rtwdev, chip->agc_tbl);
1444 rtw_load_rfk_table(rtwdev);
1445
1446 for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
1447 const struct rtw_table *tbl;
1448
1449 tbl = chip->rf_tbl[rf_path];
1450 rtw_load_table(rtwdev, tbl);
1451 }
1452 }
1453
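/* Map a channel to the tx power calibration group used to index the
 * per-group base power values read from efuse; channels in the same
 * group share a base index.
 */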
1454 static u8 rtw_get_channel_group(u8 channel)
1455 {
1456 switch (channel) {
1457 default:
1458 WARN_ON(1);
1459 /* fall through */
1460 case 1:
1461 case 2:
1462 case 36:
1463 case 38:
1464 case 40:
1465 case 42:
1466 return 0;
1467 case 3:
1468 case 4:
1469 case 5:
1470 case 44:
1471 case 46:
1472 case 48:
1473 case 50:
1474 return 1;
1475 case 6:
1476 case 7:
1477 case 8:
1478 case 52:
1479 case 54:
1480 case 56:
1481 case 58:
1482 return 2;
1483 case 9:
1484 case 10:
1485 case 11:
1486 case 60:
1487 case 62:
1488 case 64:
1489 return 3;
1490 case 12:
1491 case 13:
1492 case 100:
1493 case 102:
1494 case 104:
1495 case 106:
1496 return 4;
1497 case 14:
1498 case 108:
1499 case 110:
1500 case 112:
1501 case 114:
1502 return 5;
1503 case 116:
1504 case 118:
1505 case 120:
1506 case 122:
1507 return 6;
1508 case 124:
1509 case 126:
1510 case 128:
1511 case 130:
1512 return 7;
1513 case 132:
1514 case 134:
1515 case 136:
1516 case 138:
1517 return 8;
1518 case 140:
1519 case 142:
1520 case 144:
1521 return 9;
1522 case 149:
1523 case 151:
1524 case 153:
1525 case 155:
1526 return 10;
1527 case 157:
1528 case 159:
1529 case 161:
1530 return 11;
1531 case 165:
1532 case 167:
1533 case 169:
1534 case 171:
1535 return 12;
1536 case 173:
1537 case 175:
1538 case 177:
1539 return 13;
1540 }
1541 }
1542
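/* Some chips disable digital pre-distortion (DPD) for certain low rates;
 * those rates get a fixed power index reduction of 6 * txgi_factor.
 */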
1543 static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
1544 {
1545 struct rtw_chip_info *chip = rtwdev->chip;
1546 s8 dpd_diff = 0;
1547
1548 if (!chip->en_dis_dpd)
1549 return 0;
1550
1551 #define RTW_DPD_RATE_CHECK(_rate) \
1552 case DESC_RATE ## _rate: \
1553 if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask) \
1554 dpd_diff = -6 * chip->txgi_factor; \
1555 break
1556
1557 switch (rate) {
1558 RTW_DPD_RATE_CHECK(6M);
1559 RTW_DPD_RATE_CHECK(9M);
1560 RTW_DPD_RATE_CHECK(MCS0);
1561 RTW_DPD_RATE_CHECK(MCS1);
1562 RTW_DPD_RATE_CHECK(MCS8);
1563 RTW_DPD_RATE_CHECK(MCS9);
1564 RTW_DPD_RATE_CHECK(VHT1SS_MCS0);
1565 RTW_DPD_RATE_CHECK(VHT1SS_MCS1);
1566 RTW_DPD_RATE_CHECK(VHT2SS_MCS0);
1567 RTW_DPD_RATE_CHECK(VHT2SS_MCS1);
1568 }
1569 #undef RTW_DPD_RATE_CHECK
1570
1571 return dpd_diff;
1572 }
1573
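/* Base index for 2.4 GHz: CCK rates use cck_base[], all other rates use
 * bw40_base[].  OFDM rates add the ht_1s ofdm diff; HT/VHT rates add the
 * per-bandwidth and per-spatial-stream diffs.  Every diff is scaled by
 * the chip's txgi_factor.
 */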
1574 static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
1575 struct rtw_2g_txpwr_idx *pwr_idx_2g,
1576 enum rtw_bandwidth bandwidth,
1577 u8 rate, u8 group)
1578 {
1579 struct rtw_chip_info *chip = rtwdev->chip;
1580 u8 tx_power;
1581 bool mcs_rate;
1582 bool above_2ss;
1583 u8 factor = chip->txgi_factor;
1584
1585 if (rate <= DESC_RATE11M)
1586 tx_power = pwr_idx_2g->cck_base[group];
1587 else
1588 tx_power = pwr_idx_2g->bw40_base[group];
1589
1590 if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
1591 tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
1592
1593 mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
1594 (rate >= DESC_RATEVHT1SS_MCS0 &&
1595 rate <= DESC_RATEVHT2SS_MCS9);
1596 above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
1597 (rate >= DESC_RATEVHT2SS_MCS0);
1598
1599 if (!mcs_rate)
1600 return tx_power;
1601
1602 switch (bandwidth) {
1603 default:
1604 WARN_ON(1);
1605 /* fall through */
1606 case RTW_CHANNEL_WIDTH_20:
1607 tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
1608 if (above_2ss)
1609 tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
1610 break;
1611 case RTW_CHANNEL_WIDTH_40:
1612 /* bw40_base is already the 1SS 40 MHz index; only the 2SS diff applies */
1613 if (above_2ss)
1614 tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
1615 break;
1616 }
1617
1618 return tx_power;
1619 }
1620
1621 static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
1622 struct rtw_5g_txpwr_idx *pwr_idx_5g,
1623 enum rtw_bandwidth bandwidth,
1624 u8 rate, u8 group)
1625 {
1626 struct rtw_chip_info *chip = rtwdev->chip;
1627 u8 tx_power;
1628 u8 upper, lower;
1629 bool mcs_rate;
1630 bool above_2ss;
1631 u8 factor = chip->txgi_factor;
1632
1633 tx_power = pwr_idx_5g->bw40_base[group];
1634
1635 mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
1636 (rate >= DESC_RATEVHT1SS_MCS0 &&
1637 rate <= DESC_RATEVHT2SS_MCS9);
1638 above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
1639 (rate >= DESC_RATEVHT2SS_MCS0);
1640
1641 if (!mcs_rate) {
1642 tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
1643 return tx_power;
1644 }
1645
1646 switch (bandwidth) {
1647 default:
1648 WARN_ON(1);
1649 /* fall through */
1650 case RTW_CHANNEL_WIDTH_20:
1651 tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
1652 if (above_2ss)
1653 tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
1654 break;
1655 case RTW_CHANNEL_WIDTH_40:
1656 /* bw40_base is already the 1SS 40 MHz index; only the 2SS diff applies */
1657 if (above_2ss)
1658 tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
1659 break;
1660 case RTW_CHANNEL_WIDTH_80:
1661 /* the 80 MHz base is the average of the two underlying 40 MHz group bases */
1662 lower = pwr_idx_5g->bw40_base[group];
1663 upper = pwr_idx_5g->bw40_base[group + 1];
1664
1665 tx_power = (lower + upper) / 2;
1666 tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
1667 if (above_2ss)
1668 tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
1669 break;
1670 }
1671
1672 return tx_power;
1673 }
1674
1675 static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
1676 enum rtw_bandwidth bw, u8 rf_path,
1677 u8 rate, u8 channel, u8 regd)
1678 {
1679 struct rtw_hal *hal = &rtwdev->hal;
1680 u8 *cch_by_bw = hal->cch_by_bw;
1681 s8 power_limit = (s8)rtwdev->chip->max_power_index;
1682 u8 rs;
1683 int ch_idx;
1684 u8 cur_bw, cur_ch;
1685 s8 cur_lmt;
1686
1687 if (regd > RTW_REGD_WW)
1688 return power_limit;
1689
1690 if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
1691 rs = RTW_RATE_SECTION_CCK;
1692 else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
1693 rs = RTW_RATE_SECTION_OFDM;
1694 else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
1695 rs = RTW_RATE_SECTION_HT_1S;
1696 else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
1697 rs = RTW_RATE_SECTION_HT_2S;
1698 else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
1699 rs = RTW_RATE_SECTION_VHT_1S;
1700 else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
1701 rs = RTW_RATE_SECTION_VHT_2S;
1702 else
1703 goto err;
1704
1705 /* CCK and OFDM limits are only defined for 20 MHz */
1706 if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM)
1707 bw = RTW_CHANNEL_WIDTH_20;
1708
1709 /* HT limits are only defined up to 40 MHz */
1710 if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
1711 bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);
1712
1713 /* take the minimum limit from 20 MHz up to the current bandwidth */
1714 for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) {
1715 cur_ch = cch_by_bw[cur_bw];
1716
1717 ch_idx = rtw_channel_to_idx(band, cur_ch);
1718 if (ch_idx < 0)
1719 goto err;
1720
1721 cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ?
1722 hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] :
1723 hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx];
1724
1725 power_limit = min_t(s8, cur_lmt, power_limit);
1726 }
1727
1728 return power_limit;
1729
1730 err:
1731 WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
1732 band, bw, rf_path, rate, channel);
1733 return (s8)rtwdev->chip->max_power_index;
1734 }
1735
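/* Final tx power index = base index (from efuse, by channel group and
 * bandwidth) + min(by-rate offset, regulatory power limit), optionally
 * reduced further when DPD is disabled for the rate, and clamped to the
 * chip's max_power_index.  rtw_get_tx_power_params() gathers the three
 * components, rtw_phy_get_tx_power_index() combines them.
 */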
1736 void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
1737 u8 ch, u8 regd, struct rtw_power_params *pwr_param)
1738 {
1739 struct rtw_hal *hal = &rtwdev->hal;
1740 struct rtw_txpwr_idx *pwr_idx;
1741 u8 group, band;
1742 u8 *base = &pwr_param->pwr_base;
1743 s8 *offset = &pwr_param->pwr_offset;
1744 s8 *limit = &pwr_param->pwr_limit;
1745
1746 pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
1747 group = rtw_get_channel_group(ch);
1748
1749 /* channels 1-14 are 2.4 GHz, everything above is 5 GHz */
1750 if (ch <= 14) {
1751 band = PHY_BAND_2G;
1752 *base = rtw_phy_get_2g_tx_power_index(rtwdev,
1753 &pwr_idx->pwr_idx_2g,
1754 bw, rate, group);
1755 *offset = hal->tx_pwr_by_rate_offset_2g[path][rate];
1756 } else {
1757 band = PHY_BAND_5G;
1758 *base = rtw_phy_get_5g_tx_power_index(rtwdev,
1759 &pwr_idx->pwr_idx_5g,
1760 bw, rate, group);
1761 *offset = hal->tx_pwr_by_rate_offset_5g[path][rate];
1762 }
1763
1764 *limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
1765 rate, ch, regd);
1766 }
1767
1768 u8
1769 rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
1770 enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
1771 {
1772 struct rtw_power_params pwr_param = {0};
1773 u8 tx_power;
1774 s8 offset;
1775
1776 rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth,
1777 channel, regd, &pwr_param);
1778
1779 tx_power = pwr_param.pwr_base;
1780 offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit);
1781
1782 if (rtwdev->chip->en_dis_dpd)
1783 offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate);
1784
1785 tx_power += offset;
1786
1787 if (tx_power > rtwdev->chip->max_power_index)
1788 tx_power = rtwdev->chip->max_power_index;
1789
1790 return tx_power;
1791 }
1792
1793 static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
1794 u8 ch, u8 path, u8 rs)
1795 {
1796 struct rtw_hal *hal = &rtwdev->hal;
1797 u8 regd = rtwdev->regd.txpwr_regd;
1798 u8 *rates;
1799 u8 size;
1800 u8 rate;
1801 u8 pwr_idx;
1802 u8 bw;
1803 int i;
1804
1805 if (rs >= RTW_RATE_SECTION_MAX)
1806 return;
1807
1808 rates = rtw_rate_section[rs];
1809 size = rtw_rate_size[rs];
1810 bw = hal->current_band_width;
1811 for (i = 0; i < size; i++) {
1812 rate = rates[i];
1813 pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate,
1814 bw, ch, regd);
1815 hal->tx_pwr_tbl[path][rate] = pwr_idx;
1816 }
1817 }
1818
1819 /* compute the tx power index of every rate in every rate section that
1820  * applies to the current band, for the given channel and path, and
1821  * store the results in hal->tx_pwr_tbl; rtw_phy_set_tx_power_level()
1822  * then hands the table to the chip's set_tx_power_index op
1823  */
1824 static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
1825 u8 ch, u8 path)
1826 {
1827 struct rtw_hal *hal = &rtwdev->hal;
1828 u8 rs;
1829
1830 /* the CCK rate section only applies on the 2.4 GHz band */
1831 if (hal->current_band_type == RTW_BAND_2G)
1832 rs = RTW_RATE_SECTION_CCK;
1833 else
1834 rs = RTW_RATE_SECTION_OFDM;
1835
1836 for (; rs < RTW_RATE_SECTION_MAX; rs++)
1837 rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
1838 }
1839
1840 void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
1841 {
1842 struct rtw_chip_info *chip = rtwdev->chip;
1843 struct rtw_hal *hal = &rtwdev->hal;
1844 u8 path;
1845
1846 mutex_lock(&hal->tx_power_mutex);
1847
1848 for (path = 0; path < hal->rf_path_num; path++)
1849 rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path);
1850
1851 chip->ops->set_tx_power_index(rtwdev);
1852 mutex_unlock(&hal->tx_power_mutex);
1853 }
1854
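/* Convert the absolute by-rate power values parsed from the tables into
 * offsets relative to a per-section base rate (the highest rate of the
 * section, or MCS7 of the section for VHT), and remember that base so
 * the power limits can later be expressed relative to it as well.
 */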
1855 static void
1856 rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
1857 u8 rs, u8 size, u8 *rates)
1858 {
1859 u8 rate;
1860 u8 base_idx, rate_idx;
1861 s8 base_2g, base_5g;
1862
1863 if (rs >= RTW_RATE_SECTION_VHT_1S)
1864 base_idx = rates[size - 3];
1865 else
1866 base_idx = rates[size - 1];
1867 base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
1868 base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
1869 hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
1870 hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
1871 for (rate = 0; rate < size; rate++) {
1872 rate_idx = rates[rate];
1873 hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
1874 hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
1875 }
1876 }
1877
1878 void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
1879 {
1880 u8 path;
1881
1882 for (path = 0; path < RTW_RF_PATH_MAX; path++) {
1883 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
1884 RTW_RATE_SECTION_CCK,
1885 rtw_cck_size, rtw_cck_rates);
1886 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
1887 RTW_RATE_SECTION_OFDM,
1888 rtw_ofdm_size, rtw_ofdm_rates);
1889 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
1890 RTW_RATE_SECTION_HT_1S,
1891 rtw_ht_1s_size, rtw_ht_1s_rates);
1892 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
1893 RTW_RATE_SECTION_HT_2S,
1894 rtw_ht_2s_size, rtw_ht_2s_rates);
1895 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
1896 RTW_RATE_SECTION_VHT_1S,
1897 rtw_vht_1s_size, rtw_vht_1s_rates);
1898 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
1899 RTW_RATE_SECTION_VHT_2S,
1900 rtw_vht_2s_size, rtw_vht_2s_rates);
1901 }
1902 }
1903
1904 static void
1905 __rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
1906 {
1907 s8 base;
1908 u8 ch;
1909
1910 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
1911 base = hal->tx_pwr_by_rate_base_2g[0][rs];
1912 hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
1913 }
1914
1915 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
1916 base = hal->tx_pwr_by_rate_base_5g[0][rs];
1917 hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
1918 }
1919 }
1920
1921 void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
1922 {
1923 u8 regd, bw, rs;
1924
1925 /* default the 20 MHz center channel to channel 1 */
1926 hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1;
1927
1928 for (regd = 0; regd < RTW_REGD_MAX; regd++)
1929 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
1930 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
1931 __rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
1932 }
1933
1934 static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev,
1935 u8 regd, u8 bw, u8 rs)
1936 {
1937 struct rtw_hal *hal = &rtwdev->hal;
1938 s8 max_power_index = (s8)rtwdev->chip->max_power_index;
1939 u8 ch;
1940
1941
1942 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
1943 hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index;
1944
1945
1946 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
1947 hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index;
1948 }
1949
1950 void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
1951 {
1952 struct rtw_hal *hal = &rtwdev->hal;
1953 u8 regd, path, rate, rs, bw;
1954
1955 /* reset the by-rate offsets */
1956 for (path = 0; path < RTW_RF_PATH_MAX; path++) {
1957 for (rate = 0; rate < DESC_RATE_MAX; rate++) {
1958 hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
1959 hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
1960 }
1961 }
1962
1963 /* reset every power limit to the chip's maximum index */
1964 for (regd = 0; regd < RTW_REGD_MAX; regd++)
1965 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
1966 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
1967 rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
1968 rs);
1969 }