This source file includes the following definitions:
- ath9k_hw_analog_shift_regwrite
- ath9k_hw_analog_shift_rmw
- ath9k_hw_interpolate
- ath9k_hw_get_lower_upper_index
- ath9k_hw_usb_gen_fill_eeprom
- ath9k_hw_nvram_read_array
- ath9k_hw_nvram_read_pdata
- ath9k_hw_nvram_read_firmware
- ath9k_hw_nvram_read
- ath9k_hw_nvram_swap_data
- ath9k_hw_nvram_validate_checksum
- ath9k_hw_nvram_check_version
- ath9k_hw_fill_vpd_table
- ath9k_hw_get_legacy_target_powers
- ath9k_hw_get_target_powers
- ath9k_hw_get_max_edge_power
- ath9k_hw_get_scaled_power
- ath9k_hw_update_regulatory_maxpower
- ath9k_hw_get_gain_boundaries_pdadcs
- ath9k_hw_eeprom_init

#include "hw.h"
#include <linux/ath9k_platform.h>

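/*
 * Analog shift register access: write (or read-modify-write) the register
 * and, when ah->config.analog_shiftreg is set, wait 100us afterwards so the
 * analog side has time to latch the new value.
 */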
void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
{
	REG_WRITE(ah, reg, val);

	if (ah->config.analog_shiftreg)
		udelay(100);
}

void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
			       u32 shift, u32 val)
{
	REG_RMW(ah, reg, ((val << shift) & mask), mask);

	if (ah->config.analog_shiftreg)
		udelay(100);
}

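/*
 * Linearly interpolate: map @target, lying between @srcLeft and @srcRight,
 * onto the corresponding point between @targetLeft and @targetRight.
 * Returns @targetLeft when the two source endpoints coincide.
 */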
int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
			     int16_t targetLeft, int16_t targetRight)
{
	int16_t rv;

	if (srcRight == srcLeft) {
		rv = targetLeft;
	} else {
		rv = (int16_t) (((target - srcLeft) * targetRight +
				 (srcRight - target) * targetLeft) /
				(srcRight - srcLeft));
	}
	return rv;
}

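/*
 * Find the entries of the sorted @pList that bracket @target.  Returns true
 * on an exact match, or when @target lies outside the list (both indices
 * then point at the nearest end); returns false when @target falls between
 * two entries, with @indexL/@indexR set to the bracketing pair.
 */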
bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
				    u16 *indexL, u16 *indexR)
{
	u16 i;

	if (target <= pList[0]) {
		*indexL = *indexR = 0;
		return true;
	}
	if (target >= pList[listSize - 1]) {
		*indexL = *indexR = (u16) (listSize - 1);
		return true;
	}

	for (i = 0; i < listSize - 1; i++) {
		if (pList[i] == target) {
			*indexL = *indexR = i;
			return true;
		}
		if (target < pList[i + 1]) {
			*indexL = i;
			*indexR = (u16) (i + 1);
			return false;
		}
	}
	return false;
}

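/*
 * Read @size 16-bit EEPROM words starting at @eep_start_loc into @eep_data.
 * Register offsets are collected in batches of eight and fetched with one
 * REG_READ_MULTI() per batch to keep the number of USB transactions down;
 * a final partial batch picks up the remainder.
 */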
void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
				  int eep_start_loc, int size)
{
	int i = 0, j, addr;
	u32 addrdata[8];
	u32 data[8];

	for (addr = 0; addr < size; addr++) {
		addrdata[i] = AR5416_EEPROM_OFFSET +
			((addr + eep_start_loc) << AR5416_EEPROM_S);
		i++;
		if (i == 8) {
			REG_READ_MULTI(ah, addrdata, data, i);

			for (j = 0; j < i; j++) {
				*eep_data = data[j];
				eep_data++;
			}
			i = 0;
		}
	}

	if (i != 0) {
		REG_READ_MULTI(ah, addrdata, data, i);

		for (j = 0; j < i; j++) {
			*eep_data = data[j];
			eep_data++;
		}
	}
}

static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size,
				      off_t offset, u16 *data)
{
	if (offset >= blob_size)
		return false;

	*data = blob[offset];
	return true;
}

static bool ath9k_hw_nvram_read_pdata(struct ath9k_platform_data *pdata,
				      off_t offset, u16 *data)
{
	return ath9k_hw_nvram_read_array(pdata->eeprom_data,
					 ARRAY_SIZE(pdata->eeprom_data),
					 offset, data);
}

static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob,
					 off_t offset, u16 *data)
{
	return ath9k_hw_nvram_read_array((u16 *) eeprom_blob->data,
					 eeprom_blob->size / sizeof(u16),
					 offset, data);
}

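/*
 * Read one 16-bit word of calibration data at offset @off, trying the
 * possible sources in order of preference: a firmware EEPROM blob attached
 * to the device, EEPROM contents passed in via platform data, and finally
 * the bus-specific eeprom_read() op.
 */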
bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_platform_data *pdata = ah->dev->platform_data;
	bool ret;

	if (ah->eeprom_blob)
		ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data);
	else if (pdata && !pdata->use_eeprom)
		ret = ath9k_hw_nvram_read_pdata(pdata, off, data);
	else
		ret = common->bus_ops->eeprom_read(common, off, data);

	if (!ret)
		ath_dbg(common, EEPROM,
			"unable to read eeprom region at offset %u\n", off);

	return ret;
}

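/*
 * Check the EEPROM magic value and, unless AH_NO_EEP_SWAP is set, byte-swap
 * the cached EEPROM image when it was stored in the opposite endianness.
 * *swap_needed reports whether the EEPMISC field marks the contents as
 * big endian.
 */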
int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
{
	u16 magic;
	u16 *eepdata;
	int i;
	bool needs_byteswap = false;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
		ath_err(common, "Reading Magic # failed\n");
		return -EIO;
	}

	if (swab16(magic) == AR5416_EEPROM_MAGIC) {
		needs_byteswap = true;
		ath_dbg(common, EEPROM,
			"EEPROM needs byte-swapping to correct endianness.\n");
	} else if (magic != AR5416_EEPROM_MAGIC) {
		if (ath9k_hw_use_flash(ah)) {
			ath_dbg(common, EEPROM,
				"Ignoring invalid EEPROM magic (0x%04x).\n",
				magic);
		} else {
			ath_err(common,
				"Invalid EEPROM magic (0x%04x).\n", magic);
			return -EINVAL;
		}
	}

	if (needs_byteswap) {
		if (ah->ah_flags & AH_NO_EEP_SWAP) {
			ath_info(common,
				 "Ignoring endianness difference in EEPROM magic bytes.\n");
		} else {
			eepdata = (u16 *)(&ah->eeprom);

			for (i = 0; i < size; i++)
				eepdata[i] = swab16(eepdata[i]);
		}
	}

	if (ah->eep_ops->get_eepmisc(ah) & AR5416_EEPMISC_BIG_ENDIAN) {
		*swap_needed = true;
		ath_dbg(common, EEPROM,
			"Big Endian EEPROM detected according to EEPMISC register.\n");
	} else {
		*swap_needed = false;
	}

	return 0;
}

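/*
 * The EEPROM checksum is chosen so that XOR-ing all 16-bit words of the
 * image, checksum field included, yields 0xffff; any other result means
 * the contents are corrupt.
 */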
bool ath9k_hw_nvram_validate_checksum(struct ath_hw *ah, int size)
{
	u32 i, sum = 0;
	u16 *eepdata = (u16 *)(&ah->eeprom);
	struct ath_common *common = ath9k_hw_common(ah);

	for (i = 0; i < size; i++)
		sum ^= eepdata[i];

	if (sum != 0xffff) {
		ath_err(common, "Bad EEPROM checksum 0x%x\n", sum);
		return false;
	}

	return true;
}

bool ath9k_hw_nvram_check_version(struct ath_hw *ah, int version, int minrev)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (ah->eep_ops->get_eeprom_ver(ah) != version ||
	    ah->eep_ops->get_eeprom_rev(ah) < minrev) {
		ath_err(common, "Bad EEPROM VER 0x%04x or REV 0x%04x\n",
			ah->eep_ops->get_eeprom_ver(ah),
			ah->eep_ops->get_eeprom_rev(ah));
		return false;
	}

	return true;
}

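/*
 * Expand the measured (power, Vpd) intercept points into a Vpd value for
 * each power step between @pwrMin and @pwrMax (the loop advances two power
 * units per entry), linearly interpolating between the two intercepts that
 * bracket each step.
 */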
void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
			     u8 *pVpdList, u16 numIntercepts,
			     u8 *pRetVpdList)
{
	u16 i, k;
	u8 currPwr = pwrMin;
	u16 idxL = 0, idxR = 0;

	for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
		ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
					       numIntercepts, &(idxL),
					       &(idxR));
		if (idxR < 1)
			idxR = 1;
		if (idxL == numIntercepts - 1)
			idxL = (u16) (numIntercepts - 2);
		if (pPwrList[idxL] == pPwrList[idxR])
			k = pVpdList[idxL];
		else
			k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] +
				   (pPwrList[idxR] - currPwr) * pVpdList[idxL]) /
				  (pPwrList[idxR] - pPwrList[idxL]));
		pRetVpdList[i] = (u8) k;
		currPwr += 2;
	}
}

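/*
 * Select the calibrated legacy (CCK/OFDM) target powers for @chan.  A
 * channel that matches a calibration pier exactly, or lies outside the
 * calibrated range, gets that pier's powers verbatim; otherwise the powers
 * of the two surrounding piers are interpolated per rate.
 */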
void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah,
				       struct ath9k_channel *chan,
				       struct cal_target_power_leg *powInfo,
				       u16 numChannels,
				       struct cal_target_power_leg *pNewPower,
				       u16 numRates, bool isExtTarget)
{
	struct chan_centers centers;
	u16 clo, chi;
	int i;
	int matchIndex = -1, lowIndex = -1;
	u16 freq;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;

	if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
				       IS_CHAN_2GHZ(chan))) {
		matchIndex = 0;
	} else {
		for (i = 0; (i < numChannels) &&
			     (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
			if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
						       IS_CHAN_2GHZ(chan))) {
				matchIndex = i;
				break;
			} else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
						IS_CHAN_2GHZ(chan)) && i > 0 &&
				   freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
						IS_CHAN_2GHZ(chan))) {
				lowIndex = i - 1;
				break;
			}
		}
		if ((matchIndex == -1) && (lowIndex == -1))
			matchIndex = i - 1;
	}

	if (matchIndex != -1) {
		*pNewPower = powInfo[matchIndex];
	} else {
		clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
					 IS_CHAN_2GHZ(chan));
		chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
					 IS_CHAN_2GHZ(chan));

		for (i = 0; i < numRates; i++) {
			pNewPower->tPow2x[i] =
				(u8)ath9k_hw_interpolate(freq, clo, chi,
						powInfo[lowIndex].tPow2x[i],
						powInfo[lowIndex + 1].tPow2x[i]);
		}
	}
}

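/*
 * HT counterpart of ath9k_hw_get_legacy_target_powers(): pick, or
 * interpolate between calibration piers, the HT target powers for @chan.
 */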
void ath9k_hw_get_target_powers(struct ath_hw *ah,
				struct ath9k_channel *chan,
				struct cal_target_power_ht *powInfo,
				u16 numChannels,
				struct cal_target_power_ht *pNewPower,
				u16 numRates, bool isHt40Target)
{
	struct chan_centers centers;
	u16 clo, chi;
	int i;
	int matchIndex = -1, lowIndex = -1;
	u16 freq;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = isHt40Target ? centers.synth_center : centers.ctl_center;

	if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
		matchIndex = 0;
	} else {
		for (i = 0; (i < numChannels) &&
			     (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
			if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
						       IS_CHAN_2GHZ(chan))) {
				matchIndex = i;
				break;
			} else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
						IS_CHAN_2GHZ(chan)) && i > 0 &&
				   freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
						IS_CHAN_2GHZ(chan))) {
				lowIndex = i - 1;
				break;
			}
		}
		if ((matchIndex == -1) && (lowIndex == -1))
			matchIndex = i - 1;
	}

	if (matchIndex != -1) {
		*pNewPower = powInfo[matchIndex];
	} else {
		clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
					 IS_CHAN_2GHZ(chan));
		chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
					 IS_CHAN_2GHZ(chan));

		for (i = 0; i < numRates; i++) {
			pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq,
						clo, chi,
						powInfo[lowIndex].tPow2x[i],
						powInfo[lowIndex + 1].tPow2x[i]);
		}
	}
}

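/*
 * Return twice the maximum allowed edge power for @freq from the CTL
 * (conformance test limit) edge list.  An exact frequency match uses that
 * edge's limit; if @freq instead falls between two edges and the lower edge
 * has its flag bit set, the lower edge's limit applies.  With no matching
 * edge the limit stays at MAX_RATE_POWER.
 */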
u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
				bool is2GHz, int num_band_edges)
{
	u16 twiceMaxEdgePower = MAX_RATE_POWER;
	int i;

	for (i = 0; (i < num_band_edges) &&
		     (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
		if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
			twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
			break;
		} else if ((i > 0) &&
			   (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
						      is2GHz))) {
			if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
					       is2GHz) < freq &&
			    CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
				twiceMaxEdgePower =
					CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
			}
			break;
		}
	}

	return twiceMaxEdgePower;
}

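/*
 * Reduce @power_limit by the antenna gain reduction plus a fixed correction
 * for the number of active tx chains, giving the per-chain power limit,
 * clamped to the 0..MAX_RATE_POWER range.
 */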
u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
			      u8 antenna_reduction)
{
	u16 reduction = antenna_reduction;

	switch (ar5416_get_ntxchains(ah->txchainmask)) {
	case 1:
		break;
	case 2:
		reduction += POWER_CORRECTION_FOR_TWO_CHAIN;
		break;
	case 3:
		reduction += POWER_CORRECTION_FOR_THREE_CHAIN;
		break;
	}

	if (power_limit > reduction)
		power_limit -= reduction;
	else
		power_limit = 0;

	return min_t(u16, power_limit, MAX_RATE_POWER);
}

void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);

	switch (ar5416_get_ntxchains(ah->txchainmask)) {
	case 1:
		break;
	case 2:
		regulatory->max_power_level += POWER_CORRECTION_FOR_TWO_CHAIN;
		break;
	case 3:
		regulatory->max_power_level += POWER_CORRECTION_FOR_THREE_CHAIN;
		break;
	default:
		ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
		break;
	}
}

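/*
 * Build the PD gain boundaries and the PDADC table for @chan from the raw
 * per-frequency calibration data.  The channel is matched against the
 * calibration piers (interpolating the Vpd curves of the two nearest piers
 * when there is no exact match), then the per-gain Vpd curves are stitched
 * into a single PDADC table; any remaining boundary slots and PDADC entries
 * are padded with the last computed values.
 */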
void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
					 struct ath9k_channel *chan,
					 void *pRawDataSet,
					 u8 *bChans, u16 availPiers,
					 u16 tPdGainOverlap,
					 u16 *pPdGainBoundaries, u8 *pPDADCValues,
					 u16 numXpdGains)
{
	int i, j, k;
	int16_t ss;
	u16 idxL = 0, idxR = 0, numPiers;
	static u8 vpdTableL[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableR[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableI[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];

	u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
	u8 minPwrT4[AR5416_NUM_PD_GAINS];
	u8 maxPwrT4[AR5416_NUM_PD_GAINS];
	int16_t vpdStep;
	int16_t tmpVal;
	u16 sizeCurrVpdTable, maxIndex, tgtIndex;
	bool match;
	int16_t minDelta = 0;
	struct chan_centers centers;
	int pdgain_boundary_default;
	struct cal_data_per_freq *data_def = pRawDataSet;
	struct cal_data_per_freq_4k *data_4k = pRawDataSet;
	struct cal_data_per_freq_ar9287 *data_9287 = pRawDataSet;
	bool eeprom_4k = AR_SREV_9285(ah) || AR_SREV_9271(ah);
	int intercepts;

	if (AR_SREV_9287(ah))
		intercepts = AR9287_PD_GAIN_ICEPTS;
	else
		intercepts = AR5416_PD_GAIN_ICEPTS;

	memset(&minPwrT4, 0, AR5416_NUM_PD_GAINS);
	ath9k_hw_get_channel_centers(ah, chan, &centers);

	for (numPiers = 0; numPiers < availPiers; numPiers++) {
		if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
			break;
	}

	match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
							     IS_CHAN_2GHZ(chan)),
					       bChans, numPiers, &idxL, &idxR);

	if (match) {
		if (AR_SREV_9287(ah)) {
			for (i = 0; i < numXpdGains; i++) {
				minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						data_9287[idxL].pwrPdg[i],
						data_9287[idxL].vpdPdg[i],
						intercepts,
						vpdTableI[i]);
			}
		} else if (eeprom_4k) {
			for (i = 0; i < numXpdGains; i++) {
				minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						data_4k[idxL].pwrPdg[i],
						data_4k[idxL].vpdPdg[i],
						intercepts,
						vpdTableI[i]);
			}
		} else {
			for (i = 0; i < numXpdGains; i++) {
				minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
				maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						data_def[idxL].pwrPdg[i],
						data_def[idxL].vpdPdg[i],
						intercepts,
						vpdTableI[i]);
			}
		}
	} else {
		for (i = 0; i < numXpdGains; i++) {
			if (AR_SREV_9287(ah)) {
				pVpdL = data_9287[idxL].vpdPdg[i];
				pPwrL = data_9287[idxL].pwrPdg[i];
				pVpdR = data_9287[idxR].vpdPdg[i];
				pPwrR = data_9287[idxR].pwrPdg[i];
			} else if (eeprom_4k) {
				pVpdL = data_4k[idxL].vpdPdg[i];
				pPwrL = data_4k[idxL].pwrPdg[i];
				pVpdR = data_4k[idxR].vpdPdg[i];
				pPwrR = data_4k[idxR].pwrPdg[i];
			} else {
				pVpdL = data_def[idxL].vpdPdg[i];
				pPwrL = data_def[idxL].pwrPdg[i];
				pVpdR = data_def[idxR].vpdPdg[i];
				pPwrR = data_def[idxR].pwrPdg[i];
			}

			minPwrT4[i] = max(pPwrL[0], pPwrR[0]);

			maxPwrT4[i] =
				min(pPwrL[intercepts - 1],
				    pPwrR[intercepts - 1]);

			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrL, pVpdL,
						intercepts,
						vpdTableL[i]);
			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrR, pVpdR,
						intercepts,
						vpdTableR[i]);

			for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
				vpdTableI[i][j] =
					(u8)(ath9k_hw_interpolate((u16)
						FREQ2FBIN(centers.synth_center,
							  IS_CHAN_2GHZ(chan)),
						bChans[idxL], bChans[idxR],
						vpdTableL[i][j], vpdTableR[i][j]));
			}
		}
	}

	k = 0;

	for (i = 0; i < numXpdGains; i++) {
		if (i == (numXpdGains - 1))
			pPdGainBoundaries[i] =
				(u16)(maxPwrT4[i] / 2);
		else
			pPdGainBoundaries[i] =
				(u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);

		pPdGainBoundaries[i] =
			min((u16)MAX_RATE_POWER, pPdGainBoundaries[i]);

		minDelta = 0;

		if (i == 0) {
			if (AR_SREV_9280_20_OR_LATER(ah))
				ss = (int16_t)(0 - (minPwrT4[i] / 2));
			else
				ss = 0;
		} else {
			ss = (int16_t)((pPdGainBoundaries[i - 1] -
					(minPwrT4[i] / 2)) -
				       tPdGainOverlap + 1 + minDelta);
		}
		vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);

		while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
			pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
			ss++;
		}

		sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
		tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
				(minPwrT4[i] / 2));
		maxIndex = (tgtIndex < sizeCurrVpdTable) ?
			tgtIndex : sizeCurrVpdTable;

		while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			pPDADCValues[k++] = vpdTableI[i][ss++];
		}

		vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
				    vpdTableI[i][sizeCurrVpdTable - 2]);
		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);

		if (tgtIndex >= maxIndex) {
			while ((ss <= tgtIndex) &&
			       (k < (AR5416_NUM_PDADC_VALUES - 1))) {
				tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
						    (ss - maxIndex + 1) * vpdStep));
				pPDADCValues[k++] = (u8)((tmpVal > 255) ?
							 255 : tmpVal);
				ss++;
			}
		}
	}

	if (eeprom_4k)
		pdgain_boundary_default = 58;
	else
		pdgain_boundary_default = pPdGainBoundaries[i - 1];

	while (i < AR5416_PD_GAINS_IN_MASK) {
		pPdGainBoundaries[i] = pdgain_boundary_default;
		i++;
	}

	while (k < AR5416_NUM_PDADC_VALUES) {
		pPDADCValues[k] = pPDADCValues[k - 1];
		k++;
	}
}

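/*
 * Pick the EEPROM ops matching the chip family, read the EEPROM contents
 * into ah->eeprom and sanity-check them.  Returns 0 on success or a
 * negative error code.
 */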
int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
	int status;

	if (AR_SREV_9300_20_OR_LATER(ah))
		ah->eep_ops = &eep_ar9300_ops;
	else if (AR_SREV_9287(ah)) {
		ah->eep_ops = &eep_ar9287_ops;
	} else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
		ah->eep_ops = &eep_4k_ops;
	} else {
		ah->eep_ops = &eep_def_ops;
	}

	if (!ah->eep_ops->fill_eeprom(ah))
		return -EIO;

	status = ah->eep_ops->check_eeprom(ah);

	return status;
}