This source file includes the following definitions.
- ixgbe_read_mbx
- ixgbe_write_mbx
- ixgbe_check_for_msg
- ixgbe_check_for_ack
- ixgbe_check_for_rst
- ixgbe_poll_for_msg
- ixgbe_poll_for_ack
- ixgbe_read_posted_mbx
- ixgbe_write_posted_mbx
- ixgbe_check_for_bit_pf
- ixgbe_check_for_msg_pf
- ixgbe_check_for_ack_pf
- ixgbe_check_for_rst_pf
- ixgbe_obtain_mbx_lock_pf
- ixgbe_write_mbx_pf
- ixgbe_read_mbx_pf
- ixgbe_init_mbx_params_pf
1
2
3
4 #include <linux/pci.h>
5 #include <linux/delay.h>
6 #include "ixgbe.h"
7 #include "ixgbe_mbx.h"
8
9
10
11
12
13
14
15
16
17
18 s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
19 {
20 struct ixgbe_mbx_info *mbx = &hw->mbx;
21
22
23 if (size > mbx->size)
24 size = mbx->size;
25
26 if (!mbx->ops)
27 return IXGBE_ERR_MBX;
28
29 return mbx->ops->read(hw, msg, size, mbx_id);
30 }
31
32
33
34
35
36
37
38
39
40
41 s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
42 {
43 struct ixgbe_mbx_info *mbx = &hw->mbx;
44
45 if (size > mbx->size)
46 return IXGBE_ERR_MBX;
47
48 if (!mbx->ops)
49 return IXGBE_ERR_MBX;
50
51 return mbx->ops->write(hw, msg, size, mbx_id);
52 }
53
54
55
56
57
58
59
60
61 s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
62 {
63 struct ixgbe_mbx_info *mbx = &hw->mbx;
64
65 if (!mbx->ops)
66 return IXGBE_ERR_MBX;
67
68 return mbx->ops->check_for_msg(hw, mbx_id);
69 }
70
71
72
73
74
75
76
77
78 s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
79 {
80 struct ixgbe_mbx_info *mbx = &hw->mbx;
81
82 if (!mbx->ops)
83 return IXGBE_ERR_MBX;
84
85 return mbx->ops->check_for_ack(hw, mbx_id);
86 }
87
88
89
90
91
92
93
94
95 s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
96 {
97 struct ixgbe_mbx_info *mbx = &hw->mbx;
98
99 if (!mbx->ops)
100 return IXGBE_ERR_MBX;
101
102 return mbx->ops->check_for_rst(hw, mbx_id);
103 }
104
105
106
107
108
109
110
111
112 static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
113 {
114 struct ixgbe_mbx_info *mbx = &hw->mbx;
115 int countdown = mbx->timeout;
116
117 if (!countdown || !mbx->ops)
118 return IXGBE_ERR_MBX;
119
120 while (mbx->ops->check_for_msg(hw, mbx_id)) {
121 countdown--;
122 if (!countdown)
123 return IXGBE_ERR_MBX;
124 udelay(mbx->usec_delay);
125 }
126
127 return 0;
128 }
129
130
131
132
133
134
135
136
137 static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
138 {
139 struct ixgbe_mbx_info *mbx = &hw->mbx;
140 int countdown = mbx->timeout;
141
142 if (!countdown || !mbx->ops)
143 return IXGBE_ERR_MBX;
144
145 while (mbx->ops->check_for_ack(hw, mbx_id)) {
146 countdown--;
147 if (!countdown)
148 return IXGBE_ERR_MBX;
149 udelay(mbx->usec_delay);
150 }
151
152 return 0;
153 }
154
155
156
157
158
159
160
161
162
163
164
165 static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
166 u16 mbx_id)
167 {
168 struct ixgbe_mbx_info *mbx = &hw->mbx;
169 s32 ret_val;
170
171 if (!mbx->ops)
172 return IXGBE_ERR_MBX;
173
174 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
175 if (ret_val)
176 return ret_val;
177
178
179 return mbx->ops->read(hw, msg, size, mbx_id);
180 }
181
182
183
184
185
186
187
188
189
190
191
192 static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
193 u16 mbx_id)
194 {
195 struct ixgbe_mbx_info *mbx = &hw->mbx;
196 s32 ret_val;
197
198
199 if (!mbx->ops || !mbx->timeout)
200 return IXGBE_ERR_MBX;
201
202
203 ret_val = mbx->ops->write(hw, msg, size, mbx_id);
204 if (ret_val)
205 return ret_val;
206
207
208 return ixgbe_poll_for_ack(hw, mbx_id);
209 }
210
211 static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
212 {
213 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
214
215 if (mbvficr & mask) {
216 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
217 return 0;
218 }
219
220 return IXGBE_ERR_MBX;
221 }
222
223
224
225
226
227
228
229
230 static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
231 {
232 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
233 u32 vf_bit = vf_number % 16;
234
235 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
236 index)) {
237 hw->mbx.stats.reqs++;
238 return 0;
239 }
240
241 return IXGBE_ERR_MBX;
242 }
243
244
245
246
247
248
249
250
251 static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
252 {
253 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
254 u32 vf_bit = vf_number % 16;
255
256 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
257 index)) {
258 hw->mbx.stats.acks++;
259 return 0;
260 }
261
262 return IXGBE_ERR_MBX;
263 }
264
265
266
267
268
269
270
271
/**
 * ixgbe_check_for_rst_pf - checks to see if the VF has reset
 * @hw: pointer to the HW structure
 * @vf_number: the VF index
 *
 * Returns 0 (and bumps the reset counter) if the VFLR bit for this VF is
 * set, IXGBE_ERR_MBX otherwise.  Which register reports VFLR depends on
 * the MAC generation; unknown MACs leave vflre at 0 and report no reset.
 **/
static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	/* two 32-bit VFLR registers cover up to 64 VFs */
	u32 reg_offset = (vf_number < 32) ? 0 : 1;
	u32 vf_shift = vf_number % 32;
	u32 vflre = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
		break;
	default:
		break;
	}

	if (vflre & BIT(vf_shift)) {
		/* clear the indication via VFLREC on all MAC types
		 * (NOTE(review): assumed W1C clear register — per datasheet)
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
		hw->mbx.stats.rsts++;
		return 0;
	}

	return IXGBE_ERR_MBX;
}
300
301
302
303
304
305
306
307
308 static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
309 {
310 u32 p2v_mailbox;
311
312
313 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
314
315
316 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
317 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
318 return 0;
319
320 return IXGBE_ERR_MBX;
321 }
322
323
324
325
326
327
328
329
330
331
/**
 * ixgbe_write_mbx_pf - Places a message in the mailbox
 * @hw: pointer to the HW structure
 * @msg: the message buffer
 * @size: length of the buffer in dwords
 * @mbx_id: the VF index
 *
 * Returns 0 if it successfully copied the message into the mailbox and
 * raised the STS notification, IXGBE_ERR_MBX if the mailbox lock could
 * not be obtained.  The register sequence (lock, flush, copy, notify)
 * is a hardware handshake and must not be reordered.
 **/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
			      u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		return ret_val;

	/* flush msg and acks as we are overwriting the message buffer
	 * (return values deliberately ignored — clearing stale bits only)
	 */
	ixgbe_check_for_msg_pf(hw, vf_number);
	ixgbe_check_for_ack_pf(hw, vf_number);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);

	/* interrupt the VF to tell it a message has been sent and release
	 * the buffer
	 */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	return 0;
}
359
360
361
362
363
364
365
366
367
368
369
370
/**
 * ixgbe_read_mbx_pf - Read a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: destination buffer for the message
 * @size: length of the buffer in dwords
 * @vf_number: the VF index
 *
 * Memory buffer copy needs to be protected with the mailbox lock; this
 * function locks the mailbox, copies the message out, then ACKs the VF
 * (which also releases the buffer).  Returns 0 on success or
 * IXGBE_ERR_MBX if the lock could not be obtained.
 **/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
			     u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		return ret_val;

	/* copy the message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);

	/* Acknowledge the message and release buffer */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

	return 0;
}
394
395 #ifdef CONFIG_PCI_IOV
396
397
398
399
400
401
402 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
403 {
404 struct ixgbe_mbx_info *mbx = &hw->mbx;
405
406 if (hw->mac.type != ixgbe_mac_82599EB &&
407 hw->mac.type != ixgbe_mac_X550 &&
408 hw->mac.type != ixgbe_mac_X550EM_x &&
409 hw->mac.type != ixgbe_mac_x550em_a &&
410 hw->mac.type != ixgbe_mac_X540)
411 return;
412
413 mbx->timeout = 0;
414 mbx->usec_delay = 0;
415
416 mbx->stats.msgs_tx = 0;
417 mbx->stats.msgs_rx = 0;
418 mbx->stats.reqs = 0;
419 mbx->stats.acks = 0;
420 mbx->stats.rsts = 0;
421
422 mbx->size = IXGBE_VFMAILBOX_SIZE;
423 }
424 #endif
425
/* PF-side mailbox operations table shared by all mailbox-capable MACs */
const struct ixgbe_mbx_operations mbx_ops_generic = {
	.read = ixgbe_read_mbx_pf,
	.write = ixgbe_write_mbx_pf,
	.read_posted = ixgbe_read_posted_mbx,
	.write_posted = ixgbe_write_posted_mbx,
	.check_for_msg = ixgbe_check_for_msg_pf,
	.check_for_ack = ixgbe_check_for_ack_pf,
	.check_for_rst = ixgbe_check_for_rst_pf,
};
435