This source file includes the following definitions.
- mlx5e_port_query_buffer
- port_set_buffer
- calculate_xoff
- update_xoff_threshold
- update_buffer_lossy
- fill_pfc_en
- mlx5e_port_manual_buffer_config
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32 #include "port_buffer.h"
33
34 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
35 struct mlx5e_port_buffer *port_buffer)
36 {
37 struct mlx5_core_dev *mdev = priv->mdev;
38 int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
39 u32 total_used = 0;
40 void *buffer;
41 void *out;
42 int err;
43 int i;
44
45 out = kzalloc(sz, GFP_KERNEL);
46 if (!out)
47 return -ENOMEM;
48
49 err = mlx5e_port_query_pbmc(mdev, out);
50 if (err)
51 goto out;
52
53 for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
54 buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
55 port_buffer->buffer[i].lossy =
56 MLX5_GET(bufferx_reg, buffer, lossy);
57 port_buffer->buffer[i].epsb =
58 MLX5_GET(bufferx_reg, buffer, epsb);
59 port_buffer->buffer[i].size =
60 MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
61 port_buffer->buffer[i].xon =
62 MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
63 port_buffer->buffer[i].xoff =
64 MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
65 total_used += port_buffer->buffer[i].size;
66
67 mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
68 port_buffer->buffer[i].size,
69 port_buffer->buffer[i].xon,
70 port_buffer->buffer[i].xoff,
71 port_buffer->buffer[i].epsb,
72 port_buffer->buffer[i].lossy);
73 }
74
75 port_buffer->port_buffer_size =
76 MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
77 port_buffer->spare_buffer_size =
78 port_buffer->port_buffer_size - total_used;
79
80 mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n",
81 port_buffer->port_buffer_size,
82 port_buffer->spare_buffer_size);
83 out:
84 kfree(out);
85 return err;
86 }
87
88 static int port_set_buffer(struct mlx5e_priv *priv,
89 struct mlx5e_port_buffer *port_buffer)
90 {
91 struct mlx5_core_dev *mdev = priv->mdev;
92 int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
93 void *buffer;
94 void *in;
95 int err;
96 int i;
97
98 in = kzalloc(sz, GFP_KERNEL);
99 if (!in)
100 return -ENOMEM;
101
102 err = mlx5e_port_query_pbmc(mdev, in);
103 if (err)
104 goto out;
105
106 for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
107 buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
108
109 MLX5_SET(bufferx_reg, buffer, size,
110 port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
111 MLX5_SET(bufferx_reg, buffer, lossy,
112 port_buffer->buffer[i].lossy);
113 MLX5_SET(bufferx_reg, buffer, xoff_threshold,
114 port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
115 MLX5_SET(bufferx_reg, buffer, xon_threshold,
116 port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
117 }
118
119 err = mlx5e_port_set_pbmc(mdev, in);
120 out:
121 kfree(in);
122 return err;
123 }
124
125
126
127
128 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
129 {
130 u32 speed;
131 u32 xoff;
132 int err;
133
134 err = mlx5e_port_linkspeed(priv->mdev, &speed);
135 if (err)
136 speed = SPEED_40000;
137 speed = max_t(u32, speed, SPEED_40000);
138
139 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
140
141 mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff);
142 return xoff;
143 }
144
145 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
146 u32 xoff, unsigned int max_mtu)
147 {
148 int i;
149
150 for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
151 if (port_buffer->buffer[i].lossy) {
152 port_buffer->buffer[i].xoff = 0;
153 port_buffer->buffer[i].xon = 0;
154 continue;
155 }
156
157 if (port_buffer->buffer[i].size <
158 (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
159 pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
160 i, port_buffer->buffer[i].size);
161 return -ENOMEM;
162 }
163
164 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
165 port_buffer->buffer[i].xon =
166 port_buffer->buffer[i].xoff - max_mtu;
167 }
168
169 return 0;
170 }
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191 static int update_buffer_lossy(unsigned int max_mtu,
192 u8 pfc_en, u8 *buffer, u32 xoff,
193 struct mlx5e_port_buffer *port_buffer,
194 bool *change)
195 {
196 bool changed = false;
197 u8 lossy_count;
198 u8 prio_count;
199 u8 lossy;
200 int prio;
201 int err;
202 int i;
203
204 for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
205 prio_count = 0;
206 lossy_count = 0;
207
208 for (prio = 0; prio < MLX5E_MAX_PRIORITY; prio++) {
209 if (buffer[prio] != i)
210 continue;
211
212 prio_count++;
213 lossy_count += !(pfc_en & (1 << prio));
214 }
215
216 if (lossy_count == prio_count)
217 lossy = 1;
218 else
219 lossy = 0;
220
221 if (lossy != port_buffer->buffer[i].lossy) {
222 port_buffer->buffer[i].lossy = lossy;
223 changed = true;
224 }
225 }
226
227 if (changed) {
228 err = update_xoff_threshold(port_buffer, xoff, max_mtu);
229 if (err)
230 return err;
231
232 *change = true;
233 }
234
235 return 0;
236 }
237
238 static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
239 {
240 u32 g_rx_pause, g_tx_pause;
241 int err;
242
243 err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
244 if (err)
245 return err;
246
247
248
249
250 if (g_rx_pause || g_tx_pause)
251 *pfc_en = 0xff;
252 else
253 err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
254
255 return err;
256 }
257
258 #define MINIMUM_MAX_MTU 9216
259 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
260 u32 change, unsigned int mtu,
261 struct ieee_pfc *pfc,
262 u32 *buffer_size,
263 u8 *prio2buffer)
264 {
265 struct mlx5e_port_buffer port_buffer;
266 u32 xoff = calculate_xoff(priv, mtu);
267 bool update_prio2buffer = false;
268 u8 buffer[MLX5E_MAX_PRIORITY];
269 bool update_buffer = false;
270 unsigned int max_mtu;
271 u32 total_used = 0;
272 u8 curr_pfc_en;
273 int err;
274 int i;
275
276 mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
277 max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
278
279 err = mlx5e_port_query_buffer(priv, &port_buffer);
280 if (err)
281 return err;
282
283 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
284 update_buffer = true;
285 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
286 if (err)
287 return err;
288 }
289
290 if (change & MLX5E_PORT_BUFFER_PFC) {
291 err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
292 if (err)
293 return err;
294
295 err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
296 &port_buffer, &update_buffer);
297 if (err)
298 return err;
299 }
300
301 if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
302 update_prio2buffer = true;
303 err = fill_pfc_en(priv->mdev, &curr_pfc_en);
304 if (err)
305 return err;
306
307 err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
308 xoff, &port_buffer, &update_buffer);
309 if (err)
310 return err;
311 }
312
313 if (change & MLX5E_PORT_BUFFER_SIZE) {
314 for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
315 mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
316 if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
317 mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
318 __func__, i);
319 return -EINVAL;
320 }
321
322 port_buffer.buffer[i].size = buffer_size[i];
323 total_used += buffer_size[i];
324 }
325
326 mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);
327
328 if (total_used > port_buffer.port_buffer_size)
329 return -EINVAL;
330
331 update_buffer = true;
332 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
333 if (err)
334 return err;
335 }
336
337
338 if (!update_buffer && xoff != priv->dcbx.xoff) {
339 update_buffer = true;
340 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
341 if (err)
342 return err;
343 }
344 priv->dcbx.xoff = xoff;
345
346
347 if (update_buffer) {
348 err = port_set_buffer(priv, &port_buffer);
349 if (err)
350 return err;
351 }
352
353 if (update_prio2buffer)
354 err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
355
356 return err;
357 }