This source file includes the following definitions:
- wait_fw_init
- mlx5_set_driver_version
- set_dma_caps
- mlx5_pci_enable_device
- mlx5_pci_disable_device
- request_bar
- release_bar
- to_fw_pkey_sz
- mlx5_core_get_caps_mode
- mlx5_core_get_caps
- set_caps
- handle_hca_cap_atomic
- handle_hca_cap_odp
- handle_hca_cap
- set_hca_cap
- set_hca_ctrl
- mlx5_core_set_hca_defaults
- mlx5_core_enable_hca
- mlx5_core_disable_hca
- mlx5_read_internal_timer
- mlx5_core_set_issi
- mlx5_pci_init
- mlx5_pci_close
- mlx5_init_once
- mlx5_cleanup_once
- mlx5_function_setup
- mlx5_function_teardown
- mlx5_load
- mlx5_unload
- mlx5_load_one
- mlx5_unload_one
- mlx5_mdev_init
- mlx5_mdev_uninit
- init_one
- remove_one
- mlx5_pci_err_detected
- wait_vital
- mlx5_pci_slot_reset
- mlx5_pci_resume
- mlx5_try_fast_unload
- shutdown
- mlx5_suspend
- mlx5_resume
- mlx5_disable_device
- mlx5_recover_device
- mlx5_core_verify_params
- cleanup
/* Dual BSD/GPL copyright and license header omitted from this listing. */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <net/devlink.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fs_core.h"
#include "lib/mpfs.h"
#include "eswitch.h"
#include "devlink.h"
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
#include "lib/devcom.h"
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
#include "lib/hv_vhca.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF       2
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static u32 sw_owner_id[4];

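/* Illustrative usage (not part of the original file): both parameters can
 * be given at load time, e.g. "modprobe mlx5_core prof_sel=1 debug_mask=3";
 * debug_mask (mode 0644) is also writable at runtime via
 * /sys/module/mlx5_core/parameters/debug_mask.
 */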
enum {
        MLX5_ATOMIC_REQ_MODE_BE = 0x0,
        MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

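/* Driver profiles: profile 0 keeps firmware defaults, profile 1 only caps
 * log_max_qp, and profile 2 (the default) caps log_max_qp and pre-sizes
 * the MR cache.
 */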
static struct mlx5_profile profile[] = {
        [0] = {
                .mask = 0,
        },
        [1] = {
                .mask = MLX5_PROF_MASK_QP_SIZE,
                .log_max_qp = 12,
        },
        [2] = {
                .mask = MLX5_PROF_MASK_QP_SIZE |
                        MLX5_PROF_MASK_MR_CACHE,
                .log_max_qp = 18,
                .mr_cache[0] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[1] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[2] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[3] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[4] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[5] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[6] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[7] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[8] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[9] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[10] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[11] = {
                        .size = 500,
                        .limit = 250
                },
                .mr_cache[12] = {
                        .size = 64,
                        .limit = 32
                },
                .mr_cache[13] = {
                        .size = 32,
                        .limit = 16
                },
                .mr_cache[14] = {
                        .size = 16,
                        .limit = 8
                },
                .mr_cache[15] = {
                        .size = 8,
                        .limit = 4
                },
        },
};

#define FW_INIT_TIMEOUT_MILI            2000
#define FW_INIT_WAIT_MS                 2
#define FW_PRE_INIT_TIMEOUT_MILI        120000
#define FW_INIT_WARN_MESSAGE_INTERVAL   20000

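/* Poll the firmware "initializing" bit in the init segment until it clears,
 * warning every warn_time_mili and giving up with -EBUSY after
 * max_wait_mili.
 */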
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
                        u32 warn_time_mili)
{
        unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
        unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
        int err = 0;

        BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);

        while (fw_initializing(dev)) {
                if (time_after(jiffies, end)) {
                        err = -EBUSY;
                        break;
                }
                if (warn_time_mili && time_after(jiffies, warn)) {
                        mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
                                       jiffies_to_msecs(end - warn) / 1000);
                        warn = jiffies + msecs_to_jiffies(warn_time_mili);
                }
                msleep(FW_INIT_WAIT_MS);
        }

        return err;
}

static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
        int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
                                              driver_version);
        u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
        u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
        int remaining_size = driver_ver_sz;
        char *string;

        if (!MLX5_CAP_GEN(dev, driver_version))
                return;

        string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

        strncpy(string, "Linux", remaining_size);

        remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
        strncat(string, ",", remaining_size);

        remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
        strncat(string, DRIVER_NAME, remaining_size);

        remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
        strncat(string, ",", remaining_size);

        remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
        strncat(string, DRIVER_VERSION, remaining_size);

        /* Send the command */
        MLX5_SET(set_driver_version_in, in, opcode,
                 MLX5_CMD_OP_SET_DRIVER_VERSION);

        mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int set_dma_caps(struct pci_dev *pdev)
{
        int err;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        return err;
                }
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev,
                         "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "Can't set consistent PCI DMA mask, aborting\n");
                        return err;
                }
        }

        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
        return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;
        int err = 0;

        mutex_lock(&dev->pci_status_mutex);
        if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
                err = pci_enable_device(pdev);
                if (!err)
                        dev->pci_status = MLX5_PCI_STATUS_ENABLED;
        }
        mutex_unlock(&dev->pci_status_mutex);

        return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;

        mutex_lock(&dev->pci_status_mutex);
        if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
                pci_disable_device(pdev);
                dev->pci_status = MLX5_PCI_STATUS_DISABLED;
        }
        mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
        int err = 0;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
                return -ENODEV;
        }

        err = pci_request_regions(pdev, DRIVER_NAME);
        if (err)
                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

        return err;
}

static void release_bar(struct pci_dev *pdev)
{
        pci_release_regions(pdev);
}

struct mlx5_reg_host_endianness {
        u8      he;
        u8      rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
        MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
                                MLX5_DEV_CAP_FLAG_DCT,
};

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
        switch (size) {
        case 128:
                return 0;
        case 256:
                return 1;
        case 512:
                return 2;
        case 1024:
                return 3;
        case 2048:
                return 4;
        case 4096:
                return 5;
        default:
                mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
                return 0;
        }
}

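/* QUERY_HCA_CAP op_mod layout: bits [15:1] carry the capability type and
 * bit 0 selects current (1) versus maximum (0) capability values.
 */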
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
                                   enum mlx5_cap_type cap_type,
                                   enum mlx5_cap_mode cap_mode)
{
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        void *out, *hca_caps;
        u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
        int err;

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err) {
                mlx5_core_warn(dev,
                               "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
                               cap_type, cap_mode, err);
                goto query_ex;
        }

        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

        switch (cap_mode) {
        case HCA_CAP_OPMOD_GET_MAX:
                memcpy(dev->caps.hca_max[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        case HCA_CAP_OPMOD_GET_CUR:
                memcpy(dev->caps.hca_cur[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        default:
                mlx5_core_warn(dev,
                               "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
                               cap_type, cap_mode);
                err = -EINVAL;
                break;
        }
query_ex:
        kfree(out);
        return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
        int ret;

        ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
        if (ret)
                return ret;
        return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
        u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

        MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
        return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}

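/* The handle_hca_cap_*() helpers below share a pattern: query the device
 * capabilities, adjust the writable fields in a SET_HCA_CAP mailbox, and
 * write them back before INIT_HCA.
 */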
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
        void *set_ctx;
        void *set_hca_cap;
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        int req_endianness;
        int err;

        if (MLX5_CAP_GEN(dev, atomic)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
                if (err)
                        return err;
        } else {
                return 0;
        }

        req_endianness =
                MLX5_CAP_ATOMIC(dev,
                                supported_atomic_req_8B_endianness_mode_1);

        if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
                return 0;

        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                return -ENOMEM;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

        /* Set requestor to host endianness */
        MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
                 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

        err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);

        kfree(set_ctx);
        return err;
}

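/* Advertise on-demand-paging support by copying each ODP capability the
 * device reports as supported (max) into the current capability set.
 */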
static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
{
        void *set_hca_cap;
        void *set_ctx;
        int set_sz;
        bool do_set = false;
        int err;

        if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
            !MLX5_CAP_GEN(dev, pg))
                return 0;

        err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
        if (err)
                return err;

        set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                return -ENOMEM;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
        memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
               MLX5_ST_SZ_BYTES(odp_cap));

#define ODP_CAP_SET_MAX(dev, field)                                    \
        do {                                                           \
                u32 _res = MLX5_CAP_ODP_MAX(dev, field);               \
                if (_res) {                                            \
                        do_set = true;                                 \
                        MLX5_SET(odp_cap, set_hca_cap, field, _res);   \
                }                                                      \
        } while (0)

        ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
        ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);

        if (do_set)
                err = set_caps(dev, set_ctx, set_sz,
                               MLX5_SET_HCA_CAP_OP_MOD_ODP);

        kfree(set_ctx);

        return err;
}

static int handle_hca_cap(struct mlx5_core_dev *dev)
{
        void *set_ctx = NULL;
        struct mlx5_profile *prof = dev->profile;
        int err = -ENOMEM;
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        void *set_hca_cap;

        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                goto query_ex;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
        if (err)
                goto query_ex;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
                                   capability);
        memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
               MLX5_ST_SZ_BYTES(cmd_hca_cap));

        mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
                      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
                      128);
        /* we limit the size of the pkey table to 128 entries for now */
        MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
                 to_fw_pkey_sz(dev, 128));

        /* Check log_max_qp from HCA caps to set in current profile */
        if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
                mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
                               profile[prof_sel].log_max_qp,
                               MLX5_CAP_GEN_MAX(dev, log_max_qp));
                profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
        }
        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
                MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
                         prof->log_max_qp);

        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

        /* Enable 4K UAR only when HCA supports it and page size is bigger
         * than 4k.
         */
        if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
                MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

        MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

        if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
                MLX5_SET(cmd_hca_cap,
                         set_hca_cap,
                         cache_line_128byte,
                         cache_line_size() >= 128 ? 1 : 0);

        if (MLX5_CAP_GEN_MAX(dev, dct))
                MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

        if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
                MLX5_SET(cmd_hca_cap,
                         set_hca_cap,
                         num_vhca_ports,
                         MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

        err = set_caps(dev, set_ctx, set_sz,
                       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);

query_ex:
        kfree(set_ctx);
        return err;
}

static int set_hca_cap(struct mlx5_core_dev *dev)
{
        int err;

        err = handle_hca_cap(dev);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap failed\n");
                goto out;
        }

        err = handle_hca_cap_atomic(dev);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
                goto out;
        }

        err = handle_hca_cap_odp(dev);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
                goto out;
        }

out:
        return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
        struct mlx5_reg_host_endianness he_in;
        struct mlx5_reg_host_endianness he_out;
        int err;

        if (!mlx5_core_is_pf(dev))
                return 0;

        memset(&he_in, 0, sizeof(he_in));
        he_in.he = MLX5_SET_HOST_ENDIANNESS;
        err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
                                   &he_out, sizeof(he_out),
                                   MLX5_REG_HOST_ENDIANNESS, 0, 1);
        return err;
}

static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
        int ret = 0;

        /* Disable local_lb by default */
        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
                ret = mlx5_nic_vport_update_local_lb(dev, false);

        return ret;
}

int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
        u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

        MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
        MLX5_SET(enable_hca_in, in, function_id, func_id);
        MLX5_SET(enable_hca_in, in, embedded_cpu_function,
                 dev->caps.embedded_cpu);
        return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
        u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

        MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
        MLX5_SET(disable_hca_in, in, function_id, func_id);
        MLX5_SET(disable_hca_in, in, embedded_cpu_function,
                 dev->caps.embedded_cpu);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
                             struct ptp_system_timestamp *sts)
{
        u32 timer_h, timer_h1, timer_l;

        timer_h = ioread32be(&dev->iseg->internal_timer_h);
        ptp_read_system_prets(sts);
        timer_l = ioread32be(&dev->iseg->internal_timer_l);
        ptp_read_system_postts(sts);
        timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
        if (timer_h != timer_h1) {
                /* wrap around */
                ptp_read_system_prets(sts);
                timer_l = ioread32be(&dev->iseg->internal_timer_l);
                ptp_read_system_postts(sts);
        }

        return (u64)timer_l | (u64)timer_h1 << 32;
}

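/* ISSI (Interface Step Sequence ID) negotiation: query the firmware's
 * supported ISSI mask and switch to ISSI 1 when available; ISSI 0 is kept
 * when older firmware does not support the query command at all.
 */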
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
        u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
        u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
        u32 sup_issi;
        int err;

        MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
        err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
                            query_out, sizeof(query_out));
        if (err) {
                u32 syndrome;
                u8 status;

                mlx5_cmd_mbox_status(query_out, &status, &syndrome);
                if (!status || syndrome == MLX5_DRIVER_SYND) {
                        mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
                                      err, status, syndrome);
                        return err;
                }

                mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
                dev->issi = 0;
                return 0;
        }

        sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

        if (sup_issi & (1 << 1)) {
                u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
                u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

                MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
                MLX5_SET(set_issi_in, set_in, current_issi, 1);
                err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
                                    set_out, sizeof(set_out));
                if (err) {
                        mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
                                      err);
                        return err;
                }

                dev->issi = 1;

                return 0;
        } else if (sup_issi & (1 << 0) || !sup_issi) {
                return 0;
        }

        return -EOPNOTSUPP;
}

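/* Enable the PCI device, map BAR 0 and the initialization segment, set the
 * DMA masks, and opportunistically enable PCIe atomic operations to the
 * root complex.
 */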
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        struct mlx5_priv *priv = &dev->priv;
        int err = 0;

        mutex_init(&dev->pci_status_mutex);
        pci_set_drvdata(dev->pdev, dev);

        dev->bar_addr = pci_resource_start(pdev, 0);
        priv->numa_node = dev_to_node(&dev->pdev->dev);

        err = mlx5_pci_enable_device(dev);
        if (err) {
                mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = request_bar(pdev);
        if (err) {
                mlx5_core_err(dev, "error requesting BARs, aborting\n");
                goto err_disable;
        }

        pci_set_master(pdev);

        err = set_dma_caps(pdev);
        if (err) {
                mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
                goto err_clr_master;
        }

        if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
            pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
            pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
                mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

        dev->iseg_base = dev->bar_addr;
        dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
        if (!dev->iseg) {
                err = -ENOMEM;
                mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
                goto err_clr_master;
        }

        mlx5_pci_vsc_init(dev);

        return 0;

err_clr_master:
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
err_disable:
        mlx5_pci_disable_device(dev);
        return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
        /* health work might still be active, and it needs pci bar in
         * order to know the NIC state. Therefore, drain the health WQ
         * before removing the pci bars
         */
        mlx5_drain_health_wq(dev);
        iounmap(dev->iseg);
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        mlx5_pci_disable_device(dev);
}

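/* One-time software state initialization (tables, caches and sub-modules),
 * paired with mlx5_cleanup_once(); per-load resources are handled by
 * mlx5_load()/mlx5_unload() below.
 */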
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
        int err;

        dev->priv.devcom = mlx5_devcom_register_device(dev);
        if (IS_ERR(dev->priv.devcom))
                mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
                              dev->priv.devcom);

        err = mlx5_query_board_id(dev);
        if (err) {
                mlx5_core_err(dev, "query board id failed\n");
                goto err_devcom;
        }

        err = mlx5_irq_table_init(dev);
        if (err) {
                mlx5_core_err(dev, "failed to initialize irq table\n");
                goto err_devcom;
        }

        err = mlx5_eq_table_init(dev);
        if (err) {
                mlx5_core_err(dev, "failed to initialize eq\n");
                goto err_irq_cleanup;
        }

        err = mlx5_events_init(dev);
        if (err) {
                mlx5_core_err(dev, "failed to initialize events\n");
                goto err_eq_cleanup;
        }

        mlx5_cq_debugfs_init(dev);

        mlx5_init_qp_table(dev);

        mlx5_init_mkey_table(dev);

        mlx5_init_reserved_gids(dev);

        mlx5_init_clock(dev);

        dev->vxlan = mlx5_vxlan_create(dev);
        dev->geneve = mlx5_geneve_create(dev);

        err = mlx5_init_rl_table(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init rate limiting\n");
                goto err_tables_cleanup;
        }

        err = mlx5_mpfs_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
                goto err_rl_cleanup;
        }

        err = mlx5_sriov_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init sriov %d\n", err);
                goto err_mpfs_cleanup;
        }

        err = mlx5_eswitch_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
                goto err_sriov_cleanup;
        }

        err = mlx5_fpga_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
                goto err_eswitch_cleanup;
        }

        dev->dm = mlx5_dm_create(dev);
        if (IS_ERR(dev->dm))
                mlx5_core_warn(dev, "Failed to init device memory: %ld\n",
                               PTR_ERR(dev->dm));

        dev->tracer = mlx5_fw_tracer_create(dev);
        dev->hv_vhca = mlx5_hv_vhca_create(dev);

        return 0;

err_eswitch_cleanup:
        mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
        mlx5_sriov_cleanup(dev);
err_mpfs_cleanup:
        mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
        mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
        mlx5_geneve_destroy(dev->geneve);
        mlx5_vxlan_destroy(dev->vxlan);
        mlx5_cleanup_mkey_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cq_debugfs_cleanup(dev);
        mlx5_events_cleanup(dev);
err_eq_cleanup:
        mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
        mlx5_irq_table_cleanup(dev);
err_devcom:
        mlx5_devcom_unregister_device(dev->priv.devcom);

        return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
        mlx5_hv_vhca_destroy(dev->hv_vhca);
        mlx5_fw_tracer_destroy(dev->tracer);
        mlx5_dm_cleanup(dev);
        mlx5_fpga_cleanup(dev);
        mlx5_eswitch_cleanup(dev->priv.eswitch);
        mlx5_sriov_cleanup(dev);
        mlx5_mpfs_cleanup(dev);
        mlx5_cleanup_rl_table(dev);
        mlx5_geneve_destroy(dev->geneve);
        mlx5_vxlan_destroy(dev->vxlan);
        mlx5_cleanup_clock(dev);
        mlx5_cleanup_reserved_gids(dev);
        mlx5_cleanup_mkey_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cq_debugfs_cleanup(dev);
        mlx5_events_cleanup(dev);
        mlx5_eq_table_cleanup(dev);
        mlx5_irq_table_cleanup(dev);
        mlx5_devcom_unregister_device(dev->priv.devcom);
}

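/* Function bring-up sequence: wait for firmware, initialize the command
 * interface, enable the HCA, negotiate ISSI, supply boot and init pages,
 * program capabilities, then INIT_HCA and start health polling.
 */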
static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
        int err;

        mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
                       fw_rev_min(dev), fw_rev_sub(dev));

        /* Only PFs hold the relevant PCIe information for this query */
        if (mlx5_core_is_pf(dev))
                pcie_print_link_status(dev->pdev);

        /* wait for firmware to accept initialization segments configurations
         */
        err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
        if (err) {
                mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
                              FW_PRE_INIT_TIMEOUT_MILI);
                return err;
        }

        err = mlx5_cmd_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
                return err;
        }

        err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
        if (err) {
                mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
                              FW_INIT_TIMEOUT_MILI);
                goto err_cmd_cleanup;
        }

        err = mlx5_core_enable_hca(dev, 0);
        if (err) {
                mlx5_core_err(dev, "enable hca failed\n");
                goto err_cmd_cleanup;
        }

        err = mlx5_core_set_issi(dev);
        if (err) {
                mlx5_core_err(dev, "failed to set issi\n");
                goto err_disable_hca;
        }

        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
                mlx5_core_err(dev, "failed to allocate boot pages\n");
                goto err_disable_hca;
        }

        err = set_hca_ctrl(dev);
        if (err) {
                mlx5_core_err(dev, "set_hca_ctrl failed\n");
                goto reclaim_boot_pages;
        }

        err = set_hca_cap(dev);
        if (err) {
                mlx5_core_err(dev, "set_hca_cap failed\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_satisfy_startup_pages(dev, 0);
        if (err) {
                mlx5_core_err(dev, "failed to allocate init pages\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_cmd_init_hca(dev, sw_owner_id);
        if (err) {
                mlx5_core_err(dev, "init hca failed\n");
                goto reclaim_boot_pages;
        }

        mlx5_set_driver_version(dev);

        mlx5_start_health_poll(dev);

        err = mlx5_query_hca_caps(dev);
        if (err) {
                mlx5_core_err(dev, "query hca failed\n");
                goto stop_health;
        }

        return 0;

stop_health:
        mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);
err_disable_hca:
        mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
        mlx5_cmd_cleanup(dev);

        return err;
}

static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
        int err;

        mlx5_stop_health_poll(dev, boot);
        err = mlx5_cmd_teardown_hca(dev);
        if (err) {
                mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
                return err;
        }
        mlx5_reclaim_startup_pages(dev);
        mlx5_core_disable_hca(dev, 0);
        mlx5_cmd_cleanup(dev);

        return 0;
}

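/* mlx5_load() brings up run-time resources in dependency order: UAR pages,
 * events and page allocator, IRQ and EQ tables, FW tracer, FPGA and accel
 * offloads, flow steering, SR-IOV and the embedded CPU; mlx5_unload()
 * reverses it.
 */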
static int mlx5_load(struct mlx5_core_dev *dev)
{
        int err;

        dev->priv.uar = mlx5_get_uars_page(dev);
        if (IS_ERR(dev->priv.uar)) {
                mlx5_core_err(dev, "Failed allocating uar, aborting\n");
                err = PTR_ERR(dev->priv.uar);
                return err;
        }

        mlx5_events_start(dev);
        mlx5_pagealloc_start(dev);

        err = mlx5_irq_table_create(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to alloc IRQs\n");
                goto err_irq_table;
        }

        err = mlx5_eq_table_create(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create EQs\n");
                goto err_eq_table;
        }

        err = mlx5_fw_tracer_init(dev->tracer);
        if (err) {
                mlx5_core_err(dev, "Failed to init FW tracer\n");
                goto err_fw_tracer;
        }

        mlx5_hv_vhca_init(dev->hv_vhca);

        err = mlx5_fpga_device_start(dev);
        if (err) {
                mlx5_core_err(dev, "fpga device start failed %d\n", err);
                goto err_fpga_start;
        }

        err = mlx5_accel_ipsec_init(dev);
        if (err) {
                mlx5_core_err(dev, "IPSec device start failed %d\n", err);
                goto err_ipsec_start;
        }

        err = mlx5_accel_tls_init(dev);
        if (err) {
                mlx5_core_err(dev, "TLS device start failed %d\n", err);
                goto err_tls_start;
        }

        err = mlx5_init_fs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init flow steering\n");
                goto err_fs;
        }

        err = mlx5_core_set_hca_defaults(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to set hca defaults\n");
                goto err_sriov;
        }

        err = mlx5_sriov_attach(dev);
        if (err) {
                mlx5_core_err(dev, "sriov init failed %d\n", err);
                goto err_sriov;
        }

        err = mlx5_ec_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init embedded CPU\n");
                goto err_ec;
        }

        return 0;

err_ec:
        mlx5_sriov_detach(dev);
err_sriov:
        mlx5_cleanup_fs(dev);
err_fs:
        mlx5_accel_tls_cleanup(dev);
err_tls_start:
        mlx5_accel_ipsec_cleanup(dev);
err_ipsec_start:
        mlx5_fpga_device_stop(dev);
err_fpga_start:
        mlx5_hv_vhca_cleanup(dev->hv_vhca);
        mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
        mlx5_eq_table_destroy(dev);
err_eq_table:
        mlx5_irq_table_destroy(dev);
err_irq_table:
        mlx5_pagealloc_stop(dev);
        mlx5_events_stop(dev);
        mlx5_put_uars_page(dev, dev->priv.uar);
        return err;
}

static void mlx5_unload(struct mlx5_core_dev *dev)
{
        mlx5_ec_cleanup(dev);
        mlx5_sriov_detach(dev);
        mlx5_cleanup_fs(dev);
        mlx5_accel_ipsec_cleanup(dev);
        mlx5_accel_tls_cleanup(dev);
        mlx5_fpga_device_stop(dev);
        mlx5_hv_vhca_cleanup(dev->hv_vhca);
        mlx5_fw_tracer_cleanup(dev->tracer);
        mlx5_eq_table_destroy(dev);
        mlx5_irq_table_destroy(dev);
        mlx5_pagealloc_stop(dev);
        mlx5_events_stop(dev);
        mlx5_put_uars_page(dev, dev->priv.uar);
}

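/* State transitions are serialized by intf_state_mutex; the "boot" flag
 * distinguishes first-time probe (which also performs one-time init and
 * devlink registration) from recovery and resume reloads.
 */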
static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
        int err = 0;

        dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                mlx5_core_warn(dev, "interface is up, NOP\n");
                goto out;
        }

        dev->state = MLX5_DEVICE_STATE_UP;

        err = mlx5_function_setup(dev, boot);
        if (err)
                goto err_function;

        if (boot) {
                err = mlx5_init_once(dev);
                if (err) {
                        mlx5_core_err(dev, "sw objs init failed\n");
                        goto function_teardown;
                }
        }

        err = mlx5_load(dev);
        if (err)
                goto err_load;

        if (boot) {
                err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
                if (err)
                        goto err_devlink_reg;
        }

        if (mlx5_device_registered(dev)) {
                mlx5_attach_device(dev);
        } else {
                err = mlx5_register_device(dev);
                if (err) {
                        mlx5_core_err(dev, "register device failed %d\n", err);
                        goto err_reg_dev;
                }
        }

        set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
        mutex_unlock(&dev->intf_state_mutex);

        return err;

err_reg_dev:
        if (boot)
                mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
        mlx5_unload(dev);
err_load:
        if (boot)
                mlx5_cleanup_once(dev);
function_teardown:
        mlx5_function_teardown(dev, boot);
err_function:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        mutex_unlock(&dev->intf_state_mutex);

        return err;
}

static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
        int err = 0;

        if (cleanup) {
                mlx5_unregister_device(dev);
                mlx5_drain_health_wq(dev);
        }

        mutex_lock(&dev->intf_state_mutex);
        if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                mlx5_core_warn(dev, "%s: interface is down, NOP\n",
                               __func__);
                if (cleanup)
                        mlx5_cleanup_once(dev);
                goto out;
        }

        clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

        if (mlx5_device_registered(dev))
                mlx5_detach_device(dev);

        mlx5_unload(dev);

        if (cleanup)
                mlx5_cleanup_once(dev);

        mlx5_function_teardown(dev, cleanup);
out:
        mutex_unlock(&dev->intf_state_mutex);
        return err;
}

static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
        struct mlx5_priv *priv = &dev->priv;
        int err;

        dev->profile = &profile[profile_idx];

        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
        mutex_init(&dev->intf_state_mutex);

        mutex_init(&priv->bfregs.reg_head.lock);
        mutex_init(&priv->bfregs.wc_head.lock);
        INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
        INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

        mutex_init(&priv->alloc_mutex);
        mutex_init(&priv->pgdir_mutex);
        INIT_LIST_HEAD(&priv->pgdir_list);
        spin_lock_init(&priv->mkey_lock);

        priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
                                            mlx5_debugfs_root);
        if (!priv->dbg_root) {
                dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n");
                return -ENOMEM;
        }

        err = mlx5_health_init(dev);
        if (err)
                goto err_health_init;

        err = mlx5_pagealloc_init(dev);
        if (err)
                goto err_pagealloc_init;

        return 0;

err_pagealloc_init:
        mlx5_health_cleanup(dev);
err_health_init:
        debugfs_remove(dev->priv.dbg_root);

        return err;
}

static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);
        debugfs_remove_recursive(dev->priv.dbg_root);
}

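/* PCI probe/remove entry points: allocate the devlink/mlx5_core_dev pair,
 * set up PCI resources and load the device; mlx5_ib is requested
 * asynchronously so the InfiniBand side can attach once the core is up.
 */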
#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct mlx5_core_dev *dev;
        struct devlink *devlink;
        int err;

        devlink = mlx5_devlink_alloc();
        if (!devlink) {
                dev_err(&pdev->dev, "devlink alloc failed\n");
                return -ENOMEM;
        }

        dev = devlink_priv(devlink);
        dev->device = &pdev->dev;
        dev->pdev = pdev;

        dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
                         MLX5_COREDEV_VF : MLX5_COREDEV_PF;

        err = mlx5_mdev_init(dev, prof_sel);
        if (err)
                goto mdev_init_err;

        err = mlx5_pci_init(dev, pdev, id);
        if (err) {
                mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
                              err);
                goto pci_init_err;
        }

        err = mlx5_load_one(dev, true);
        if (err) {
                mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
                              err);
                goto err_load_one;
        }

        request_module_nowait(MLX5_IB_MOD);

        err = mlx5_crdump_enable(dev);
        if (err)
                dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);

        pci_save_state(pdev);
        return 0;

err_load_one:
        mlx5_pci_close(dev);
pci_init_err:
        mlx5_mdev_uninit(dev);
mdev_init_err:
        mlx5_devlink_free(devlink);

        return err;
}

static void remove_one(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct devlink *devlink = priv_to_devlink(dev);

        mlx5_crdump_disable(dev);
        mlx5_devlink_unregister(devlink);

        if (mlx5_unload_one(dev, true)) {
                mlx5_core_err(dev, "mlx5_unload_one failed\n");
                mlx5_health_flush(dev);
                return;
        }

        mlx5_pci_close(dev);
        mlx5_mdev_uninit(dev);
        mlx5_devlink_free(devlink);
}

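/* PCI AER recovery flow: error_detected() tears the device down and
 * disables it, slot_reset() re-enables PCI access and waits for the health
 * counter to tick, and resume() reloads the device.
 */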
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        mlx5_core_info(dev, "%s was called\n", __func__);

        mlx5_enter_error_state(dev, false);
        mlx5_error_sw_reset(dev);
        mlx5_unload_one(dev, false);
        mlx5_drain_health_wq(dev);
        mlx5_pci_disable_device(dev);

        return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_core_health *health = &dev->priv.health;
        const int niter = 100;
        u32 last_count = 0;
        u32 count;
        int i;

        for (i = 0; i < niter; i++) {
                count = ioread32be(health->health_counter);
                if (count && count != 0xffffffff) {
                        if (last_count && last_count != count) {
                                mlx5_core_info(dev,
                                               "wait vital counter value 0x%x after %d iterations\n",
                                               count, i);
                                return 0;
                        }
                        last_count = count;
                }
                msleep(50);
        }

        return -ETIMEDOUT;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;

        mlx5_core_info(dev, "%s was called\n", __func__);

        err = mlx5_pci_enable_device(dev);
        if (err) {
                mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
                              __func__, err);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (wait_vital(pdev)) {
                mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        return PCI_ERS_RESULT_RECOVERED;
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;

        mlx5_core_info(dev, "%s was called\n", __func__);

        err = mlx5_load_one(dev, false);
        if (err)
                mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
                              __func__, err);
        else
                mlx5_core_info(dev, "%s: device recovered\n", __func__);
}

static const struct pci_error_handlers mlx5_err_handler = {
        .error_detected = mlx5_pci_err_detected,
        .slot_reset     = mlx5_pci_slot_reset,
        .resume         = mlx5_pci_resume
};

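/* Fast/force teardown lets firmware tear the device down without the full
 * unload path on shutdown; when neither is supported or the attempt fails,
 * the caller falls back to mlx5_unload_one().
 */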
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
        bool fast_teardown = false, force_teardown = false;
        int ret = 1;

        fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
        force_teardown = MLX5_CAP_GEN(dev, force_teardown);

        mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
        mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

        if (!fast_teardown && !force_teardown)
                return -EOPNOTSUPP;

        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
                return -EAGAIN;
        }

        /* Panic tear down fw command will stop the PCI bus communication
         * with the HCA, so the health poll is no longer needed.
         */
        mlx5_drain_health_wq(dev);
        mlx5_stop_health_poll(dev, false);

        ret = mlx5_cmd_fast_teardown_hca(dev);
        if (!ret)
                goto succeed;

        ret = mlx5_cmd_force_teardown_hca(dev);
        if (!ret)
                goto succeed;

        mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
        mlx5_start_health_poll(dev);
        return ret;

succeed:
        mlx5_enter_error_state(dev, true);

        /* Some platforms require freeing the IRQs in the shutdown flow; if
         * they aren't freed they can't be allocated again after kexec.
         * There is no need to clean up the mlx5_core software state here.
         */
        mlx5_core_eq_free_irqs(dev);

        return 0;
}

static void shutdown(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;

        mlx5_core_info(dev, "Shutdown was called\n");
        err = mlx5_try_fast_unload(dev);
        if (err)
                mlx5_unload_one(dev, false);
        mlx5_pci_disable_device(dev);
}

static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        mlx5_unload_one(dev, false);

        return 0;
}

static int mlx5_resume(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        return mlx5_load_one(dev, false);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
        { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},  /* Connect-IB VF */
        { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
        { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},  /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
        { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},  /* ConnectX-4 Lx VF */
        { PCI_VDEVICE(MELLANOX, 0x1017) },                     /* ConnectX-5, PCIe 3.0 */
        { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},  /* ConnectX-5 VF */
        { PCI_VDEVICE(MELLANOX, 0x1019) },                     /* ConnectX-5 Ex */
        { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},  /* ConnectX-5 Ex VF */
        { PCI_VDEVICE(MELLANOX, 0x101b) },                     /* ConnectX-6 */
        { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},  /* ConnectX-6 VF */
        { PCI_VDEVICE(MELLANOX, 0x101d) },                     /* ConnectX-6 Dx */
        { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},  /* ConnectX Family mlx5Gen Virtual Function */
        { PCI_VDEVICE(MELLANOX, 0x101f) },                     /* ConnectX-6 Lx */
        { PCI_VDEVICE(MELLANOX, 0x1021) },                     /* ConnectX-7 */
        { PCI_VDEVICE(MELLANOX, 0xa2d2) },                     /* BlueField integrated ConnectX-5 */
        { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},  /* BlueField integrated ConnectX-5 VF */
        { PCI_VDEVICE(MELLANOX, 0xa2d6) },                     /* BlueField-2 integrated ConnectX-6 Dx */
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
        mlx5_error_sw_reset(dev);
        mlx5_unload_one(dev, false);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
        mlx5_pci_disable_device(dev);
        if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
                mlx5_pci_resume(dev->pdev);
}

static struct pci_driver mlx5_core_driver = {
        .name           = DRIVER_NAME,
        .id_table       = mlx5_core_pci_table,
        .probe          = init_one,
        .remove         = remove_one,
        .suspend        = mlx5_suspend,
        .resume         = mlx5_resume,
        .shutdown       = shutdown,
        .err_handler    = &mlx5_err_handler,
        .sriov_configure = mlx5_core_sriov_configure,
};

static void mlx5_core_verify_params(void)
{
        if (prof_sel >= ARRAY_SIZE(profile)) {
                pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
                        prof_sel,
                        ARRAY_SIZE(profile) - 1,
                        MLX5_DEFAULT_PROF);
                prof_sel = MLX5_DEFAULT_PROF;
        }
}

static int __init init(void)
{
        int err;

        get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

        mlx5_core_verify_params();
        mlx5_accel_ipsec_build_fs_cmds();
        mlx5_register_debugfs();

        err = pci_register_driver(&mlx5_core_driver);
        if (err)
                goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_init();
#endif

        return 0;

err_debug:
        mlx5_unregister_debugfs();
        return err;
}

static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_cleanup();
#endif
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);