This source file includes the following definitions:
- mlxsw_core_port_driver_priv
- mlxsw_core_port_check
- mlxsw_ports_init
- mlxsw_ports_fini
- mlxsw_core_max_ports
- mlxsw_core_driver_priv
- mlxsw_core_res_query_enabled
- mlxsw_core_reg_access_type_str
- mlxsw_emad_pack_end_tlv
- mlxsw_emad_pack_reg_tlv
- mlxsw_emad_pack_op_tlv
- mlxsw_emad_construct_eth_hdr
- mlxsw_emad_construct
- mlxsw_emad_op_tlv
- mlxsw_emad_reg_tlv
- mlxsw_emad_reg_payload
- mlxsw_emad_get_tid
- mlxsw_emad_is_resp
- mlxsw_emad_process_status
- mlxsw_emad_process_status_skb
- mlxsw_emad_trans_timeout_schedule
- mlxsw_emad_transmit
- mlxsw_emad_trans_finish
- mlxsw_emad_transmit_retry
- mlxsw_emad_trans_timeout_work
- mlxsw_emad_process_response
- mlxsw_emad_rx_listener_func
- mlxsw_emad_init
- mlxsw_emad_fini
- mlxsw_emad_alloc
- mlxsw_emad_reg_access
- mlxsw_core_driver_register
- mlxsw_core_driver_unregister
- __driver_find
- mlxsw_core_driver_get
- mlxsw_devlink_port_split
- mlxsw_devlink_port_unsplit
- mlxsw_devlink_sb_pool_get
- mlxsw_devlink_sb_pool_set
- __dl_port
- mlxsw_devlink_port_type_set
- mlxsw_devlink_sb_port_pool_get
- mlxsw_devlink_sb_port_pool_set
- mlxsw_devlink_sb_tc_pool_bind_get
- mlxsw_devlink_sb_tc_pool_bind_set
- mlxsw_devlink_sb_occ_snapshot
- mlxsw_devlink_sb_occ_max_clear
- mlxsw_devlink_sb_occ_port_pool_get
- mlxsw_devlink_sb_occ_tc_port_bind_get
- mlxsw_devlink_info_get
- mlxsw_devlink_core_bus_device_reload_down
- mlxsw_devlink_core_bus_device_reload_up
- mlxsw_devlink_flash_update
- mlxsw_devlink_trap_init
- mlxsw_devlink_trap_fini
- mlxsw_devlink_trap_action_set
- mlxsw_devlink_trap_group_init
- __mlxsw_core_bus_device_register
- mlxsw_core_bus_device_register
- mlxsw_core_bus_device_unregister
- mlxsw_core_skb_transmit_busy
- mlxsw_core_skb_transmit
- mlxsw_core_ptp_transmitted
- __is_rx_listener_equal
- __find_rx_listener_item
- mlxsw_core_rx_listener_register
- mlxsw_core_rx_listener_unregister
- mlxsw_core_event_listener_func
- __is_event_listener_equal
- __find_event_listener_item
- mlxsw_core_event_listener_register
- mlxsw_core_event_listener_unregister
- mlxsw_core_listener_register
- mlxsw_core_listener_unregister
- mlxsw_core_trap_register
- mlxsw_core_trap_unregister
- mlxsw_core_trap_action_set
- mlxsw_core_tid_get
- mlxsw_core_reg_access_emad
- mlxsw_reg_trans_query
- mlxsw_reg_trans_write
- mlxsw_reg_trans_wait
- mlxsw_reg_trans_bulk_wait
- mlxsw_core_reg_access_cmd
- mlxsw_core_reg_access_cb
- mlxsw_core_reg_access
- mlxsw_reg_query
- mlxsw_reg_write
- mlxsw_core_skb_receive
- mlxsw_core_lag_mapping_index
- mlxsw_core_lag_mapping_set
- mlxsw_core_lag_mapping_get
- mlxsw_core_lag_mapping_clear
- mlxsw_core_res_valid
- mlxsw_core_res_get
- __mlxsw_core_port_init
- __mlxsw_core_port_fini
- mlxsw_core_port_init
- mlxsw_core_port_fini
- mlxsw_core_cpu_port_init
- mlxsw_core_cpu_port_fini
- mlxsw_core_port_eth_set
- mlxsw_core_port_ib_set
- mlxsw_core_port_clear
- mlxsw_core_port_type_get
- mlxsw_core_port_devlink_port_get
- mlxsw_core_buf_dump_dbg
- mlxsw_cmd_exec
- mlxsw_core_schedule_dw
- mlxsw_core_schedule_work
- mlxsw_core_flush_owq
- mlxsw_core_kvd_sizes_get
- mlxsw_core_fw_flash_start
- mlxsw_core_fw_flash_end
- mlxsw_core_resources_query
- mlxsw_core_read_frc_h
- mlxsw_core_read_frc_l
- mlxsw_core_module_init
- mlxsw_core_module_exit
1
2
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/device.h>
7 #include <linux/export.h>
8 #include <linux/err.h>
9 #include <linux/if_link.h>
10 #include <linux/netdevice.h>
11 #include <linux/completion.h>
12 #include <linux/skbuff.h>
13 #include <linux/etherdevice.h>
14 #include <linux/types.h>
15 #include <linux/string.h>
16 #include <linux/gfp.h>
17 #include <linux/random.h>
18 #include <linux/jiffies.h>
19 #include <linux/mutex.h>
20 #include <linux/rcupdate.h>
21 #include <linux/slab.h>
22 #include <linux/workqueue.h>
23 #include <asm/byteorder.h>
24 #include <net/devlink.h>
25 #include <trace/events/devlink.h>
26
27 #include "core.h"
28 #include "item.h"
29 #include "cmd.h"
30 #include "port.h"
31 #include "trap.h"
32 #include "emad.h"
33 #include "reg.h"
34 #include "resources.h"
35
36 static LIST_HEAD(mlxsw_core_driver_list);
37 static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
38
39 static const char mlxsw_core_driver_name[] = "mlxsw_core";
40
41 static struct workqueue_struct *mlxsw_wq;
42 static struct workqueue_struct *mlxsw_owq;
43
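/* Per-port state kept by the core: the devlink_port registered with
 * devlink and the port driver's private data. A non-NULL
 * port_driver_priv is what mlxsw_core_port_check() uses to tell
 * whether the port is currently in use.
 */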
44 struct mlxsw_core_port {
45 struct devlink_port devlink_port;
46 void *port_driver_priv;
47 u8 local_port;
48 };
49
50 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
51 {
52 return mlxsw_core_port->port_driver_priv;
53 }
54 EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
55
56 static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
57 {
58 return mlxsw_core_port->port_driver_priv != NULL;
59 }
60
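/* Per-device core object, allocated as the devlink private area by
 * __mlxsw_core_bus_device_register(). It ties together the bus, the
 * registered driver, EMAD transaction state, the LAG mapping, the
 * queried resources and the per-port array; driver_priv[] is the
 * trailing driver-private area returned by mlxsw_core_driver_priv().
 */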
61 struct mlxsw_core {
62 struct mlxsw_driver *driver;
63 const struct mlxsw_bus *bus;
64 void *bus_priv;
65 const struct mlxsw_bus_info *bus_info;
66 struct workqueue_struct *emad_wq;
67 struct list_head rx_listener_list;
68 struct list_head event_listener_list;
69 struct {
70 atomic64_t tid;
71 struct list_head trans_list;
72 spinlock_t trans_list_lock;
73 bool use_emad;
74 } emad;
75 struct {
76 u8 *mapping;
77 } lag;
78 struct mlxsw_res res;
79 struct mlxsw_hwmon *hwmon;
80 struct mlxsw_thermal *thermal;
81 struct mlxsw_core_port *ports;
82 unsigned int max_ports;
83 bool fw_flash_in_progress;
84 unsigned long driver_priv[0];
85
86 };
87
88 #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
89
90 static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
91 {
92
93 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
94 mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
95 MAX_SYSTEM_PORT) + 1;
96 else
97 mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
98
99 mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
100 sizeof(struct mlxsw_core_port), GFP_KERNEL);
101 if (!mlxsw_core->ports)
102 return -ENOMEM;
103
104 return 0;
105 }
106
107 static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
108 {
109 kfree(mlxsw_core->ports);
110 }
111
112 unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
113 {
114 return mlxsw_core->max_ports;
115 }
116 EXPORT_SYMBOL(mlxsw_core_max_ports);
117
118 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
119 {
120 return mlxsw_core->driver_priv;
121 }
122 EXPORT_SYMBOL(mlxsw_core_driver_priv);
123
124 bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
125 {
126 return mlxsw_core->driver->res_query_enabled;
127 }
128 EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
129
130 struct mlxsw_rx_listener_item {
131 struct list_head list;
132 struct mlxsw_rx_listener rxl;
133 void *priv;
134 };
135
136 struct mlxsw_event_listener_item {
137 struct list_head list;
138 struct mlxsw_event_listener el;
139 void *priv;
140 };
141
142 /* The MLXSW_ITEM*() definitions below describe the fields of an EMAD
143  * (Ethernet Management Datagram) frame: the Ethernet header, the
144  * operation TLV, the register TLV and the end TLV.
145  */
146
147 /* emad, eth_hdr, dmac
148  * Destination MAC address of the EMAD Ethernet header.
149  */
150 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
151
152
153
154
155
156 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
157
158
159
160
161
162 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
163
164
165
166
167
168 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
169
170
171
172
173
174 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
175
176
177
178
179
180 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
181
182
183
184
185
186 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
187
188 /* emad, op_tlv, dr
189  * Direct route bit. Cleared for the register access EMADs built by
190  * this driver (see mlxsw_emad_pack_op_tlv()).
191  */
192
193
194 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
195
196 /* emad, op_tlv, status
197  * Status of the EMAD operation as returned by the device. The
198  * statuses handled by mlxsw_emad_process_status() are:
199  * - SUCCESS
200  * - BUSY / MESSAGE_RECEIPT_ACK (the transaction is retried)
201  * - VERSION_NOT_SUPPORTED
202  * - UNKNOWN_TLV
203  * - REGISTER_NOT_SUPPORTED
204  * - CLASS_NOT_SUPPORTED
205  * - METHOD_NOT_SUPPORTED
206  * - BAD_PARAMETER
207  * - RESOURCE_NOT_AVAILABLE
208  * - INTERNAL_ERROR
209  */
210
211 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
212
213
214
215
216 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
217 /* emad, op_tlv, r
218  * Response bit: MLXSW_EMAD_OP_TLV_REQUEST in requests,
219  * MLXSW_EMAD_OP_TLV_RESPONSE in responses (see mlxsw_emad_is_resp()).
220  */
221 MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
222
223 /* emad, op_tlv, method
224  * Access method of the EMAD operation. Register accesses use
225  * MLXSW_EMAD_OP_TLV_METHOD_QUERY or MLXSW_EMAD_OP_TLV_METHOD_WRITE
226  * (see mlxsw_emad_pack_op_tlv()).
227  */
228
229
230 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
231
232
233
234
235 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
236 /* emad, op_tlv, tid
237  * Transaction ID. Echoed back by the device so a response can be
238  * matched to its pending transaction (see mlxsw_emad_get_tid()).
239  */
240 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
241
242
243
244
245
246 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
247
248
249
250
251 MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
252
253
254
255
256
257 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
258
259
260
261
262
263 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
264
265 enum mlxsw_core_reg_access_type {
266 MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
267 MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
268 };
269
270 static inline const char *
271 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
272 {
273 switch (type) {
274 case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
275 return "query";
276 case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
277 return "write";
278 }
279 BUG();
280 }
281
282 static void mlxsw_emad_pack_end_tlv(char *end_tlv)
283 {
284 mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
285 mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
286 }
287
288 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
289 const struct mlxsw_reg_info *reg,
290 char *payload)
291 {
292 mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
293 mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
294 memcpy(reg_tlv + sizeof(u32), payload, reg->len);
295 }
296
297 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
298 const struct mlxsw_reg_info *reg,
299 enum mlxsw_core_reg_access_type type,
300 u64 tid)
301 {
302 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
303 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
304 mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
305 mlxsw_emad_op_tlv_status_set(op_tlv, 0);
306 mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
307 mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
308 if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
309 mlxsw_emad_op_tlv_method_set(op_tlv,
310 MLXSW_EMAD_OP_TLV_METHOD_QUERY);
311 else
312 mlxsw_emad_op_tlv_method_set(op_tlv,
313 MLXSW_EMAD_OP_TLV_METHOD_WRITE);
314 mlxsw_emad_op_tlv_class_set(op_tlv,
315 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
316 mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
317 }
318
319 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
320 {
321 char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
322
323 mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
324 mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
325 mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
326 mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
327 mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
328
329 skb_reset_mac_header(skb);
330
331 return 0;
332 }
333
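/* An EMAD frame is built back to front: the end TLV, the register TLV
 * carrying the payload, the operation TLV and finally the Ethernet
 * header are each pushed in front of the previous one with skb_push().
 */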
334 static void mlxsw_emad_construct(struct sk_buff *skb,
335 const struct mlxsw_reg_info *reg,
336 char *payload,
337 enum mlxsw_core_reg_access_type type,
338 u64 tid)
339 {
340 char *buf;
341
342 buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
343 mlxsw_emad_pack_end_tlv(buf);
344
345 buf = skb_push(skb, reg->len + sizeof(u32));
346 mlxsw_emad_pack_reg_tlv(buf, reg, payload);
347
348 buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
349 mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
350
351 mlxsw_emad_construct_eth_hdr(skb);
352 }
353
354 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
355 {
356 return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
357 }
358
359 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
360 {
361 return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
362 MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
363 }
364
365 static char *mlxsw_emad_reg_payload(const char *op_tlv)
366 {
367 return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
368 }
369
370 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
371 {
372 char *op_tlv;
373
374 op_tlv = mlxsw_emad_op_tlv(skb);
375 return mlxsw_emad_op_tlv_tid_get(op_tlv);
376 }
377
378 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
379 {
380 char *op_tlv;
381
382 op_tlv = mlxsw_emad_op_tlv(skb);
383 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
384 }
385
386 static int mlxsw_emad_process_status(char *op_tlv,
387 enum mlxsw_emad_op_tlv_status *p_status)
388 {
389 *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
390
391 switch (*p_status) {
392 case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
393 return 0;
394 case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
395 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
396 return -EAGAIN;
397 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
398 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
399 case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
400 case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
401 case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
402 case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
403 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
404 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
405 default:
406 return -EIO;
407 }
408 }
409
410 static int
411 mlxsw_emad_process_status_skb(struct sk_buff *skb,
412 enum mlxsw_emad_op_tlv_status *p_status)
413 {
414 return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
415 }
416
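/* One in-flight EMAD register transaction: the skb to (re)transmit,
 * the delayed work implementing the response timeout, the retry count,
 * a completion signalled when the transaction finishes and an optional
 * callback invoked with the response payload.
 */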
417 struct mlxsw_reg_trans {
418 struct list_head list;
419 struct list_head bulk_list;
420 struct mlxsw_core *core;
421 struct sk_buff *tx_skb;
422 struct mlxsw_tx_info tx_info;
423 struct delayed_work timeout_dw;
424 unsigned int retries;
425 u64 tid;
426 struct completion completion;
427 atomic_t active;
428 mlxsw_reg_trans_cb_t *cb;
429 unsigned long cb_priv;
430 const struct mlxsw_reg_info *reg;
431 enum mlxsw_core_reg_access_type type;
432 int err;
433 enum mlxsw_emad_op_tlv_status emad_status;
434 struct rcu_head rcu;
435 };
436
437 #define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
438 #define MLXSW_EMAD_TIMEOUT_MS 200
439
440 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
441 {
442 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
443
444 if (trans->core->fw_flash_in_progress)
445 timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
446
447 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
448 }
449
450 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
451 struct mlxsw_reg_trans *trans)
452 {
453 struct sk_buff *skb;
454 int err;
455
456 skb = skb_copy(trans->tx_skb, GFP_KERNEL);
457 if (!skb)
458 return -ENOMEM;
459
460 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
461 skb->data + mlxsw_core->driver->txhdr_len,
462 skb->len - mlxsw_core->driver->txhdr_len);
463
464 atomic_set(&trans->active, 1);
465 err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
466 if (err) {
467 dev_kfree_skb(skb);
468 return err;
469 }
470 mlxsw_emad_trans_timeout_schedule(trans);
471 return 0;
472 }
473
474 static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
475 {
476 struct mlxsw_core *mlxsw_core = trans->core;
477
478 dev_kfree_skb(trans->tx_skb);
479 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
480 list_del_rcu(&trans->list);
481 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
482 trans->err = err;
483 complete(&trans->completion);
484 }
485
486 static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
487 struct mlxsw_reg_trans *trans)
488 {
489 int err;
490
491 if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
492 trans->retries++;
493 err = mlxsw_emad_transmit(trans->core, trans);
494 if (err == 0)
495 return;
496 } else {
497 err = -EIO;
498 }
499 mlxsw_emad_trans_finish(trans, err);
500 }
501
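/* trans->active arbitrates between the timeout work and the response
 * path: both perform atomic_dec_and_test() and only the caller that
 * brings the counter to zero handles the transaction, so a timeout and
 * a late response cannot both retry or complete it.
 */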
502 static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
503 {
504 struct mlxsw_reg_trans *trans = container_of(work,
505 struct mlxsw_reg_trans,
506 timeout_dw.work);
507
508 if (!atomic_dec_and_test(&trans->active))
509 return;
510
511 mlxsw_emad_transmit_retry(trans->core, trans);
512 }
513
514 static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
515 struct mlxsw_reg_trans *trans,
516 struct sk_buff *skb)
517 {
518 int err;
519
520 if (!atomic_dec_and_test(&trans->active))
521 return;
522
523 err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
524 if (err == -EAGAIN) {
525 mlxsw_emad_transmit_retry(mlxsw_core, trans);
526 } else {
527 if (err == 0) {
528 char *op_tlv = mlxsw_emad_op_tlv(skb);
529
530 if (trans->cb)
531 trans->cb(mlxsw_core,
532 mlxsw_emad_reg_payload(op_tlv),
533 trans->reg->len, trans->cb_priv);
534 }
535 mlxsw_emad_trans_finish(trans, err);
536 }
537 }
538
539
540 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
541 void *priv)
542 {
543 struct mlxsw_core *mlxsw_core = priv;
544 struct mlxsw_reg_trans *trans;
545
546 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
547 skb->data, skb->len);
548
549 if (!mlxsw_emad_is_resp(skb))
550 goto free_skb;
551
552 list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
553 if (mlxsw_emad_get_tid(skb) == trans->tid) {
554 mlxsw_emad_process_response(mlxsw_core, trans, skb);
555 break;
556 }
557 }
558
559 free_skb:
560 dev_kfree_skb(skb);
561 }
562
563 static const struct mlxsw_listener mlxsw_emad_rx_listener =
564 MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
565 EMAD, DISCARD);
566
567 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
568 {
569 struct workqueue_struct *emad_wq;
570 u64 tid;
571 int err;
572
573 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
574 return 0;
575
576 emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
577 if (!emad_wq)
578 return -ENOMEM;
579 mlxsw_core->emad_wq = emad_wq;
580
581 /* Seed the upper 32 bits of the EMAD transaction ID with a random
582  * value; the lower 32 bits are incremented for every transaction
583  * (see mlxsw_core_tid_get()).
584  */
585 get_random_bytes(&tid, 4);
586 tid <<= 32;
587 atomic64_set(&mlxsw_core->emad.tid, tid);
588
589 INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
590 spin_lock_init(&mlxsw_core->emad.trans_list_lock);
591
592 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
593 mlxsw_core);
594 if (err)
595 return err;
596
597 err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
598 if (err)
599 goto err_emad_trap_set;
600 mlxsw_core->emad.use_emad = true;
601
602 return 0;
603
604 err_emad_trap_set:
605 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
606 mlxsw_core);
607 destroy_workqueue(mlxsw_core->emad_wq);
608 return err;
609 }
610
611 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
612 {
613
614 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
615 return;
616
617 mlxsw_core->emad.use_emad = false;
618 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
619 mlxsw_core);
620 destroy_workqueue(mlxsw_core->emad_wq);
621 }
622
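/* Allocate an skb large enough for the whole EMAD frame (Ethernet
 * header, TLVs, register payload and the bus TX header) and reserve
 * the full length as headroom, so the frame can later be built with
 * skb_push().
 */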
623 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
624 u16 reg_len)
625 {
626 struct sk_buff *skb;
627 u16 emad_len;
628
629 emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
630 (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
631 sizeof(u32) + mlxsw_core->driver->txhdr_len);
632 if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
633 return NULL;
634
635 skb = netdev_alloc_skb(NULL, emad_len);
636 if (!skb)
637 return NULL;
638 memset(skb->data, 0, emad_len);
639 skb_reserve(skb, emad_len);
640
641 return skb;
642 }
643
644 static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
645 const struct mlxsw_reg_info *reg,
646 char *payload,
647 enum mlxsw_core_reg_access_type type,
648 struct mlxsw_reg_trans *trans,
649 struct list_head *bulk_list,
650 mlxsw_reg_trans_cb_t *cb,
651 unsigned long cb_priv, u64 tid)
652 {
653 struct sk_buff *skb;
654 int err;
655
656 dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
657 tid, reg->id, mlxsw_reg_id_str(reg->id),
658 mlxsw_core_reg_access_type_str(type));
659
660 skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
661 if (!skb)
662 return -ENOMEM;
663
664 list_add_tail(&trans->bulk_list, bulk_list);
665 trans->core = mlxsw_core;
666 trans->tx_skb = skb;
667 trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
668 trans->tx_info.is_emad = true;
669 INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
670 trans->tid = tid;
671 init_completion(&trans->completion);
672 trans->cb = cb;
673 trans->cb_priv = cb_priv;
674 trans->reg = reg;
675 trans->type = type;
676
677 mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
678 mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
679
680 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
681 list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
682 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
683 err = mlxsw_emad_transmit(mlxsw_core, trans);
684 if (err)
685 goto err_out;
686 return 0;
687
688 err_out:
689 spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
690 list_del_rcu(&trans->list);
691 spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
692 list_del(&trans->bulk_list);
693 dev_kfree_skb(trans->tx_skb);
694 return err;
695 }
696
697
698
699
700
701 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
702 {
703 spin_lock(&mlxsw_core_driver_list_lock);
704 list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
705 spin_unlock(&mlxsw_core_driver_list_lock);
706 return 0;
707 }
708 EXPORT_SYMBOL(mlxsw_core_driver_register);
709
710 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
711 {
712 spin_lock(&mlxsw_core_driver_list_lock);
713 list_del(&mlxsw_driver->list);
714 spin_unlock(&mlxsw_core_driver_list_lock);
715 }
716 EXPORT_SYMBOL(mlxsw_core_driver_unregister);
717
718 static struct mlxsw_driver *__driver_find(const char *kind)
719 {
720 struct mlxsw_driver *mlxsw_driver;
721
722 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
723 if (strcmp(mlxsw_driver->kind, kind) == 0)
724 return mlxsw_driver;
725 }
726 return NULL;
727 }
728
729 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
730 {
731 struct mlxsw_driver *mlxsw_driver;
732
733 spin_lock(&mlxsw_core_driver_list_lock);
734 mlxsw_driver = __driver_find(kind);
735 spin_unlock(&mlxsw_core_driver_list_lock);
736 return mlxsw_driver;
737 }
738
739 static int mlxsw_devlink_port_split(struct devlink *devlink,
740 unsigned int port_index,
741 unsigned int count,
742 struct netlink_ext_ack *extack)
743 {
744 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
745
746 if (port_index >= mlxsw_core->max_ports) {
747 NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
748 return -EINVAL;
749 }
750 if (!mlxsw_core->driver->port_split)
751 return -EOPNOTSUPP;
752 return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
753 extack);
754 }
755
756 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
757 unsigned int port_index,
758 struct netlink_ext_ack *extack)
759 {
760 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
761
762 if (port_index >= mlxsw_core->max_ports) {
763 NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
764 return -EINVAL;
765 }
766 if (!mlxsw_core->driver->port_unsplit)
767 return -EOPNOTSUPP;
768 return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
769 extack);
770 }
771
772 static int
773 mlxsw_devlink_sb_pool_get(struct devlink *devlink,
774 unsigned int sb_index, u16 pool_index,
775 struct devlink_sb_pool_info *pool_info)
776 {
777 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
778 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
779
780 if (!mlxsw_driver->sb_pool_get)
781 return -EOPNOTSUPP;
782 return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
783 pool_index, pool_info);
784 }
785
786 static int
787 mlxsw_devlink_sb_pool_set(struct devlink *devlink,
788 unsigned int sb_index, u16 pool_index, u32 size,
789 enum devlink_sb_threshold_type threshold_type,
790 struct netlink_ext_ack *extack)
791 {
792 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
793 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
794
795 if (!mlxsw_driver->sb_pool_set)
796 return -EOPNOTSUPP;
797 return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
798 pool_index, size, threshold_type,
799 extack);
800 }
801
802 static void *__dl_port(struct devlink_port *devlink_port)
803 {
804 return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
805 }
806
807 static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
808 enum devlink_port_type port_type)
809 {
810 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
811 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
812 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
813
814 if (!mlxsw_driver->port_type_set)
815 return -EOPNOTSUPP;
816
817 return mlxsw_driver->port_type_set(mlxsw_core,
818 mlxsw_core_port->local_port,
819 port_type);
820 }
821
822 static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
823 unsigned int sb_index, u16 pool_index,
824 u32 *p_threshold)
825 {
826 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
827 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
828 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
829
830 if (!mlxsw_driver->sb_port_pool_get ||
831 !mlxsw_core_port_check(mlxsw_core_port))
832 return -EOPNOTSUPP;
833 return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
834 pool_index, p_threshold);
835 }
836
837 static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
838 unsigned int sb_index, u16 pool_index,
839 u32 threshold,
840 struct netlink_ext_ack *extack)
841 {
842 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
843 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
844 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
845
846 if (!mlxsw_driver->sb_port_pool_set ||
847 !mlxsw_core_port_check(mlxsw_core_port))
848 return -EOPNOTSUPP;
849 return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
850 pool_index, threshold, extack);
851 }
852
853 static int
854 mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
855 unsigned int sb_index, u16 tc_index,
856 enum devlink_sb_pool_type pool_type,
857 u16 *p_pool_index, u32 *p_threshold)
858 {
859 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
860 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
861 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
862
863 if (!mlxsw_driver->sb_tc_pool_bind_get ||
864 !mlxsw_core_port_check(mlxsw_core_port))
865 return -EOPNOTSUPP;
866 return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
867 tc_index, pool_type,
868 p_pool_index, p_threshold);
869 }
870
871 static int
872 mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
873 unsigned int sb_index, u16 tc_index,
874 enum devlink_sb_pool_type pool_type,
875 u16 pool_index, u32 threshold,
876 struct netlink_ext_ack *extack)
877 {
878 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
879 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
880 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
881
882 if (!mlxsw_driver->sb_tc_pool_bind_set ||
883 !mlxsw_core_port_check(mlxsw_core_port))
884 return -EOPNOTSUPP;
885 return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
886 tc_index, pool_type,
887 pool_index, threshold, extack);
888 }
889
890 static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
891 unsigned int sb_index)
892 {
893 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
894 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
895
896 if (!mlxsw_driver->sb_occ_snapshot)
897 return -EOPNOTSUPP;
898 return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
899 }
900
901 static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
902 unsigned int sb_index)
903 {
904 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
905 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
906
907 if (!mlxsw_driver->sb_occ_max_clear)
908 return -EOPNOTSUPP;
909 return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
910 }
911
912 static int
913 mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
914 unsigned int sb_index, u16 pool_index,
915 u32 *p_cur, u32 *p_max)
916 {
917 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
918 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
919 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
920
921 if (!mlxsw_driver->sb_occ_port_pool_get ||
922 !mlxsw_core_port_check(mlxsw_core_port))
923 return -EOPNOTSUPP;
924 return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
925 pool_index, p_cur, p_max);
926 }
927
928 static int
929 mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
930 unsigned int sb_index, u16 tc_index,
931 enum devlink_sb_pool_type pool_type,
932 u32 *p_cur, u32 *p_max)
933 {
934 struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
935 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
936 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
937
938 if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
939 !mlxsw_core_port_check(mlxsw_core_port))
940 return -EOPNOTSUPP;
941 return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
942 sb_index, tc_index,
943 pool_type, p_cur, p_max);
944 }
945
946 static int
947 mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
948 struct netlink_ext_ack *extack)
949 {
950 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
951 char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
952 u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
953 char mgir_pl[MLXSW_REG_MGIR_LEN];
954 char buf[32];
955 int err;
956
957 err = devlink_info_driver_name_put(req,
958 mlxsw_core->bus_info->device_kind);
959 if (err)
960 return err;
961
962 mlxsw_reg_mgir_pack(mgir_pl);
963 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
964 if (err)
965 return err;
966 mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
967 &fw_minor, &fw_sub_minor);
968
969 sprintf(buf, "%X", hw_rev);
970 err = devlink_info_version_fixed_put(req, "hw.revision", buf);
971 if (err)
972 return err;
973
974 err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
975 if (err)
976 return err;
977
978 sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
979 err = devlink_info_version_running_put(req, "fw.version", buf);
980 if (err)
981 return err;
982
983 return 0;
984 }
985
986 static int
987 mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
988 struct netlink_ext_ack *extack)
989 {
990 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
991
992 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
993 return -EOPNOTSUPP;
994
995 mlxsw_core_bus_device_unregister(mlxsw_core, true);
996 return 0;
997 }
998
999 static int
1000 mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
1001 struct netlink_ext_ack *extack)
1002 {
1003 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1004
1005 return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
1006 mlxsw_core->bus,
1007 mlxsw_core->bus_priv, true,
1008 devlink);
1009 }
1010
1011 static int mlxsw_devlink_flash_update(struct devlink *devlink,
1012 const char *file_name,
1013 const char *component,
1014 struct netlink_ext_ack *extack)
1015 {
1016 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1017 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1018
1019 if (!mlxsw_driver->flash_update)
1020 return -EOPNOTSUPP;
1021 return mlxsw_driver->flash_update(mlxsw_core, file_name,
1022 component, extack);
1023 }
1024
1025 static int mlxsw_devlink_trap_init(struct devlink *devlink,
1026 const struct devlink_trap *trap,
1027 void *trap_ctx)
1028 {
1029 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1030 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1031
1032 if (!mlxsw_driver->trap_init)
1033 return -EOPNOTSUPP;
1034 return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
1035 }
1036
1037 static void mlxsw_devlink_trap_fini(struct devlink *devlink,
1038 const struct devlink_trap *trap,
1039 void *trap_ctx)
1040 {
1041 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1042 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1043
1044 if (!mlxsw_driver->trap_fini)
1045 return;
1046 mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
1047 }
1048
1049 static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
1050 const struct devlink_trap *trap,
1051 enum devlink_trap_action action)
1052 {
1053 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1054 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1055
1056 if (!mlxsw_driver->trap_action_set)
1057 return -EOPNOTSUPP;
1058 return mlxsw_driver->trap_action_set(mlxsw_core, trap, action);
1059 }
1060
1061 static int
1062 mlxsw_devlink_trap_group_init(struct devlink *devlink,
1063 const struct devlink_trap_group *group)
1064 {
1065 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1066 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1067
1068 if (!mlxsw_driver->trap_group_init)
1069 return -EOPNOTSUPP;
1070 return mlxsw_driver->trap_group_init(mlxsw_core, group);
1071 }
1072
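/* The devlink callbacks below are thin wrappers that dispatch to the
 * registered driver's optional hooks; most return -EOPNOTSUPP when the
 * driver does not implement the corresponding hook.
 */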
1073 static const struct devlink_ops mlxsw_devlink_ops = {
1074 .reload_down = mlxsw_devlink_core_bus_device_reload_down,
1075 .reload_up = mlxsw_devlink_core_bus_device_reload_up,
1076 .port_type_set = mlxsw_devlink_port_type_set,
1077 .port_split = mlxsw_devlink_port_split,
1078 .port_unsplit = mlxsw_devlink_port_unsplit,
1079 .sb_pool_get = mlxsw_devlink_sb_pool_get,
1080 .sb_pool_set = mlxsw_devlink_sb_pool_set,
1081 .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
1082 .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
1083 .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
1084 .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
1085 .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
1086 .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
1087 .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
1088 .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
1089 .info_get = mlxsw_devlink_info_get,
1090 .flash_update = mlxsw_devlink_flash_update,
1091 .trap_init = mlxsw_devlink_trap_init,
1092 .trap_fini = mlxsw_devlink_trap_fini,
1093 .trap_action_set = mlxsw_devlink_trap_action_set,
1094 .trap_group_init = mlxsw_devlink_trap_group_init,
1095 };
1096
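/* Common registration path shared by all mlxsw buses: allocate the
 * devlink instance (unless reloading), initialize the bus, then
 * register resources, ports, the LAG mapping and EMAD, followed by
 * devlink, driver params, the driver itself, hwmon and thermal.
 * Errors unwind in reverse order.
 */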
1097 static int
1098 __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
1099 const struct mlxsw_bus *mlxsw_bus,
1100 void *bus_priv, bool reload,
1101 struct devlink *devlink)
1102 {
1103 const char *device_kind = mlxsw_bus_info->device_kind;
1104 struct mlxsw_core *mlxsw_core;
1105 struct mlxsw_driver *mlxsw_driver;
1106 struct mlxsw_res *res;
1107 size_t alloc_size;
1108 int err;
1109
1110 mlxsw_driver = mlxsw_core_driver_get(device_kind);
1111 if (!mlxsw_driver)
1112 return -EINVAL;
1113
1114 if (!reload) {
1115 alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
1116 devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
1117 if (!devlink) {
1118 err = -ENOMEM;
1119 goto err_devlink_alloc;
1120 }
1121 }
1122
1123 mlxsw_core = devlink_priv(devlink);
1124 INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
1125 INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
1126 mlxsw_core->driver = mlxsw_driver;
1127 mlxsw_core->bus = mlxsw_bus;
1128 mlxsw_core->bus_priv = bus_priv;
1129 mlxsw_core->bus_info = mlxsw_bus_info;
1130
1131 res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
1132 err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
1133 if (err)
1134 goto err_bus_init;
1135
1136 if (mlxsw_driver->resources_register && !reload) {
1137 err = mlxsw_driver->resources_register(mlxsw_core);
1138 if (err)
1139 goto err_register_resources;
1140 }
1141
1142 err = mlxsw_ports_init(mlxsw_core);
1143 if (err)
1144 goto err_ports_init;
1145
1146 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
1147 MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
1148 alloc_size = sizeof(u8) *
1149 MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
1150 MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
1151 mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
1152 if (!mlxsw_core->lag.mapping) {
1153 err = -ENOMEM;
1154 goto err_alloc_lag_mapping;
1155 }
1156 }
1157
1158 err = mlxsw_emad_init(mlxsw_core);
1159 if (err)
1160 goto err_emad_init;
1161
1162 if (!reload) {
1163 err = devlink_register(devlink, mlxsw_bus_info->dev);
1164 if (err)
1165 goto err_devlink_register;
1166 }
1167
1168 if (mlxsw_driver->params_register && !reload) {
1169 err = mlxsw_driver->params_register(mlxsw_core);
1170 if (err)
1171 goto err_register_params;
1172 }
1173
1174 if (mlxsw_driver->init) {
1175 err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
1176 if (err)
1177 goto err_driver_init;
1178 }
1179
1180 err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
1181 if (err)
1182 goto err_hwmon_init;
1183
1184 err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
1185 &mlxsw_core->thermal);
1186 if (err)
1187 goto err_thermal_init;
1188
1189 if (mlxsw_driver->params_register)
1190 devlink_params_publish(devlink);
1191
1192 if (!reload)
1193 devlink_reload_enable(devlink);
1194
1195 return 0;
1196
1197 err_thermal_init:
1198 mlxsw_hwmon_fini(mlxsw_core->hwmon);
1199 err_hwmon_init:
1200 if (mlxsw_core->driver->fini)
1201 mlxsw_core->driver->fini(mlxsw_core);
1202 err_driver_init:
1203 if (mlxsw_driver->params_unregister && !reload)
1204 mlxsw_driver->params_unregister(mlxsw_core);
1205 err_register_params:
1206 if (!reload)
1207 devlink_unregister(devlink);
1208 err_devlink_register:
1209 mlxsw_emad_fini(mlxsw_core);
1210 err_emad_init:
1211 kfree(mlxsw_core->lag.mapping);
1212 err_alloc_lag_mapping:
1213 mlxsw_ports_fini(mlxsw_core);
1214 err_ports_init:
1215 if (!reload)
1216 devlink_resources_unregister(devlink, NULL);
1217 err_register_resources:
1218 mlxsw_bus->fini(bus_priv);
1219 err_bus_init:
1220 if (!reload)
1221 devlink_free(devlink);
1222 err_devlink_alloc:
1223 return err;
1224 }
1225
1226 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
1227 const struct mlxsw_bus *mlxsw_bus,
1228 void *bus_priv, bool reload,
1229 struct devlink *devlink)
1230 {
1231 bool called_again = false;
1232 int err;
1233
1234 again:
1235 err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
1236 bus_priv, reload, devlink);
1237 /* A return value of -EAGAIN indicates that initialization needs to
1238  * be performed again (e.g. after a firmware update during the
1239  * driver's init); retry the registration once in that case.
1240  */
1241 if (err == -EAGAIN && !called_again) {
1242 called_again = true;
1243 goto again;
1244 }
1245
1246 return err;
1247 }
1248 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
1249
1250 void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1251 bool reload)
1252 {
1253 struct devlink *devlink = priv_to_devlink(mlxsw_core);
1254
1255 if (!reload)
1256 devlink_reload_disable(devlink);
1257 if (devlink_is_reload_failed(devlink)) {
1258 if (!reload)
1259 /* Only the parts that were not de-initialized in the failed
1260  * reload attempt need to be de-initialized now.
1261  */
1262 goto reload_fail_deinit;
1263 else
1264 return;
1265 }
1266
1267 if (mlxsw_core->driver->params_unregister)
1268 devlink_params_unpublish(devlink);
1269 mlxsw_thermal_fini(mlxsw_core->thermal);
1270 mlxsw_hwmon_fini(mlxsw_core->hwmon);
1271 if (mlxsw_core->driver->fini)
1272 mlxsw_core->driver->fini(mlxsw_core);
1273 if (mlxsw_core->driver->params_unregister && !reload)
1274 mlxsw_core->driver->params_unregister(mlxsw_core);
1275 if (!reload)
1276 devlink_unregister(devlink);
1277 mlxsw_emad_fini(mlxsw_core);
1278 kfree(mlxsw_core->lag.mapping);
1279 mlxsw_ports_fini(mlxsw_core);
1280 if (!reload)
1281 devlink_resources_unregister(devlink, NULL);
1282 mlxsw_core->bus->fini(mlxsw_core->bus_priv);
1283
1284 return;
1285
1286 reload_fail_deinit:
1287 if (mlxsw_core->driver->params_unregister)
1288 mlxsw_core->driver->params_unregister(mlxsw_core);
1289 devlink_unregister(devlink);
1290 devlink_resources_unregister(devlink, NULL);
1291 devlink_free(devlink);
1292 }
1293 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
1294
1295 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
1296 const struct mlxsw_tx_info *tx_info)
1297 {
1298 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
1299 tx_info);
1300 }
1301 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
1302
1303 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1304 const struct mlxsw_tx_info *tx_info)
1305 {
1306 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
1307 tx_info);
1308 }
1309 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
1310
1311 void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
1312 struct sk_buff *skb, u8 local_port)
1313 {
1314 if (mlxsw_core->driver->ptp_transmitted)
1315 mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
1316 local_port);
1317 }
1318 EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
1319
1320 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
1321 const struct mlxsw_rx_listener *rxl_b)
1322 {
1323 return (rxl_a->func == rxl_b->func &&
1324 rxl_a->local_port == rxl_b->local_port &&
1325 rxl_a->trap_id == rxl_b->trap_id);
1326 }
1327
1328 static struct mlxsw_rx_listener_item *
1329 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
1330 const struct mlxsw_rx_listener *rxl,
1331 void *priv)
1332 {
1333 struct mlxsw_rx_listener_item *rxl_item;
1334
1335 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
1336 if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
1337 rxl_item->priv == priv)
1338 return rxl_item;
1339 }
1340 return NULL;
1341 }
1342
1343 int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
1344 const struct mlxsw_rx_listener *rxl,
1345 void *priv)
1346 {
1347 struct mlxsw_rx_listener_item *rxl_item;
1348
1349 rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1350 if (rxl_item)
1351 return -EEXIST;
1352 rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
1353 if (!rxl_item)
1354 return -ENOMEM;
1355 rxl_item->rxl = *rxl;
1356 rxl_item->priv = priv;
1357
1358 list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
1359 return 0;
1360 }
1361 EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
1362
1363 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
1364 const struct mlxsw_rx_listener *rxl,
1365 void *priv)
1366 {
1367 struct mlxsw_rx_listener_item *rxl_item;
1368
1369 rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1370 if (!rxl_item)
1371 return;
1372 list_del_rcu(&rxl_item->list);
1373 synchronize_rcu();
1374 kfree(rxl_item);
1375 }
1376 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
1377
1378 static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
1379 void *priv)
1380 {
1381 struct mlxsw_event_listener_item *event_listener_item = priv;
1382 struct mlxsw_reg_info reg;
1383 char *payload;
1384 char *op_tlv = mlxsw_emad_op_tlv(skb);
1385 char *reg_tlv = mlxsw_emad_reg_tlv(skb);
1386
1387 reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
1388 reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
1389 payload = mlxsw_emad_reg_payload(op_tlv);
1390 event_listener_item->el.func(&reg, payload, event_listener_item->priv);
1391 dev_kfree_skb(skb);
1392 }
1393
1394 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
1395 const struct mlxsw_event_listener *el_b)
1396 {
1397 return (el_a->func == el_b->func &&
1398 el_a->trap_id == el_b->trap_id);
1399 }
1400
1401 static struct mlxsw_event_listener_item *
1402 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
1403 const struct mlxsw_event_listener *el,
1404 void *priv)
1405 {
1406 struct mlxsw_event_listener_item *el_item;
1407
1408 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
1409 if (__is_event_listener_equal(&el_item->el, el) &&
1410 el_item->priv == priv)
1411 return el_item;
1412 }
1413 return NULL;
1414 }
1415
1416 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
1417 const struct mlxsw_event_listener *el,
1418 void *priv)
1419 {
1420 int err;
1421 struct mlxsw_event_listener_item *el_item;
1422 const struct mlxsw_rx_listener rxl = {
1423 .func = mlxsw_core_event_listener_func,
1424 .local_port = MLXSW_PORT_DONT_CARE,
1425 .trap_id = el->trap_id,
1426 };
1427
1428 el_item = __find_event_listener_item(mlxsw_core, el, priv);
1429 if (el_item)
1430 return -EEXIST;
1431 el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
1432 if (!el_item)
1433 return -ENOMEM;
1434 el_item->el = *el;
1435 el_item->priv = priv;
1436
1437 err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
1438 if (err)
1439 goto err_rx_listener_register;
1440
1441
1442
1443
1444 list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
1445
1446 return 0;
1447
1448 err_rx_listener_register:
1449 kfree(el_item);
1450 return err;
1451 }
1452 EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1453
1454 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
1455 const struct mlxsw_event_listener *el,
1456 void *priv)
1457 {
1458 struct mlxsw_event_listener_item *el_item;
1459 const struct mlxsw_rx_listener rxl = {
1460 .func = mlxsw_core_event_listener_func,
1461 .local_port = MLXSW_PORT_DONT_CARE,
1462 .trap_id = el->trap_id,
1463 };
1464
1465 el_item = __find_event_listener_item(mlxsw_core, el, priv);
1466 if (!el_item)
1467 return;
1468 mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
1469 list_del(&el_item->list);
1470 kfree(el_item);
1471 }
1472 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1473
1474 static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
1475 const struct mlxsw_listener *listener,
1476 void *priv)
1477 {
1478 if (listener->is_event)
1479 return mlxsw_core_event_listener_register(mlxsw_core,
1480 &listener->u.event_listener,
1481 priv);
1482 else
1483 return mlxsw_core_rx_listener_register(mlxsw_core,
1484 &listener->u.rx_listener,
1485 priv);
1486 }
1487
1488 static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
1489 const struct mlxsw_listener *listener,
1490 void *priv)
1491 {
1492 if (listener->is_event)
1493 mlxsw_core_event_listener_unregister(mlxsw_core,
1494 &listener->u.event_listener,
1495 priv);
1496 else
1497 mlxsw_core_rx_listener_unregister(mlxsw_core,
1498 &listener->u.rx_listener,
1499 priv);
1500 }
1501
1502 int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
1503 const struct mlxsw_listener *listener, void *priv)
1504 {
1505 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1506 int err;
1507
1508 err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
1509 if (err)
1510 return err;
1511
1512 mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
1513 listener->trap_group, listener->is_ctrl);
1514 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1515 if (err)
1516 goto err_trap_set;
1517
1518 return 0;
1519
1520 err_trap_set:
1521 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1522 return err;
1523 }
1524 EXPORT_SYMBOL(mlxsw_core_trap_register);
1525
1526 void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
1527 const struct mlxsw_listener *listener,
1528 void *priv)
1529 {
1530 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1531
1532 if (!listener->is_event) {
1533 mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
1534 listener->trap_id, listener->trap_group,
1535 listener->is_ctrl);
1536 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1537 }
1538
1539 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1540 }
1541 EXPORT_SYMBOL(mlxsw_core_trap_unregister);
1542
1543 int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
1544 const struct mlxsw_listener *listener,
1545 enum mlxsw_reg_hpkt_action action)
1546 {
1547 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1548
1549 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
1550 listener->trap_group, listener->is_ctrl);
1551 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1552 }
1553 EXPORT_SYMBOL(mlxsw_core_trap_action_set);
1554
1555 static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
1556 {
1557 return atomic64_inc_return(&mlxsw_core->emad.tid);
1558 }
1559
1560 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1561 const struct mlxsw_reg_info *reg,
1562 char *payload,
1563 enum mlxsw_core_reg_access_type type,
1564 struct list_head *bulk_list,
1565 mlxsw_reg_trans_cb_t *cb,
1566 unsigned long cb_priv)
1567 {
1568 u64 tid = mlxsw_core_tid_get(mlxsw_core);
1569 struct mlxsw_reg_trans *trans;
1570 int err;
1571
1572 trans = kzalloc(sizeof(*trans), GFP_KERNEL);
1573 if (!trans)
1574 return -ENOMEM;
1575
1576 err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
1577 bulk_list, cb, cb_priv, tid);
1578 if (err) {
1579 kfree(trans);
1580 return err;
1581 }
1582 return 0;
1583 }
1584
1585 int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
1586 const struct mlxsw_reg_info *reg, char *payload,
1587 struct list_head *bulk_list,
1588 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
1589 {
1590 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
1591 MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
1592 bulk_list, cb, cb_priv);
1593 }
1594 EXPORT_SYMBOL(mlxsw_reg_trans_query);
1595
1596 int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
1597 const struct mlxsw_reg_info *reg, char *payload,
1598 struct list_head *bulk_list,
1599 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
1600 {
1601 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
1602 MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
1603 bulk_list, cb, cb_priv);
1604 }
1605 EXPORT_SYMBOL(mlxsw_reg_trans_write);
1606
1607 static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
1608 {
1609 struct mlxsw_core *mlxsw_core = trans->core;
1610 int err;
1611
1612 wait_for_completion(&trans->completion);
1613 cancel_delayed_work_sync(&trans->timeout_dw);
1614 err = trans->err;
1615
1616 if (trans->retries)
1617 dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
1618 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
1619 if (err) {
1620 dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
1621 trans->tid, trans->reg->id,
1622 mlxsw_reg_id_str(trans->reg->id),
1623 mlxsw_core_reg_access_type_str(trans->type),
1624 trans->emad_status,
1625 mlxsw_emad_op_tlv_status_str(trans->emad_status));
1626 trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
1627 trans->emad_status,
1628 mlxsw_emad_op_tlv_status_str(trans->emad_status));
1629 }
1630
1631 list_del(&trans->bulk_list);
1632 kfree_rcu(trans, rcu);
1633 return err;
1634 }
1635
1636 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
1637 {
1638 struct mlxsw_reg_trans *trans;
1639 struct mlxsw_reg_trans *tmp;
1640 int sum_err = 0;
1641 int err;
1642
1643 list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
1644 err = mlxsw_reg_trans_wait(trans);
1645 if (err && sum_err == 0)
1646 sum_err = err;
1647 }
1648 return sum_err;
1649 }
1650 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
1651
1652 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
1653 const struct mlxsw_reg_info *reg,
1654 char *payload,
1655 enum mlxsw_core_reg_access_type type)
1656 {
1657 enum mlxsw_emad_op_tlv_status status;
1658 int err, n_retry;
1659 bool reset_ok;
1660 char *in_mbox, *out_mbox, *tmp;
1661
1662 dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
1663 reg->id, mlxsw_reg_id_str(reg->id),
1664 mlxsw_core_reg_access_type_str(type));
1665
1666 in_mbox = mlxsw_cmd_mbox_alloc();
1667 if (!in_mbox)
1668 return -ENOMEM;
1669
1670 out_mbox = mlxsw_cmd_mbox_alloc();
1671 if (!out_mbox) {
1672 err = -ENOMEM;
1673 goto free_in_mbox;
1674 }
1675
1676 mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
1677 mlxsw_core_tid_get(mlxsw_core));
1678 tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
1679 mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
1680
1681
1682
1683
1684
1685
1686 reset_ok = reg->id == MLXSW_REG_MRSR_ID;
1687
1688 n_retry = 0;
1689 retry:
1690 err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
1691 if (!err) {
1692 err = mlxsw_emad_process_status(out_mbox, &status);
1693 if (err) {
1694 if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
1695 goto retry;
1696 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
1697 status, mlxsw_emad_op_tlv_status_str(status));
1698 }
1699 }
1700
1701 if (!err)
1702 memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
1703 reg->len);
1704
1705 mlxsw_cmd_mbox_free(out_mbox);
1706 free_in_mbox:
1707 mlxsw_cmd_mbox_free(in_mbox);
1708 if (err)
1709 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
1710 reg->id, mlxsw_reg_id_str(reg->id),
1711 mlxsw_core_reg_access_type_str(type));
1712 return err;
1713 }
1714
1715 static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
1716 char *payload, size_t payload_len,
1717 unsigned long cb_priv)
1718 {
1719 char *orig_payload = (char *) cb_priv;
1720
1721 memcpy(orig_payload, payload, payload_len);
1722 }
1723
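/* Synchronous register access: when the EMAD interface is enabled the
 * request goes through mlxsw_core_reg_access_emad() with a single-entry
 * bulk list that is immediately waited on; otherwise it falls back to
 * the command interface via mlxsw_core_reg_access_cmd().
 */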
1724 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
1725 const struct mlxsw_reg_info *reg,
1726 char *payload,
1727 enum mlxsw_core_reg_access_type type)
1728 {
1729 LIST_HEAD(bulk_list);
1730 int err;
1731
1732 /* During driver initialization, before the EMAD interface is
1733  * enabled (emad.use_emad), fall back to issuing the register
1734  * access through the command interface.
1735  */
1736 if (!mlxsw_core->emad.use_emad)
1737 return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
1738 payload, type);
1739
1740 err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
1741 payload, type, &bulk_list,
1742 mlxsw_core_reg_access_cb,
1743 (unsigned long) payload);
1744 if (err)
1745 return err;
1746 return mlxsw_reg_trans_bulk_wait(&bulk_list);
1747 }
1748
1749 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
1750 const struct mlxsw_reg_info *reg, char *payload)
1751 {
1752 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
1753 MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
1754 }
1755 EXPORT_SYMBOL(mlxsw_reg_query);
1756
1757 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
1758 const struct mlxsw_reg_info *reg, char *payload)
1759 {
1760 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
1761 MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
1762 }
1763 EXPORT_SYMBOL(mlxsw_reg_write);
1764
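/* RX entry point called by the bus driver: resolve LAG packets to a
 * local port, then hand the skb to the first registered RX listener
 * matching the trap ID and local port, or drop it if none matches.
 */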
1765 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1766 struct mlxsw_rx_info *rx_info)
1767 {
1768 struct mlxsw_rx_listener_item *rxl_item;
1769 const struct mlxsw_rx_listener *rxl;
1770 u8 local_port;
1771 bool found = false;
1772
1773 if (rx_info->is_lag) {
1774 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
1775 __func__, rx_info->u.lag_id,
1776 rx_info->trap_id);
1777 /* Upper layers only deal with local ports, so resolve the LAG
1778  * member back to its local port.
1779  */
1780 local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
1781 rx_info->u.lag_id,
1782 rx_info->lag_port_index);
1783 } else {
1784 local_port = rx_info->u.sys_port;
1785 }
1786
1787 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
1788 __func__, local_port, rx_info->trap_id);
1789
1790 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
1791 (local_port >= mlxsw_core->max_ports))
1792 goto drop;
1793
1794 rcu_read_lock();
1795 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
1796 rxl = &rxl_item->rxl;
1797 if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
1798 rxl->local_port == local_port) &&
1799 rxl->trap_id == rx_info->trap_id) {
1800 found = true;
1801 break;
1802 }
1803 }
1804 rcu_read_unlock();
1805 if (!found)
1806 goto drop;
1807
1808 rxl->func(skb, local_port, rxl_item->priv);
1809 return;
1810
1811 drop:
1812 dev_kfree_skb(skb);
1813 }
1814 EXPORT_SYMBOL(mlxsw_core_skb_receive);
1815
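/* The LAG mapping is a flat array of MAX_LAG * MAX_LAG_MEMBERS bytes,
 * indexed by (lag_id * MAX_LAG_MEMBERS + port_index); each entry holds
 * the local port of that LAG member, or 0 when unused.
 */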
1816 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
1817 u16 lag_id, u8 port_index)
1818 {
1819 return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
1820 port_index;
1821 }
1822
1823 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
1824 u16 lag_id, u8 port_index, u8 local_port)
1825 {
1826 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1827 lag_id, port_index);
1828
1829 mlxsw_core->lag.mapping[index] = local_port;
1830 }
1831 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
1832
1833 u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
1834 u16 lag_id, u8 port_index)
1835 {
1836 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1837 lag_id, port_index);
1838
1839 return mlxsw_core->lag.mapping[index];
1840 }
1841 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
1842
1843 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
1844 u16 lag_id, u8 local_port)
1845 {
1846 int i;
1847
1848 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
1849 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1850 lag_id, i);
1851
1852 if (mlxsw_core->lag.mapping[index] == local_port)
1853 mlxsw_core->lag.mapping[index] = 0;
1854 }
1855 }
1856 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
1857
1858 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
1859 enum mlxsw_res_id res_id)
1860 {
1861 return mlxsw_res_valid(&mlxsw_core->res, res_id);
1862 }
1863 EXPORT_SYMBOL(mlxsw_core_res_valid);
1864
1865 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
1866 enum mlxsw_res_id res_id)
1867 {
1868 return mlxsw_res_get(&mlxsw_core->res, res_id);
1869 }
1870 EXPORT_SYMBOL(mlxsw_core_res_get);
1871
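/* Common helper for registering a devlink port for a given local port.
 * On failure the per-port state is cleared so the slot can be reused.
 */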
1872 static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
1873 enum devlink_port_flavour flavour,
1874 u32 port_number, bool split,
1875 u32 split_port_subnumber,
1876 const unsigned char *switch_id,
1877 unsigned char switch_id_len)
1878 {
1879 struct devlink *devlink = priv_to_devlink(mlxsw_core);
1880 struct mlxsw_core_port *mlxsw_core_port =
1881 &mlxsw_core->ports[local_port];
1882 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1883 int err;
1884
1885 mlxsw_core_port->local_port = local_port;
1886 devlink_port_attrs_set(devlink_port, flavour, port_number,
1887 split, split_port_subnumber,
1888 switch_id, switch_id_len);
1889 err = devlink_port_register(devlink, devlink_port, local_port);
1890 if (err)
1891 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
1892 return err;
1893 }
1894
1895 static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
1896 {
1897 struct mlxsw_core_port *mlxsw_core_port =
1898 &mlxsw_core->ports[local_port];
1899 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1900
1901 devlink_port_unregister(devlink_port);
1902 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
1903 }
1904
1905 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
1906 u32 port_number, bool split,
1907 u32 split_port_subnumber,
1908 const unsigned char *switch_id,
1909 unsigned char switch_id_len)
1910 {
1911 return __mlxsw_core_port_init(mlxsw_core, local_port,
1912 DEVLINK_PORT_FLAVOUR_PHYSICAL,
1913 port_number, split, split_port_subnumber,
1914 switch_id, switch_id_len);
1915 }
1916 EXPORT_SYMBOL(mlxsw_core_port_init);
1917
1918 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
1919 {
1920 __mlxsw_core_port_fini(mlxsw_core, local_port);
1921 }
1922 EXPORT_SYMBOL(mlxsw_core_port_fini);
1923
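/* The CPU port is registered with the DEVLINK_PORT_FLAVOUR_CPU flavour.
 * It is never split, hence port number 0 and no split subnumber.
 */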
1924 int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
1925 void *port_driver_priv,
1926 const unsigned char *switch_id,
1927 unsigned char switch_id_len)
1928 {
1929 struct mlxsw_core_port *mlxsw_core_port =
1930 &mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
1931 int err;
1932
1933 err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
1934 DEVLINK_PORT_FLAVOUR_CPU,
1935 0, false, 0,
1936 switch_id, switch_id_len);
1937 if (err)
1938 return err;
1939
1940 mlxsw_core_port->port_driver_priv = port_driver_priv;
1941 return 0;
1942 }
1943 EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
1944
1945 void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
1946 {
1947 __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
1948 }
1949 EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
1950
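/* Bind the driver's per-port private data and mark the devlink port as
 * an Ethernet port backed by the given netdevice.
 */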
1951 void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1952 void *port_driver_priv, struct net_device *dev)
1953 {
1954 struct mlxsw_core_port *mlxsw_core_port =
1955 &mlxsw_core->ports[local_port];
1956 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1957
1958 mlxsw_core_port->port_driver_priv = port_driver_priv;
1959 devlink_port_type_eth_set(devlink_port, dev);
1960 }
1961 EXPORT_SYMBOL(mlxsw_core_port_eth_set);
1962
1963 void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1964 void *port_driver_priv)
1965 {
1966 struct mlxsw_core_port *mlxsw_core_port =
1967 &mlxsw_core->ports[local_port];
1968 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1969
1970 mlxsw_core_port->port_driver_priv = port_driver_priv;
1971 devlink_port_type_ib_set(devlink_port, NULL);
1972 }
1973 EXPORT_SYMBOL(mlxsw_core_port_ib_set);
1974
1975 void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
1976 void *port_driver_priv)
1977 {
1978 struct mlxsw_core_port *mlxsw_core_port =
1979 &mlxsw_core->ports[local_port];
1980 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1981
1982 mlxsw_core_port->port_driver_priv = port_driver_priv;
1983 devlink_port_type_clear(devlink_port);
1984 }
1985 EXPORT_SYMBOL(mlxsw_core_port_clear);
1986
1987 enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
1988 u8 local_port)
1989 {
1990 struct mlxsw_core_port *mlxsw_core_port =
1991 &mlxsw_core->ports[local_port];
1992 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1993
1994 return devlink_port->type;
1995 }
1996 EXPORT_SYMBOL(mlxsw_core_port_type_get);
1997
1999 struct devlink_port *
2000 mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
2001 u8 local_port)
2002 {
2003 struct mlxsw_core_port *mlxsw_core_port =
2004 &mlxsw_core->ports[local_port];
2005 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
2006
2007 return devlink_port;
2008 }
2009 EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
2010
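/* Dump a mailbox buffer to the debug log as 32-bit big-endian words,
 * four words per line, trimming trailing all-zero words.
 */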
2011 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
2012 const char *buf, size_t size)
2013 {
2014 __be32 *m = (__be32 *) buf;
2015 int i;
2016 int count = size / sizeof(__be32);
2017
2018 for (i = count - 1; i >= 0; i--)
2019 if (m[i])
2020 break;
2021 i++;
2022 count = i ? i : 1;
2023 for (i = 0; i < count; i += 4)
2024 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
2025 i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
2026 be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
2027 }
2028
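/* Execute a command through the bus driver's command interface. Input and
 * output mailboxes are dumped at debug level, and a firmware status of
 * "running reset" is tolerated when the caller passed reset_ok.
 */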
2029 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
2030 u32 in_mod, bool out_mbox_direct, bool reset_ok,
2031 char *in_mbox, size_t in_mbox_size,
2032 char *out_mbox, size_t out_mbox_size)
2033 {
2034 u8 status;
2035 int err;
2036
2037 BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
2038 if (!mlxsw_core->bus->cmd_exec)
2039 return -EOPNOTSUPP;
2040
2041 dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
2042 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
2043 if (in_mbox) {
2044 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
2045 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
2046 }
2047
2048 err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
2049 opcode_mod, in_mod, out_mbox_direct,
2050 in_mbox, in_mbox_size,
2051 out_mbox, out_mbox_size, &status);
2052
2053 if (!err && out_mbox) {
2054 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
2055 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
2056 }
2057
2058 if (reset_ok && err == -EIO &&
2059 status == MLXSW_CMD_STATUS_RUNNING_RESET) {
2060 err = 0;
2061 } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
2062 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
2063 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
2064 in_mod, status, mlxsw_cmd_status_str(status));
2065 } else if (err == -ETIMEDOUT) {
2066 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
2067 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
2068 in_mod);
2069 }
2070
2071 return err;
2072 }
2073 EXPORT_SYMBOL(mlxsw_cmd_exec);
2074
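/* Work is scheduled on one of two module-wide workqueues: mlxsw_wq for
 * delayed work and mlxsw_owq, an ordered workqueue, for work that must
 * not run concurrently and that can be flushed via mlxsw_core_flush_owq().
 */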
2075 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
2076 {
2077 return queue_delayed_work(mlxsw_wq, dwork, delay);
2078 }
2079 EXPORT_SYMBOL(mlxsw_core_schedule_dw);
2080
2081 bool mlxsw_core_schedule_work(struct work_struct *work)
2082 {
2083 return queue_work(mlxsw_owq, work);
2084 }
2085 EXPORT_SYMBOL(mlxsw_core_schedule_work);
2086
2087 void mlxsw_core_flush_owq(void)
2088 {
2089 flush_workqueue(mlxsw_owq);
2090 }
2091 EXPORT_SYMBOL(mlxsw_core_flush_owq);
2092
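/* KVD partition sizing is device specific, so it is delegated to the
 * driver's kvd_sizes_get() callback; -EINVAL is returned when the driver
 * does not implement it.
 */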
2093 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
2094 const struct mlxsw_config_profile *profile,
2095 u64 *p_single_size, u64 *p_double_size,
2096 u64 *p_linear_size)
2097 {
2098 struct mlxsw_driver *driver = mlxsw_core->driver;
2099
2100 if (!driver->kvd_sizes_get)
2101 return -EINVAL;
2102
2103 return driver->kvd_sizes_get(mlxsw_core, profile,
2104 p_single_size, p_double_size,
2105 p_linear_size);
2106 }
2107 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
2108
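/* Mark that a firmware flash is in progress so the rest of the core
 * (e.g. the EMAD transaction timeout handling) can account for the
 * longer firmware response times while flashing.
 */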
2109 void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
2110 {
2111 mlxsw_core->fw_flash_in_progress = true;
2112 }
2113 EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
2114
2115 void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
2116 {
2117 mlxsw_core->fw_flash_in_progress = false;
2118 }
2119 EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
2120
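/* Query device resources by iterating QUERY_RESOURCES mailboxes until the
 * table end ID is returned; each reported (id, data) pair is parsed into
 * the resource table.
 */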
2121 int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
2122 struct mlxsw_res *res)
2123 {
2124 int index, i;
2125 u64 data;
2126 u16 id;
2127 int err;
2128
2129 if (!res)
2130 return 0;
2131
2132 mlxsw_cmd_mbox_zero(mbox);
2133
2134 for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
2135 index++) {
2136 err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
2137 if (err)
2138 return err;
2139
2140 for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
2141 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
2142 data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
2143
2144 if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
2145 return 0;
2146
2147 mlxsw_res_parse(res, id, data);
2148 }
2149 }
2150 /* The firmware is expected to terminate the resource table with the
2151  * table end ID; if it was not seen after the maximum number of
2152  * queries, the response is malformed, so fail with -EIO.
2153  */
2154 return -EIO;
2155 }
2156 EXPORT_SYMBOL(mlxsw_core_resources_query);
2157
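/* Read the upper and lower 32 bits of the device's free-running clock
 * through the bus driver, typically consumed by PTP timestamping code.
 */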
2158 u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
2159 {
2160 return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
2161 }
2162 EXPORT_SYMBOL(mlxsw_core_read_frc_h);
2163
2164 u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
2165 {
2166 return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
2167 }
2168 EXPORT_SYMBOL(mlxsw_core_read_frc_l);
2169
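/* Module init allocates the two workqueues used by mlxsw_core_schedule_dw()
 * and mlxsw_core_schedule_work(); module exit destroys the ordered one
 * first, then the regular one.
 */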
2170 static int __init mlxsw_core_module_init(void)
2171 {
2172 int err;
2173
2174 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
2175 if (!mlxsw_wq)
2176 return -ENOMEM;
2177 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
2178 mlxsw_core_driver_name);
2179 if (!mlxsw_owq) {
2180 err = -ENOMEM;
2181 goto err_alloc_ordered_workqueue;
2182 }
2183 return 0;
2184
2185 err_alloc_ordered_workqueue:
2186 destroy_workqueue(mlxsw_wq);
2187 return err;
2188 }
2189
2190 static void __exit mlxsw_core_module_exit(void)
2191 {
2192 destroy_workqueue(mlxsw_owq);
2193 destroy_workqueue(mlxsw_wq);
2194 }
2195
2196 module_init(mlxsw_core_module_init);
2197 module_exit(mlxsw_core_module_exit);
2198
2199 MODULE_LICENSE("Dual BSD/GPL");
2200 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
2201 MODULE_DESCRIPTION("Mellanox switch device core driver");