/*
 * drivers/net/ethernet/mellanox/mlxsw/pci.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>

#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"

static const char mlxsw_pci_driver_name[] = "mlxsw_pci";

static const struct pci_device_id mlxsw_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, }
};

static struct dentry *mlxsw_pci_dbg_root;

static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
{
	switch (id->device) {
	case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
		return MLXSW_DEVICE_KIND_SWITCHX2;
	case PCI_DEVICE_ID_MELLANOX_SPECTRUM:
		return MLXSW_DEVICE_KIND_SPECTRUM;
	default:
		BUG();
	}
}

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
{
	switch (q_type) {
	case MLXSW_PCI_QUEUE_TYPE_SDQ:
		return "sdq";
	case MLXSW_PCI_QUEUE_TYPE_RDQ:
		return "rdq";
	case MLXSW_PCI_QUEUE_TYPE_CQ:
		return "cq";
	case MLXSW_PCI_QUEUE_TYPE_EQ:
		return "eq";
	}
	BUG();
}

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct msix_entry msix_entry;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	struct dentry *dbg_dir;
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

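/* Producer and consumer counters are free-running u16 values. Since the
 * queue size is a power of two, masking with (count - 1) yields the ring
 * index, and the queue is full when the counters differ by exactly count.
 */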
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

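/* Ownership test: because the queue size is a power of two, the expression
 * (consumer_counter & q->count) toggles each time the consumer counter
 * wraps around the ring. An element still belongs to the hardware while
 * its owner bit differs from this toggle value.
 */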
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
					 u32 (*get_elem_owner_func)(char *))
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = get_elem_owner_func(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

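/* Doorbells live in a BAR page whose offset is reported by the firmware in
 * QUERY_FW (doorbell_offset); each queue type has its own fixed offset
 * within that page. Writing a counter value tells the device how far
 * software has advanced in the ring.
 */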
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

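/* Queue memory is a single physically contiguous allocation of
 * MLXSW_PCI_AQ_PAGES pages; this returns the DMA address of one page so
 * the pages can be handed to the device one by one.
 */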
static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of the same number as this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM PROD_COUNT CONS_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_sdq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %5d\n",
			   i, q->producer_counter, q->consumer_counter,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of the same number as this RDQ, with a base offset
	 * above the SDQ count, as the lower CQ numbers are assigned
	 * to the SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM PROD_COUNT CONS_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_rdq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %5d\n",
			   i, q->producer_counter, q->consumer_counter,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM CONS_INDEX  SDQ_COUNT  RDQ_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_cq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %10d %5d\n",
			   i, q->consumer_counter, q->u.cq.comp_sdq_count,
			   q->u.cq.comp_rdq_count, q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	/* We do not support lag now */
	if (mlxsw_pci_cqe_lag_get(cqe))
		goto drop;

	rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

put_new_skb:
	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return;

drop:
	dev_kfree_skb_any(skb);
	goto put_new_skb;
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
}

static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

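	/* Bound the work done in a single tasklet run to half the ring so
	 * one busy CQ cannot hog the CPU; the consumer doorbell below
	 * releases the processed entries and the arm doorbell re-arms the
	 * queue for further events.
	 */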
	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, cqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, cqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM CONS_COUNT     EV_CMD    EV_COMP   EV_OTHER COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
			   i, q->consumer_counter, q->u.eq.ev_cmd_count,
			   q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
}

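/* EQ tasklet: handles command-interface completions directly and collects
 * the completion queue numbers signalled by completion events into a
 * bitmap, so each active CQ tasklet is scheduled at most once per run.
 */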
static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);

		switch (event_type) {
		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	int (*dbg_read)(struct seq_file *s, void *data);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.dbg_read	= mlxsw_pci_sdq_dbg_read,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.dbg_read	= mlxsw_pci_rdq_dbg_read,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.dbg_read	= mlxsw_pci_cq_dbg_read,
	.elem_count	= MLXSW_PCI_CQE_COUNT,
	.elem_size	= MLXSW_PCI_CQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.dbg_read	= mlxsw_pci_eq_dbg_read,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	spin_lock_init(&q->lock);
	q->num = q_num;
	q->count = q_ops->elem_count;
	q->elem_size = q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;
	memset(mem_item->buf, 0, mem_item->size);

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize the elem_info entries to point at their DMA-mapped
	 * elements for easy access later.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_type_group *queue_group;
	char tmp[16];
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
	debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
				    q_ops->dbg_read);

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

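/* Bring up the async queues in dependency order: EQs first (so CQs have an
 * event queue to report to), then CQs, then the send and receive
 * descriptor queues that complete into them.
 */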
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll the command interface until the queues
	 * are initialized.
	 */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile)
{
	int i;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(
			mbox, profile->max_lag);
	}
	if (profile->used_max_port_per_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
			mbox, profile->max_port_per_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

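/* The firmware area is a set of single pages handed to the device with
 * MAP_FA. One mailbox holds at most MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX
 * entries, so the pages are mapped in batches of that size.
 */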
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
					 &mbox->mapaddr);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			    mbox->mapaddr);
}

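/* Bus init sequence: query the firmware (revision, doorbell page, number
 * of firmware area pages), map the firmware area, read board info, push
 * the configuration profile, bring up the async queues and finally hook
 * up the EQ interrupt.
 */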
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto mbox_put;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(mlxsw_pci->msix_entry.vector,
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci_driver_name, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}

static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

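/* A WQE carries MLXSW_PCI_WQE_SG_ENTRIES scatter/gather entries: entry 0
 * maps the linear part of the skb and the remaining entries map page
 * fragments, which is why skbs with too many fragments are linearized
 * before being posted.
 */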
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

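/* Execute a command through the Command Interface Register (CIR). Until
 * the event queues are up (cmd.nopoll is false), completion is detected
 * by polling the GO bit; once nopoll is set, the command-done EQE wakes
 * cmd.wait and delivers the status and output parameters.
 */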
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox)
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all must be written before we write to the control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
};

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
	/* Current firmware does not let us know when the reset is done.
	 * So we just wait here for a constant time and hope for the best.
	 */
	msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	return 0;
}

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, mlxsw_pci_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_sw_reset(mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "Software reset failed\n");
		goto err_sw_reset;
	}

	err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;

	mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
						mlxsw_pci_dbg_root);
	if (!mlxsw_pci->dbg_dir) {
		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
		err = -ENOMEM;
		goto err_dbg_create_dir;
	}

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
err_dbg_create_dir:
	pci_disable_msix(mlxsw_pci->pdev);
err_msix_init:
err_sw_reset:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
	pci_disable_msix(mlxsw_pci->pdev);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

static struct pci_driver mlxsw_pci_driver = {
	.name		= mlxsw_pci_driver_name,
	.id_table	= mlxsw_pci_id_table,
	.probe		= mlxsw_pci_probe,
	.remove		= mlxsw_pci_remove,
};

static int __init mlxsw_pci_module_init(void)
{
	int err;

	mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
	if (!mlxsw_pci_dbg_root)
		return -ENOMEM;
	err = pci_register_driver(&mlxsw_pci_driver);
	if (err)
		goto err_register_driver;
	return 0;

err_register_driver:
	debugfs_remove_recursive(mlxsw_pci_dbg_root);
	return err;
}

static void __exit mlxsw_pci_module_exit(void)
{
	pci_unregister_driver(&mlxsw_pci_driver);
	debugfs_remove_recursive(mlxsw_pci_dbg_root);
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);