1/*
2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation.  The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#include <linux/module.h>
19#include "be.h"
20#include "be_cmds.h"
21
22static char *be_port_misconfig_evt_desc[] = {
23	"A valid SFP module detected",
24	"Optics faulted/ incorrectly installed/ not installed.",
25	"Optics of two types installed.",
26	"Incompatible optics.",
27	"Unknown port SFP status"
28};
29
30static char *be_port_misconfig_remedy_desc[] = {
31	"",
32	"Reseat optics. If issue not resolved, replace",
33	"Remove one optic or install matching pair of optics",
34	"Replace with compatible optics for card to function",
35	""
36};
37
38static struct be_cmd_priv_map cmd_priv_map[] = {
39	{
40		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
41		CMD_SUBSYSTEM_ETH,
42		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
43		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
44	},
45	{
46		OPCODE_COMMON_GET_FLOW_CONTROL,
47		CMD_SUBSYSTEM_COMMON,
48		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
49		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
50	},
51	{
52		OPCODE_COMMON_SET_FLOW_CONTROL,
53		CMD_SUBSYSTEM_COMMON,
54		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
55		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
56	},
57	{
58		OPCODE_ETH_GET_PPORT_STATS,
59		CMD_SUBSYSTEM_ETH,
60		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
61		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
62	},
63	{
64		OPCODE_COMMON_GET_PHY_DETAILS,
65		CMD_SUBSYSTEM_COMMON,
66		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
67		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
68	}
69};
70
71static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
72{
73	int i;
74	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
75	u32 cmd_privileges = adapter->cmd_privileges;
76
77	for (i = 0; i < num_entries; i++)
78		if (opcode == cmd_priv_map[i].opcode &&
79		    subsystem == cmd_priv_map[i].subsystem)
80			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
81				return false;
82
83	return true;
84}
85
86static inline void *embedded_payload(struct be_mcc_wrb *wrb)
87{
88	return wrb->payload.embedded_payload;
89}
90
91static void be_mcc_notify(struct be_adapter *adapter)
92{
93	struct be_queue_info *mccq = &adapter->mcc_obj.q;
94	u32 val = 0;
95
96	if (be_error(adapter))
97		return;
98
99	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
100	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
101
102	wmb();
103	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
104}
105
106/* To check if valid bit is set, check the entire word as we don't know
107 * the endianness of the data (old entry is host endian while a new entry is
108 * little endian) */
109static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
110{
111	u32 flags;
112
113	if (compl->flags != 0) {
114		flags = le32_to_cpu(compl->flags);
115		if (flags & CQE_FLAGS_VALID_MASK) {
116			compl->flags = flags;
117			return true;
118		}
119	}
120	return false;
121}
122
123/* Need to reset the entire word that houses the valid bit */
124static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
125{
126	compl->flags = 0;
127}
128
129static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
130{
131	unsigned long addr;
132
133	addr = tag1;
134	addr = ((addr << 16) << 16) | tag0;
135	return (void *)addr;
136}
137
138static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
139{
140	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
141	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
142	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
143	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
144	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
145	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
146		return true;
147	else
148		return false;
149}
150
151/* Place holder for all the async MCC cmds wherein the caller is not in a busy
152 * loop (has not issued be_mcc_notify_wait())
153 */
154static void be_async_cmd_process(struct be_adapter *adapter,
155				 struct be_mcc_compl *compl,
156				 struct be_cmd_resp_hdr *resp_hdr)
157{
158	enum mcc_base_status base_status = base_status(compl->status);
159	u8 opcode = 0, subsystem = 0;
160
161	if (resp_hdr) {
162		opcode = resp_hdr->opcode;
163		subsystem = resp_hdr->subsystem;
164	}
165
166	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
167	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
168		complete(&adapter->et_cmd_compl);
169		return;
170	}
171
172	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
173	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
174	    subsystem == CMD_SUBSYSTEM_COMMON) {
175		adapter->flash_status = compl->status;
176		complete(&adapter->et_cmd_compl);
177		return;
178	}
179
180	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
181	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
182	    subsystem == CMD_SUBSYSTEM_ETH &&
183	    base_status == MCC_STATUS_SUCCESS) {
184		be_parse_stats(adapter);
185		adapter->stats_cmd_sent = false;
186		return;
187	}
188
189	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
190	    subsystem == CMD_SUBSYSTEM_COMMON) {
191		if (base_status == MCC_STATUS_SUCCESS) {
192			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
193							(void *)resp_hdr;
194			adapter->drv_stats.be_on_die_temperature =
195						resp->on_die_temperature;
196		} else {
197			adapter->be_get_temp_freq = 0;
198		}
199		return;
200	}
201}
202
203static int be_mcc_compl_process(struct be_adapter *adapter,
204				struct be_mcc_compl *compl)
205{
206	enum mcc_base_status base_status;
207	enum mcc_addl_status addl_status;
208	struct be_cmd_resp_hdr *resp_hdr;
209	u8 opcode = 0, subsystem = 0;
210
211	/* Just swap the status to host endian; mcc tag is opaquely copied
212	 * from mcc_wrb */
213	be_dws_le_to_cpu(compl, 4);
214
215	base_status = base_status(compl->status);
216	addl_status = addl_status(compl->status);
217
218	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
219	if (resp_hdr) {
220		opcode = resp_hdr->opcode;
221		subsystem = resp_hdr->subsystem;
222	}
223
224	be_async_cmd_process(adapter, compl, resp_hdr);
225
226	if (base_status != MCC_STATUS_SUCCESS &&
227	    !be_skip_err_log(opcode, base_status, addl_status)) {
228		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
229			dev_warn(&adapter->pdev->dev,
230				 "VF is not privileged to issue opcode %d-%d\n",
231				 opcode, subsystem);
232		} else {
233			dev_err(&adapter->pdev->dev,
234				"opcode %d-%d failed:status %d-%d\n",
235				opcode, subsystem, base_status, addl_status);
236		}
237	}
238	return compl->status;
239}
240
241/* Link state evt is a string of bytes; no need for endian swapping */
242static void be_async_link_state_process(struct be_adapter *adapter,
243					struct be_mcc_compl *compl)
244{
245	struct be_async_event_link_state *evt =
246			(struct be_async_event_link_state *)compl;
247
248	/* When link status changes, link speed must be re-queried from FW */
249	adapter->phy.link_speed = -1;
250
251	/* On BEx the FW does not send a separate link status
252	 * notification for physical and logical link.
253	 * On other chips just process the logical link
254	 * status notification
255	 */
256	if (!BEx_chip(adapter) &&
257	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
258		return;
259
260	/* For the initial link status do not rely on the ASYNC event as
261	 * it may not be received in some cases.
262	 */
263	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
264		be_link_status_update(adapter,
265				      evt->port_link_status & LINK_STATUS_MASK);
266}
267
268static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
269						  struct be_mcc_compl *compl)
270{
271	struct be_async_event_misconfig_port *evt =
272			(struct be_async_event_misconfig_port *)compl;
273	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
274	struct device *dev = &adapter->pdev->dev;
275	u8 port_misconfig_evt;
276
277	port_misconfig_evt =
278		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
279
280	/* Log an error message that would allow a user to determine
281	 * whether the SFPs have an issue
282	 */
283	dev_info(dev, "Port %c: %s %s", adapter->port_name,
284		 be_port_misconfig_evt_desc[port_misconfig_evt],
285		 be_port_misconfig_remedy_desc[port_misconfig_evt]);
286
287	if (port_misconfig_evt == INCOMPATIBLE_SFP)
288		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
289}
290
291/* Grp5 CoS Priority evt */
292static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
293					       struct be_mcc_compl *compl)
294{
295	struct be_async_event_grp5_cos_priority *evt =
296			(struct be_async_event_grp5_cos_priority *)compl;
297
298	if (evt->valid) {
299		adapter->vlan_prio_bmap = evt->available_priority_bmap;
300		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
301		adapter->recommended_prio =
302			evt->reco_default_priority << VLAN_PRIO_SHIFT;
303	}
304}
305
306/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
307static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
308					    struct be_mcc_compl *compl)
309{
310	struct be_async_event_grp5_qos_link_speed *evt =
311			(struct be_async_event_grp5_qos_link_speed *)compl;
312
313	if (adapter->phy.link_speed >= 0 &&
314	    evt->physical_port == adapter->port_num)
315		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
316}
317
318/*Grp5 PVID evt*/
319static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
320					     struct be_mcc_compl *compl)
321{
322	struct be_async_event_grp5_pvid_state *evt =
323			(struct be_async_event_grp5_pvid_state *)compl;
324
325	if (evt->enabled) {
326		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
327		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
328	} else {
329		adapter->pvid = 0;
330	}
331}
332
333static void be_async_grp5_evt_process(struct be_adapter *adapter,
334				      struct be_mcc_compl *compl)
335{
336	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
337				ASYNC_EVENT_TYPE_MASK;
338
339	switch (event_type) {
340	case ASYNC_EVENT_COS_PRIORITY:
341		be_async_grp5_cos_priority_process(adapter, compl);
342		break;
343	case ASYNC_EVENT_QOS_SPEED:
344		be_async_grp5_qos_speed_process(adapter, compl);
345		break;
346	case ASYNC_EVENT_PVID_STATE:
347		be_async_grp5_pvid_state_process(adapter, compl);
348		break;
349	default:
350		break;
351	}
352}
353
354static void be_async_dbg_evt_process(struct be_adapter *adapter,
355				     struct be_mcc_compl *cmp)
356{
357	u8 event_type = 0;
358	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
359
360	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
361			ASYNC_EVENT_TYPE_MASK;
362
363	switch (event_type) {
364	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
365		if (evt->valid)
366			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
367		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
368	break;
369	default:
370		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
371			 event_type);
372	break;
373	}
374}
375
376static void be_async_sliport_evt_process(struct be_adapter *adapter,
377					 struct be_mcc_compl *cmp)
378{
379	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
380			ASYNC_EVENT_TYPE_MASK;
381
382	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
383		be_async_port_misconfig_event_process(adapter, cmp);
384}
385
386static inline bool is_link_state_evt(u32 flags)
387{
388	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
389			ASYNC_EVENT_CODE_LINK_STATE;
390}
391
392static inline bool is_grp5_evt(u32 flags)
393{
394	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
395			ASYNC_EVENT_CODE_GRP_5;
396}
397
398static inline bool is_dbg_evt(u32 flags)
399{
400	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
401			ASYNC_EVENT_CODE_QNQ;
402}
403
404static inline bool is_sliport_evt(u32 flags)
405{
406	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
407		ASYNC_EVENT_CODE_SLIPORT;
408}
409
410static void be_mcc_event_process(struct be_adapter *adapter,
411				 struct be_mcc_compl *compl)
412{
413	if (is_link_state_evt(compl->flags))
414		be_async_link_state_process(adapter, compl);
415	else if (is_grp5_evt(compl->flags))
416		be_async_grp5_evt_process(adapter, compl);
417	else if (is_dbg_evt(compl->flags))
418		be_async_dbg_evt_process(adapter, compl);
419	else if (is_sliport_evt(compl->flags))
420		be_async_sliport_evt_process(adapter, compl);
421}
422
423static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
424{
425	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
426	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
427
428	if (be_mcc_compl_is_new(compl)) {
429		queue_tail_inc(mcc_cq);
430		return compl;
431	}
432	return NULL;
433}
434
435void be_async_mcc_enable(struct be_adapter *adapter)
436{
437	spin_lock_bh(&adapter->mcc_cq_lock);
438
439	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
440	adapter->mcc_obj.rearm_cq = true;
441
442	spin_unlock_bh(&adapter->mcc_cq_lock);
443}
444
445void be_async_mcc_disable(struct be_adapter *adapter)
446{
447	spin_lock_bh(&adapter->mcc_cq_lock);
448
449	adapter->mcc_obj.rearm_cq = false;
450	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
451
452	spin_unlock_bh(&adapter->mcc_cq_lock);
453}
454
455int be_process_mcc(struct be_adapter *adapter)
456{
457	struct be_mcc_compl *compl;
458	int num = 0, status = 0;
459	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
460
461	spin_lock(&adapter->mcc_cq_lock);
462
463	while ((compl = be_mcc_compl_get(adapter))) {
464		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
465			be_mcc_event_process(adapter, compl);
466		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
467			status = be_mcc_compl_process(adapter, compl);
468			atomic_dec(&mcc_obj->q.used);
469		}
470		be_mcc_compl_use(compl);
471		num++;
472	}
473
474	if (num)
475		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
476
477	spin_unlock(&adapter->mcc_cq_lock);
478	return status;
479}
480
481/* Wait till no more pending mcc requests are present */
482static int be_mcc_wait_compl(struct be_adapter *adapter)
483{
484#define mcc_timeout		120000 /* 12s timeout */
485	int i, status = 0;
486	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
487
488	for (i = 0; i < mcc_timeout; i++) {
489		if (be_error(adapter))
490			return -EIO;
491
492		local_bh_disable();
493		status = be_process_mcc(adapter);
494		local_bh_enable();
495
496		if (atomic_read(&mcc_obj->q.used) == 0)
497			break;
498		udelay(100);
499	}
500	if (i == mcc_timeout) {
501		dev_err(&adapter->pdev->dev, "FW not responding\n");
502		adapter->fw_timeout = true;
503		return -EIO;
504	}
505	return status;
506}
507
508/* Notify MCC requests and wait for completion */
509static int be_mcc_notify_wait(struct be_adapter *adapter)
510{
511	int status;
512	struct be_mcc_wrb *wrb;
513	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
514	u16 index = mcc_obj->q.head;
515	struct be_cmd_resp_hdr *resp;
516
517	index_dec(&index, mcc_obj->q.len);
518	wrb = queue_index_node(&mcc_obj->q, index);
519
520	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
521
522	be_mcc_notify(adapter);
523
524	status = be_mcc_wait_compl(adapter);
525	if (status == -EIO)
526		goto out;
527
528	status = (resp->base_status |
529		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
530		   CQE_ADDL_STATUS_SHIFT));
531out:
532	return status;
533}
534
535static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
536{
537	int msecs = 0;
538	u32 ready;
539
540	do {
541		if (be_error(adapter))
542			return -EIO;
543
544		ready = ioread32(db);
545		if (ready == 0xffffffff)
546			return -1;
547
548		ready &= MPU_MAILBOX_DB_RDY_MASK;
549		if (ready)
550			break;
551
552		if (msecs > 4000) {
553			dev_err(&adapter->pdev->dev, "FW not responding\n");
554			adapter->fw_timeout = true;
555			be_detect_error(adapter);
556			return -1;
557		}
558
559		msleep(1);
560		msecs++;
561	} while (true);
562
563	return 0;
564}
565
566/*
567 * Insert the mailbox address into the doorbell in two steps
568 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
569 */
570static int be_mbox_notify_wait(struct be_adapter *adapter)
571{
572	int status;
573	u32 val = 0;
574	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
575	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
576	struct be_mcc_mailbox *mbox = mbox_mem->va;
577	struct be_mcc_compl *compl = &mbox->compl;
578
579	/* wait for ready to be set */
580	status = be_mbox_db_ready_wait(adapter, db);
581	if (status != 0)
582		return status;
583
584	val |= MPU_MAILBOX_DB_HI_MASK;
585	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
586	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
587	iowrite32(val, db);
588
589	/* wait for ready to be set */
590	status = be_mbox_db_ready_wait(adapter, db);
591	if (status != 0)
592		return status;
593
594	val = 0;
595	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
596	val |= (u32)(mbox_mem->dma >> 4) << 2;
597	iowrite32(val, db);
598
599	status = be_mbox_db_ready_wait(adapter, db);
600	if (status != 0)
601		return status;
602
603	/* A cq entry has been made now */
604	if (be_mcc_compl_is_new(compl)) {
605		status = be_mcc_compl_process(adapter, &mbox->compl);
606		be_mcc_compl_use(compl);
607		if (status)
608			return status;
609	} else {
610		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
611		return -1;
612	}
613	return 0;
614}
615
616static u16 be_POST_stage_get(struct be_adapter *adapter)
617{
618	u32 sem;
619
620	if (BEx_chip(adapter))
621		sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
622	else
623		pci_read_config_dword(adapter->pdev,
624				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
625
626	return sem & POST_STAGE_MASK;
627}
628
629static int lancer_wait_ready(struct be_adapter *adapter)
630{
631#define SLIPORT_READY_TIMEOUT 30
632	u32 sliport_status;
633	int i;
634
635	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
636		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
637		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
638			return 0;
639
640		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
641		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
642			return -EIO;
643
644		msleep(1000);
645	}
646
647	return sliport_status ? : -1;
648}
649
650int be_fw_wait_ready(struct be_adapter *adapter)
651{
652	u16 stage;
653	int status, timeout = 0;
654	struct device *dev = &adapter->pdev->dev;
655
656	if (lancer_chip(adapter)) {
657		status = lancer_wait_ready(adapter);
658		if (status) {
659			stage = status;
660			goto err;
661		}
662		return 0;
663	}
664
665	do {
666		/* There's no means to poll POST state on BE2/3 VFs */
667		if (BEx_chip(adapter) && be_virtfn(adapter))
668			return 0;
669
670		stage = be_POST_stage_get(adapter);
671		if (stage == POST_STAGE_ARMFW_RDY)
672			return 0;
673
674		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
675		if (msleep_interruptible(2000)) {
676			dev_err(dev, "Waiting for POST aborted\n");
677			return -EINTR;
678		}
679		timeout += 2;
680	} while (timeout < 60);
681
682err:
683	dev_err(dev, "POST timeout; stage=%#x\n", stage);
684	return -ETIMEDOUT;
685}
686
687static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
688{
689	return &wrb->payload.sgl[0];
690}
691
692static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
693{
694	wrb->tag0 = addr & 0xFFFFFFFF;
695	wrb->tag1 = upper_32_bits(addr);
696}
697
698/* Don't touch the hdr after it's prepared */
699/* mem will be NULL for embedded commands */
700static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
701				   u8 subsystem, u8 opcode, int cmd_len,
702				   struct be_mcc_wrb *wrb,
703				   struct be_dma_mem *mem)
704{
705	struct be_sge *sge;
706
707	req_hdr->opcode = opcode;
708	req_hdr->subsystem = subsystem;
709	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
710	req_hdr->version = 0;
711	fill_wrb_tags(wrb, (ulong) req_hdr);
712	wrb->payload_length = cmd_len;
713	if (mem) {
714		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
715			MCC_WRB_SGE_CNT_SHIFT;
716		sge = nonembedded_sgl(wrb);
717		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
718		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
719		sge->len = cpu_to_le32(mem->size);
720	} else
721		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
722	be_dws_cpu_to_le(wrb, 8);
723}
724
725static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
726				      struct be_dma_mem *mem)
727{
728	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
729	u64 dma = (u64)mem->dma;
730
731	for (i = 0; i < buf_pages; i++) {
732		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
733		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
734		dma += PAGE_SIZE_4K;
735	}
736}
737
738static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
739{
740	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
741	struct be_mcc_wrb *wrb
742		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
743	memset(wrb, 0, sizeof(*wrb));
744	return wrb;
745}
746
747static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
748{
749	struct be_queue_info *mccq = &adapter->mcc_obj.q;
750	struct be_mcc_wrb *wrb;
751
752	if (!mccq->created)
753		return NULL;
754
755	if (atomic_read(&mccq->used) >= mccq->len)
756		return NULL;
757
758	wrb = queue_head_node(mccq);
759	queue_head_inc(mccq);
760	atomic_inc(&mccq->used);
761	memset(wrb, 0, sizeof(*wrb));
762	return wrb;
763}
764
765static bool use_mcc(struct be_adapter *adapter)
766{
767	return adapter->mcc_obj.q.created;
768}
769
770/* Must be used only in process context */
771static int be_cmd_lock(struct be_adapter *adapter)
772{
773	if (use_mcc(adapter)) {
774		spin_lock_bh(&adapter->mcc_lock);
775		return 0;
776	} else {
777		return mutex_lock_interruptible(&adapter->mbox_lock);
778	}
779}
780
781/* Must be used only in process context */
782static void be_cmd_unlock(struct be_adapter *adapter)
783{
784	if (use_mcc(adapter))
785		spin_unlock_bh(&adapter->mcc_lock);
786	else
787		return mutex_unlock(&adapter->mbox_lock);
788}
789
790static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
791				      struct be_mcc_wrb *wrb)
792{
793	struct be_mcc_wrb *dest_wrb;
794
795	if (use_mcc(adapter)) {
796		dest_wrb = wrb_from_mccq(adapter);
797		if (!dest_wrb)
798			return NULL;
799	} else {
800		dest_wrb = wrb_from_mbox(adapter);
801	}
802
803	memcpy(dest_wrb, wrb, sizeof(*wrb));
804	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
805		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
806
807	return dest_wrb;
808}
809
810/* Must be used only in process context */
811static int be_cmd_notify_wait(struct be_adapter *adapter,
812			      struct be_mcc_wrb *wrb)
813{
814	struct be_mcc_wrb *dest_wrb;
815	int status;
816
817	status = be_cmd_lock(adapter);
818	if (status)
819		return status;
820
821	dest_wrb = be_cmd_copy(adapter, wrb);
822	if (!dest_wrb)
823		return -EBUSY;
824
825	if (use_mcc(adapter))
826		status = be_mcc_notify_wait(adapter);
827	else
828		status = be_mbox_notify_wait(adapter);
829
830	if (!status)
831		memcpy(wrb, dest_wrb, sizeof(*wrb));
832
833	be_cmd_unlock(adapter);
834	return status;
835}
836
837/* Tell fw we're about to start firing cmds by writing a
838 * special pattern across the wrb hdr; uses mbox
839 */
840int be_cmd_fw_init(struct be_adapter *adapter)
841{
842	u8 *wrb;
843	int status;
844
845	if (lancer_chip(adapter))
846		return 0;
847
848	if (mutex_lock_interruptible(&adapter->mbox_lock))
849		return -1;
850
851	wrb = (u8 *)wrb_from_mbox(adapter);
852	*wrb++ = 0xFF;
853	*wrb++ = 0x12;
854	*wrb++ = 0x34;
855	*wrb++ = 0xFF;
856	*wrb++ = 0xFF;
857	*wrb++ = 0x56;
858	*wrb++ = 0x78;
859	*wrb = 0xFF;
860
861	status = be_mbox_notify_wait(adapter);
862
863	mutex_unlock(&adapter->mbox_lock);
864	return status;
865}
866
867/* Tell fw we're done with firing cmds by writing a
868 * special pattern across the wrb hdr; uses mbox
869 */
870int be_cmd_fw_clean(struct be_adapter *adapter)
871{
872	u8 *wrb;
873	int status;
874
875	if (lancer_chip(adapter))
876		return 0;
877
878	if (mutex_lock_interruptible(&adapter->mbox_lock))
879		return -1;
880
881	wrb = (u8 *)wrb_from_mbox(adapter);
882	*wrb++ = 0xFF;
883	*wrb++ = 0xAA;
884	*wrb++ = 0xBB;
885	*wrb++ = 0xFF;
886	*wrb++ = 0xFF;
887	*wrb++ = 0xCC;
888	*wrb++ = 0xDD;
889	*wrb = 0xFF;
890
891	status = be_mbox_notify_wait(adapter);
892
893	mutex_unlock(&adapter->mbox_lock);
894	return status;
895}
896
897int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
898{
899	struct be_mcc_wrb *wrb;
900	struct be_cmd_req_eq_create *req;
901	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
902	int status, ver = 0;
903
904	if (mutex_lock_interruptible(&adapter->mbox_lock))
905		return -1;
906
907	wrb = wrb_from_mbox(adapter);
908	req = embedded_payload(wrb);
909
910	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
911			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
912			       NULL);
913
914	/* Support for EQ_CREATEv2 available only SH-R onwards */
915	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
916		ver = 2;
917
918	req->hdr.version = ver;
919	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
920
921	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
922	/* 4byte eqe*/
923	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
924	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
925		      __ilog2_u32(eqo->q.len / 256));
926	be_dws_cpu_to_le(req->context, sizeof(req->context));
927
928	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
929
930	status = be_mbox_notify_wait(adapter);
931	if (!status) {
932		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
933
934		eqo->q.id = le16_to_cpu(resp->eq_id);
935		eqo->msix_idx =
936			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
937		eqo->q.created = true;
938	}
939
940	mutex_unlock(&adapter->mbox_lock);
941	return status;
942}
943
944/* Use MCC */
945int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
946			  bool permanent, u32 if_handle, u32 pmac_id)
947{
948	struct be_mcc_wrb *wrb;
949	struct be_cmd_req_mac_query *req;
950	int status;
951
952	spin_lock_bh(&adapter->mcc_lock);
953
954	wrb = wrb_from_mccq(adapter);
955	if (!wrb) {
956		status = -EBUSY;
957		goto err;
958	}
959	req = embedded_payload(wrb);
960
961	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
962			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
963			       NULL);
964	req->type = MAC_ADDRESS_TYPE_NETWORK;
965	if (permanent) {
966		req->permanent = 1;
967	} else {
968		req->if_id = cpu_to_le16((u16)if_handle);
969		req->pmac_id = cpu_to_le32(pmac_id);
970		req->permanent = 0;
971	}
972
973	status = be_mcc_notify_wait(adapter);
974	if (!status) {
975		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
976
977		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
978	}
979
980err:
981	spin_unlock_bh(&adapter->mcc_lock);
982	return status;
983}
984
985/* Uses synchronous MCCQ */
986int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
987		    u32 if_id, u32 *pmac_id, u32 domain)
988{
989	struct be_mcc_wrb *wrb;
990	struct be_cmd_req_pmac_add *req;
991	int status;
992
993	spin_lock_bh(&adapter->mcc_lock);
994
995	wrb = wrb_from_mccq(adapter);
996	if (!wrb) {
997		status = -EBUSY;
998		goto err;
999	}
1000	req = embedded_payload(wrb);
1001
1002	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1003			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
1004			       NULL);
1005
1006	req->hdr.domain = domain;
1007	req->if_id = cpu_to_le32(if_id);
1008	memcpy(req->mac_address, mac_addr, ETH_ALEN);
1009
1010	status = be_mcc_notify_wait(adapter);
1011	if (!status) {
1012		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1013
1014		*pmac_id = le32_to_cpu(resp->pmac_id);
1015	}
1016
1017err:
1018	spin_unlock_bh(&adapter->mcc_lock);
1019
1020	 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1021		status = -EPERM;
1022
1023	return status;
1024}
1025
1026/* Uses synchronous MCCQ */
1027int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1028{
1029	struct be_mcc_wrb *wrb;
1030	struct be_cmd_req_pmac_del *req;
1031	int status;
1032
1033	if (pmac_id == -1)
1034		return 0;
1035
1036	spin_lock_bh(&adapter->mcc_lock);
1037
1038	wrb = wrb_from_mccq(adapter);
1039	if (!wrb) {
1040		status = -EBUSY;
1041		goto err;
1042	}
1043	req = embedded_payload(wrb);
1044
1045	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1046			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
1047			       wrb, NULL);
1048
1049	req->hdr.domain = dom;
1050	req->if_id = cpu_to_le32(if_id);
1051	req->pmac_id = cpu_to_le32(pmac_id);
1052
1053	status = be_mcc_notify_wait(adapter);
1054
1055err:
1056	spin_unlock_bh(&adapter->mcc_lock);
1057	return status;
1058}
1059
1060/* Uses Mbox */
1061int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1062		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1063{
1064	struct be_mcc_wrb *wrb;
1065	struct be_cmd_req_cq_create *req;
1066	struct be_dma_mem *q_mem = &cq->dma_mem;
1067	void *ctxt;
1068	int status;
1069
1070	if (mutex_lock_interruptible(&adapter->mbox_lock))
1071		return -1;
1072
1073	wrb = wrb_from_mbox(adapter);
1074	req = embedded_payload(wrb);
1075	ctxt = &req->context;
1076
1077	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1078			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1079			       NULL);
1080
1081	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1082
1083	if (BEx_chip(adapter)) {
1084		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1085			      coalesce_wm);
1086		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1087			      ctxt, no_delay);
1088		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1089			      __ilog2_u32(cq->len / 256));
1090		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1091		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1092		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1093	} else {
1094		req->hdr.version = 2;
1095		req->page_size = 1; /* 1 for 4K */
1096
1097		/* coalesce-wm field in this cmd is not relevant to Lancer.
1098		 * Lancer uses COMMON_MODIFY_CQ to set this field
1099		 */
1100		if (!lancer_chip(adapter))
1101			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1102				      ctxt, coalesce_wm);
1103		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1104			      no_delay);
1105		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1106			      __ilog2_u32(cq->len / 256));
1107		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1108		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1109		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1110	}
1111
1112	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1113
1114	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1115
1116	status = be_mbox_notify_wait(adapter);
1117	if (!status) {
1118		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1119
1120		cq->id = le16_to_cpu(resp->cq_id);
1121		cq->created = true;
1122	}
1123
1124	mutex_unlock(&adapter->mbox_lock);
1125
1126	return status;
1127}
1128
1129static u32 be_encoded_q_len(int q_len)
1130{
1131	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1132
1133	if (len_encoded == 16)
1134		len_encoded = 0;
1135	return len_encoded;
1136}
1137
1138static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1139				  struct be_queue_info *mccq,
1140				  struct be_queue_info *cq)
1141{
1142	struct be_mcc_wrb *wrb;
1143	struct be_cmd_req_mcc_ext_create *req;
1144	struct be_dma_mem *q_mem = &mccq->dma_mem;
1145	void *ctxt;
1146	int status;
1147
1148	if (mutex_lock_interruptible(&adapter->mbox_lock))
1149		return -1;
1150
1151	wrb = wrb_from_mbox(adapter);
1152	req = embedded_payload(wrb);
1153	ctxt = &req->context;
1154
1155	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1156			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1157			       NULL);
1158
1159	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1160	if (BEx_chip(adapter)) {
1161		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1162		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1163			      be_encoded_q_len(mccq->len));
1164		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1165	} else {
1166		req->hdr.version = 1;
1167		req->cq_id = cpu_to_le16(cq->id);
1168
1169		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1170			      be_encoded_q_len(mccq->len));
1171		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1172		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1173			      ctxt, cq->id);
1174		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1175			      ctxt, 1);
1176	}
1177
1178	/* Subscribe to Link State, Sliport Event and Group 5 Events
1179	 * (bits 1, 5 and 17 set)
1180	 */
1181	req->async_event_bitmap[0] =
1182			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1183				    BIT(ASYNC_EVENT_CODE_GRP_5) |
1184				    BIT(ASYNC_EVENT_CODE_QNQ) |
1185				    BIT(ASYNC_EVENT_CODE_SLIPORT));
1186
1187	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1188
1189	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1190
1191	status = be_mbox_notify_wait(adapter);
1192	if (!status) {
1193		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1194
1195		mccq->id = le16_to_cpu(resp->id);
1196		mccq->created = true;
1197	}
1198	mutex_unlock(&adapter->mbox_lock);
1199
1200	return status;
1201}
1202
1203static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1204				  struct be_queue_info *mccq,
1205				  struct be_queue_info *cq)
1206{
1207	struct be_mcc_wrb *wrb;
1208	struct be_cmd_req_mcc_create *req;
1209	struct be_dma_mem *q_mem = &mccq->dma_mem;
1210	void *ctxt;
1211	int status;
1212
1213	if (mutex_lock_interruptible(&adapter->mbox_lock))
1214		return -1;
1215
1216	wrb = wrb_from_mbox(adapter);
1217	req = embedded_payload(wrb);
1218	ctxt = &req->context;
1219
1220	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1221			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1222			       NULL);
1223
1224	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1225
1226	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1227	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1228		      be_encoded_q_len(mccq->len));
1229	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1230
1231	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1232
1233	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1234
1235	status = be_mbox_notify_wait(adapter);
1236	if (!status) {
1237		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1238
1239		mccq->id = le16_to_cpu(resp->id);
1240		mccq->created = true;
1241	}
1242
1243	mutex_unlock(&adapter->mbox_lock);
1244	return status;
1245}
1246
1247int be_cmd_mccq_create(struct be_adapter *adapter,
1248		       struct be_queue_info *mccq, struct be_queue_info *cq)
1249{
1250	int status;
1251
1252	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1253	if (status && BEx_chip(adapter)) {
1254		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1255			"or newer to avoid conflicting priorities between NIC "
1256			"and FCoE traffic");
1257		status = be_cmd_mccq_org_create(adapter, mccq, cq);
1258	}
1259	return status;
1260}
1261
1262int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1263{
1264	struct be_mcc_wrb wrb = {0};
1265	struct be_cmd_req_eth_tx_create *req;
1266	struct be_queue_info *txq = &txo->q;
1267	struct be_queue_info *cq = &txo->cq;
1268	struct be_dma_mem *q_mem = &txq->dma_mem;
1269	int status, ver = 0;
1270
1271	req = embedded_payload(&wrb);
1272	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1273			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1274
1275	if (lancer_chip(adapter)) {
1276		req->hdr.version = 1;
1277	} else if (BEx_chip(adapter)) {
1278		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1279			req->hdr.version = 2;
1280	} else { /* For SH */
1281		req->hdr.version = 2;
1282	}
1283
1284	if (req->hdr.version > 0)
1285		req->if_id = cpu_to_le16(adapter->if_handle);
1286	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1287	req->ulp_num = BE_ULP1_NUM;
1288	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1289	req->cq_id = cpu_to_le16(cq->id);
1290	req->queue_size = be_encoded_q_len(txq->len);
1291	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1292	ver = req->hdr.version;
1293
1294	status = be_cmd_notify_wait(adapter, &wrb);
1295	if (!status) {
1296		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1297
1298		txq->id = le16_to_cpu(resp->cid);
1299		if (ver == 2)
1300			txo->db_offset = le32_to_cpu(resp->db_offset);
1301		else
1302			txo->db_offset = DB_TXULP1_OFFSET;
1303		txq->created = true;
1304	}
1305
1306	return status;
1307}
1308
1309/* Uses MCC */
1310int be_cmd_rxq_create(struct be_adapter *adapter,
1311		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1312		      u32 if_id, u32 rss, u8 *rss_id)
1313{
1314	struct be_mcc_wrb *wrb;
1315	struct be_cmd_req_eth_rx_create *req;
1316	struct be_dma_mem *q_mem = &rxq->dma_mem;
1317	int status;
1318
1319	spin_lock_bh(&adapter->mcc_lock);
1320
1321	wrb = wrb_from_mccq(adapter);
1322	if (!wrb) {
1323		status = -EBUSY;
1324		goto err;
1325	}
1326	req = embedded_payload(wrb);
1327
1328	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1329			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1330
1331	req->cq_id = cpu_to_le16(cq_id);
1332	req->frag_size = fls(frag_size) - 1;
1333	req->num_pages = 2;
1334	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1335	req->interface_id = cpu_to_le32(if_id);
1336	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1337	req->rss_queue = cpu_to_le32(rss);
1338
1339	status = be_mcc_notify_wait(adapter);
1340	if (!status) {
1341		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1342
1343		rxq->id = le16_to_cpu(resp->id);
1344		rxq->created = true;
1345		*rss_id = resp->rss_id;
1346	}
1347
1348err:
1349	spin_unlock_bh(&adapter->mcc_lock);
1350	return status;
1351}
1352
1353/* Generic destroyer function for all types of queues
1354 * Uses Mbox
1355 */
1356int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1357		     int queue_type)
1358{
1359	struct be_mcc_wrb *wrb;
1360	struct be_cmd_req_q_destroy *req;
1361	u8 subsys = 0, opcode = 0;
1362	int status;
1363
1364	if (mutex_lock_interruptible(&adapter->mbox_lock))
1365		return -1;
1366
1367	wrb = wrb_from_mbox(adapter);
1368	req = embedded_payload(wrb);
1369
1370	switch (queue_type) {
1371	case QTYPE_EQ:
1372		subsys = CMD_SUBSYSTEM_COMMON;
1373		opcode = OPCODE_COMMON_EQ_DESTROY;
1374		break;
1375	case QTYPE_CQ:
1376		subsys = CMD_SUBSYSTEM_COMMON;
1377		opcode = OPCODE_COMMON_CQ_DESTROY;
1378		break;
1379	case QTYPE_TXQ:
1380		subsys = CMD_SUBSYSTEM_ETH;
1381		opcode = OPCODE_ETH_TX_DESTROY;
1382		break;
1383	case QTYPE_RXQ:
1384		subsys = CMD_SUBSYSTEM_ETH;
1385		opcode = OPCODE_ETH_RX_DESTROY;
1386		break;
1387	case QTYPE_MCCQ:
1388		subsys = CMD_SUBSYSTEM_COMMON;
1389		opcode = OPCODE_COMMON_MCC_DESTROY;
1390		break;
1391	default:
1392		BUG();
1393	}
1394
1395	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1396			       NULL);
1397	req->id = cpu_to_le16(q->id);
1398
1399	status = be_mbox_notify_wait(adapter);
1400	q->created = false;
1401
1402	mutex_unlock(&adapter->mbox_lock);
1403	return status;
1404}
1405
1406/* Uses MCC */
1407int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1408{
1409	struct be_mcc_wrb *wrb;
1410	struct be_cmd_req_q_destroy *req;
1411	int status;
1412
1413	spin_lock_bh(&adapter->mcc_lock);
1414
1415	wrb = wrb_from_mccq(adapter);
1416	if (!wrb) {
1417		status = -EBUSY;
1418		goto err;
1419	}
1420	req = embedded_payload(wrb);
1421
1422	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1423			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1424	req->id = cpu_to_le16(q->id);
1425
1426	status = be_mcc_notify_wait(adapter);
1427	q->created = false;
1428
1429err:
1430	spin_unlock_bh(&adapter->mcc_lock);
1431	return status;
1432}
1433
1434/* Create an rx filtering policy configuration on an i/f
1435 * Will use MBOX only if MCCQ has not been created.
1436 */
1437int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1438		     u32 *if_handle, u32 domain)
1439{
1440	struct be_mcc_wrb wrb = {0};
1441	struct be_cmd_req_if_create *req;
1442	int status;
1443
1444	req = embedded_payload(&wrb);
1445	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1446			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1447			       sizeof(*req), &wrb, NULL);
1448	req->hdr.domain = domain;
1449	req->capability_flags = cpu_to_le32(cap_flags);
1450	req->enable_flags = cpu_to_le32(en_flags);
1451	req->pmac_invalid = true;
1452
1453	status = be_cmd_notify_wait(adapter, &wrb);
1454	if (!status) {
1455		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1456
1457		*if_handle = le32_to_cpu(resp->interface_id);
1458
1459		/* Hack to retrieve VF's pmac-id on BE3 */
1460		if (BE3_chip(adapter) && !be_physfn(adapter))
1461			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1462	}
1463	return status;
1464}
1465
1466/* Uses MCCQ */
1467int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1468{
1469	struct be_mcc_wrb *wrb;
1470	struct be_cmd_req_if_destroy *req;
1471	int status;
1472
1473	if (interface_id == -1)
1474		return 0;
1475
1476	spin_lock_bh(&adapter->mcc_lock);
1477
1478	wrb = wrb_from_mccq(adapter);
1479	if (!wrb) {
1480		status = -EBUSY;
1481		goto err;
1482	}
1483	req = embedded_payload(wrb);
1484
1485	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1486			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1487			       sizeof(*req), wrb, NULL);
1488	req->hdr.domain = domain;
1489	req->interface_id = cpu_to_le32(interface_id);
1490
1491	status = be_mcc_notify_wait(adapter);
1492err:
1493	spin_unlock_bh(&adapter->mcc_lock);
1494	return status;
1495}
1496
1497/* Get stats is a non embedded command: the request is not embedded inside
1498 * WRB but is a separate dma memory block
1499 * Uses asynchronous MCC
1500 */
1501int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1502{
1503	struct be_mcc_wrb *wrb;
1504	struct be_cmd_req_hdr *hdr;
1505	int status = 0;
1506
1507	spin_lock_bh(&adapter->mcc_lock);
1508
1509	wrb = wrb_from_mccq(adapter);
1510	if (!wrb) {
1511		status = -EBUSY;
1512		goto err;
1513	}
1514	hdr = nonemb_cmd->va;
1515
1516	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1517			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1518			       nonemb_cmd);
1519
1520	/* version 1 of the cmd is not supported only by BE2 */
1521	if (BE2_chip(adapter))
1522		hdr->version = 0;
1523	if (BE3_chip(adapter) || lancer_chip(adapter))
1524		hdr->version = 1;
1525	else
1526		hdr->version = 2;
1527
1528	be_mcc_notify(adapter);
1529	adapter->stats_cmd_sent = true;
1530
1531err:
1532	spin_unlock_bh(&adapter->mcc_lock);
1533	return status;
1534}
1535
1536/* Lancer Stats */
1537int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1538			       struct be_dma_mem *nonemb_cmd)
1539{
1540	struct be_mcc_wrb *wrb;
1541	struct lancer_cmd_req_pport_stats *req;
1542	int status = 0;
1543
1544	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1545			    CMD_SUBSYSTEM_ETH))
1546		return -EPERM;
1547
1548	spin_lock_bh(&adapter->mcc_lock);
1549
1550	wrb = wrb_from_mccq(adapter);
1551	if (!wrb) {
1552		status = -EBUSY;
1553		goto err;
1554	}
1555	req = nonemb_cmd->va;
1556
1557	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1558			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1559			       wrb, nonemb_cmd);
1560
1561	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1562	req->cmd_params.params.reset_stats = 0;
1563
1564	be_mcc_notify(adapter);
1565	adapter->stats_cmd_sent = true;
1566
1567err:
1568	spin_unlock_bh(&adapter->mcc_lock);
1569	return status;
1570}
1571
1572static int be_mac_to_link_speed(int mac_speed)
1573{
1574	switch (mac_speed) {
1575	case PHY_LINK_SPEED_ZERO:
1576		return 0;
1577	case PHY_LINK_SPEED_10MBPS:
1578		return 10;
1579	case PHY_LINK_SPEED_100MBPS:
1580		return 100;
1581	case PHY_LINK_SPEED_1GBPS:
1582		return 1000;
1583	case PHY_LINK_SPEED_10GBPS:
1584		return 10000;
1585	case PHY_LINK_SPEED_20GBPS:
1586		return 20000;
1587	case PHY_LINK_SPEED_25GBPS:
1588		return 25000;
1589	case PHY_LINK_SPEED_40GBPS:
1590		return 40000;
1591	}
1592	return 0;
1593}
1594
1595/* Uses synchronous mcc
1596 * Returns link_speed in Mbps
1597 */
1598int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1599			     u8 *link_status, u32 dom)
1600{
1601	struct be_mcc_wrb *wrb;
1602	struct be_cmd_req_link_status *req;
1603	int status;
1604
1605	spin_lock_bh(&adapter->mcc_lock);
1606
1607	if (link_status)
1608		*link_status = LINK_DOWN;
1609
1610	wrb = wrb_from_mccq(adapter);
1611	if (!wrb) {
1612		status = -EBUSY;
1613		goto err;
1614	}
1615	req = embedded_payload(wrb);
1616
1617	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1618			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1619			       sizeof(*req), wrb, NULL);
1620
1621	/* version 1 of the cmd is not supported only by BE2 */
1622	if (!BE2_chip(adapter))
1623		req->hdr.version = 1;
1624
1625	req->hdr.domain = dom;
1626
1627	status = be_mcc_notify_wait(adapter);
1628	if (!status) {
1629		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1630
1631		if (link_speed) {
1632			*link_speed = resp->link_speed ?
1633				      le16_to_cpu(resp->link_speed) * 10 :
1634				      be_mac_to_link_speed(resp->mac_speed);
1635
1636			if (!resp->logical_link_status)
1637				*link_speed = 0;
1638		}
1639		if (link_status)
1640			*link_status = resp->logical_link_status;
1641	}
1642
1643err:
1644	spin_unlock_bh(&adapter->mcc_lock);
1645	return status;
1646}
1647
1648/* Uses synchronous mcc */
1649int be_cmd_get_die_temperature(struct be_adapter *adapter)
1650{
1651	struct be_mcc_wrb *wrb;
1652	struct be_cmd_req_get_cntl_addnl_attribs *req;
1653	int status = 0;
1654
1655	spin_lock_bh(&adapter->mcc_lock);
1656
1657	wrb = wrb_from_mccq(adapter);
1658	if (!wrb) {
1659		status = -EBUSY;
1660		goto err;
1661	}
1662	req = embedded_payload(wrb);
1663
1664	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1665			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1666			       sizeof(*req), wrb, NULL);
1667
1668	be_mcc_notify(adapter);
1669
1670err:
1671	spin_unlock_bh(&adapter->mcc_lock);
1672	return status;
1673}
1674
1675/* Uses synchronous mcc */
1676int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1677{
1678	struct be_mcc_wrb *wrb;
1679	struct be_cmd_req_get_fat *req;
1680	int status;
1681
1682	spin_lock_bh(&adapter->mcc_lock);
1683
1684	wrb = wrb_from_mccq(adapter);
1685	if (!wrb) {
1686		status = -EBUSY;
1687		goto err;
1688	}
1689	req = embedded_payload(wrb);
1690
1691	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1692			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1693			       NULL);
1694	req->fat_operation = cpu_to_le32(QUERY_FAT);
1695	status = be_mcc_notify_wait(adapter);
1696	if (!status) {
1697		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1698
1699		if (log_size && resp->log_size)
1700			*log_size = le32_to_cpu(resp->log_size) -
1701					sizeof(u32);
1702	}
1703err:
1704	spin_unlock_bh(&adapter->mcc_lock);
1705	return status;
1706}
1707
1708int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1709{
1710	struct be_dma_mem get_fat_cmd;
1711	struct be_mcc_wrb *wrb;
1712	struct be_cmd_req_get_fat *req;
1713	u32 offset = 0, total_size, buf_size,
1714				log_offset = sizeof(u32), payload_len;
1715	int status = 0;
1716
1717	if (buf_len == 0)
1718		return -EIO;
1719
1720	total_size = buf_len;
1721
1722	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1723	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1724					     get_fat_cmd.size,
1725					     &get_fat_cmd.dma, GFP_ATOMIC);
1726	if (!get_fat_cmd.va) {
1727		dev_err(&adapter->pdev->dev,
1728			"Memory allocation failure while reading FAT data\n");
1729		return -ENOMEM;
1730	}
1731
1732	spin_lock_bh(&adapter->mcc_lock);
1733
1734	while (total_size) {
1735		buf_size = min(total_size, (u32)60*1024);
1736		total_size -= buf_size;
1737
1738		wrb = wrb_from_mccq(adapter);
1739		if (!wrb) {
1740			status = -EBUSY;
1741			goto err;
1742		}
1743		req = get_fat_cmd.va;
1744
1745		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1746		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1747				       OPCODE_COMMON_MANAGE_FAT, payload_len,
1748				       wrb, &get_fat_cmd);
1749
1750		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1751		req->read_log_offset = cpu_to_le32(log_offset);
1752		req->read_log_length = cpu_to_le32(buf_size);
1753		req->data_buffer_size = cpu_to_le32(buf_size);
1754
1755		status = be_mcc_notify_wait(adapter);
1756		if (!status) {
1757			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1758
1759			memcpy(buf + offset,
1760			       resp->data_buffer,
1761			       le32_to_cpu(resp->read_log_length));
1762		} else {
1763			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1764			goto err;
1765		}
1766		offset += buf_size;
1767		log_offset += buf_size;
1768	}
1769err:
1770	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1771			  get_fat_cmd.va, get_fat_cmd.dma);
1772	spin_unlock_bh(&adapter->mcc_lock);
1773	return status;
1774}
1775
1776/* Uses synchronous mcc */
1777int be_cmd_get_fw_ver(struct be_adapter *adapter)
1778{
1779	struct be_mcc_wrb *wrb;
1780	struct be_cmd_req_get_fw_version *req;
1781	int status;
1782
1783	spin_lock_bh(&adapter->mcc_lock);
1784
1785	wrb = wrb_from_mccq(adapter);
1786	if (!wrb) {
1787		status = -EBUSY;
1788		goto err;
1789	}
1790
1791	req = embedded_payload(wrb);
1792
1793	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1794			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1795			       NULL);
1796	status = be_mcc_notify_wait(adapter);
1797	if (!status) {
1798		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1799
1800		strlcpy(adapter->fw_ver, resp->firmware_version_string,
1801			sizeof(adapter->fw_ver));
1802		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1803			sizeof(adapter->fw_on_flash));
1804	}
1805err:
1806	spin_unlock_bh(&adapter->mcc_lock);
1807	return status;
1808}
1809
1810/* set the EQ delay interval of an EQ to specified value
1811 * Uses async mcc
1812 */
1813static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1814			       struct be_set_eqd *set_eqd, int num)
1815{
1816	struct be_mcc_wrb *wrb;
1817	struct be_cmd_req_modify_eq_delay *req;
1818	int status = 0, i;
1819
1820	spin_lock_bh(&adapter->mcc_lock);
1821
1822	wrb = wrb_from_mccq(adapter);
1823	if (!wrb) {
1824		status = -EBUSY;
1825		goto err;
1826	}
1827	req = embedded_payload(wrb);
1828
1829	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1830			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1831			       NULL);
1832
1833	req->num_eq = cpu_to_le32(num);
1834	for (i = 0; i < num; i++) {
1835		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1836		req->set_eqd[i].phase = 0;
1837		req->set_eqd[i].delay_multiplier =
1838				cpu_to_le32(set_eqd[i].delay_multiplier);
1839	}
1840
1841	be_mcc_notify(adapter);
1842err:
1843	spin_unlock_bh(&adapter->mcc_lock);
1844	return status;
1845}
1846
1847int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1848		      int num)
1849{
1850	int num_eqs, i = 0;
1851
1852	while (num) {
1853		num_eqs = min(num, 8);
1854		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1855		i += num_eqs;
1856		num -= num_eqs;
1857	}
1858
1859	return 0;
1860}
1861
1862/* Uses sycnhronous mcc */
1863int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1864		       u32 num, u32 domain)
1865{
1866	struct be_mcc_wrb *wrb;
1867	struct be_cmd_req_vlan_config *req;
1868	int status;
1869
1870	spin_lock_bh(&adapter->mcc_lock);
1871
1872	wrb = wrb_from_mccq(adapter);
1873	if (!wrb) {
1874		status = -EBUSY;
1875		goto err;
1876	}
1877	req = embedded_payload(wrb);
1878
1879	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1880			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1881			       wrb, NULL);
1882	req->hdr.domain = domain;
1883
1884	req->interface_id = if_id;
1885	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1886	req->num_vlan = num;
1887	memcpy(req->normal_vlan, vtag_array,
1888	       req->num_vlan * sizeof(vtag_array[0]));
1889
1890	status = be_mcc_notify_wait(adapter);
1891err:
1892	spin_unlock_bh(&adapter->mcc_lock);
1893	return status;
1894}
1895
1896static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1897{
1898	struct be_mcc_wrb *wrb;
1899	struct be_dma_mem *mem = &adapter->rx_filter;
1900	struct be_cmd_req_rx_filter *req = mem->va;
1901	int status;
1902
1903	spin_lock_bh(&adapter->mcc_lock);
1904
1905	wrb = wrb_from_mccq(adapter);
1906	if (!wrb) {
1907		status = -EBUSY;
1908		goto err;
1909	}
1910	memset(req, 0, sizeof(*req));
1911	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1912			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1913			       wrb, mem);
1914
1915	req->if_id = cpu_to_le32(adapter->if_handle);
1916	req->if_flags_mask = cpu_to_le32(flags);
1917	req->if_flags = (value == ON) ? req->if_flags_mask : 0;
1918
1919	if (flags & BE_IF_FLAGS_MULTICAST) {
1920		struct netdev_hw_addr *ha;
1921		int i = 0;
1922
1923		/* Reset mcast promisc mode if already set by setting mask
1924		 * and not setting flags field
1925		 */
1926		req->if_flags_mask |=
1927			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1928				    be_if_cap_flags(adapter));
1929		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1930		netdev_for_each_mc_addr(ha, adapter->netdev)
1931			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1932	}
1933
1934	status = be_mcc_notify_wait(adapter);
1935err:
1936	spin_unlock_bh(&adapter->mcc_lock);
1937	return status;
1938}
1939
1940int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1941{
1942	struct device *dev = &adapter->pdev->dev;
1943
1944	if ((flags & be_if_cap_flags(adapter)) != flags) {
1945		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
1946		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
1947			 be_if_cap_flags(adapter));
1948	}
1949	flags &= be_if_cap_flags(adapter);
1950
1951	return __be_cmd_rx_filter(adapter, flags, value);
1952}
1953
1954/* Uses synchrounous mcc */
1955int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1956{
1957	struct be_mcc_wrb *wrb;
1958	struct be_cmd_req_set_flow_control *req;
1959	int status;
1960
1961	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1962			    CMD_SUBSYSTEM_COMMON))
1963		return -EPERM;
1964
1965	spin_lock_bh(&adapter->mcc_lock);
1966
1967	wrb = wrb_from_mccq(adapter);
1968	if (!wrb) {
1969		status = -EBUSY;
1970		goto err;
1971	}
1972	req = embedded_payload(wrb);
1973
1974	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1975			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1976			       wrb, NULL);
1977
1978	req->hdr.version = 1;
1979	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1980	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1981
1982	status = be_mcc_notify_wait(adapter);
1983
1984err:
1985	spin_unlock_bh(&adapter->mcc_lock);
1986
1987	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
1988		return  -EOPNOTSUPP;
1989
1990	return status;
1991}
1992
1993/* Uses sycn mcc */
1994int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1995{
1996	struct be_mcc_wrb *wrb;
1997	struct be_cmd_req_get_flow_control *req;
1998	int status;
1999
2000	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2001			    CMD_SUBSYSTEM_COMMON))
2002		return -EPERM;
2003
2004	spin_lock_bh(&adapter->mcc_lock);
2005
2006	wrb = wrb_from_mccq(adapter);
2007	if (!wrb) {
2008		status = -EBUSY;
2009		goto err;
2010	}
2011	req = embedded_payload(wrb);
2012
2013	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2014			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2015			       wrb, NULL);
2016
2017	status = be_mcc_notify_wait(adapter);
2018	if (!status) {
2019		struct be_cmd_resp_get_flow_control *resp =
2020						embedded_payload(wrb);
2021
2022		*tx_fc = le16_to_cpu(resp->tx_flow_control);
2023		*rx_fc = le16_to_cpu(resp->rx_flow_control);
2024	}
2025
2026err:
2027	spin_unlock_bh(&adapter->mcc_lock);
2028	return status;
2029}
2030
2031/* Uses mbox */
2032int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2033{
2034	struct be_mcc_wrb *wrb;
2035	struct be_cmd_req_query_fw_cfg *req;
2036	int status;
2037
2038	if (mutex_lock_interruptible(&adapter->mbox_lock))
2039		return -1;
2040
2041	wrb = wrb_from_mbox(adapter);
2042	req = embedded_payload(wrb);
2043
2044	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2045			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2046			       sizeof(*req), wrb, NULL);
2047
2048	status = be_mbox_notify_wait(adapter);
2049	if (!status) {
2050		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2051
2052		adapter->port_num = le32_to_cpu(resp->phys_port);
2053		adapter->function_mode = le32_to_cpu(resp->function_mode);
2054		adapter->function_caps = le32_to_cpu(resp->function_caps);
2055		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2056		dev_info(&adapter->pdev->dev,
2057			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2058			 adapter->function_mode, adapter->function_caps);
2059	}
2060
2061	mutex_unlock(&adapter->mbox_lock);
2062	return status;
2063}
2064
2065/* Uses mbox */
2066int be_cmd_reset_function(struct be_adapter *adapter)
2067{
2068	struct be_mcc_wrb *wrb;
2069	struct be_cmd_req_hdr *req;
2070	int status;
2071
2072	if (lancer_chip(adapter)) {
2073		iowrite32(SLI_PORT_CONTROL_IP_MASK,
2074			  adapter->db + SLIPORT_CONTROL_OFFSET);
2075		status = lancer_wait_ready(adapter);
2076		if (status)
2077			dev_err(&adapter->pdev->dev,
2078				"Adapter in non recoverable error\n");
2079		return status;
2080	}
2081
2082	if (mutex_lock_interruptible(&adapter->mbox_lock))
2083		return -1;
2084
2085	wrb = wrb_from_mbox(adapter);
2086	req = embedded_payload(wrb);
2087
2088	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2089			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2090			       NULL);
2091
2092	status = be_mbox_notify_wait(adapter);
2093
2094	mutex_unlock(&adapter->mbox_lock);
2095	return status;
2096}
2097
2098int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2099		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2100{
2101	struct be_mcc_wrb *wrb;
2102	struct be_cmd_req_rss_config *req;
2103	int status;
2104
2105	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2106		return 0;
2107
2108	spin_lock_bh(&adapter->mcc_lock);
2109
2110	wrb = wrb_from_mccq(adapter);
2111	if (!wrb) {
2112		status = -EBUSY;
2113		goto err;
2114	}
2115	req = embedded_payload(wrb);
2116
2117	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2118			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2119
2120	req->if_id = cpu_to_le32(adapter->if_handle);
2121	req->enable_rss = cpu_to_le16(rss_hash_opts);
2122	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2123
2124	if (!BEx_chip(adapter))
2125		req->hdr.version = 1;
2126
2127	memcpy(req->cpu_table, rsstable, table_size);
2128	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2129	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2130
2131	status = be_mcc_notify_wait(adapter);
2132err:
2133	spin_unlock_bh(&adapter->mcc_lock);
2134	return status;
2135}
2136
2137/* Uses sync mcc */
2138int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2139			    u8 bcn, u8 sts, u8 state)
2140{
2141	struct be_mcc_wrb *wrb;
2142	struct be_cmd_req_enable_disable_beacon *req;
2143	int status;
2144
2145	spin_lock_bh(&adapter->mcc_lock);
2146
2147	wrb = wrb_from_mccq(adapter);
2148	if (!wrb) {
2149		status = -EBUSY;
2150		goto err;
2151	}
2152	req = embedded_payload(wrb);
2153
2154	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2155			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2156			       sizeof(*req), wrb, NULL);
2157
2158	req->port_num = port_num;
2159	req->beacon_state = state;
2160	req->beacon_duration = bcn;
2161	req->status_duration = sts;
2162
2163	status = be_mcc_notify_wait(adapter);
2164
2165err:
2166	spin_unlock_bh(&adapter->mcc_lock);
2167	return status;
2168}
2169
2170/* Uses sync mcc */
2171int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2172{
2173	struct be_mcc_wrb *wrb;
2174	struct be_cmd_req_get_beacon_state *req;
2175	int status;
2176
2177	spin_lock_bh(&adapter->mcc_lock);
2178
2179	wrb = wrb_from_mccq(adapter);
2180	if (!wrb) {
2181		status = -EBUSY;
2182		goto err;
2183	}
2184	req = embedded_payload(wrb);
2185
2186	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2187			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2188			       wrb, NULL);
2189
2190	req->port_num = port_num;
2191
2192	status = be_mcc_notify_wait(adapter);
2193	if (!status) {
2194		struct be_cmd_resp_get_beacon_state *resp =
2195						embedded_payload(wrb);
2196
2197		*state = resp->beacon_state;
2198	}
2199
2200err:
2201	spin_unlock_bh(&adapter->mcc_lock);
2202	return status;
2203}
2204
2205/* Uses sync mcc */
2206int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2207				      u8 page_num, u8 *data)
2208{
2209	struct be_dma_mem cmd;
2210	struct be_mcc_wrb *wrb;
2211	struct be_cmd_req_port_type *req;
2212	int status;
2213
2214	if (page_num > TR_PAGE_A2)
2215		return -EINVAL;
2216
2217	cmd.size = sizeof(struct be_cmd_resp_port_type);
2218	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2219				     GFP_ATOMIC);
2220	if (!cmd.va) {
2221		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2222		return -ENOMEM;
2223	}
2224
2225	spin_lock_bh(&adapter->mcc_lock);
2226
2227	wrb = wrb_from_mccq(adapter);
2228	if (!wrb) {
2229		status = -EBUSY;
2230		goto err;
2231	}
2232	req = cmd.va;
2233
2234	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2235			       OPCODE_COMMON_READ_TRANSRECV_DATA,
2236			       cmd.size, wrb, &cmd);
2237
2238	req->port = cpu_to_le32(adapter->hba_port_num);
2239	req->page_num = cpu_to_le32(page_num);
2240	status = be_mcc_notify_wait(adapter);
2241	if (!status) {
2242		struct be_cmd_resp_port_type *resp = cmd.va;
2243
2244		memcpy(data, resp->page_data, PAGE_DATA_LEN);
2245	}
2246err:
2247	spin_unlock_bh(&adapter->mcc_lock);
2248	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2249	return status;
2250}
2251
2252int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2253			    u32 data_size, u32 data_offset,
2254			    const char *obj_name, u32 *data_written,
2255			    u8 *change_status, u8 *addn_status)
2256{
2257	struct be_mcc_wrb *wrb;
2258	struct lancer_cmd_req_write_object *req;
2259	struct lancer_cmd_resp_write_object *resp;
2260	void *ctxt = NULL;
2261	int status;
2262
2263	spin_lock_bh(&adapter->mcc_lock);
2264	adapter->flash_status = 0;
2265
2266	wrb = wrb_from_mccq(adapter);
2267	if (!wrb) {
2268		status = -EBUSY;
2269		goto err_unlock;
2270	}
2271
2272	req = embedded_payload(wrb);
2273
2274	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2275			       OPCODE_COMMON_WRITE_OBJECT,
2276			       sizeof(struct lancer_cmd_req_write_object), wrb,
2277			       NULL);
2278
2279	ctxt = &req->context;
2280	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2281		      write_length, ctxt, data_size);
2282
2283	if (data_size == 0)
2284		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2285			      eof, ctxt, 1);
2286	else
2287		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2288			      eof, ctxt, 0);
2289
2290	be_dws_cpu_to_le(ctxt, sizeof(req->context));
2291	req->write_offset = cpu_to_le32(data_offset);
2292	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2293	req->descriptor_count = cpu_to_le32(1);
2294	req->buf_len = cpu_to_le32(data_size);
2295	req->addr_low = cpu_to_le32((cmd->dma +
2296				     sizeof(struct lancer_cmd_req_write_object))
2297				    & 0xFFFFFFFF);
2298	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2299				sizeof(struct lancer_cmd_req_write_object)));
2300
2301	be_mcc_notify(adapter);
2302	spin_unlock_bh(&adapter->mcc_lock);
2303
2304	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2305					 msecs_to_jiffies(60000)))
2306		status = -ETIMEDOUT;
2307	else
2308		status = adapter->flash_status;
2309
2310	resp = embedded_payload(wrb);
2311	if (!status) {
2312		*data_written = le32_to_cpu(resp->actual_write_len);
2313		*change_status = resp->change_status;
2314	} else {
2315		*addn_status = resp->additional_status;
2316	}
2317
2318	return status;
2319
2320err_unlock:
2321	spin_unlock_bh(&adapter->mcc_lock);
2322	return status;
2323}
2324
2325int be_cmd_query_cable_type(struct be_adapter *adapter)
2326{
2327	u8 page_data[PAGE_DATA_LEN];
2328	int status;
2329
2330	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2331						   page_data);
2332	if (!status) {
2333		switch (adapter->phy.interface_type) {
2334		case PHY_TYPE_QSFP:
2335			adapter->phy.cable_type =
2336				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2337			break;
2338		case PHY_TYPE_SFP_PLUS_10GB:
2339			adapter->phy.cable_type =
2340				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2341			break;
2342		default:
2343			adapter->phy.cable_type = 0;
2344			break;
2345		}
2346	}
2347	return status;
2348}
2349
2350int be_cmd_query_sfp_info(struct be_adapter *adapter)
2351{
2352	u8 page_data[PAGE_DATA_LEN];
2353	int status;
2354
2355	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2356						   page_data);
2357	if (!status) {
2358		strlcpy(adapter->phy.vendor_name, page_data +
2359			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2360		strlcpy(adapter->phy.vendor_pn,
2361			page_data + SFP_VENDOR_PN_OFFSET,
2362			SFP_VENDOR_NAME_LEN - 1);
2363	}
2364
2365	return status;
2366}
2367
2368int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2369{
2370	struct lancer_cmd_req_delete_object *req;
2371	struct be_mcc_wrb *wrb;
2372	int status;
2373
2374	spin_lock_bh(&adapter->mcc_lock);
2375
2376	wrb = wrb_from_mccq(adapter);
2377	if (!wrb) {
2378		status = -EBUSY;
2379		goto err;
2380	}
2381
2382	req = embedded_payload(wrb);
2383
2384	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2385			       OPCODE_COMMON_DELETE_OBJECT,
2386			       sizeof(*req), wrb, NULL);
2387
2388	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2389
2390	status = be_mcc_notify_wait(adapter);
2391err:
2392	spin_unlock_bh(&adapter->mcc_lock);
2393	return status;
2394}
2395
2396int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2397			   u32 data_size, u32 data_offset, const char *obj_name,
2398			   u32 *data_read, u32 *eof, u8 *addn_status)
2399{
2400	struct be_mcc_wrb *wrb;
2401	struct lancer_cmd_req_read_object *req;
2402	struct lancer_cmd_resp_read_object *resp;
2403	int status;
2404
2405	spin_lock_bh(&adapter->mcc_lock);
2406
2407	wrb = wrb_from_mccq(adapter);
2408	if (!wrb) {
2409		status = -EBUSY;
2410		goto err_unlock;
2411	}
2412
2413	req = embedded_payload(wrb);
2414
2415	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2416			       OPCODE_COMMON_READ_OBJECT,
2417			       sizeof(struct lancer_cmd_req_read_object), wrb,
2418			       NULL);
2419
2420	req->desired_read_len = cpu_to_le32(data_size);
2421	req->read_offset = cpu_to_le32(data_offset);
2422	strcpy(req->object_name, obj_name);
2423	req->descriptor_count = cpu_to_le32(1);
2424	req->buf_len = cpu_to_le32(data_size);
2425	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2426	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2427
2428	status = be_mcc_notify_wait(adapter);
2429
2430	resp = embedded_payload(wrb);
2431	if (!status) {
2432		*data_read = le32_to_cpu(resp->actual_read_len);
2433		*eof = le32_to_cpu(resp->eof);
2434	} else {
2435		*addn_status = resp->additional_status;
2436	}
2437
2438err_unlock:
2439	spin_unlock_bh(&adapter->mcc_lock);
2440	return status;
2441}
2442
2443int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2444			  u32 flash_type, u32 flash_opcode, u32 img_offset,
2445			  u32 buf_size)
2446{
2447	struct be_mcc_wrb *wrb;
2448	struct be_cmd_write_flashrom *req;
2449	int status;
2450
2451	spin_lock_bh(&adapter->mcc_lock);
2452	adapter->flash_status = 0;
2453
2454	wrb = wrb_from_mccq(adapter);
2455	if (!wrb) {
2456		status = -EBUSY;
2457		goto err_unlock;
2458	}
2459	req = cmd->va;
2460
2461	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2462			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2463			       cmd);
2464
2465	req->params.op_type = cpu_to_le32(flash_type);
2466	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2467		req->params.offset = cpu_to_le32(img_offset);
2468
2469	req->params.op_code = cpu_to_le32(flash_opcode);
2470	req->params.data_buf_size = cpu_to_le32(buf_size);
2471
2472	be_mcc_notify(adapter);
2473	spin_unlock_bh(&adapter->mcc_lock);
2474
2475	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2476					 msecs_to_jiffies(40000)))
2477		status = -ETIMEDOUT;
2478	else
2479		status = adapter->flash_status;
2480
2481	return status;
2482
2483err_unlock:
2484	spin_unlock_bh(&adapter->mcc_lock);
2485	return status;
2486}
2487
2488int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2489			 u16 img_optype, u32 img_offset, u32 crc_offset)
2490{
2491	struct be_cmd_read_flash_crc *req;
2492	struct be_mcc_wrb *wrb;
2493	int status;
2494
2495	spin_lock_bh(&adapter->mcc_lock);
2496
2497	wrb = wrb_from_mccq(adapter);
2498	if (!wrb) {
2499		status = -EBUSY;
2500		goto err;
2501	}
2502	req = embedded_payload(wrb);
2503
2504	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2505			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2506			       wrb, NULL);
2507
2508	req->params.op_type = cpu_to_le32(img_optype);
2509	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2510		req->params.offset = cpu_to_le32(img_offset + crc_offset);
2511	else
2512		req->params.offset = cpu_to_le32(crc_offset);
2513
2514	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2515	req->params.data_buf_size = cpu_to_le32(0x4);
2516
2517	status = be_mcc_notify_wait(adapter);
2518	if (!status)
2519		memcpy(flashed_crc, req->crc, 4);
2520
2521err:
2522	spin_unlock_bh(&adapter->mcc_lock);
2523	return status;
2524}
2525
2526int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2527			    struct be_dma_mem *nonemb_cmd)
2528{
2529	struct be_mcc_wrb *wrb;
2530	struct be_cmd_req_acpi_wol_magic_config *req;
2531	int status;
2532
2533	spin_lock_bh(&adapter->mcc_lock);
2534
2535	wrb = wrb_from_mccq(adapter);
2536	if (!wrb) {
2537		status = -EBUSY;
2538		goto err;
2539	}
2540	req = nonemb_cmd->va;
2541
2542	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2543			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2544			       wrb, nonemb_cmd);
2545	memcpy(req->magic_mac, mac, ETH_ALEN);
2546
2547	status = be_mcc_notify_wait(adapter);
2548
2549err:
2550	spin_unlock_bh(&adapter->mcc_lock);
2551	return status;
2552}
2553
2554int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2555			u8 loopback_type, u8 enable)
2556{
2557	struct be_mcc_wrb *wrb;
2558	struct be_cmd_req_set_lmode *req;
2559	int status;
2560
2561	spin_lock_bh(&adapter->mcc_lock);
2562
2563	wrb = wrb_from_mccq(adapter);
2564	if (!wrb) {
2565		status = -EBUSY;
2566		goto err;
2567	}
2568
2569	req = embedded_payload(wrb);
2570
2571	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2572			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2573			       wrb, NULL);
2574
2575	req->src_port = port_num;
2576	req->dest_port = port_num;
2577	req->loopback_type = loopback_type;
2578	req->loopback_state = enable;
2579
2580	status = be_mcc_notify_wait(adapter);
2581err:
2582	spin_unlock_bh(&adapter->mcc_lock);
2583	return status;
2584}
2585
2586int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2587			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2588			 u64 pattern)
2589{
2590	struct be_mcc_wrb *wrb;
2591	struct be_cmd_req_loopback_test *req;
2592	struct be_cmd_resp_loopback_test *resp;
2593	int status;
2594
2595	spin_lock_bh(&adapter->mcc_lock);
2596
2597	wrb = wrb_from_mccq(adapter);
2598	if (!wrb) {
2599		status = -EBUSY;
2600		goto err;
2601	}
2602
2603	req = embedded_payload(wrb);
2604
2605	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2606			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2607			       NULL);
2608
2609	req->hdr.timeout = cpu_to_le32(15);
2610	req->pattern = cpu_to_le64(pattern);
2611	req->src_port = cpu_to_le32(port_num);
2612	req->dest_port = cpu_to_le32(port_num);
2613	req->pkt_size = cpu_to_le32(pkt_size);
2614	req->num_pkts = cpu_to_le32(num_pkts);
2615	req->loopback_type = cpu_to_le32(loopback_type);
2616
2617	be_mcc_notify(adapter);
2618
2619	spin_unlock_bh(&adapter->mcc_lock);
2620
2621	wait_for_completion(&adapter->et_cmd_compl);
2622	resp = embedded_payload(wrb);
2623	status = le32_to_cpu(resp->status);
2624
2625	return status;
2626err:
2627	spin_unlock_bh(&adapter->mcc_lock);
2628	return status;
2629}
2630
2631int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2632			u32 byte_cnt, struct be_dma_mem *cmd)
2633{
2634	struct be_mcc_wrb *wrb;
2635	struct be_cmd_req_ddrdma_test *req;
2636	int status;
2637	int i, j = 0;
2638
2639	spin_lock_bh(&adapter->mcc_lock);
2640
2641	wrb = wrb_from_mccq(adapter);
2642	if (!wrb) {
2643		status = -EBUSY;
2644		goto err;
2645	}
2646	req = cmd->va;
2647	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2648			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2649			       cmd);
2650
2651	req->pattern = cpu_to_le64(pattern);
2652	req->byte_count = cpu_to_le32(byte_cnt);
2653	for (i = 0; i < byte_cnt; i++) {
2654		req->snd_buff[i] = (u8)(pattern >> (j*8));
2655		j++;
2656		if (j > 7)
2657			j = 0;
2658	}
2659
2660	status = be_mcc_notify_wait(adapter);
2661
2662	if (!status) {
2663		struct be_cmd_resp_ddrdma_test *resp;
2664
2665		resp = cmd->va;
2666		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2667		    resp->snd_err) {
2668			status = -1;
2669		}
2670	}
2671
2672err:
2673	spin_unlock_bh(&adapter->mcc_lock);
2674	return status;
2675}
2676
2677int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2678			    struct be_dma_mem *nonemb_cmd)
2679{
2680	struct be_mcc_wrb *wrb;
2681	struct be_cmd_req_seeprom_read *req;
2682	int status;
2683
2684	spin_lock_bh(&adapter->mcc_lock);
2685
2686	wrb = wrb_from_mccq(adapter);
2687	if (!wrb) {
2688		status = -EBUSY;
2689		goto err;
2690	}
2691	req = nonemb_cmd->va;
2692
2693	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2694			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2695			       nonemb_cmd);
2696
2697	status = be_mcc_notify_wait(adapter);
2698
2699err:
2700	spin_unlock_bh(&adapter->mcc_lock);
2701	return status;
2702}
2703
2704int be_cmd_get_phy_info(struct be_adapter *adapter)
2705{
2706	struct be_mcc_wrb *wrb;
2707	struct be_cmd_req_get_phy_info *req;
2708	struct be_dma_mem cmd;
2709	int status;
2710
2711	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2712			    CMD_SUBSYSTEM_COMMON))
2713		return -EPERM;
2714
2715	spin_lock_bh(&adapter->mcc_lock);
2716
2717	wrb = wrb_from_mccq(adapter);
2718	if (!wrb) {
2719		status = -EBUSY;
2720		goto err;
2721	}
2722	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2723	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2724				     GFP_ATOMIC);
2725	if (!cmd.va) {
2726		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2727		status = -ENOMEM;
2728		goto err;
2729	}
2730
2731	req = cmd.va;
2732
2733	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2734			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2735			       wrb, &cmd);
2736
2737	status = be_mcc_notify_wait(adapter);
2738	if (!status) {
2739		struct be_phy_info *resp_phy_info =
2740				cmd.va + sizeof(struct be_cmd_req_hdr);
2741
2742		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2743		adapter->phy.interface_type =
2744			le16_to_cpu(resp_phy_info->interface_type);
2745		adapter->phy.auto_speeds_supported =
2746			le16_to_cpu(resp_phy_info->auto_speeds_supported);
2747		adapter->phy.fixed_speeds_supported =
2748			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2749		adapter->phy.misc_params =
2750			le32_to_cpu(resp_phy_info->misc_params);
2751
2752		if (BE2_chip(adapter)) {
2753			adapter->phy.fixed_speeds_supported =
2754				BE_SUPPORTED_SPEED_10GBPS |
2755				BE_SUPPORTED_SPEED_1GBPS;
2756		}
2757	}
2758	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2759err:
2760	spin_unlock_bh(&adapter->mcc_lock);
2761	return status;
2762}
2763
2764static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2765{
2766	struct be_mcc_wrb *wrb;
2767	struct be_cmd_req_set_qos *req;
2768	int status;
2769
2770	spin_lock_bh(&adapter->mcc_lock);
2771
2772	wrb = wrb_from_mccq(adapter);
2773	if (!wrb) {
2774		status = -EBUSY;
2775		goto err;
2776	}
2777
2778	req = embedded_payload(wrb);
2779
2780	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2781			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2782
2783	req->hdr.domain = domain;
2784	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2785	req->max_bps_nic = cpu_to_le32(bps);
2786
2787	status = be_mcc_notify_wait(adapter);
2788
2789err:
2790	spin_unlock_bh(&adapter->mcc_lock);
2791	return status;
2792}
2793
2794int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2795{
2796	struct be_mcc_wrb *wrb;
2797	struct be_cmd_req_cntl_attribs *req;
2798	struct be_cmd_resp_cntl_attribs *resp;
2799	int status;
2800	int payload_len = max(sizeof(*req), sizeof(*resp));
2801	struct mgmt_controller_attrib *attribs;
2802	struct be_dma_mem attribs_cmd;
2803
2804	if (mutex_lock_interruptible(&adapter->mbox_lock))
2805		return -1;
2806
2807	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2808	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2809	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2810					     attribs_cmd.size,
2811					     &attribs_cmd.dma, GFP_ATOMIC);
2812	if (!attribs_cmd.va) {
2813		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2814		status = -ENOMEM;
2815		goto err;
2816	}
2817
2818	wrb = wrb_from_mbox(adapter);
2819	if (!wrb) {
2820		status = -EBUSY;
2821		goto err;
2822	}
2823	req = attribs_cmd.va;
2824
2825	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2826			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2827			       wrb, &attribs_cmd);
2828
2829	status = be_mbox_notify_wait(adapter);
2830	if (!status) {
2831		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2832		adapter->hba_port_num = attribs->hba_attribs.phy_port;
2833	}
2834
2835err:
2836	mutex_unlock(&adapter->mbox_lock);
2837	if (attribs_cmd.va)
2838		dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
2839				  attribs_cmd.va, attribs_cmd.dma);
2840	return status;
2841}
2842
2843/* Uses mbox */
2844int be_cmd_req_native_mode(struct be_adapter *adapter)
2845{
2846	struct be_mcc_wrb *wrb;
2847	struct be_cmd_req_set_func_cap *req;
2848	int status;
2849
2850	if (mutex_lock_interruptible(&adapter->mbox_lock))
2851		return -1;
2852
2853	wrb = wrb_from_mbox(adapter);
2854	if (!wrb) {
2855		status = -EBUSY;
2856		goto err;
2857	}
2858
2859	req = embedded_payload(wrb);
2860
2861	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2862			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2863			       sizeof(*req), wrb, NULL);
2864
2865	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2866				CAPABILITY_BE3_NATIVE_ERX_API);
2867	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2868
2869	status = be_mbox_notify_wait(adapter);
2870	if (!status) {
2871		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2872
2873		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2874					CAPABILITY_BE3_NATIVE_ERX_API;
2875		if (!adapter->be3_native)
2876			dev_warn(&adapter->pdev->dev,
2877				 "adapter not in advanced mode\n");
2878	}
2879err:
2880	mutex_unlock(&adapter->mbox_lock);
2881	return status;
2882}
2883
2884/* Get privilege(s) for a function */
2885int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2886			     u32 domain)
2887{
2888	struct be_mcc_wrb *wrb;
2889	struct be_cmd_req_get_fn_privileges *req;
2890	int status;
2891
2892	spin_lock_bh(&adapter->mcc_lock);
2893
2894	wrb = wrb_from_mccq(adapter);
2895	if (!wrb) {
2896		status = -EBUSY;
2897		goto err;
2898	}
2899
2900	req = embedded_payload(wrb);
2901
2902	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2903			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2904			       wrb, NULL);
2905
2906	req->hdr.domain = domain;
2907
2908	status = be_mcc_notify_wait(adapter);
2909	if (!status) {
2910		struct be_cmd_resp_get_fn_privileges *resp =
2911						embedded_payload(wrb);
2912
2913		*privilege = le32_to_cpu(resp->privilege_mask);
2914
2915		/* In UMC mode FW does not return right privileges.
2916		 * Override with correct privilege equivalent to PF.
2917		 */
2918		if (BEx_chip(adapter) && be_is_mc(adapter) &&
2919		    be_physfn(adapter))
2920			*privilege = MAX_PRIVILEGES;
2921	}
2922
2923err:
2924	spin_unlock_bh(&adapter->mcc_lock);
2925	return status;
2926}
2927
2928/* Set privilege(s) for a function */
2929int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2930			     u32 domain)
2931{
2932	struct be_mcc_wrb *wrb;
2933	struct be_cmd_req_set_fn_privileges *req;
2934	int status;
2935
2936	spin_lock_bh(&adapter->mcc_lock);
2937
2938	wrb = wrb_from_mccq(adapter);
2939	if (!wrb) {
2940		status = -EBUSY;
2941		goto err;
2942	}
2943
2944	req = embedded_payload(wrb);
2945	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2946			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2947			       wrb, NULL);
2948	req->hdr.domain = domain;
2949	if (lancer_chip(adapter))
2950		req->privileges_lancer = cpu_to_le32(privileges);
2951	else
2952		req->privileges = cpu_to_le32(privileges);
2953
2954	status = be_mcc_notify_wait(adapter);
2955err:
2956	spin_unlock_bh(&adapter->mcc_lock);
2957	return status;
2958}
2959
2960/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
2961 * pmac_id_valid: false => pmac_id or MAC address is requested.
2962 *		  If pmac_id is returned, pmac_id_valid is returned as true
2963 */
2964int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2965			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
2966			     u8 domain)
2967{
2968	struct be_mcc_wrb *wrb;
2969	struct be_cmd_req_get_mac_list *req;
2970	int status;
2971	int mac_count;
2972	struct be_dma_mem get_mac_list_cmd;
2973	int i;
2974
2975	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2976	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2977	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2978						  get_mac_list_cmd.size,
2979						  &get_mac_list_cmd.dma,
2980						  GFP_ATOMIC);
2981
2982	if (!get_mac_list_cmd.va) {
2983		dev_err(&adapter->pdev->dev,
2984			"Memory allocation failure during GET_MAC_LIST\n");
2985		return -ENOMEM;
2986	}
2987
2988	spin_lock_bh(&adapter->mcc_lock);
2989
2990	wrb = wrb_from_mccq(adapter);
2991	if (!wrb) {
2992		status = -EBUSY;
2993		goto out;
2994	}
2995
2996	req = get_mac_list_cmd.va;
2997
2998	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2999			       OPCODE_COMMON_GET_MAC_LIST,
3000			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
3001	req->hdr.domain = domain;
3002	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
3003	if (*pmac_id_valid) {
3004		req->mac_id = cpu_to_le32(*pmac_id);
3005		req->iface_id = cpu_to_le16(if_handle);
3006		req->perm_override = 0;
3007	} else {
3008		req->perm_override = 1;
3009	}
3010
3011	status = be_mcc_notify_wait(adapter);
3012	if (!status) {
3013		struct be_cmd_resp_get_mac_list *resp =
3014						get_mac_list_cmd.va;
3015
3016		if (*pmac_id_valid) {
3017			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3018			       ETH_ALEN);
3019			goto out;
3020		}
3021
3022		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3023		/* Mac list returned could contain one or more active mac_ids
3024		 * or one or more true or pseudo permanent mac addresses.
3025		 * If an active mac_id is present, return first active mac_id
3026		 * found.
3027		 */
3028		for (i = 0; i < mac_count; i++) {
3029			struct get_list_macaddr *mac_entry;
3030			u16 mac_addr_size;
3031			u32 mac_id;
3032
3033			mac_entry = &resp->macaddr_list[i];
3034			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3035			/* mac_id is a 32 bit value and mac_addr size
3036			 * is 6 bytes
3037			 */
3038			if (mac_addr_size == sizeof(u32)) {
3039				*pmac_id_valid = true;
3040				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3041				*pmac_id = le32_to_cpu(mac_id);
3042				goto out;
3043			}
3044		}
3045		/* If no active mac_id found, return first mac addr */
3046		*pmac_id_valid = false;
3047		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3048		       ETH_ALEN);
3049	}
3050
3051out:
3052	spin_unlock_bh(&adapter->mcc_lock);
3053	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3054			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
3055	return status;
3056}
3057
3058int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3059			  u8 *mac, u32 if_handle, bool active, u32 domain)
3060{
3061	if (!active)
3062		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3063					 if_handle, domain);
3064	if (BEx_chip(adapter))
3065		return be_cmd_mac_addr_query(adapter, mac, false,
3066					     if_handle, curr_pmac_id);
3067	else
3068		/* Fetch the MAC address using pmac_id */
3069		return be_cmd_get_mac_from_list(adapter, mac, &active,
3070						&curr_pmac_id,
3071						if_handle, domain);
3072}
3073
3074int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3075{
3076	int status;
3077	bool pmac_valid = false;
3078
3079	eth_zero_addr(mac);
3080
3081	if (BEx_chip(adapter)) {
3082		if (be_physfn(adapter))
3083			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3084						       0);
3085		else
3086			status = be_cmd_mac_addr_query(adapter, mac, false,
3087						       adapter->if_handle, 0);
3088	} else {
3089		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3090						  NULL, adapter->if_handle, 0);
3091	}
3092
3093	return status;
3094}
3095
3096/* Uses synchronous MCCQ */
3097int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3098			u8 mac_count, u32 domain)
3099{
3100	struct be_mcc_wrb *wrb;
3101	struct be_cmd_req_set_mac_list *req;
3102	int status;
3103	struct be_dma_mem cmd;
3104
3105	memset(&cmd, 0, sizeof(struct be_dma_mem));
3106	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3107	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3108				     GFP_KERNEL);
3109	if (!cmd.va)
3110		return -ENOMEM;
3111
3112	spin_lock_bh(&adapter->mcc_lock);
3113
3114	wrb = wrb_from_mccq(adapter);
3115	if (!wrb) {
3116		status = -EBUSY;
3117		goto err;
3118	}
3119
3120	req = cmd.va;
3121	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3122			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3123			       wrb, &cmd);
3124
3125	req->hdr.domain = domain;
3126	req->mac_count = mac_count;
3127	if (mac_count)
3128		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3129
3130	status = be_mcc_notify_wait(adapter);
3131
3132err:
3133	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3134	spin_unlock_bh(&adapter->mcc_lock);
3135	return status;
3136}
3137
3138/* Wrapper to delete any active MACs and provision the new mac.
3139 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3140 * current list are active.
3141 */
3142int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3143{
3144	bool active_mac = false;
3145	u8 old_mac[ETH_ALEN];
3146	u32 pmac_id;
3147	int status;
3148
3149	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3150					  &pmac_id, if_id, dom);
3151
3152	if (!status && active_mac)
3153		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3154
3155	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3156}
3157
3158int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3159			  u32 domain, u16 intf_id, u16 hsw_mode)
3160{
3161	struct be_mcc_wrb *wrb;
3162	struct be_cmd_req_set_hsw_config *req;
3163	void *ctxt;
3164	int status;
3165
3166	spin_lock_bh(&adapter->mcc_lock);
3167
3168	wrb = wrb_from_mccq(adapter);
3169	if (!wrb) {
3170		status = -EBUSY;
3171		goto err;
3172	}
3173
3174	req = embedded_payload(wrb);
3175	ctxt = &req->context;
3176
3177	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3178			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3179			       NULL);
3180
3181	req->hdr.domain = domain;
3182	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3183	if (pvid) {
3184		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3185		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3186	}
3187	if (!BEx_chip(adapter) && hsw_mode) {
3188		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3189			      ctxt, adapter->hba_port_num);
3190		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3191		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3192			      ctxt, hsw_mode);
3193	}
3194
3195	be_dws_cpu_to_le(req->context, sizeof(req->context));
3196	status = be_mcc_notify_wait(adapter);
3197
3198err:
3199	spin_unlock_bh(&adapter->mcc_lock);
3200	return status;
3201}
3202
3203/* Get Hyper switch config */
3204int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3205			  u32 domain, u16 intf_id, u8 *mode)
3206{
3207	struct be_mcc_wrb *wrb;
3208	struct be_cmd_req_get_hsw_config *req;
3209	void *ctxt;
3210	int status;
3211	u16 vid;
3212
3213	spin_lock_bh(&adapter->mcc_lock);
3214
3215	wrb = wrb_from_mccq(adapter);
3216	if (!wrb) {
3217		status = -EBUSY;
3218		goto err;
3219	}
3220
3221	req = embedded_payload(wrb);
3222	ctxt = &req->context;
3223
3224	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3225			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3226			       NULL);
3227
3228	req->hdr.domain = domain;
3229	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3230		      ctxt, intf_id);
3231	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3232
3233	if (!BEx_chip(adapter) && mode) {
3234		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3235			      ctxt, adapter->hba_port_num);
3236		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3237	}
3238	be_dws_cpu_to_le(req->context, sizeof(req->context));
3239
3240	status = be_mcc_notify_wait(adapter);
3241	if (!status) {
3242		struct be_cmd_resp_get_hsw_config *resp =
3243						embedded_payload(wrb);
3244
3245		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3246		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3247				    pvid, &resp->context);
3248		if (pvid)
3249			*pvid = le16_to_cpu(vid);
3250		if (mode)
3251			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3252					      port_fwd_type, &resp->context);
3253	}
3254
3255err:
3256	spin_unlock_bh(&adapter->mcc_lock);
3257	return status;
3258}
3259
3260static bool be_is_wol_excluded(struct be_adapter *adapter)
3261{
3262	struct pci_dev *pdev = adapter->pdev;
3263
3264	if (!be_physfn(adapter))
3265		return true;
3266
3267	switch (pdev->subsystem_device) {
3268	case OC_SUBSYS_DEVICE_ID1:
3269	case OC_SUBSYS_DEVICE_ID2:
3270	case OC_SUBSYS_DEVICE_ID3:
3271	case OC_SUBSYS_DEVICE_ID4:
3272		return true;
3273	default:
3274		return false;
3275	}
3276}
3277
3278int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3279{
3280	struct be_mcc_wrb *wrb;
3281	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3282	int status = 0;
3283	struct be_dma_mem cmd;
3284
3285	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3286			    CMD_SUBSYSTEM_ETH))
3287		return -EPERM;
3288
3289	if (be_is_wol_excluded(adapter))
3290		return status;
3291
3292	if (mutex_lock_interruptible(&adapter->mbox_lock))
3293		return -1;
3294
3295	memset(&cmd, 0, sizeof(struct be_dma_mem));
3296	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3297	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3298				     GFP_ATOMIC);
3299	if (!cmd.va) {
3300		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3301		status = -ENOMEM;
3302		goto err;
3303	}
3304
3305	wrb = wrb_from_mbox(adapter);
3306	if (!wrb) {
3307		status = -EBUSY;
3308		goto err;
3309	}
3310
3311	req = cmd.va;
3312
3313	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3314			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3315			       sizeof(*req), wrb, &cmd);
3316
3317	req->hdr.version = 1;
3318	req->query_options = BE_GET_WOL_CAP;
3319
3320	status = be_mbox_notify_wait(adapter);
3321	if (!status) {
3322		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3323
3324		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
3325
3326		adapter->wol_cap = resp->wol_settings;
3327		if (adapter->wol_cap & BE_WOL_CAP)
3328			adapter->wol_en = true;
3329	}
3330err:
3331	mutex_unlock(&adapter->mbox_lock);
3332	if (cmd.va)
3333		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3334				  cmd.dma);
3335	return status;
3336
3337}
3338
3339int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3340{
3341	struct be_dma_mem extfat_cmd;
3342	struct be_fat_conf_params *cfgs;
3343	int status;
3344	int i, j;
3345
3346	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3347	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3348	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3349					    extfat_cmd.size, &extfat_cmd.dma,
3350					    GFP_ATOMIC);
3351	if (!extfat_cmd.va)
3352		return -ENOMEM;
3353
3354	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3355	if (status)
3356		goto err;
3357
3358	cfgs = (struct be_fat_conf_params *)
3359			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3360	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3361		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3362
3363		for (j = 0; j < num_modes; j++) {
3364			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3365				cfgs->module[i].trace_lvl[j].dbg_lvl =
3366							cpu_to_le32(level);
3367		}
3368	}
3369
3370	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3371err:
3372	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3373			  extfat_cmd.dma);
3374	return status;
3375}
3376
3377int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3378{
3379	struct be_dma_mem extfat_cmd;
3380	struct be_fat_conf_params *cfgs;
3381	int status, j;
3382	int level = 0;
3383
3384	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3385	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3386	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3387					    extfat_cmd.size, &extfat_cmd.dma,
3388					    GFP_ATOMIC);
3389
3390	if (!extfat_cmd.va) {
3391		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3392			__func__);
3393		goto err;
3394	}
3395
3396	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3397	if (!status) {
3398		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3399						sizeof(struct be_cmd_resp_hdr));
3400
3401		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3402			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3403				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3404		}
3405	}
3406	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3407			  extfat_cmd.dma);
3408err:
3409	return level;
3410}
3411
3412int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3413				   struct be_dma_mem *cmd)
3414{
3415	struct be_mcc_wrb *wrb;
3416	struct be_cmd_req_get_ext_fat_caps *req;
3417	int status;
3418
3419	if (mutex_lock_interruptible(&adapter->mbox_lock))
3420		return -1;
3421
3422	wrb = wrb_from_mbox(adapter);
3423	if (!wrb) {
3424		status = -EBUSY;
3425		goto err;
3426	}
3427
3428	req = cmd->va;
3429	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3430			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3431			       cmd->size, wrb, cmd);
3432	req->parameter_type = cpu_to_le32(1);
3433
3434	status = be_mbox_notify_wait(adapter);
3435err:
3436	mutex_unlock(&adapter->mbox_lock);
3437	return status;
3438}
3439
3440int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3441				   struct be_dma_mem *cmd,
3442				   struct be_fat_conf_params *configs)
3443{
3444	struct be_mcc_wrb *wrb;
3445	struct be_cmd_req_set_ext_fat_caps *req;
3446	int status;
3447
3448	spin_lock_bh(&adapter->mcc_lock);
3449
3450	wrb = wrb_from_mccq(adapter);
3451	if (!wrb) {
3452		status = -EBUSY;
3453		goto err;
3454	}
3455
3456	req = cmd->va;
3457	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3458	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3459			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3460			       cmd->size, wrb, cmd);
3461
3462	status = be_mcc_notify_wait(adapter);
3463err:
3464	spin_unlock_bh(&adapter->mcc_lock);
3465	return status;
3466}
3467
3468int be_cmd_query_port_name(struct be_adapter *adapter)
3469{
3470	struct be_cmd_req_get_port_name *req;
3471	struct be_mcc_wrb *wrb;
3472	int status;
3473
3474	if (mutex_lock_interruptible(&adapter->mbox_lock))
3475		return -1;
3476
3477	wrb = wrb_from_mbox(adapter);
3478	req = embedded_payload(wrb);
3479
3480	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3481			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3482			       NULL);
3483	if (!BEx_chip(adapter))
3484		req->hdr.version = 1;
3485
3486	status = be_mbox_notify_wait(adapter);
3487	if (!status) {
3488		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3489
3490		adapter->port_name = resp->port_name[adapter->hba_port_num];
3491	} else {
3492		adapter->port_name = adapter->hba_port_num + '0';
3493	}
3494
3495	mutex_unlock(&adapter->mbox_lock);
3496	return status;
3497}
3498
3499/* Descriptor type */
3500enum {
3501	FUNC_DESC = 1,
3502	VFT_DESC = 2
3503};
3504
3505static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3506					       int desc_type)
3507{
3508	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3509	struct be_nic_res_desc *nic;
3510	int i;
3511
3512	for (i = 0; i < desc_count; i++) {
3513		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3514		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3515			nic = (struct be_nic_res_desc *)hdr;
3516			if (desc_type == FUNC_DESC ||
3517			    (desc_type == VFT_DESC &&
3518			     nic->flags & (1 << VFT_SHIFT)))
3519				return nic;
3520		}
3521
3522		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3523		hdr = (void *)hdr + hdr->desc_len;
3524	}
3525	return NULL;
3526}
3527
3528static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3529{
3530	return be_get_nic_desc(buf, desc_count, VFT_DESC);
3531}
3532
3533static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3534{
3535	return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3536}
3537
3538static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3539						 u32 desc_count)
3540{
3541	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3542	struct be_pcie_res_desc *pcie;
3543	int i;
3544
3545	for (i = 0; i < desc_count; i++) {
3546		if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3547		     hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3548			pcie = (struct be_pcie_res_desc	*)hdr;
3549			if (pcie->pf_num == devfn)
3550				return pcie;
3551		}
3552
3553		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3554		hdr = (void *)hdr + hdr->desc_len;
3555	}
3556	return NULL;
3557}
3558
3559static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3560{
3561	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3562	int i;
3563
3564	for (i = 0; i < desc_count; i++) {
3565		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3566			return (struct be_port_res_desc *)hdr;
3567
3568		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3569		hdr = (void *)hdr + hdr->desc_len;
3570	}
3571	return NULL;
3572}
3573
3574static void be_copy_nic_desc(struct be_resources *res,
3575			     struct be_nic_res_desc *desc)
3576{
3577	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3578	res->max_vlans = le16_to_cpu(desc->vlan_count);
3579	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3580	res->max_tx_qs = le16_to_cpu(desc->txq_count);
3581	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3582	res->max_rx_qs = le16_to_cpu(desc->rq_count);
3583	res->max_evt_qs = le16_to_cpu(desc->eq_count);
3584	res->max_cq_count = le16_to_cpu(desc->cq_count);
3585	res->max_iface_count = le16_to_cpu(desc->iface_count);
3586	res->max_mcc_count = le16_to_cpu(desc->mcc_count);
3587	/* Clear flags that driver is not interested in */
3588	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3589				BE_IF_CAP_FLAGS_WANT;
3590}
3591
3592/* Uses Mbox */
3593int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3594{
3595	struct be_mcc_wrb *wrb;
3596	struct be_cmd_req_get_func_config *req;
3597	int status;
3598	struct be_dma_mem cmd;
3599
3600	if (mutex_lock_interruptible(&adapter->mbox_lock))
3601		return -1;
3602
3603	memset(&cmd, 0, sizeof(struct be_dma_mem));
3604	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3605	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3606				     GFP_ATOMIC);
3607	if (!cmd.va) {
3608		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3609		status = -ENOMEM;
3610		goto err;
3611	}
3612
3613	wrb = wrb_from_mbox(adapter);
3614	if (!wrb) {
3615		status = -EBUSY;
3616		goto err;
3617	}
3618
3619	req = cmd.va;
3620
3621	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3622			       OPCODE_COMMON_GET_FUNC_CONFIG,
3623			       cmd.size, wrb, &cmd);
3624
3625	if (skyhawk_chip(adapter))
3626		req->hdr.version = 1;
3627
3628	status = be_mbox_notify_wait(adapter);
3629	if (!status) {
3630		struct be_cmd_resp_get_func_config *resp = cmd.va;
3631		u32 desc_count = le32_to_cpu(resp->desc_count);
3632		struct be_nic_res_desc *desc;
3633
3634		desc = be_get_func_nic_desc(resp->func_param, desc_count);
3635		if (!desc) {
3636			status = -EINVAL;
3637			goto err;
3638		}
3639
3640		adapter->pf_number = desc->pf_num;
3641		be_copy_nic_desc(res, desc);
3642	}
3643err:
3644	mutex_unlock(&adapter->mbox_lock);
3645	if (cmd.va)
3646		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3647				  cmd.dma);
3648	return status;
3649}
3650
3651/* Will use MBOX only if MCCQ has not been created */
3652int be_cmd_get_profile_config(struct be_adapter *adapter,
3653			      struct be_resources *res, u8 query, u8 domain)
3654{
3655	struct be_cmd_resp_get_profile_config *resp;
3656	struct be_cmd_req_get_profile_config *req;
3657	struct be_nic_res_desc *vf_res;
3658	struct be_pcie_res_desc *pcie;
3659	struct be_port_res_desc *port;
3660	struct be_nic_res_desc *nic;
3661	struct be_mcc_wrb wrb = {0};
3662	struct be_dma_mem cmd;
3663	u16 desc_count;
3664	int status;
3665
3666	memset(&cmd, 0, sizeof(struct be_dma_mem));
3667	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3668	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3669				     GFP_ATOMIC);
3670	if (!cmd.va)
3671		return -ENOMEM;
3672
3673	req = cmd.va;
3674	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3675			       OPCODE_COMMON_GET_PROFILE_CONFIG,
3676			       cmd.size, &wrb, &cmd);
3677
3678	req->hdr.domain = domain;
3679	if (!lancer_chip(adapter))
3680		req->hdr.version = 1;
3681	req->type = ACTIVE_PROFILE_TYPE;
3682
3683	/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
3684	 * descriptors with all bits set to "1" for the fields which can be
3685	 * modified using SET_PROFILE_CONFIG cmd.
3686	 */
3687	if (query == RESOURCE_MODIFIABLE)
3688		req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
3689
3690	status = be_cmd_notify_wait(adapter, &wrb);
3691	if (status)
3692		goto err;
3693
3694	resp = cmd.va;
3695	desc_count = le16_to_cpu(resp->desc_count);
3696
3697	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3698				desc_count);
3699	if (pcie)
3700		res->max_vfs = le16_to_cpu(pcie->num_vfs);
3701
3702	port = be_get_port_desc(resp->func_param, desc_count);
3703	if (port)
3704		adapter->mc_type = port->mc_type;
3705
3706	nic = be_get_func_nic_desc(resp->func_param, desc_count);
3707	if (nic)
3708		be_copy_nic_desc(res, nic);
3709
3710	vf_res = be_get_vft_desc(resp->func_param, desc_count);
3711	if (vf_res)
3712		res->vf_if_cap_flags = vf_res->cap_flags;
3713err:
3714	if (cmd.va)
3715		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3716				  cmd.dma);
3717	return status;
3718}
3719
3720/* Will use MBOX only if MCCQ has not been created */
3721static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3722				     int size, int count, u8 version, u8 domain)
3723{
3724	struct be_cmd_req_set_profile_config *req;
3725	struct be_mcc_wrb wrb = {0};
3726	struct be_dma_mem cmd;
3727	int status;
3728
3729	memset(&cmd, 0, sizeof(struct be_dma_mem));
3730	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3731	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3732				     GFP_ATOMIC);
3733	if (!cmd.va)
3734		return -ENOMEM;
3735
3736	req = cmd.va;
3737	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3738			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
3739			       &wrb, &cmd);
3740	req->hdr.version = version;
3741	req->hdr.domain = domain;
3742	req->desc_count = cpu_to_le32(count);
3743	memcpy(req->desc, desc, size);
3744
3745	status = be_cmd_notify_wait(adapter, &wrb);
3746
3747	if (cmd.va)
3748		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3749				  cmd.dma);
3750	return status;
3751}
3752
3753/* Mark all fields invalid */
3754static void be_reset_nic_desc(struct be_nic_res_desc *nic)
3755{
3756	memset(nic, 0, sizeof(*nic));
3757	nic->unicast_mac_count = 0xFFFF;
3758	nic->mcc_count = 0xFFFF;
3759	nic->vlan_count = 0xFFFF;
3760	nic->mcast_mac_count = 0xFFFF;
3761	nic->txq_count = 0xFFFF;
3762	nic->rq_count = 0xFFFF;
3763	nic->rssq_count = 0xFFFF;
3764	nic->lro_count = 0xFFFF;
3765	nic->cq_count = 0xFFFF;
3766	nic->toe_conn_count = 0xFFFF;
3767	nic->eq_count = 0xFFFF;
3768	nic->iface_count = 0xFFFF;
3769	nic->link_param = 0xFF;
3770	nic->channel_id_param = cpu_to_le16(0xF000);
3771	nic->acpi_params = 0xFF;
3772	nic->wol_param = 0x0F;
3773	nic->tunnel_iface_count = 0xFFFF;
3774	nic->direct_tenant_iface_count = 0xFFFF;
3775	nic->bw_min = 0xFFFFFFFF;
3776	nic->bw_max = 0xFFFFFFFF;
3777}
3778
3779/* Mark all fields invalid */
3780static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
3781{
3782	memset(pcie, 0, sizeof(*pcie));
3783	pcie->sriov_state = 0xFF;
3784	pcie->pf_state = 0xFF;
3785	pcie->pf_type = 0xFF;
3786	pcie->num_vfs = 0xFFFF;
3787}
3788
3789int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3790		      u8 domain)
3791{
3792	struct be_nic_res_desc nic_desc;
3793	u32 bw_percent;
3794	u16 version = 0;
3795
3796	if (BE3_chip(adapter))
3797		return be_cmd_set_qos(adapter, max_rate / 10, domain);
3798
3799	be_reset_nic_desc(&nic_desc);
3800	nic_desc.pf_num = adapter->pf_number;
3801	nic_desc.vf_num = domain;
3802	nic_desc.bw_min = 0;
3803	if (lancer_chip(adapter)) {
3804		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3805		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3806		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3807					(1 << NOSV_SHIFT);
3808		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
3809	} else {
3810		version = 1;
3811		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3812		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3813		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3814		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3815		nic_desc.bw_max = cpu_to_le32(bw_percent);
3816	}
3817
3818	return be_cmd_set_profile_config(adapter, &nic_desc,
3819					 nic_desc.hdr.desc_len,
3820					 1, version, domain);
3821}
3822
3823static void be_fill_vf_res_template(struct be_adapter *adapter,
3824				    struct be_resources pool_res,
3825				    u16 num_vfs, u16 num_vf_qs,
3826				    struct be_nic_res_desc *nic_vft)
3827{
3828	u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
3829	struct be_resources res_mod = {0};
3830
3831	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
3832	 * which are modifiable using SET_PROFILE_CONFIG cmd.
3833	 */
3834	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
3835
3836	/* If RSS IFACE capability flags are modifiable for a VF, set the
3837	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3838	 * more than 1 RSSQ is available for a VF.
3839	 * Otherwise, provision only 1 queue pair for VF.
3840	 */
3841	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3842		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3843		if (num_vf_qs > 1) {
3844			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
3845			if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
3846				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
3847		} else {
3848			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3849					     BE_IF_FLAGS_DEFQ_RSS);
3850		}
3851
3852		nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
3853	} else {
3854		num_vf_qs = 1;
3855	}
3856
3857	nic_vft->rq_count = cpu_to_le16(num_vf_qs);
3858	nic_vft->txq_count = cpu_to_le16(num_vf_qs);
3859	nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
3860	nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
3861					(num_vfs + 1));
3862
3863	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
3864	 * among the PF and it's VFs, if the fields are changeable
3865	 */
3866	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
3867		nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
3868							 (num_vfs + 1));
3869
3870	if (res_mod.max_vlans == FIELD_MODIFIABLE)
3871		nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
3872						  (num_vfs + 1));
3873
3874	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
3875		nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
3876						   (num_vfs + 1));
3877
3878	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
3879		nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
3880						 (num_vfs + 1));
3881}
3882
3883int be_cmd_set_sriov_config(struct be_adapter *adapter,
3884			    struct be_resources pool_res, u16 num_vfs,
3885			    u16 num_vf_qs)
3886{
3887	struct {
3888		struct be_pcie_res_desc pcie;
3889		struct be_nic_res_desc nic_vft;
3890	} __packed desc;
3891
3892	/* PF PCIE descriptor */
3893	be_reset_pcie_desc(&desc.pcie);
3894	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
3895	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3896	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
3897	desc.pcie.pf_num = adapter->pdev->devfn;
3898	desc.pcie.sriov_state = num_vfs ? 1 : 0;
3899	desc.pcie.num_vfs = cpu_to_le16(num_vfs);
3900
3901	/* VF NIC Template descriptor */
3902	be_reset_nic_desc(&desc.nic_vft);
3903	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3904	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3905	desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
3906	desc.nic_vft.pf_num = adapter->pdev->devfn;
3907	desc.nic_vft.vf_num = 0;
3908
3909	be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
3910				&desc.nic_vft);
3911
3912	return be_cmd_set_profile_config(adapter, &desc,
3913					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
3914}
3915
3916int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3917{
3918	struct be_mcc_wrb *wrb;
3919	struct be_cmd_req_manage_iface_filters *req;
3920	int status;
3921
3922	if (iface == 0xFFFFFFFF)
3923		return -1;
3924
3925	spin_lock_bh(&adapter->mcc_lock);
3926
3927	wrb = wrb_from_mccq(adapter);
3928	if (!wrb) {
3929		status = -EBUSY;
3930		goto err;
3931	}
3932	req = embedded_payload(wrb);
3933
3934	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3935			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
3936			       wrb, NULL);
3937	req->op = op;
3938	req->target_iface_id = cpu_to_le32(iface);
3939
3940	status = be_mcc_notify_wait(adapter);
3941err:
3942	spin_unlock_bh(&adapter->mcc_lock);
3943	return status;
3944}
3945
3946int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3947{
3948	struct be_port_res_desc port_desc;
3949
3950	memset(&port_desc, 0, sizeof(port_desc));
3951	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3952	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3953	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3954	port_desc.link_num = adapter->hba_port_num;
3955	if (port) {
3956		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3957					(1 << RCVID_SHIFT);
3958		port_desc.nv_port = swab16(port);
3959	} else {
3960		port_desc.nv_flags = NV_TYPE_DISABLED;
3961		port_desc.nv_port = 0;
3962	}
3963
3964	return be_cmd_set_profile_config(adapter, &port_desc,
3965					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
3966}
3967
3968int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3969		     int vf_num)
3970{
3971	struct be_mcc_wrb *wrb;
3972	struct be_cmd_req_get_iface_list *req;
3973	struct be_cmd_resp_get_iface_list *resp;
3974	int status;
3975
3976	spin_lock_bh(&adapter->mcc_lock);
3977
3978	wrb = wrb_from_mccq(adapter);
3979	if (!wrb) {
3980		status = -EBUSY;
3981		goto err;
3982	}
3983	req = embedded_payload(wrb);
3984
3985	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3986			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3987			       wrb, NULL);
3988	req->hdr.domain = vf_num + 1;
3989
3990	status = be_mcc_notify_wait(adapter);
3991	if (!status) {
3992		resp = (struct be_cmd_resp_get_iface_list *)req;
3993		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3994	}
3995
3996err:
3997	spin_unlock_bh(&adapter->mcc_lock);
3998	return status;
3999}
4000
4001static int lancer_wait_idle(struct be_adapter *adapter)
4002{
4003#define SLIPORT_IDLE_TIMEOUT 30
4004	u32 reg_val;
4005	int status = 0, i;
4006
4007	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
4008		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
4009		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4010			break;
4011
4012		ssleep(1);
4013	}
4014
4015	if (i == SLIPORT_IDLE_TIMEOUT)
4016		status = -1;
4017
4018	return status;
4019}
4020
4021int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4022{
4023	int status = 0;
4024
4025	status = lancer_wait_idle(adapter);
4026	if (status)
4027		return status;
4028
4029	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4030
4031	return status;
4032}
4033
4034/* Routine to check whether dump image is present or not */
4035bool dump_present(struct be_adapter *adapter)
4036{
4037	u32 sliport_status = 0;
4038
4039	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4040	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4041}
4042
4043int lancer_initiate_dump(struct be_adapter *adapter)
4044{
4045	struct device *dev = &adapter->pdev->dev;
4046	int status;
4047
4048	if (dump_present(adapter)) {
4049		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4050		return -EEXIST;
4051	}
4052
4053	/* give firmware reset and diagnostic dump */
4054	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4055				     PHYSDEV_CONTROL_DD_MASK);
4056	if (status < 0) {
4057		dev_err(dev, "FW reset failed\n");
4058		return status;
4059	}
4060
4061	status = lancer_wait_idle(adapter);
4062	if (status)
4063		return status;
4064
4065	if (!dump_present(adapter)) {
4066		dev_err(dev, "FW dump not generated\n");
4067		return -EIO;
4068	}
4069
4070	return 0;
4071}
4072
4073int lancer_delete_dump(struct be_adapter *adapter)
4074{
4075	int status;
4076
4077	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4078	return be_cmd_status(status);
4079}
4080
4081/* Uses sync mcc */
4082int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4083{
4084	struct be_mcc_wrb *wrb;
4085	struct be_cmd_enable_disable_vf *req;
4086	int status;
4087
4088	if (BEx_chip(adapter))
4089		return 0;
4090
4091	spin_lock_bh(&adapter->mcc_lock);
4092
4093	wrb = wrb_from_mccq(adapter);
4094	if (!wrb) {
4095		status = -EBUSY;
4096		goto err;
4097	}
4098
4099	req = embedded_payload(wrb);
4100
4101	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4102			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4103			       wrb, NULL);
4104
4105	req->hdr.domain = domain;
4106	req->enable = 1;
4107	status = be_mcc_notify_wait(adapter);
4108err:
4109	spin_unlock_bh(&adapter->mcc_lock);
4110	return status;
4111}
4112
4113int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4114{
4115	struct be_mcc_wrb *wrb;
4116	struct be_cmd_req_intr_set *req;
4117	int status;
4118
4119	if (mutex_lock_interruptible(&adapter->mbox_lock))
4120		return -1;
4121
4122	wrb = wrb_from_mbox(adapter);
4123
4124	req = embedded_payload(wrb);
4125
4126	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4127			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4128			       wrb, NULL);
4129
4130	req->intr_enabled = intr_enable;
4131
4132	status = be_mbox_notify_wait(adapter);
4133
4134	mutex_unlock(&adapter->mbox_lock);
4135	return status;
4136}
4137
4138/* Uses MBOX */
4139int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4140{
4141	struct be_cmd_req_get_active_profile *req;
4142	struct be_mcc_wrb *wrb;
4143	int status;
4144
4145	if (mutex_lock_interruptible(&adapter->mbox_lock))
4146		return -1;
4147
4148	wrb = wrb_from_mbox(adapter);
4149	if (!wrb) {
4150		status = -EBUSY;
4151		goto err;
4152	}
4153
4154	req = embedded_payload(wrb);
4155
4156	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4157			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4158			       wrb, NULL);
4159
4160	status = be_mbox_notify_wait(adapter);
4161	if (!status) {
4162		struct be_cmd_resp_get_active_profile *resp =
4163							embedded_payload(wrb);
4164
4165		*profile_id = le16_to_cpu(resp->active_profile_id);
4166	}
4167
4168err:
4169	mutex_unlock(&adapter->mbox_lock);
4170	return status;
4171}
4172
4173int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4174				   int link_state, u8 domain)
4175{
4176	struct be_mcc_wrb *wrb;
4177	struct be_cmd_req_set_ll_link *req;
4178	int status;
4179
4180	if (BEx_chip(adapter) || lancer_chip(adapter))
4181		return -EOPNOTSUPP;
4182
4183	spin_lock_bh(&adapter->mcc_lock);
4184
4185	wrb = wrb_from_mccq(adapter);
4186	if (!wrb) {
4187		status = -EBUSY;
4188		goto err;
4189	}
4190
4191	req = embedded_payload(wrb);
4192
4193	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4194			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4195			       sizeof(*req), wrb, NULL);
4196
4197	req->hdr.version = 1;
4198	req->hdr.domain = domain;
4199
4200	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
4201		req->link_config |= 1;
4202
4203	if (link_state == IFLA_VF_LINK_STATE_AUTO)
4204		req->link_config |= 1 << PLINK_TRACK_SHIFT;
4205
4206	status = be_mcc_notify_wait(adapter);
4207err:
4208	spin_unlock_bh(&adapter->mcc_lock);
4209	return status;
4210}
4211
4212int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
4213		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
4214{
4215	struct be_adapter *adapter = netdev_priv(netdev_handle);
4216	struct be_mcc_wrb *wrb;
4217	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
4218	struct be_cmd_req_hdr *req;
4219	struct be_cmd_resp_hdr *resp;
4220	int status;
4221
4222	spin_lock_bh(&adapter->mcc_lock);
4223
4224	wrb = wrb_from_mccq(adapter);
4225	if (!wrb) {
4226		status = -EBUSY;
4227		goto err;
4228	}
4229	req = embedded_payload(wrb);
4230	resp = embedded_payload(wrb);
4231
4232	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4233			       hdr->opcode, wrb_payload_size, wrb, NULL);
4234	memcpy(req, wrb_payload, wrb_payload_size);
4235	be_dws_cpu_to_le(req, wrb_payload_size);
4236
4237	status = be_mcc_notify_wait(adapter);
4238	if (cmd_status)
4239		*cmd_status = (status & 0xffff);
4240	if (ext_status)
4241		*ext_status = 0;
4242	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4243	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4244err:
4245	spin_unlock_bh(&adapter->mcc_lock);
4246	return status;
4247}
4248EXPORT_SYMBOL(be_roce_mcc_cmd);
4249