/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fh.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

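/* Unmap and free the firmware-monitor DMA buffer, if one was allocated */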
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}

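/*
 * Allocate the largest physically contiguous buffer we can get (from 64 MB
 * down to 2 KB) for the firmware monitor and map it for device DMA.  If a
 * buffer already exists, just resync it for device access.
 */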
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size;
	u8 power;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = 26; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}

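/* Indirect access to shared (SHR) registers via the HEEP control/data words */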
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

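/*
 * Select the NIC power source: VAUX if requested and the PCI device can
 * assert PME# from D3cold, VMAIN otherwise.
 */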
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    this costs a negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - the 7260 / 3160 are the only
		 * devices that need this work-around, so we key off
		 * host_interrupt_operation_mode even though the W/A itself
		 * is not related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but lets us be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activate XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

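/* Stop the device's bus-master DMA and wait (up to 100 us) for it to drain */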
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

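/*
 * Power down the NIC.  When leaving the op_mode, also let the ME/AMT
 * firmware know that the host driver is going away.
 */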
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

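/* Bring up the APM, configure the op_mode and (re)init the Rx and Tx queues */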
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 on success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	msleep(1);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
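/*
 * Program the FH service DMA channel to copy one chunk of the firmware
 * image into device SRAM, then wait (up to 5 s) for the DMA-done interrupt.
 */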
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

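/*
 * Load one uCode section into SRAM, chunk by chunk, through a bounce
 * buffer (ideally FH_MEM_TB_MAX_LENGTH bytes, falling back to one page).
 * Chunks that target the extended SRAM range need LMPM_CHICK set while
 * they are written.
 */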
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

/*
 * The driver takes ownership of the secure machine before FW load to
 * prevent a race with the BT firmware load.
 * W/A for a ROM bug (should be removed in the next Si step).
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check that the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_DEBUG_INFO(trans,
			       "can't access the RSA semaphore - it is write protected\n");
		return 0;
	}

	/* take ownership of the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership of the secure machine\n");
	return -EIO;
}

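/*
 * Load the sections belonging to one CPU of a family-8000 image, reporting
 * each section's completion to the uCode through FH_UCODE_LOAD_STATUS.
 */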
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	if (cpu == 1)
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);

	return 0;
}

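/* As above, but for pre-8000 images: load one CPU's sections in order */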
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}

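/*
 * Apply the debug-destination TLV from the firmware file: execute its
 * register ops, then, for an external (DRAM) monitor, point the monitor
 * base/end registers at the allocated DMA buffer.
 */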
static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size - 256) >>
						dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >>
						dest->end_shift);
	}
}

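/* Load a complete pre-8000 firmware image and release the CPU reset */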
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non-secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported on 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

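/* Load a complete family-8000 firmware image (secured flow, two CPUs) */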
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					      &first_ucode_section);
	if (ret)
		return ret;

	return 0;
}

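/*
 * Prepare the HW, sync the RF-kill state with the op_mode, init the NIC
 * and hand the firmware image to the loader that matches the device family.
 */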
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		return iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		return iwl_pcie_load_given_ucode(trans, fw);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	udelay(20);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		iwl_trans_pcie_stop_device(trans, true);
}

static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * reset TX queues -- some of their registers reset during S3
	 * so if we don't reset everything here the D3 image would try
	 * to execute some invalid memory upon resume
	 */
	iwl_trans_pcie_tx_reset(trans);

	iwl_pcie_set_pwr(trans, true);
}

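/* Wake the device from D3: restore MAC access, Tx/Rx state and power source */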
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	iwl_trans_pcie_tx_reset(trans);

	ret = iwl_pcie_rx_init(trans);
	if (ret) {
		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
		return ret;
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	bool hw_rfkill;
	int err;

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans, true);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);
}

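/* Thin MMIO accessors for the mapped BAR (hw_base) */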
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

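/*
 * Periphery (PRPH) registers are reached indirectly through the HBUS
 * target-access registers; judging by these dword-only accessors, the
 * (3 << 24) in the address word selects a full 4-byte access.
 */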
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	/* init ref_count to 1 (should be cleared when ucode is loaded) */
	trans_pcie->ref_count = 1;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
				     &trans_pcie->napi_dev,
				     iwl_pcie_dummy_napi_poll, 64);
	}
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}

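/*
 * Bulk SRAM access through the HBUS target-memory window; the data
 * register auto-increments the target address, so whole buffers can be
 * streamed one dword at a time.
 */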
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

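/*
 * Freeze or wake the stuck-queue timers of the given Tx queues, preserving
 * the remaining timeout across the frozen period.
 */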
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->q.read_ptr == txq->q.write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

#define IWL_FLUSH_WAIT_MS	2000

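/*
 * Wait (up to 2 s per queue) for the selected Tx queues to drain; on
 * timeout, dump scheduler and FH state to help debug the stuck queue.
 */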
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	trans_pcie->ref_count++;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
		return;
	}
	trans_pcie->ref_count--;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

1773#ifdef CONFIG_IWLWIFI_DEBUGFS
1774/* create and remove of files */
1775#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
1776	if (!debugfs_create_file(#name, mode, parent, trans,		\
1777				 &iwl_dbgfs_##name##_ops))		\
1778		goto err;						\
1779} while (0)
1780
1781/* file operation */
1782#define DEBUGFS_READ_FILE_OPS(name)					\
1783static const struct file_operations iwl_dbgfs_##name##_ops = {		\
1784	.read = iwl_dbgfs_##name##_read,				\
1785	.open = simple_open,						\
1786	.llseek = generic_file_llseek,					\
1787};
1788
1789#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
1790static const struct file_operations iwl_dbgfs_##name##_ops = {          \
1791	.write = iwl_dbgfs_##name##_write,                              \
1792	.open = simple_open,						\
1793	.llseek = generic_file_llseek,					\
1794};
1795
1796#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
1797static const struct file_operations iwl_dbgfs_##name##_ops = {		\
1798	.write = iwl_dbgfs_##name##_write,				\
1799	.read = iwl_dbgfs_##name##_read,				\
1800	.open = simple_open,						\
1801	.llseek = generic_file_llseek,					\
1802};
1803
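/* debugfs read: one status line per TX queue; the command queue is tagged HCMD */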
1804static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1805				       char __user *user_buf,
1806				       size_t count, loff_t *ppos)
1807{
1808	struct iwl_trans *trans = file->private_data;
1809	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1810	struct iwl_txq *txq;
1811	struct iwl_queue *q;
1812	char *buf;
1813	int pos = 0;
1814	int cnt;
1815	int ret;
1816	size_t bufsz;
1817
1818	bufsz = 75 * trans->cfg->base_params->num_of_queues;
1819
1820	if (!trans_pcie->txq)
1821		return -EAGAIN;
1822
1823	buf = kzalloc(bufsz, GFP_KERNEL);
1824	if (!buf)
1825		return -ENOMEM;
1826
1827	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1828		txq = &trans_pcie->txq[cnt];
1829		q = &txq->q;
1830		pos += scnprintf(buf + pos, bufsz - pos,
1831				"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
1832				cnt, q->read_ptr, q->write_ptr,
1833				!!test_bit(cnt, trans_pcie->queue_used),
1834				!!test_bit(cnt, trans_pcie->queue_stopped),
1835				txq->need_update, txq->frozen,
1836				(cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
1837	}
1838	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1839	kfree(buf);
1840	return ret;
1841}
1842
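/* debugfs read: RX queue pointers and free-buffer accounting */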
1843static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1844				       char __user *user_buf,
1845				       size_t count, loff_t *ppos)
1846{
1847	struct iwl_trans *trans = file->private_data;
1848	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1849	struct iwl_rxq *rxq = &trans_pcie->rxq;
1850	char buf[256];
1851	int pos = 0;
1852	const size_t bufsz = sizeof(buf);
1853
1854	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1855						rxq->read);
1856	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1857						rxq->write);
1858	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
1859						rxq->write_actual);
1860	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
1861						rxq->need_update);
1862	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1863						rxq->free_count);
1864	if (rxq->rb_stts) {
1865		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1866			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1867	} else {
1868		pos += scnprintf(buf + pos, bufsz - pos,
1869					"closed_rb_num: Not Allocated\n");
1870	}
1871	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1872}
1873
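/* debugfs read: accumulated interrupt statistics */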
1874static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1875					char __user *user_buf,
1876					size_t count, loff_t *ppos)
1877{
1878	struct iwl_trans *trans = file->private_data;
1879	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1880	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1881
1882	int pos = 0;
1883	char *buf;
1884	int bufsz = 24 * 64; /* 24 items * 64 char per item */
1885	ssize_t ret;
1886
1887	buf = kzalloc(bufsz, GFP_KERNEL);
1888	if (!buf)
1889		return -ENOMEM;
1890
1891	pos += scnprintf(buf + pos, bufsz - pos,
1892			"Interrupt Statistics Report:\n");
1893
1894	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
1895		isr_stats->hw);
1896	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
1897		isr_stats->sw);
1898	if (isr_stats->sw || isr_stats->hw) {
1899		pos += scnprintf(buf + pos, bufsz - pos,
1900			"\tLast Restarting Code:  0x%X\n",
1901			isr_stats->err_code);
1902	}
1903#ifdef CONFIG_IWLWIFI_DEBUG
1904	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
1905		isr_stats->sch);
1906	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
1907		isr_stats->alive);
1908#endif
1909	pos += scnprintf(buf + pos, bufsz - pos,
1910		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
1911
1912	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
1913		isr_stats->ctkill);
1914
1915	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
1916		isr_stats->wakeup);
1917
1918	pos += scnprintf(buf + pos, bufsz - pos,
1919		"Rx command responses:\t\t %u\n", isr_stats->rx);
1920
1921	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
1922		isr_stats->tx);
1923
1924	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
1925		isr_stats->unhandled);
1926
1927	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1928	kfree(buf);
1929	return ret;
1930}
1931
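/* debugfs write: writing 0 (hex) clears the accumulated interrupt statistics */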
1932static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1933					 const char __user *user_buf,
1934					 size_t count, loff_t *ppos)
1935{
1936	struct iwl_trans *trans = file->private_data;
1937	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1938	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1939
1940	char buf[8];
1941	int buf_size;
1942	u32 reset_flag;
1943
1944	memset(buf, 0, sizeof(buf));
1945	buf_size = min(count, sizeof(buf) - 1);
1946	if (copy_from_user(buf, user_buf, buf_size))
1947		return -EFAULT;
1948	if (sscanf(buf, "%x", &reset_flag) != 1)
1949		return -EFAULT;
1950	if (reset_flag == 0)
1951		memset(isr_stats, 0, sizeof(*isr_stats));
1952
1953	return count;
1954}
1955
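/*
 * debugfs write: any value that parses triggers a full CSR dump to the
 * kernel log; the parsed value itself is currently unused.
 */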
1956static ssize_t iwl_dbgfs_csr_write(struct file *file,
1957				   const char __user *user_buf,
1958				   size_t count, loff_t *ppos)
1959{
1960	struct iwl_trans *trans = file->private_data;
1961	char buf[8];
1962	int buf_size;
1963	int csr;
1964
1965	memset(buf, 0, sizeof(buf));
1966	buf_size = min(count, sizeof(buf) - 1);
1967	if (copy_from_user(buf, user_buf, buf_size))
1968		return -EFAULT;
1969	if (sscanf(buf, "%d", &csr) != 1)
1970		return -EFAULT;
1971
1972	iwl_pcie_dump_csr(trans);
1973
1974	return count;
1975}
1976
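/* debugfs read: dump of the flow handler (DMA) registers */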
1977static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1978				     char __user *user_buf,
1979				     size_t count, loff_t *ppos)
1980{
1981	struct iwl_trans *trans = file->private_data;
1982	char *buf = NULL;
1983	ssize_t ret;
1984
1985	ret = iwl_dump_fh(trans, &buf);
1986	if (ret < 0)
1987		return ret;
1988	if (!buf)
1989		return -EINVAL;
1990	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
1991	kfree(buf);
1992	return ret;
1993}
1994
1995DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
1996DEBUGFS_READ_FILE_OPS(fh_reg);
1997DEBUGFS_READ_FILE_OPS(rx_queue);
1998DEBUGFS_READ_FILE_OPS(tx_queue);
1999DEBUGFS_WRITE_FILE_OPS(csr);
2000
2001/* Create the debugfs files and directories */
2005static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2006					 struct dentry *dir)
2007{
2008	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2009	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
2010	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
2011	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
2012	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
2013	return 0;
2014
2015err:
2016	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
2017	return -ENOMEM;
2018}
2019#else
2020static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2021					 struct dentry *dir)
2022{
2023	return 0;
2024}
2025#endif /* CONFIG_IWLWIFI_DEBUGFS */
2026
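/* Sum the lengths of all transfer buffers (TBs) referenced by a TFD. */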
2027static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
2028{
2029	u32 cmdlen = 0;
2030	int i;
2031
2032	for (i = 0; i < IWL_NUM_OF_TBS; i++)
2033		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
2034
2035	return cmdlen;
2036}
2037
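/*
 * Periphery register ranges to include in firmware error dumps; both
 * boundaries of each range are read.
 */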
2038static const struct {
2039	u32 start, end;
2040} iwl_prph_dump_addr[] = {
2041	{ .start = 0x00a00000, .end = 0x00a00000 },
2042	{ .start = 0x00a0000c, .end = 0x00a00024 },
2043	{ .start = 0x00a0002c, .end = 0x00a0003c },
2044	{ .start = 0x00a00410, .end = 0x00a00418 },
2045	{ .start = 0x00a00420, .end = 0x00a00420 },
2046	{ .start = 0x00a00428, .end = 0x00a00428 },
2047	{ .start = 0x00a00430, .end = 0x00a0043c },
2048	{ .start = 0x00a00444, .end = 0x00a00444 },
2049	{ .start = 0x00a004c0, .end = 0x00a004cc },
2050	{ .start = 0x00a004d8, .end = 0x00a004d8 },
2051	{ .start = 0x00a004e0, .end = 0x00a004f0 },
2052	{ .start = 0x00a00840, .end = 0x00a00840 },
2053	{ .start = 0x00a00850, .end = 0x00a00858 },
2054	{ .start = 0x00a01004, .end = 0x00a01008 },
2055	{ .start = 0x00a01010, .end = 0x00a01010 },
2056	{ .start = 0x00a01018, .end = 0x00a01018 },
2057	{ .start = 0x00a01024, .end = 0x00a01024 },
2058	{ .start = 0x00a0102c, .end = 0x00a01034 },
2059	{ .start = 0x00a0103c, .end = 0x00a01040 },
2060	{ .start = 0x00a01048, .end = 0x00a01094 },
2061	{ .start = 0x00a01c00, .end = 0x00a01c20 },
2062	{ .start = 0x00a01c58, .end = 0x00a01c58 },
2063	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
2064	{ .start = 0x00a01c28, .end = 0x00a01c54 },
2065	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
2066	{ .start = 0x00a01c60, .end = 0x00a01cdc },
2067	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
2068	{ .start = 0x00a01d18, .end = 0x00a01d20 },
2069	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
2070	{ .start = 0x00a01d40, .end = 0x00a01d5c },
2071	{ .start = 0x00a01d80, .end = 0x00a01d80 },
2072	{ .start = 0x00a01d98, .end = 0x00a01d9c },
2073	{ .start = 0x00a01da8, .end = 0x00a01da8 },
2074	{ .start = 0x00a01db8, .end = 0x00a01df4 },
2075	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
2076	{ .start = 0x00a01e00, .end = 0x00a01e2c },
2077	{ .start = 0x00a01e40, .end = 0x00a01e60 },
2078	{ .start = 0x00a01e68, .end = 0x00a01e6c },
2079	{ .start = 0x00a01e74, .end = 0x00a01e74 },
2080	{ .start = 0x00a01e84, .end = 0x00a01e90 },
2081	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
2082	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
2083	{ .start = 0x00a01f00, .end = 0x00a01f1c },
2084	{ .start = 0x00a01f44, .end = 0x00a01ffc },
2085	{ .start = 0x00a02000, .end = 0x00a02048 },
2086	{ .start = 0x00a02068, .end = 0x00a020f0 },
2087	{ .start = 0x00a02100, .end = 0x00a02118 },
2088	{ .start = 0x00a02140, .end = 0x00a0214c },
2089	{ .start = 0x00a02168, .end = 0x00a0218c },
2090	{ .start = 0x00a021c0, .end = 0x00a021c0 },
2091	{ .start = 0x00a02400, .end = 0x00a02410 },
2092	{ .start = 0x00a02418, .end = 0x00a02420 },
2093	{ .start = 0x00a02428, .end = 0x00a0242c },
2094	{ .start = 0x00a02434, .end = 0x00a02434 },
2095	{ .start = 0x00a02440, .end = 0x00a02460 },
2096	{ .start = 0x00a02468, .end = 0x00a024b0 },
2097	{ .start = 0x00a024c8, .end = 0x00a024cc },
2098	{ .start = 0x00a02500, .end = 0x00a02504 },
2099	{ .start = 0x00a0250c, .end = 0x00a02510 },
2100	{ .start = 0x00a02540, .end = 0x00a02554 },
2101	{ .start = 0x00a02580, .end = 0x00a025f4 },
2102	{ .start = 0x00a02600, .end = 0x00a0260c },
2103	{ .start = 0x00a02648, .end = 0x00a02650 },
2104	{ .start = 0x00a02680, .end = 0x00a02680 },
2105	{ .start = 0x00a026c0, .end = 0x00a026d0 },
2106	{ .start = 0x00a02700, .end = 0x00a0270c },
2107	{ .start = 0x00a02804, .end = 0x00a02804 },
2108	{ .start = 0x00a02818, .end = 0x00a0281c },
2109	{ .start = 0x00a02c00, .end = 0x00a02db4 },
2110	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
2111	{ .start = 0x00a03000, .end = 0x00a03014 },
2112	{ .start = 0x00a0301c, .end = 0x00a0302c },
2113	{ .start = 0x00a03034, .end = 0x00a03038 },
2114	{ .start = 0x00a03040, .end = 0x00a03048 },
2115	{ .start = 0x00a03060, .end = 0x00a03068 },
2116	{ .start = 0x00a03070, .end = 0x00a03074 },
2117	{ .start = 0x00a0307c, .end = 0x00a0307c },
2118	{ .start = 0x00a03080, .end = 0x00a03084 },
2119	{ .start = 0x00a0308c, .end = 0x00a03090 },
2120	{ .start = 0x00a03098, .end = 0x00a03098 },
2121	{ .start = 0x00a030a0, .end = 0x00a030a0 },
2122	{ .start = 0x00a030a8, .end = 0x00a030b4 },
2123	{ .start = 0x00a030bc, .end = 0x00a030bc },
2124	{ .start = 0x00a030c0, .end = 0x00a0312c },
2125	{ .start = 0x00a03c00, .end = 0x00a03c5c },
2126	{ .start = 0x00a04400, .end = 0x00a04454 },
2127	{ .start = 0x00a04460, .end = 0x00a04474 },
2128	{ .start = 0x00a044c0, .end = 0x00a044ec },
2129	{ .start = 0x00a04500, .end = 0x00a04504 },
2130	{ .start = 0x00a04510, .end = 0x00a04538 },
2131	{ .start = 0x00a04540, .end = 0x00a04548 },
2132	{ .start = 0x00a04560, .end = 0x00a0457c },
2133	{ .start = 0x00a04590, .end = 0x00a04598 },
2134	{ .start = 0x00a045c0, .end = 0x00a045f4 },
2135};
2136
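/*
 * Read all periphery register ranges listed above into the error dump,
 * one IWL_FW_ERROR_DUMP_PRPH chunk per range.  Returns the number of
 * bytes added, or 0 if NIC access could not be acquired.
 */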
2137static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
2138				    struct iwl_fw_error_dump_data **data)
2139{
2140	struct iwl_fw_error_dump_prph *prph;
2141	unsigned long flags;
2142	u32 prph_len = 0, i;
2143
2144	if (!iwl_trans_grab_nic_access(trans, false, &flags))
2145		return 0;
2146
2147	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
2148		/* The range includes both boundaries */
2149		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
2150			 iwl_prph_dump_addr[i].start + 4;
2151		int reg;
2152		__le32 *val;
2153
2154		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
2155
2156		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
2157		(*data)->len = cpu_to_le32(sizeof(*prph) +
2158					num_bytes_in_chunk);
2159		prph = (void *)(*data)->data;
2160		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
2161		val = (void *)prph->data;
2162
2163		for (reg = iwl_prph_dump_addr[i].start;
2164		     reg <= iwl_prph_dump_addr[i].end;
2165		     reg += 4)
2166			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
2167								      reg));
2168		*data = iwl_fw_error_next_data(*data);
2169	}
2170
2171	iwl_trans_release_nic_access(trans, &flags);
2172
2173	return prph_len;
2174}
2175
2176#define IWL_CSR_TO_DUMP (0x250)
2177
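/* Copy the first IWL_CSR_TO_DUMP bytes of CSR space into the dump. */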
2178static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
2179				   struct iwl_fw_error_dump_data **data)
2180{
2181	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
2182	__le32 *val;
2183	int i;
2184
2185	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
2186	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
2187	val = (void *)(*data)->data;
2188
2189	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
2190		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2191
2192	*data = iwl_fw_error_next_data(*data);
2193
2194	return csr_len;
2195}
2196
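/* Copy the flow handler (DMA) register range into the dump. */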
2197static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
2198				       struct iwl_fw_error_dump_data **data)
2199{
2200	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
2201	unsigned long flags;
2202	__le32 *val;
2203	int i;
2204
2205	if (!iwl_trans_grab_nic_access(trans, false, &flags))
2206		return 0;
2207
2208	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
2209	(*data)->len = cpu_to_le32(fh_regs_len);
2210	val = (void *)(*data)->data;
2211
2212	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
2213		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2214
2215	iwl_trans_release_nic_access(trans, &flags);
2216
2217	*data = iwl_fw_error_next_data(*data);
2218
2219	return sizeof(**data) + fh_regs_len;
2220}
2221
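/*
 * Build the full transport dump: the most recent host commands from the
 * command queue, periphery and CSR registers, the FH register range and,
 * when available, the firmware monitor.  The required size is computed
 * first so that the whole dump fits in a single vmalloc'ed buffer.
 */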
2222static
2223struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
2224{
2225	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2226	struct iwl_fw_error_dump_data *data;
2227	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
2228	struct iwl_fw_error_dump_txcmd *txcmd;
2229	struct iwl_trans_dump_data *dump_data;
2230	u32 len;
2231	u32 monitor_len;
2232	int i, ptr;
2233
2234	/* transport dump header */
2235	len = sizeof(*dump_data);
2236
2237	/* host commands */
2238	len += sizeof(*data) +
2239		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
2240
2241	/* CSR registers */
2242	len += sizeof(*data) + IWL_CSR_TO_DUMP;
2243
2244	/* PRPH registers */
2245	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
2246		/* The range includes both boundaries */
2247		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
2248			iwl_prph_dump_addr[i].start + 4;
2249
2250		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
2251			num_bytes_in_chunk;
2252	}
2253
2254	/* FH registers */
2255	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
2256
2257	/* FW monitor */
2258	if (trans_pcie->fw_mon_page) {
2259		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
2260		       trans_pcie->fw_mon_size;
2261		monitor_len = trans_pcie->fw_mon_size;
2262	} else if (trans->dbg_dest_tlv) {
2263		u32 base, end;
2264
2265		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
2266		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
2267
2268		base = iwl_read_prph(trans, base) <<
2269		       trans->dbg_dest_tlv->base_shift;
2270		end = iwl_read_prph(trans, end) <<
2271		      trans->dbg_dest_tlv->end_shift;
2272
2273		/* Make "end" point to the actual end */
2274		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
2275			end += (1 << trans->dbg_dest_tlv->end_shift);
2276		monitor_len = end - base;
2277		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
2278		       monitor_len;
2279	} else {
2280		monitor_len = 0;
2281	}
2282
2283	dump_data = vzalloc(len);
2284	if (!dump_data)
2285		return NULL;
2286
2287	len = 0;
2288	data = (void *)dump_data->data;
2289	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
2290	txcmd = (void *)data->data;
2291	spin_lock_bh(&cmdq->lock);
2292	ptr = cmdq->q.write_ptr;
2293	for (i = 0; i < cmdq->q.n_window; i++) {
2294		u8 idx = get_cmd_index(&cmdq->q, ptr);
2295		u32 caplen, cmdlen;
2296
2297		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
2298		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
2299
2300		if (cmdlen) {
2301			len += sizeof(*txcmd) + caplen;
2302			txcmd->cmdlen = cpu_to_le32(cmdlen);
2303			txcmd->caplen = cpu_to_le32(caplen);
2304			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
2305			txcmd = (void *)((u8 *)txcmd->data + caplen);
2306		}
2307
2308		ptr = iwl_queue_dec_wrap(ptr);
2309	}
2310	spin_unlock_bh(&cmdq->lock);
2311
2312	data->len = cpu_to_le32(len);
2313	len += sizeof(*data);
2314	data = iwl_fw_error_next_data(data);
2315
2316	len += iwl_trans_pcie_dump_prph(trans, &data);
2317	len += iwl_trans_pcie_dump_csr(trans, &data);
2318	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
2319	/* data is already pointing to the next section */
2320
2321	if ((trans_pcie->fw_mon_page &&
2322	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
2323	    trans->dbg_dest_tlv) {
2324		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
2325		u32 base, write_ptr, wrap_cnt;
2326
2327		/* If there was a dest TLV - use the values from there */
2328		if (trans->dbg_dest_tlv) {
2329			write_ptr =
2330				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
2331			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
2332			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
2333		} else {
2334			base = MON_BUFF_BASE_ADDR;
2335			write_ptr = MON_BUFF_WRPTR;
2336			wrap_cnt = MON_BUFF_CYCLE_CNT;
2337		}
2338
2339		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
2340		fw_mon_data = (void *)data->data;
2341		fw_mon_data->fw_mon_wr_ptr =
2342			cpu_to_le32(iwl_read_prph(trans, write_ptr));
2343		fw_mon_data->fw_mon_cycle_cnt =
2344			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
2345		fw_mon_data->fw_mon_base_ptr =
2346			cpu_to_le32(iwl_read_prph(trans, base));
2347
2348		len += sizeof(*data) + sizeof(*fw_mon_data);
2349		if (trans_pcie->fw_mon_page) {
2350			data->len = cpu_to_le32(trans_pcie->fw_mon_size +
2351						sizeof(*fw_mon_data));
2352
2353			/*
2354			 * The firmware is now asserted, so it won't write to
2355			 * the buffer. The CPU can take ownership to fetch the
2356			 * data. The buffer will be handed back to the device
2357			 * before the firmware is restarted.
2358			 */
2359			dma_sync_single_for_cpu(trans->dev,
2360						trans_pcie->fw_mon_phys,
2361						trans_pcie->fw_mon_size,
2362						DMA_FROM_DEVICE);
2363			memcpy(fw_mon_data->data,
2364			       page_address(trans_pcie->fw_mon_page),
2365			       trans_pcie->fw_mon_size);
2366
2367			len += trans_pcie->fw_mon_size;
2368		} else {
2369			/* If we are here then the buffer is internal */
2370
2371			/*
2372			 * Update pointers to reflect actual values after
2373			 * shifting
2374			 */
2375			base = iwl_read_prph(trans, base) <<
2376			       trans->dbg_dest_tlv->base_shift;
2377			iwl_trans_read_mem(trans, base, fw_mon_data->data,
2378					   monitor_len / sizeof(u32));
2379			data->len = cpu_to_le32(sizeof(*fw_mon_data) +
2380						monitor_len);
2381			len += monitor_len;
2382		}
2383	}
2384
2385	dump_data->len = len;
2386
2387	return dump_data;
2388}
2389
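/* Transport ops implemented by the PCIe back-end */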
2390static const struct iwl_trans_ops trans_ops_pcie = {
2391	.start_hw = iwl_trans_pcie_start_hw,
2392	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
2393	.fw_alive = iwl_trans_pcie_fw_alive,
2394	.start_fw = iwl_trans_pcie_start_fw,
2395	.stop_device = iwl_trans_pcie_stop_device,
2396
2397	.d3_suspend = iwl_trans_pcie_d3_suspend,
2398	.d3_resume = iwl_trans_pcie_d3_resume,
2399
2400	.send_cmd = iwl_trans_pcie_send_hcmd,
2401
2402	.tx = iwl_trans_pcie_tx,
2403	.reclaim = iwl_trans_pcie_reclaim,
2404
2405	.txq_disable = iwl_trans_pcie_txq_disable,
2406	.txq_enable = iwl_trans_pcie_txq_enable,
2407
2408	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
2409
2410	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
2411	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
2412
2413	.write8 = iwl_trans_pcie_write8,
2414	.write32 = iwl_trans_pcie_write32,
2415	.read32 = iwl_trans_pcie_read32,
2416	.read_prph = iwl_trans_pcie_read_prph,
2417	.write_prph = iwl_trans_pcie_write_prph,
2418	.read_mem = iwl_trans_pcie_read_mem,
2419	.write_mem = iwl_trans_pcie_write_mem,
2420	.configure = iwl_trans_pcie_configure,
2421	.set_pmi = iwl_trans_pcie_set_pmi,
2422	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
2423	.release_nic_access = iwl_trans_pcie_release_nic_access,
2424	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
2425
2426	.ref = iwl_trans_pcie_ref,
2427	.unref = iwl_trans_pcie_unref,
2428
2429	.dump_data = iwl_trans_pcie_dump_data,
2430};
2431
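/*
 * Allocate and set up a PCIe transport: enable the PCI device, map BAR0,
 * configure the DMA masks (36-bit with a 32-bit fallback), enable MSI
 * and hook up the threaded interrupt handler.  On failure everything is
 * unwound and an ERR_PTR is returned.
 */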
2432struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2433				       const struct pci_device_id *ent,
2434				       const struct iwl_cfg *cfg)
2435{
2436	struct iwl_trans_pcie *trans_pcie;
2437	struct iwl_trans *trans;
2438	u16 pci_cmd;
2439	int err;
2440
2441	trans = kzalloc(sizeof(struct iwl_trans) +
2442			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
2443	if (!trans) {
2444		err = -ENOMEM;
2445		goto out;
2446	}
2447
2448	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2449
2450	trans->ops = &trans_ops_pcie;
2451	trans->cfg = cfg;
2452	trans_lockdep_init(trans);
2453	trans_pcie->trans = trans;
2454	spin_lock_init(&trans_pcie->irq_lock);
2455	spin_lock_init(&trans_pcie->reg_lock);
2456	spin_lock_init(&trans_pcie->ref_lock);
2457	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
2458
2459	err = pci_enable_device(pdev);
2460	if (err)
2461		goto out_no_pci;
2462
2463	if (!cfg->base_params->pcie_l1_allowed) {
2464		/*
2465		 * Workaround - seems to resolve erratic behavior. Disabling
2466		 * ASPM keeps the link in L0 all the time, which wastes a lot
2467		 * of power; remove this once staying in L1 is acceptable.
2468		 */
2469		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
2470				       PCIE_LINK_STATE_L1 |
2471				       PCIE_LINK_STATE_CLKPM);
2472	}
2473
2474	pci_set_master(pdev);
2475
2476	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2477	if (!err)
2478		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2479	if (err) {
2480		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2481		if (!err)
2482			err = pci_set_consistent_dma_mask(pdev,
2483							  DMA_BIT_MASK(32));
2484		/* both attempts failed: */
2485		if (err) {
2486			dev_err(&pdev->dev, "No suitable DMA available\n");
2487			goto out_pci_disable_device;
2488		}
2489	}
2490
2491	err = pci_request_regions(pdev, DRV_NAME);
2492	if (err) {
2493		dev_err(&pdev->dev, "pci_request_regions failed\n");
2494		goto out_pci_disable_device;
2495	}
2496
2497	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
2498	if (!trans_pcie->hw_base) {
2499		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
2500		err = -ENODEV;
2501		goto out_pci_release_regions;
2502	}
2503
2504	/* We disable the RETRY_TIMEOUT register (0x41) to keep
2505	 * PCI Tx retries from interfering with C3 CPU state */
2506	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2507
2508	trans->dev = &pdev->dev;
2509	trans_pcie->pci_dev = pdev;
2510	iwl_disable_interrupts(trans);
2511
2512	err = pci_enable_msi(pdev);
2513	if (err) {
2514		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
2515		/* enable rfkill interrupt: hw bug w/a */
2516		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2517		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
2518			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
2519			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
2520		}
2521	}
2522
2523	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
2524	/*
2525	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
2526	 * changed: the revision step now also includes bits 0-1 (there is no
2527	 * more "dash" value). To keep hw_rev backwards compatible, we store
2528	 * it in the old format.
2529	 */
2530	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
2531		unsigned long flags;
2532		int ret;
2533
2534		trans->hw_rev = (trans->hw_rev & 0xfff0) |
2535				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
2536
2537		ret = iwl_pcie_prepare_card_hw(trans);
2538		if (ret) {
2539			IWL_WARN(trans, "Exit HW not ready\n");
2540			goto out_pci_disable_msi;
2541		}
2542
2543		/*
2544		 * In order to recognize a C step, the driver should read the
2545		 * chip version id, located in the AUX bus MISC address space.
2546		 */
2547		iwl_set_bit(trans, CSR_GP_CNTRL,
2548			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2549		udelay(2);
2550
2551		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
2552				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2553				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2554				   25000);
2555		if (ret < 0) {
2556			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
2557			goto out_pci_disable_msi;
2558		}
2559
2560		if (iwl_trans_grab_nic_access(trans, false, &flags)) {
2561			u32 hw_step;
2562
2563			hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
2564			hw_step |= ENABLE_WFPM;
2565			__iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
2566			hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
2567			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
2568			if (hw_step == 0x3)
2569				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
2570						(SILICON_C_STEP << 2);
2571			iwl_trans_release_nic_access(trans, &flags);
2572		}
2573	}
2574
2575	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
2576	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
2577		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
2578
2579	/* Initialize the wait queue for commands */
2580	init_waitqueue_head(&trans_pcie->wait_command_queue);
2581
2582	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
2583		 "iwl_cmd_pool:%s", dev_name(trans->dev));
2584
2585	trans->dev_cmd_headroom = 0;
2586	trans->dev_cmd_pool =
2587		kmem_cache_create(trans->dev_cmd_pool_name,
2588				  sizeof(struct iwl_device_cmd)
2589				  + trans->dev_cmd_headroom,
2590				  sizeof(void *),
2591				  SLAB_HWCACHE_ALIGN,
2592				  NULL);
2593
2594	if (!trans->dev_cmd_pool) {
2595		err = -ENOMEM;
2596		goto out_pci_disable_msi;
2597	}
2598
2599	err = iwl_pcie_alloc_ict(trans);
2600	if (err)
		goto out_free_cmd_pool;
2601
2602	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
2603				   iwl_pcie_irq_handler,
2604				   IRQF_SHARED, DRV_NAME, trans);
2605	if (err) {
2606		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
2607		goto out_free_ict;
2608	}
2609
2610	trans_pcie->inta_mask = CSR_INI_SET_MASK;
2611	trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;
2612
2613	return trans;
2614
2615out_free_ict:
2616	iwl_pcie_free_ict(trans);
2617out_free_cmd_pool:
2618	kmem_cache_destroy(trans->dev_cmd_pool);
2619out_pci_disable_msi:
2620	pci_disable_msi(pdev);
2621out_pci_release_regions:
2622	pci_release_regions(pdev);
2623out_pci_disable_device:
2624	pci_disable_device(pdev);
2625out_no_pci:
2626	kfree(trans);
2627out:
2628	return ERR_PTR(err);
2629}
2630