Lines matching refs: ioc
38 static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
39 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
40 static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
41 static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
42 static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
43 static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
44 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
45 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
46 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
47 static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
48 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
49 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
50 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
51 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
53 struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
54 static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
56 struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
57 static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
62 static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
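The forward declarations above are the per-ASIC (CT vs. CT2) hooks that the common IOC code invokes through a function-pointer table; the two setters that follow, bfa_nw_ioc_set_ct_hwif() and bfa_nw_ioc_set_ct2_hwif(), install one table or the other. A minimal, self-contained sketch of that pattern follows; the struct and member names here are illustrative placeholders, not the driver's actual bfa_ioc_hwif layout.

#include <stdbool.h>

/* Illustrative ops-table pattern (placeholder names, hypothetical layout). */
struct ioc;                                   /* opaque controller handle */

struct ioc_hwif {
	bool (*firmware_lock)(struct ioc *ioc);
	void (*firmware_unlock)(struct ioc *ioc);
	void (*reg_init)(struct ioc *ioc);
	void (*map_port)(struct ioc *ioc);
	void (*notify_fail)(struct ioc *ioc);
};

struct ioc {
	const struct ioc_hwif *hwif;          /* set once per ASIC generation */
};

/* Common code calls through the table and never tests the ASIC type: */
static inline bool ioc_firmware_lock(struct ioc *ioc)
{
	return ioc->hwif->firmware_lock(ioc);
}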
107 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) in bfa_nw_ioc_set_ct_hwif() argument
109 ioc->ioc_hwif = &nw_hwif_ct; in bfa_nw_ioc_set_ct_hwif()
113 bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) in bfa_nw_ioc_set_ct2_hwif() argument
115 ioc->ioc_hwif = &nw_hwif_ct2; in bfa_nw_ioc_set_ct2_hwif()
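Which setter gets called is not visible in these matches; presumably the probe path keys it off the ASIC generation (compare bfa_ioc_asic_gen() used further down). A hypothetical glue function, just to show the intent; the enum is a placeholder, not the driver's real bfi_asic_gen type:

/* Hypothetical probe-time glue; enum values are placeholders. */
enum asic_gen_sketch { ASIC_GEN_CT, ASIC_GEN_CT2 };

static void ioc_install_hwif(struct bfa_ioc *ioc, enum asic_gen_sketch gen)
{
	if (gen == ASIC_GEN_CT2)
		bfa_nw_ioc_set_ct2_hwif(ioc);   /* newer ASIC */
	else
		bfa_nw_ioc_set_ct_hwif(ioc);    /* original CT */
}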
120 bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) in bfa_ioc_ct_firmware_lock() argument
129 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < in bfa_ioc_ct_firmware_lock()
133 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_lock()
134 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_lock()
140 writel(1, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_lock()
141 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_lock()
142 writel(0, ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_firmware_lock()
146 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_firmware_lock()
156 bfa_nw_ioc_fwver_get(ioc, &fwhdr); in bfa_ioc_ct_firmware_lock()
157 if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) { in bfa_ioc_ct_firmware_lock()
158 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_lock()
166 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_lock()
167 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_lock()
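bfa_ioc_ct_firmware_lock() implements a use-count handshake across PCI functions: the first function to get the usage semaphore finds the count at zero, claims the firmware (and clears the fail-sync word), and returns true so the caller boots it; later functions only join if the already-running firmware matches their own image, bumping the count, otherwise they release the semaphore and report failure. A simplified, self-contained model of that protocol (it drops the flash-boot early-out and the fwstate sanity check):

#include <stdbool.h>
#include <stdint.h>

/* Toy model of the CT firmware use-count handshake.  The "registers"
 * are plain variables; in the driver they are readl()/writel() accesses
 * guarded by the ioc_usage_sem_reg hardware semaphore, and the
 * firmware-version check is done against the image in shared memory. */
static uint32_t usage_reg;       /* models BFA_FW_USE_COUNT  */
static uint32_t fail_sync_reg;   /* models BFA_IOC_FAIL_SYNC */

static bool ct_firmware_lock(bool fw_matches_running_image)
{
	/* bfa_nw_ioc_sem_get(usage_sem) would go here */
	if (usage_reg == 0) {
		/* First function in: claim the firmware and boot it. */
		usage_reg = 1;
		fail_sync_reg = 0;
		/* bfa_nw_ioc_sem_release(usage_sem) */
		return true;
	}

	if (!fw_matches_running_image) {
		/* Another function booted a different firmware: back off. */
		/* bfa_nw_ioc_sem_release(usage_sem) */
		return false;
	}

	/* Same firmware: join it and bump the reference count. */
	usage_reg++;
	/* bfa_nw_ioc_sem_release(usage_sem) */
	return true;
}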
172 bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) in bfa_ioc_ct_firmware_unlock() argument
179 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < in bfa_ioc_ct_firmware_unlock()
186 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_unlock()
187 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_unlock()
191 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_unlock()
193 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_unlock()
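bfa_ioc_ct_firmware_unlock() is the mirror image: re-take the usage semaphore, drop this function's reference, and write the counter back. Continuing the toy model above:

static void ct_firmware_unlock(void)
{
	/* bfa_nw_ioc_sem_get(usage_sem) */
	if (usage_reg > 0)
		usage_reg--;            /* drop this function's reference */
	/* bfa_nw_ioc_sem_release(usage_sem) */
}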
198 bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) in bfa_ioc_ct_notify_fail() argument
200 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); in bfa_ioc_ct_notify_fail()
201 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); in bfa_ioc_ct_notify_fail()
203 readl(ioc->ioc_regs.ll_halt); in bfa_ioc_ct_notify_fail()
204 readl(ioc->ioc_regs.alt_ll_halt); in bfa_ioc_ct_notify_fail()
258 bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) in bfa_ioc_ct_reg_init() argument
261 int pcifn = bfa_ioc_pcifn(ioc); in bfa_ioc_ct_reg_init()
263 rb = bfa_ioc_bar0(ioc); in bfa_ioc_ct_reg_init()
265 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; in bfa_ioc_ct_reg_init()
266 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; in bfa_ioc_ct_reg_init()
267 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; in bfa_ioc_ct_reg_init()
269 if (ioc->port_id == 0) { in bfa_ioc_ct_reg_init()
270 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; in bfa_ioc_ct_reg_init()
271 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_ct_reg_init()
272 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_ct_reg_init()
273 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; in bfa_ioc_ct_reg_init()
274 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; in bfa_ioc_ct_reg_init()
275 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; in bfa_ioc_ct_reg_init()
276 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; in bfa_ioc_ct_reg_init()
278 ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; in bfa_ioc_ct_reg_init()
279 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_ct_reg_init()
280 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_ct_reg_init()
281 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; in bfa_ioc_ct_reg_init()
282 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; in bfa_ioc_ct_reg_init()
283 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; in bfa_ioc_ct_reg_init()
284 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; in bfa_ioc_ct_reg_init()
290 ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; in bfa_ioc_ct_reg_init()
291 ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; in bfa_ioc_ct_reg_init()
292 ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; in bfa_ioc_ct_reg_init()
293 ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; in bfa_ioc_ct_reg_init()
298 ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; in bfa_ioc_ct_reg_init()
299 ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; in bfa_ioc_ct_reg_init()
300 ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG; in bfa_ioc_ct_reg_init()
301 ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; in bfa_ioc_ct_reg_init()
302 ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC; in bfa_ioc_ct_reg_init()
307 ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; in bfa_ioc_ct_reg_init()
308 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; in bfa_ioc_ct_reg_init()
313 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); in bfa_ioc_ct_reg_init()
317 bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) in bfa_ioc_ct2_reg_init() argument
320 int port = bfa_ioc_portid(ioc); in bfa_ioc_ct2_reg_init()
322 rb = bfa_ioc_bar0(ioc); in bfa_ioc_ct2_reg_init()
324 ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; in bfa_ioc_ct2_reg_init()
325 ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; in bfa_ioc_ct2_reg_init()
326 ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; in bfa_ioc_ct2_reg_init()
327 ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; in bfa_ioc_ct2_reg_init()
328 ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; in bfa_ioc_ct2_reg_init()
329 ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; in bfa_ioc_ct2_reg_init()
332 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; in bfa_ioc_ct2_reg_init()
333 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; in bfa_ioc_ct2_reg_init()
334 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; in bfa_ioc_ct2_reg_init()
335 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; in bfa_ioc_ct2_reg_init()
336 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; in bfa_ioc_ct2_reg_init()
338 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; in bfa_ioc_ct2_reg_init()
339 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; in bfa_ioc_ct2_reg_init()
340 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; in bfa_ioc_ct2_reg_init()
341 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; in bfa_ioc_ct2_reg_init()
342 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; in bfa_ioc_ct2_reg_init()
348 ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; in bfa_ioc_ct2_reg_init()
349 ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; in bfa_ioc_ct2_reg_init()
350 ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; in bfa_ioc_ct2_reg_init()
351 ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; in bfa_ioc_ct2_reg_init()
356 ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; in bfa_ioc_ct2_reg_init()
357 ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; in bfa_ioc_ct2_reg_init()
358 ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; in bfa_ioc_ct2_reg_init()
359 ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; in bfa_ioc_ct2_reg_init()
360 ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; in bfa_ioc_ct2_reg_init()
365 ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; in bfa_ioc_ct2_reg_init()
366 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; in bfa_ioc_ct2_reg_init()
371 ioc->ioc_regs.err_set = rb + ERR_SET_REG; in bfa_ioc_ct2_reg_init()
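Both reg_init variants above follow one pattern: at init time, every register the common IOC code will later touch is resolved into an absolute address by adding a fixed offset to the BAR0 mapping. Per-PCI-function mailbox registers come from an offset table indexed by the function number, per-port registers (heartbeat, fwstate, halt) from an if/else on the port id, and the rest are chip-wide constants; CT2 differs only in the offset values and the extra lpu_read_stat register. A stripped-down sketch of the table-driven part, with placeholder structure and offsets rather than the real ASIC map:

#include <stdint.h>

/* Hypothetical per-function offset table; values are placeholders. */
struct fn_offsets { uint32_t hfn_mbox, lpu_mbox, hfn_pgn; };

static const struct fn_offsets fnreg[] = {
	{ 0x000, 0x100, 0x200 },        /* PCI function 0 */
	{ 0x010, 0x110, 0x210 },        /* PCI function 1 */
	{ 0x020, 0x120, 0x220 },        /* PCI function 2 */
	{ 0x030, 0x130, 0x230 },        /* PCI function 3 */
};

struct regs_sketch { void *hfn_mbox, *lpu_mbox, *host_page_num_fn; };

static void reg_init_sketch(struct regs_sketch *r, char *bar0, int pcifn)
{
	r->hfn_mbox         = bar0 + fnreg[pcifn].hfn_mbox;
	r->lpu_mbox         = bar0 + fnreg[pcifn].lpu_mbox;
	r->host_page_num_fn = bar0 + fnreg[pcifn].hfn_pgn;
}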
378 bfa_ioc_ct_map_port(struct bfa_ioc *ioc) in bfa_ioc_ct_map_port() argument
380 void __iomem *rb = ioc->pcidev.pci_bar_kva; in bfa_ioc_ct_map_port()
387 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); in bfa_ioc_ct_map_port()
388 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; in bfa_ioc_ct_map_port()
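bfa_ioc_ct_map_port() derives the port id from the function-personality register: shift the register right by this PCI function's field offset, then mask and shift to isolate the port-map bits (the CT2 variant reads a dedicated port-map register instead). A self-contained model of that decode, with an assumed slice width and field position rather than the ASIC's real layout:

#include <stdint.h>

#define FN_SLICE_WIDTH	8	/* assumed bits per PCI function          */
#define PORT_MAP_MASK	0x30	/* assumed position of the port-map field */
#define PORT_MAP_SHIFT	4

static uint32_t port_id_from_fnc_pers(uint32_t fnc_pers, int pcifn)
{
	uint32_t slice = fnc_pers >> (pcifn * FN_SLICE_WIDTH);

	return (slice & PORT_MAP_MASK) >> PORT_MAP_SHIFT;
}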
393 bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) in bfa_ioc_ct2_map_port() argument
395 void __iomem *rb = ioc->pcidev.pci_bar_kva; in bfa_ioc_ct2_map_port()
399 ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); in bfa_ioc_ct2_map_port()
404 bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) in bfa_ioc_ct_isr_mode_set() argument
406 void __iomem *rb = ioc->pcidev.pci_bar_kva; in bfa_ioc_ct_isr_mode_set()
411 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & in bfa_ioc_ct_isr_mode_set()
425 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); in bfa_ioc_ct_isr_mode_set()
426 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); in bfa_ioc_ct_isr_mode_set()
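bfa_ioc_ct_isr_mode_set() is a read-modify-write of one function's slice of the shared FNC_PERS_REG: read it, return early if the interrupt-mode field already matches the requested MSI-X/INTx setting, otherwise clear that function's field and install the new value. A generic model of the field update, with the slice width and mask as assumptions:

#include <stdint.h>

#define FN_SLICE_WIDTH	8	/* assumed bits per PCI function */

/* Replace one function's field inside a shared 32-bit register. */
static uint32_t rmw_fn_field(uint32_t reg, int pcifn,
			     uint32_t field_mask, uint32_t new_val)
{
	unsigned int shift = pcifn * FN_SLICE_WIDTH;

	reg &= ~(field_mask << shift);           /* clear the old field     */
	reg |= (new_val & field_mask) << shift;  /* install the new setting */
	return reg;
}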
432 bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) in bfa_ioc_ct2_lpu_read_stat() argument
436 r32 = readl(ioc->ioc_regs.lpu_read_stat); in bfa_ioc_ct2_lpu_read_stat()
438 writel(1, ioc->ioc_regs.lpu_read_stat); in bfa_ioc_ct2_lpu_read_stat()
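The CT2-only lpu_read_stat hook polls a status register and, if it reads non-zero, acknowledges the event by writing 1 back, which in hardware is a write-one-to-clear operation. A toy version of the access sequence; a plain volatile variable only mimics the readl()/writel() ordering, not the hardware's clearing behaviour:

#include <stdbool.h>
#include <stdint.h>

/* Nonzero read means the LPU posted an event; writing 1 back
 * acknowledges it (write-one-to-clear in the real register). */
static bool check_and_ack(volatile uint32_t *stat_reg)
{
	if (*stat_reg) {
		*stat_reg = 1;          /* W1C acknowledge */
		return true;
	}
	return false;
}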
454 bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc) in bfa_nw_ioc_ct2_poweron() argument
456 void __iomem *rb = ioc->pcidev.pci_bar_kva; in bfa_nw_ioc_ct2_poweron()
467 HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), in bfa_nw_ioc_ct2_poweron()
469 writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), in bfa_nw_ioc_ct2_poweron()
475 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) in bfa_ioc_ct_ownership_reset() argument
477 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_ownership_reset()
478 writel(0, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_ownership_reset()
479 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_ownership_reset()
486 readl(ioc->ioc_regs.ioc_sem_reg); in bfa_ioc_ct_ownership_reset()
487 bfa_nw_ioc_hw_sem_release(ioc); in bfa_ioc_ct_ownership_reset()
492 bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_start() argument
494 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_start()
504 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { in bfa_ioc_ct_sync_start()
505 writel(0, ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_start()
506 writel(1, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_sync_start()
507 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_sync_start()
508 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_ct_sync_start()
512 return bfa_ioc_ct_sync_complete(ioc); in bfa_ioc_ct_sync_start()
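The sync_* hooks coordinate failover recovery between PCI functions through the single ioc_fail_sync scratch register. The fragments suggest it is split into two per-function bitmasks: the high half records which functions have requested (joined) a sync, the low half which have acknowledged it. sync_start runs at driver load and treats a leftover "required" bit for this function as an unclean shutdown, in which case it resets the sync word, the use count, and both fwstate registers; otherwise it defers to sync_complete. A sketch of the assumed bit layout and the helpers used in the model further below (the 16/16 split and the names are assumptions):

#include <stdint.h>

/* Assumed layout of ioc_fail_sync: one bit per PCI function,
 * "sync required" bits in the high 16, "sync acked" bits in the low 16. */
#define SYNC_REQD_SHIFT		16
#define sync_pos(pcifn)		(1u << (pcifn))
#define sync_reqd_pos(pcifn)	(sync_pos(pcifn) << SYNC_REQD_SHIFT)
#define get_sync_reqd(r32)	((r32) >> SYNC_REQD_SHIFT)
#define get_sync_ackd(r32)	((r32) & 0x0000ffffu)

/* The join/ack/leave hooks that follow then reduce to bit updates:
 * join:  fail_sync |=  sync_reqd_pos(fn)                   (request a sync)
 * ack:   fail_sync |=  sync_pos(fn)                        (acknowledge it)
 * leave: fail_sync &= ~(sync_reqd_pos(fn) | sync_pos(fn))  (drop both bits) */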
516 bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_join() argument
518 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_join()
519 u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); in bfa_ioc_ct_sync_join()
521 writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_join()
525 bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_leave() argument
527 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_leave()
528 u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | in bfa_ioc_ct_sync_leave()
529 bfa_ioc_ct_sync_pos(ioc); in bfa_ioc_ct_sync_leave()
531 writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_leave()
535 bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_ack() argument
537 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_ack()
539 writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_ack()
543 bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_complete() argument
545 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_complete()
560 if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && in bfa_ioc_ct_sync_complete()
561 !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) in bfa_ioc_ct_sync_complete()
562 sync_ackd |= bfa_ioc_ct_sync_pos(ioc); in bfa_ioc_ct_sync_complete()
566 ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_complete()
567 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_sync_complete()
568 writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_ct_sync_complete()
578 writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_complete()
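bfa_ioc_ct_sync_complete() then decides whether this function may proceed with recovery: if no acks are recorded, go ahead; otherwise count this function's own ack, and only once every function that requested a sync has acked does it clear the ack half and force both fwstate registers to the failed state. A toy model of that decision, built on the helpers sketched above:

#include <stdbool.h>
#include <stdint.h>

/* Toy model of the sync-complete decision (same assumed layout as above).
 * Returns true when this function may proceed with recovery. */
static bool sync_complete_sketch(uint32_t *fail_sync, int pcifn)
{
	uint32_t r32       = *fail_sync;
	uint32_t sync_reqd = get_sync_reqd(r32);
	uint32_t sync_ackd = get_sync_ackd(r32);

	if (sync_ackd == 0)
		return true;            /* nobody is mid-recovery */

	/* Count our own ack if we requested a sync but have not acked yet. */
	if ((sync_reqd & sync_pos(pcifn)) && !(sync_ackd & sync_pos(pcifn)))
		sync_ackd |= sync_pos(pcifn);

	if (sync_reqd == sync_ackd) {
		/* Everyone has acked: clear the ack half; the real code
		 * also forces both fwstate registers to BFI_IOC_FAIL here. */
		*fail_sync = r32 & 0xffff0000u;
		return true;
	}

	/* Record our ack and keep waiting for the others. */
	*fail_sync = r32 | sync_ackd;
	return false;
}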
584 bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc, in bfa_ioc_ct_set_cur_ioc_fwstate() argument
587 writel(fwstate, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_set_cur_ioc_fwstate()
591 bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc) in bfa_ioc_ct_get_cur_ioc_fwstate() argument
593 return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_get_cur_ioc_fwstate()
597 bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc, in bfa_ioc_ct_set_alt_ioc_fwstate() argument
600 writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_ct_set_alt_ioc_fwstate()
604 bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc) in bfa_ioc_ct_get_alt_ioc_fwstate() argument
606 return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_ct_get_alt_ioc_fwstate()