Lines matching refs:ioc (from bfa_ioc_ct.c in the Brocade BNA Ethernet driver)

37 static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
38 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
39 static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
40 static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
41 static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
42 static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
43 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
44 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
45 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
46 static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
47 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
48 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
49 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
50 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
51-52 static void bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
53 static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
54-55 static void bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
56 static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
61 static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
106 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) in bfa_nw_ioc_set_ct_hwif() argument
108 ioc->ioc_hwif = &nw_hwif_ct; in bfa_nw_ioc_set_ct_hwif()
112 bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) in bfa_nw_ioc_set_ct2_hwif() argument
114 ioc->ioc_hwif = &nw_hwif_ct2; in bfa_nw_ioc_set_ct2_hwif()
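
The two setters above (lines 106-114) are the only place this file is entered from outside; every other routine in the listing is reached through the per-ASIC hook table those setters install. Below is a minimal standalone sketch of that pattern; struct ioc, struct ioc_hwif and every field name in it are illustrative stand-ins, not the driver's actual bfa_ioc/bfa_ioc_hwif definitions from bfa_ioc.h.

/* Sketch of the hook-table pattern behind bfa_nw_ioc_set_ct_hwif() and
 * bfa_nw_ioc_set_ct2_hwif(); all names below are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct ioc;                                      /* stand-in for struct bfa_ioc */

struct ioc_hwif {                                /* stand-in for struct bfa_ioc_hwif */
	bool (*ioc_firmware_lock)(struct ioc *ioc);
	void (*ioc_reg_init)(struct ioc *ioc);
	/* ... the remaining hooks: map_port, isr_mode_set, notify_fail,
	 *     ownership_reset, the sync_* group and the fwstate accessors */
};

struct ioc {
	const struct ioc_hwif *ioc_hwif;             /* selected once at attach time */
};

static bool ct_firmware_lock(struct ioc *ioc) { (void)ioc; return true; }
static void ct_reg_init(struct ioc *ioc)      { (void)ioc; }

static const struct ioc_hwif hwif_ct = {         /* plays the role of nw_hwif_ct */
	.ioc_firmware_lock = ct_firmware_lock,
	.ioc_reg_init      = ct_reg_init,
};

static void set_ct_hwif(struct ioc *ioc)         /* analogue of bfa_nw_ioc_set_ct_hwif() */
{
	ioc->ioc_hwif = &hwif_ct;
}

int main(void)
{
	struct ioc ioc;

	set_ct_hwif(&ioc);
	ioc.ioc_hwif->ioc_reg_init(&ioc);            /* callers never name CT vs CT2 again */
	printf("firmware lock ok: %d\n", ioc.ioc_hwif->ioc_firmware_lock(&ioc));
	return 0;
}

In the driver, the attach path presumably picks bfa_nw_ioc_set_ct_hwif() or bfa_nw_ioc_set_ct2_hwif() based on the ASIC generation; the sketch reduces that choice to a single set_ct_hwif() call.
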
119 bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) in bfa_ioc_ct_firmware_lock() argument
128 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < in bfa_ioc_ct_firmware_lock()
132 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_lock()
133 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_lock()
139 writel(1, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_lock()
140 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_lock()
141 writel(0, ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_firmware_lock()
145 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_firmware_lock()
155 bfa_nw_ioc_fwver_get(ioc, &fwhdr); in bfa_ioc_ct_firmware_lock()
156 if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) { in bfa_ioc_ct_firmware_lock()
157 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_lock()
165 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_lock()
166 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_lock()
171 bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) in bfa_ioc_ct_firmware_unlock() argument
178 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < in bfa_ioc_ct_firmware_unlock()
185 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_unlock()
186 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_unlock()
190 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_firmware_unlock()
192 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_firmware_unlock()
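
bfa_ioc_ct_firmware_lock()/_unlock() (lines 119-192) keep a firmware use count shared by all PCI functions, touched only while holding the HOST_SEM1 hardware semaphore: the first function to find a zero count claims the firmware and writes 1, later functions only increment, and unlock decrements under the same semaphore. The standalone model below keeps just that counting protocol; plain variables stand in for ioc_usage_sem_reg and ioc_usage_reg, and the real lock additionally checks ioc_fwstate and compares the flash firmware header (bfa_nw_ioc_fwver_cmp()) before deciding.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the HOST_SEM1 semaphore register and BFA_FW_USE_COUNT. */
static uint32_t usage_sem;     /* 0 = free, 1 = held */
static uint32_t usage_cnt;     /* number of PCI functions using the firmware */

static void sem_get(void)      { while (usage_sem) { /* spin, as bfa_nw_ioc_sem_get() does */ } usage_sem = 1; }
static void sem_release(void)  { usage_sem = 0; }

/* Rough analogue of bfa_ioc_ct_firmware_lock(): returns true if this
 * function is the one that must (re)load the firmware. */
static bool firmware_lock(void)
{
	bool first;

	sem_get();
	first = (usage_cnt == 0);
	usage_cnt = first ? 1 : usage_cnt + 1;   /* claim or just join */
	sem_release();
	return first;
}

/* Rough analogue of bfa_ioc_ct_firmware_unlock(): drop our reference. */
static void firmware_unlock(void)
{
	sem_get();
	if (usage_cnt > 0)
		usage_cnt--;
	sem_release();
}

int main(void)
{
	printf("fn0 loads fw: %d\n", firmware_lock());  /* 1: first user */
	printf("fn1 loads fw: %d\n", firmware_lock());  /* 0: firmware already up */
	firmware_unlock();
	firmware_unlock();
	printf("use count now %u\n", usage_cnt);        /* 0 */
	return 0;
}

The point of the count is that only one function actually downloads the firmware while the others simply attach to the running image.
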
197 bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) in bfa_ioc_ct_notify_fail() argument
199 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); in bfa_ioc_ct_notify_fail()
200 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); in bfa_ioc_ct_notify_fail()
202 readl(ioc->ioc_regs.ll_halt); in bfa_ioc_ct_notify_fail()
203 readl(ioc->ioc_regs.alt_ll_halt); in bfa_ioc_ct_notify_fail()
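
bfa_ioc_ct_notify_fail() (lines 197-203) halts the LL firmware on both ports and then reads the same registers back; the readl() after writel() flushes the posted PCI writes so the halt has actually reached the adapter before failure handling continues. A kernel-context sketch of that idiom follows (not a standalone program; it assumes the driver's __FW_INIT_HALT_P definition is available, with a placeholder value supplied only for the sketch).

#include <linux/io.h>

#ifndef __FW_INIT_HALT_P
#define __FW_INIT_HALT_P 0x01   /* placeholder for this sketch; real value is in bfi_reg.h */
#endif

/* Write a halt request to both ports, then read the registers back so the
 * posted MMIO writes are flushed before we return. */
static void halt_both_ports(void __iomem *ll_halt, void __iomem *alt_ll_halt)
{
	writel(__FW_INIT_HALT_P, ll_halt);      /* halt our port's LL firmware */
	writel(__FW_INIT_HALT_P, alt_ll_halt);  /* and the partner port's */

	/* Ensure the halt is posted to the hardware. */
	readl(ll_halt);
	readl(alt_ll_halt);
}
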
257 bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) in bfa_ioc_ct_reg_init() argument
260 int pcifn = bfa_ioc_pcifn(ioc); in bfa_ioc_ct_reg_init()
262 rb = bfa_ioc_bar0(ioc); in bfa_ioc_ct_reg_init()
264 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; in bfa_ioc_ct_reg_init()
265 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; in bfa_ioc_ct_reg_init()
266 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; in bfa_ioc_ct_reg_init()
268 if (ioc->port_id == 0) { in bfa_ioc_ct_reg_init()
269 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; in bfa_ioc_ct_reg_init()
270 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_ct_reg_init()
271 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_ct_reg_init()
272 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; in bfa_ioc_ct_reg_init()
273 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; in bfa_ioc_ct_reg_init()
274 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; in bfa_ioc_ct_reg_init()
275 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; in bfa_ioc_ct_reg_init()
277 ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; in bfa_ioc_ct_reg_init()
278 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_ct_reg_init()
279 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_ct_reg_init()
280 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; in bfa_ioc_ct_reg_init()
281 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; in bfa_ioc_ct_reg_init()
282 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; in bfa_ioc_ct_reg_init()
283 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; in bfa_ioc_ct_reg_init()
289 ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; in bfa_ioc_ct_reg_init()
290 ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; in bfa_ioc_ct_reg_init()
291 ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; in bfa_ioc_ct_reg_init()
292 ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; in bfa_ioc_ct_reg_init()
297 ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; in bfa_ioc_ct_reg_init()
298 ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; in bfa_ioc_ct_reg_init()
299 ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG; in bfa_ioc_ct_reg_init()
300 ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; in bfa_ioc_ct_reg_init()
301 ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC; in bfa_ioc_ct_reg_init()
306 ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; in bfa_ioc_ct_reg_init()
307 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; in bfa_ioc_ct_reg_init()
312 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); in bfa_ioc_ct_reg_init()
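
Everything bfa_ioc_ct_reg_init() does (lines 257-312) is add table-driven offsets to the BAR0 mapping: per-PCI-function mailbox offsets come from ct_fnreg[pcifn], the port-dependent command/halt registers from ct_p0reg/ct_p1reg, and the rest are fixed constants from bfi_reg.h. The standalone model below shows just that address-fixup pattern; the struct fnreg layout and the offsets in it are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Made-up offsets standing in for the ct_fnreg[] table in bfa_ioc_ct.c. */
struct fnreg { uint32_t hfn_mbox, lpu_mbox, hfn_pgn; };

static const struct fnreg ct_fnreg[4] = {
	{ 0x0100, 0x0120, 0x0140 },   /* PCI function 0 */
	{ 0x0200, 0x0220, 0x0240 },   /* PCI function 1 */
	{ 0x0300, 0x0320, 0x0340 },
	{ 0x0400, 0x0420, 0x0440 },
};

struct ioc_regs { uint8_t *hfn_mbox, *lpu_mbox, *host_page_num_fn; };

/* Analogue of the first half of bfa_ioc_ct_reg_init(): everything is
 * "BAR0 base + offset looked up by PCI function". */
static void reg_init(struct ioc_regs *regs, uint8_t *bar0, int pcifn)
{
	regs->hfn_mbox         = bar0 + ct_fnreg[pcifn].hfn_mbox;
	regs->lpu_mbox         = bar0 + ct_fnreg[pcifn].lpu_mbox;
	regs->host_page_num_fn = bar0 + ct_fnreg[pcifn].hfn_pgn;
}

int main(void)
{
	static uint8_t bar0[0x1000];   /* pretend BAR0 mapping */
	struct ioc_regs regs;

	reg_init(&regs, bar0, 1);
	printf("hfn_mbox at BAR0+0x%lx\n", (unsigned long)(regs.hfn_mbox - bar0));
	return 0;
}

The CT2 variant that follows is the same fixup, but indexed by port through a single ct2_reg[] table and using the CT2_* constants.
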
316 bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) in bfa_ioc_ct2_reg_init() argument
319 int port = bfa_ioc_portid(ioc); in bfa_ioc_ct2_reg_init()
321 rb = bfa_ioc_bar0(ioc); in bfa_ioc_ct2_reg_init()
323 ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; in bfa_ioc_ct2_reg_init()
324 ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; in bfa_ioc_ct2_reg_init()
325 ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; in bfa_ioc_ct2_reg_init()
326 ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; in bfa_ioc_ct2_reg_init()
327 ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; in bfa_ioc_ct2_reg_init()
328 ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; in bfa_ioc_ct2_reg_init()
331 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; in bfa_ioc_ct2_reg_init()
332 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; in bfa_ioc_ct2_reg_init()
333 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; in bfa_ioc_ct2_reg_init()
334 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; in bfa_ioc_ct2_reg_init()
335 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; in bfa_ioc_ct2_reg_init()
337 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; in bfa_ioc_ct2_reg_init()
338 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; in bfa_ioc_ct2_reg_init()
339 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; in bfa_ioc_ct2_reg_init()
340 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; in bfa_ioc_ct2_reg_init()
341 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; in bfa_ioc_ct2_reg_init()
347 ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; in bfa_ioc_ct2_reg_init()
348 ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; in bfa_ioc_ct2_reg_init()
349 ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; in bfa_ioc_ct2_reg_init()
350 ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; in bfa_ioc_ct2_reg_init()
355 ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; in bfa_ioc_ct2_reg_init()
356 ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; in bfa_ioc_ct2_reg_init()
357 ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; in bfa_ioc_ct2_reg_init()
358 ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; in bfa_ioc_ct2_reg_init()
359 ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; in bfa_ioc_ct2_reg_init()
364 ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; in bfa_ioc_ct2_reg_init()
365 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; in bfa_ioc_ct2_reg_init()
370 ioc->ioc_regs.err_set = rb + ERR_SET_REG; in bfa_ioc_ct2_reg_init()
377 bfa_ioc_ct_map_port(struct bfa_ioc *ioc) in bfa_ioc_ct_map_port() argument
379 void __iomem *rb = ioc->pcidev.pci_bar_kva; in bfa_ioc_ct_map_port()
386 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); in bfa_ioc_ct_map_port()
387 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; in bfa_ioc_ct_map_port()
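
bfa_ioc_ct_map_port() (lines 377-387) reads the function-personality register, shifts it down by FNC_PERS_FN_SHIFT() to reach this PCI function's slice, and masks out the port-map field to learn which port the function drives. The model below does the same extraction in user space; FN_SHIFT, PORT_MAP_MK and PORT_MAP_SH are invented stand-ins for the driver's FNC_PERS_FN_SHIFT(), __F0_PORT_MAP_MK and __F0_PORT_MAP_SH.

#include <stdint.h>
#include <stdio.h>

/* Invented layout: each PCI function owns an 8-bit slice of FNC_PERS,
 * and the low two bits of that slice carry the port map. */
#define FN_SHIFT(pcifn)   ((pcifn) * 8)
#define PORT_MAP_MK       0x03u
#define PORT_MAP_SH       0

static int map_port(uint32_t fnc_pers, int pcifn)
{
	uint32_t r32 = fnc_pers >> FN_SHIFT(pcifn);   /* this function's slice */

	return (r32 & PORT_MAP_MK) >> PORT_MAP_SH;    /* its port assignment */
}

int main(void)
{
	/* function 0 -> port 0, function 1 -> port 1 in this made-up value */
	uint32_t fnc_pers = 0x00000100u;

	printf("fn0 on port %d, fn1 on port %d\n",
	       map_port(fnc_pers, 0), map_port(fnc_pers, 1));
	return 0;
}
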
392 bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) in bfa_ioc_ct2_map_port() argument
394 void __iomem *rb = ioc->pcidev.pci_bar_kva; in bfa_ioc_ct2_map_port()
398 ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); in bfa_ioc_ct2_map_port()
403 bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) in bfa_ioc_ct_isr_mode_set() argument
405 void __iomem *rb = ioc->pcidev.pci_bar_kva; in bfa_ioc_ct_isr_mode_set()
410 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & in bfa_ioc_ct_isr_mode_set()
424 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); in bfa_ioc_ct_isr_mode_set()
425 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); in bfa_ioc_ct_isr_mode_set()
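
bfa_ioc_ct_isr_mode_set() (lines 403-425) is a read-modify-write of the same per-function slice: read FNC_PERS, look at the function's interrupt-mode bits, and rewrite only this function's bits when the requested MSI-X/INTx mode differs from what is already programmed. A compact model with invented bit positions and encodings:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FN_SHIFT(pcifn)     ((pcifn) * 8)
#define INTX_STATUS_BITS    0x03u        /* invented stand-in for __F0_INTX_STATUS */
#define MODE_MSIX           0x02u        /* invented encodings */
#define MODE_INTX           0x01u

static uint32_t isr_mode_set(uint32_t fnc_pers, int pcifn, bool msix)
{
	uint32_t cur  = (fnc_pers >> FN_SHIFT(pcifn)) & INTX_STATUS_BITS;
	uint32_t want = msix ? MODE_MSIX : MODE_INTX;

	if (cur == want)                         /* already in the right mode */
		return fnc_pers;

	fnc_pers &= ~(INTX_STATUS_BITS << FN_SHIFT(pcifn));  /* clear our bits */
	fnc_pers |= want << FN_SHIFT(pcifn);                  /* set new mode  */
	return fnc_pers;
}

int main(void)
{
	uint32_t r = 0x00000101u;                /* both functions in INTx */

	r = isr_mode_set(r, 1, true);            /* switch function 1 to MSI-X */
	printf("FNC_PERS now 0x%08x\n", r);      /* 0x00000201 */
	return 0;
}
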
431 bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) in bfa_ioc_ct2_lpu_read_stat() argument
435 r32 = readl(ioc->ioc_regs.lpu_read_stat); in bfa_ioc_ct2_lpu_read_stat()
437 writel(1, ioc->ioc_regs.lpu_read_stat); in bfa_ioc_ct2_lpu_read_stat()
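
bfa_ioc_ct2_lpu_read_stat() (lines 431-437) reads the LPU read-status register and, when it is set, writes 1 back to the same register to clear it (a write-one-to-clear status bit) and tells the caller a mailbox read is pending. A minimal model of that idiom:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the memory-mapped lpu_read_stat register; real hardware
 * clears the bit when 1 is written to it, which we mimic here. */
static uint32_t lpu_read_stat_reg = 1;

static bool lpu_read_stat(void)
{
	uint32_t r32 = lpu_read_stat_reg;        /* readl() in the driver */

	if (r32) {
		lpu_read_stat_reg = 0;           /* writel(1, ...) clears the W1C bit */
		return true;                     /* caller should drain the mailbox */
	}
	return false;
}

int main(void)
{
	int first  = lpu_read_stat();
	int second = lpu_read_stat();

	printf("first poll: %d, second poll: %d\n", first, second);   /* 1 then 0 */
	return 0;
}
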
453 bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc) in bfa_nw_ioc_ct2_poweron() argument
455 void __iomem *rb = ioc->pcidev.pci_bar_kva; in bfa_nw_ioc_ct2_poweron()
466 HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), in bfa_nw_ioc_ct2_poweron()
468 writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), in bfa_nw_ioc_ct2_poweron()
474 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) in bfa_ioc_ct_ownership_reset() argument
476 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_ownership_reset()
477 writel(0, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_ownership_reset()
478 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); in bfa_ioc_ct_ownership_reset()
485 readl(ioc->ioc_regs.ioc_sem_reg); in bfa_ioc_ct_ownership_reset()
486 bfa_nw_ioc_hw_sem_release(ioc); in bfa_ioc_ct_ownership_reset()
491 bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_start() argument
493 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_start()
503 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { in bfa_ioc_ct_sync_start()
504 writel(0, ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_start()
505 writel(1, ioc->ioc_regs.ioc_usage_reg); in bfa_ioc_ct_sync_start()
506 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_sync_start()
507 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_ct_sync_start()
511 return bfa_ioc_ct_sync_complete(ioc); in bfa_ioc_ct_sync_start()
515 bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_join() argument
517 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_join()
518 u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); in bfa_ioc_ct_sync_join()
520 writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_join()
524 bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_leave() argument
526 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_leave()
527 u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | in bfa_ioc_ct_sync_leave()
528 bfa_ioc_ct_sync_pos(ioc); in bfa_ioc_ct_sync_leave()
530 writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_leave()
534 bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_ack() argument
536 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_ack()
538 writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_ack()
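
The sync_start/join/leave/ack routines (lines 491-538) all manipulate one scratch register, ioc_fail_sync, which as far as I can reconstruct packs two per-PCI-function bitmaps: one half records which functions have joined the failure sync, the other which have acknowledged it. sync_join sets our bit in the joined half, sync_ack in the acked half, sync_leave clears both, and sync_start notices that our joined bit is already set from a previous, interrupted incarnation and resets the whole word. The standalone model below assumes a 16/16 split; the real shift and masks live in bfa_ioc_ct.c.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout of ioc_fail_sync: low half = "sync acked" bitmap,
 * high half = "sync joined/required" bitmap, one bit per PCI function. */
#define SYNC_REQD_SHIFT        16
#define sync_pos(pcifn)        (1u << (pcifn))
#define sync_reqd_pos(pcifn)   (sync_pos(pcifn) << SYNC_REQD_SHIFT)
#define get_sync_reqd(val)     ((val) >> SYNC_REQD_SHIFT)
#define get_sync_ackd(val)     ((val) & 0xffffu)

static uint32_t fail_sync;     /* stand-in for the ioc_fail_sync register */

/* Analogue of bfa_ioc_ct_sync_join(): announce we take part in the sync. */
static void sync_join(int pcifn)
{
	fail_sync |= sync_reqd_pos(pcifn);
}

/* Analogue of bfa_ioc_ct_sync_ack(): we are done cleaning up. */
static void sync_ack(int pcifn)
{
	fail_sync |= sync_pos(pcifn);
}

/* Analogue of the check at the top of bfa_ioc_ct_sync_start(): if our
 * "joined" bit is already set, a previous incarnation died mid-sync,
 * so the whole word is reset. */
static bool sync_start(int pcifn)
{
	if (get_sync_reqd(fail_sync) & sync_pos(pcifn)) {
		fail_sync = 0;
		return true;                     /* treat it as sync complete */
	}
	return false;                            /* fall through to sync_complete() */
}

int main(void)
{
	unsigned reqd, ackd;

	sync_join(2);
	sync_ack(2);
	reqd = get_sync_reqd(fail_sync);
	ackd = get_sync_ackd(fail_sync);
	printf("reqd=0x%x ackd=0x%x restart=%d\n", reqd, ackd, sync_start(2));
	return 0;
}

In the driver the restart branch also re-arms the use count and writes BFI_IOC_UNINIT into both fwstate registers (lines 504-507) before returning true.
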
542 bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) in bfa_ioc_ct_sync_complete() argument
544 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_complete()
559 if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && in bfa_ioc_ct_sync_complete()
560 !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) in bfa_ioc_ct_sync_complete()
561 sync_ackd |= bfa_ioc_ct_sync_pos(ioc); in bfa_ioc_ct_sync_complete()
565 ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_complete()
566 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_sync_complete()
567 writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_ct_sync_complete()
577 writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); in bfa_ioc_ct_sync_complete()
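
bfa_ioc_ct_sync_complete() (lines 542-577) compares the two halves: after crediting the ack this function is about to record, recovery is complete when every function that joined has acked, at which point the acked half is cleared; otherwise the function stores its ack and reports that the sync is still in progress. The model below uses the same assumed 16/16 layout as the previous sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SYNC_REQD_SHIFT      16
#define sync_pos(pcifn)      (1u << (pcifn))
#define get_sync_reqd(val)   ((val) >> SYNC_REQD_SHIFT)
#define get_sync_ackd(val)   ((val) & 0xffffu)
#define clear_sync_ackd(val) ((val) & 0xffff0000u)

/* Analogue of bfa_ioc_ct_sync_complete(); *fail_sync stands in for the
 * ioc_fail_sync register and is updated the way the driver writes it. */
static bool sync_complete(uint32_t *fail_sync, int pcifn)
{
	uint32_t r32 = *fail_sync;
	uint32_t sync_reqd = get_sync_reqd(r32);
	uint32_t sync_ackd = get_sync_ackd(r32);

	if (sync_ackd == 0)
		return true;                     /* nobody is syncing: nothing to wait for */

	/* Credit our own ack before comparing (the driver does the same). */
	if ((sync_reqd & sync_pos(pcifn)) && !(sync_ackd & sync_pos(pcifn)))
		sync_ackd |= sync_pos(pcifn);

	if (sync_reqd == sync_ackd) {
		*fail_sync = clear_sync_ackd(r32);   /* all joiners acked: done */
		return true;
	}

	*fail_sync = r32 | sync_ackd;            /* record our ack, keep waiting */
	return false;
}

int main(void)
{
	/* functions 0 and 1 joined; only function 0 has acked so far */
	uint32_t fail_sync = (0x3u << SYNC_REQD_SHIFT) | 0x1u;

	printf("fn1 sees complete: %d\n", sync_complete(&fail_sync, 1));  /* 1 */
	printf("fail_sync now 0x%08x\n", fail_sync);                      /* 0x00030000 */
	return 0;
}

In the driver the complete branch also writes BFI_IOC_FAIL into both fwstate registers (lines 566-567) before returning true.
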
583 bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc, in bfa_ioc_ct_set_cur_ioc_fwstate() argument
586 writel(fwstate, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_set_cur_ioc_fwstate()
590 bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc) in bfa_ioc_ct_get_cur_ioc_fwstate() argument
592 return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); in bfa_ioc_ct_get_cur_ioc_fwstate()
596 bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc, in bfa_ioc_ct_set_alt_ioc_fwstate() argument
599 writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_ct_set_alt_ioc_fwstate()
603 bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc) in bfa_ioc_ct_get_alt_ioc_fwstate() argument
605 return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_ct_get_alt_ioc_fwstate()