root/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c

DEFINITIONS

This source file includes the following definitions:
  1. bfa_nw_ioc_set_ct_hwif
  2. bfa_nw_ioc_set_ct2_hwif
  3. bfa_ioc_ct_firmware_lock
  4. bfa_ioc_ct_firmware_unlock
  5. bfa_ioc_ct_notify_fail
  6. bfa_ioc_ct_reg_init
  7. bfa_ioc_ct2_reg_init
  8. bfa_ioc_ct_map_port
  9. bfa_ioc_ct2_map_port
  10. bfa_ioc_ct_isr_mode_set
  11. bfa_ioc_ct2_lpu_read_stat
  12. bfa_nw_ioc_ct2_poweron
  13. bfa_ioc_ct_ownership_reset
  14. bfa_ioc_ct_sync_start
  15. bfa_ioc_ct_sync_join
  16. bfa_ioc_ct_sync_leave
  17. bfa_ioc_ct_sync_ack
  18. bfa_ioc_ct_sync_complete
  19. bfa_ioc_ct_set_cur_ioc_fwstate
  20. bfa_ioc_ct_get_cur_ioc_fwstate
  21. bfa_ioc_ct_set_alt_ioc_fwstate
  22. bfa_ioc_ct_get_alt_ioc_fwstate
  23. bfa_ioc_ct_pll_init
  24. bfa_ioc_ct2_sclk_init
  25. bfa_ioc_ct2_lclk_init
  26. bfa_ioc_ct2_mem_init
  27. bfa_ioc_ct2_mac_reset
  28. bfa_ioc_ct2_nfc_halted
  29. bfa_ioc_ct2_nfc_resume
  30. bfa_ioc_ct2_pll_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

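/*
 * Layout of the 32-bit ioc_fail_sync scratch register, as used by the
 * macros below: bits [15:0] are the per-PCI-function "sync acked" bits
 * and bits [31:16] are the corresponding "sync required" bits.
 */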
#define bfa_ioc_ct_sync_pos(__ioc)      BIT(bfa_ioc_pcifn(__ioc))
#define BFA_IOC_SYNC_REQD_SH            16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
                (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
                        struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
                        struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
                                enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
                                enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

static const struct bfa_ioc_hwif nw_hwif_ct = {
        .ioc_pll_init        = bfa_ioc_ct_pll_init,
        .ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init        = bfa_ioc_ct_reg_init,
        .ioc_map_port        = bfa_ioc_ct_map_port,
        .ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
        .ioc_notify_fail     = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start      = bfa_ioc_ct_sync_start,
        .ioc_sync_join       = bfa_ioc_ct_sync_join,
        .ioc_sync_leave      = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack        = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete   = bfa_ioc_ct_sync_complete,
        .ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
        .ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
        .ioc_set_alt_fwstate     = bfa_ioc_ct_set_alt_ioc_fwstate,
        .ioc_get_alt_fwstate     = bfa_ioc_ct_get_alt_ioc_fwstate,
};

static const struct bfa_ioc_hwif nw_hwif_ct2 = {
        .ioc_pll_init        = bfa_ioc_ct2_pll_init,
        .ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init        = bfa_ioc_ct2_reg_init,
        .ioc_map_port        = bfa_ioc_ct2_map_port,
        .ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
        .ioc_isr_mode_set    = NULL,
        .ioc_notify_fail     = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start      = bfa_ioc_ct_sync_start,
        .ioc_sync_join       = bfa_ioc_ct_sync_join,
        .ioc_sync_leave      = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack        = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete   = bfa_ioc_ct_sync_complete,
        .ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
        .ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
        .ioc_set_alt_fwstate     = bfa_ioc_ct_set_alt_ioc_fwstate,
        .ioc_get_alt_fwstate     = bfa_ioc_ct_get_alt_ioc_fwstate,
};

/* Called from bfa_ioc_attach() to map ASIC-specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct2;
}

/*
 * Return true if the firmware of the current driver matches the
 * running firmware.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr fwhdr;

        /*
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return true;

        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /*
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                return true;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        /*
         * The use count cannot be non-zero while the chip is in the
         * uninitialized state.
         */
        BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

        /*
         * Check if another driver with a different firmware is active
         */
        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                return false;
        }

        /*
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        return true;
}

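/* Drop the usage count taken in bfa_ioc_ct_firmware_lock(). */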
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
        u32 usecnt;

        /*
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;

        /*
         * decrement usage count
         */
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        BUG_ON(usecnt == 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
        /* Wait for halt to take effect */
        readl(ioc->ioc_regs.ll_halt);
        readl(ioc->ioc_regs.alt_ll_halt);
}

/* Host to LPU mailbox message addresses */
static const struct {
        u32     hfn_mbox;
        u32     lpu_mbox;
        u32     hfn_pgn;
} ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
        u32     hfn;
        u32     lpu;
} ct_p0reg[] = {
        { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
        u32     hfn;
        u32     lpu;
} ct_p1reg[] = {
        { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static const struct {
        u32     hfn_mbox;
        u32     lpu_mbox;
        u32     hfn_pgn;
        u32     hfn;
        u32     lpu;
        u32     lpu_read;
} ct2_reg[] = {
        { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU0_READ_STAT},
        { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU1_READ_STAT},
};

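/* Set up the per-PCI-function and per-port register addresses for CT. */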
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int             pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg: for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

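/* Set up the CT2 register addresses, indexed by port rather than PCI fn. */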
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int             port = bfa_ioc_portid(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
        ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
        ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
        ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

        if (port == 0) {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg: for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/* Initialize IOC to port mapping. */

#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        /*
         * For catapult, base port id on personality register and IOC type
         */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

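/* On CT2, the port id comes from the HOSTFN personality register. */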
static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
        ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32, mode;

        r32 = readl(rb + FNC_PERS_REG);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /*
         * If already in desired mode, do not change anything
         */
        if ((!msix && mode) || (msix && !mode))
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

        writel(r32, rb + FNC_PERS_REG);
}

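/* Check the LPU read status; if set, acknowledge it and return true. */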
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
        u32 r32;

        r32 = readl(ioc->ioc_regs.lpu_read_stat);
        if (r32) {
                writel(1, ioc->ioc_regs.lpu_read_stat);
                return true;
        }

        return false;
}

/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT             64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR   0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT       0x3013c
#define __MSIX_VT_NUMVT__MK             0x003ff800
#define __MSIX_VT_NUMVT__SH             11
#define __MSIX_VT_NUMVT_(_v)            ((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_                 0x000007ff
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        if (r32 & __MSIX_VT_NUMVT__MK) {
                writel(r32 & __MSIX_VT_OFST_,
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
                return;
        }

        writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
                        HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                        rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/* Clean up hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(0, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_nw_ioc_hw_sem_release(ioc);
}

/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

        /*
         * Driver load time.  If the sync required bit for this PCI fn
         * is set, it is due to an unclean exit by the driver for this
         * PCI fn in the previous incarnation. Whoever comes here first
         * should clean it up, no matter which PCI fn.
         */
        if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        return bfa_ioc_ct_sync_complete(ioc);
}

/* Mark this function's sync-required bit in the fail sync register. */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

        writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

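/* Clear this function's sync-required and sync-acked bits. */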
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
                                        bfa_ioc_ct_sync_pos(ioc);

        writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

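/* Set this function's sync-acked bit in the fail sync register. */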
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

        writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
}

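/* Return true once every function that requested a sync has acked it. */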
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
        u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
        u32 tmp_ackd;

        if (sync_ackd == 0)
                return true;

        /*
         * The check below is to see whether any other PCI fn
         * has reinitialized the ASIC (reset sync_ackd bits)
         * and failed again while this IOC was waiting for hw
         * semaphore (in bfa_iocpf_sm_semwait()).
         */
        tmp_ackd = sync_ackd;
        if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
                        !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
                sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

        if (sync_reqd == sync_ackd) {
                writel(bfa_ioc_ct_clear_sync_ackd(r32),
                                ioc->ioc_regs.ioc_fail_sync);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        /*
         * If another PCI fn reinitialized and failed again while
         * this IOC was waiting for the hw sem, the sync_ackd bit for
         * this IOC needs to be set again to allow reinitialization.
         */
        if (tmp_ackd != sync_ackd)
                writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

        return false;
}

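/* Raw accessors for the current and alternate IOC firmware state. */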
static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
                               enum bfi_ioc_state fwstate)
{
        writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
        return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
                               enum bfi_ioc_state fwstate)
{
        writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
        return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}

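/* Program the CT s_clk/l_clk PLLs and run the memory BIST. */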
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        u32     pll_sclk, pll_fclk, r32;
        bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

        pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
                __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
                __APP_PLL_SCLK_JITLMT0_1(3U) |
                __APP_PLL_SCLK_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
                __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
                __APP_PLL_LCLK_JITLMT0_1(3U) |
                __APP_PLL_LCLK_CNTLMT0_1(1U);

        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL |
                                __APP_EMS_REFCKBUFEN2 |
                                __APP_EMS_CHANNEL_SEL,
                                (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1,
                                (rb + ETH_MAC_SER_REG));
        }
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(pll_sclk |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_LCLK_CTL_REG);
        writel(pll_sclk |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
                rb + APP_PLL_LCLK_CTL_REG);
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(pll_sclk |
                __APP_PLL_SCLK_ENABLE,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_ENABLE,
                rb + APP_PLL_LCLK_CTL_REG);

        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }
        r32 = readl(rb + PSS_CTL_REG);
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);
        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl(rb + MBIST_STAT_REG);
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}

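/* Bring the s_clk PLL out of reset and program it for the max (FC16) clock. */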
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put s_clk PLL and PLL FSM in reset
         */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
        r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * Ignore mode and program for the max clock (which is FC16)
         * Firmware/NFC will do the PLL init appropriately
         */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * While doing PLL init, don't clock gate the ethernet subsystem
         */
        r32 = readl(rb + CT2_CHIP_MISC_PRG);
        writel(r32 | __ETH_CLK_ENABLE_PORT0,
               rb + CT2_CHIP_MISC_PRG);

        r32 = readl(rb + CT2_PCIE_MISC_REG);
        writel(r32 | __ETH_CLK_ENABLE_PORT1,
               rb + CT2_PCIE_MISC_REG);

        /*
         * set sclk value
         */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
                __APP_PLL_SCLK_CLK_DIV2);
        writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);

        /*
         * poll for s_clk lock or delay 1ms
         */
        udelay(1000);

        /*
         * Don't clock gate the ethernet subsystem here; firmware/NFC
         * will do this appropriately
         */
}

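/* Program the l_clk PLL; the FC16 settings work for the other modes too. */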
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put l_clk PLL and PLL FSM in reset
         */
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
        r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET);
        writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

        /*
         * set LPU speed (set for FC16 which will work for other modes)
         */
        r32 = readl(rb + CT2_CHIP_MISC_PRG);
        writel(r32, (rb + CT2_CHIP_MISC_PRG));

        /*
         * set LPU half speed (set for FC16 which will work for other modes)
         */
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

        /*
         * set lclk for mode (set for FC16)
         */
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
        r32 |= 0x20c1731b;
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * poll for l_clk lock or delay 1ms
         */
        udelay(1000);
}

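/* Release local memory from reset and run the eDRAM built-in self test. */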
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
        u32 r32;

        r32 = readl(rb + PSS_CTL_REG);
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, rb + PSS_CTL_REG);
        udelay(1000);

        writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
        udelay(1000);
        writel(0, rb + CT2_MBIST_CTL_REG);
}

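/*
 * Reinitialize the clocks, release the PLL soft resets, and hold the
 * port 0/1 MACs and their AHB interfaces in reset.
 */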
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
        volatile u32 r32;

        bfa_ioc_ct2_sclk_init(rb);
        bfa_ioc_ct2_lclk_init(rb);

        /*
         * release soft reset on s_clk
         */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
               rb + CT2_APP_PLL_SCLK_CTL_REG);

        /*
         * release soft reset on l_clk
         */
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
               rb + CT2_APP_PLL_LCLK_CTL_REG);

        /* put port0, port1 MAC & AHB in reset */
        writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
               rb + CT2_CSI_MAC_CONTROL_REG(0));
        writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
               rb + CT2_CSI_MAC_CONTROL_REG(1));
}

#define CT2_NFC_MAX_DELAY       1000
#define CT2_NFC_VER_VALID       0x143
#define BFA_IOC_PLL_POLL        1000000

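/* Return true if the NFC controller is halted. */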
static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
        volatile u32 r32;

        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
        if (r32 & __NFC_CONTROLLER_HALTED)
                return true;

        return false;
}

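/* Clear the NFC halt and wait for the controller to come out of halt. */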
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
        volatile u32 r32;
        int i;

        writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
        for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                if (!(r32 & __NFC_CONTROLLER_HALTED))
                        return;
                udelay(1000);
        }
        BUG();
}

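/*
 * CT2 PLL init: if current NFC firmware is running, let it restart the
 * s_clk/l_clk PLLs; otherwise halt the NFC controller and program the
 * PLLs directly from the host.
 */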
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        volatile u32 wgn, r32;
        u32 nfc_ver, i;

        wgn = readl(rb + CT2_WGN_STATUS);

        nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

        if (wgn == (__A2T_AHB_LOAD | __WGN_READY) &&
            nfc_ver >= CT2_NFC_VER_VALID) {
                if (bfa_ioc_ct2_nfc_halted(rb))
                        bfa_ioc_ct2_nfc_resume(rb);
                writel(__RESET_AND_START_SCLK_LCLK_PLLS,
                                rb + CT2_CSI_FW_CTL_SET_REG);

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
                                break;
                }
                BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
                                break;
                }
                BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
                udelay(1000);

                r32 = readl(rb + CT2_CSI_FW_CTL_REG);
                BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
        } else {
                writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
                for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                        if (r32 & __NFC_CONTROLLER_HALTED)
                                break;
                        udelay(1000);
                }

                bfa_ioc_ct2_mac_reset(rb);
                bfa_ioc_ct2_sclk_init(rb);
                bfa_ioc_ct2_lclk_init(rb);

                /* release soft reset on s_clk & l_clk */
                r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
                writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
                                rb + CT2_APP_PLL_SCLK_CTL_REG);
                r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
                                rb + CT2_APP_PLL_LCLK_CTL_REG);
        }

        /* Announce flash device presence, if flash was corrupted. */
        if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
                r32 = readl(rb + PSS_GPIO_OUT_REG);
                writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
                r32 = readl(rb + PSS_GPIO_OE_REG);
                writel(r32 | 1, rb + PSS_GPIO_OE_REG);
        }

        /*
         * Mask the interrupts and clear any
         * pending interrupts left by BIOS/EFI
         */
        writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
        writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);

        /* For first time initialization, no need to clear interrupts */
        r32 = readl(rb + HOST_SEM5_REG);
        if (r32 & 0x1) {
                r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
                if (r32 == 1) {
                        writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
                        readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
                }
                r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
                if (r32 == 1) {
                        writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
                        readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
                }
        }

        bfa_ioc_ct2_mem_init(rb);

        writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
        writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
        return BFA_STATUS_OK;
}
