Lines matching refs: pp (struct mv_port_priv *; the per-port private data, ap->private_data)
660 struct mv_port_priv *pp);
958 struct mv_port_priv *pp = ap->private_data; in mv_save_cached_regs() local
960 pp->cached.fiscfg = readl(port_mmio + FISCFG); in mv_save_cached_regs()
961 pp->cached.ltmode = readl(port_mmio + LTMODE); in mv_save_cached_regs()
962 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND); in mv_save_cached_regs()
963 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD); in mv_save_cached_regs()
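The mv_save_cached_regs() hits above snapshot four rarely-written port registers into pp->cached so that later code can skip redundant MMIO writes. A minimal user-space model of that snapshot step, where mmio_read32() stands in for the kernel's readl() and the register offsets are illustrative, not the real FISCFG/LTMODE values:

    #include <stdint.h>

    struct cached_regs {
            uint32_t fiscfg, ltmode, haltcond, unknown_rsvd;
    };

    /* Stand-in for readl(): a volatile 32-bit MMIO load. */
    static uint32_t mmio_read32(const volatile uint32_t *reg)
    {
            return *reg;
    }

    /* Read each register once at init; writers later compare against
     * these cached copies and touch the hardware only on change. */
    static void save_cached_regs(struct cached_regs *c,
                                 const volatile uint32_t *port_mmio)
    {
            c->fiscfg       = mmio_read32(port_mmio + 0); /* FISCFG */
            c->ltmode       = mmio_read32(port_mmio + 1); /* LTMODE */
            c->haltcond     = mmio_read32(port_mmio + 2); /* EDMA_HALTCOND */
            c->unknown_rsvd = mmio_read32(port_mmio + 3); /* EDMA_UNKNOWN_RSVD */
    }
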
1003 struct mv_port_priv *pp) in mv_set_edma_ptrs() argument
1010 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ in mv_set_edma_ptrs()
1011 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; in mv_set_edma_ptrs()
1013 WARN_ON(pp->crqb_dma & 0x3ff); in mv_set_edma_ptrs()
1014 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI); in mv_set_edma_ptrs()
1015 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, in mv_set_edma_ptrs()
1022 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ in mv_set_edma_ptrs()
1023 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; in mv_set_edma_ptrs()
1025 WARN_ON(pp->crpb_dma & 0xff); in mv_set_edma_ptrs()
1026 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI); in mv_set_edma_ptrs()
1028 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, in mv_set_edma_ptrs()
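Two details in the mv_set_edma_ptrs() lines deserve a note. The WARN_ON() masks assert the hardware's alignment rules (going by the 0x3ff and 0xff masks, the request ring must be 1024-byte aligned and the response ring 256-byte aligned). And the double shift "(dma >> 16) >> 16" extracts the upper 32 bits of a dma_addr_t without shifting by 32, which would be undefined behaviour when dma_addr_t is a 32-bit type. A self-contained demonstration of the idiom:

    #include <stdint.h>
    #include <stdio.h>

    /* dma_addr_t may be 32 or 64 bits wide depending on kernel config.
     * "(x >> 16) >> 16" is zero for a 32-bit x and the high dword for
     * a 64-bit x, whereas "x >> 32" is undefined at 32 bits. */
    int main(void)
    {
            uint64_t crqb_dma = 0x00000001ffffdc00ULL;  /* example address */

            uint32_t hi = (uint32_t)((crqb_dma >> 16) >> 16);
            uint32_t lo = (uint32_t)(crqb_dma & 0xffffffffULL);

            printf("BASE_HI=%08x BASE_LO=%08x\n", hi, lo);
            return 0;
    }
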
1174 struct mv_port_priv *pp, u8 protocol) in mv_start_edma() argument
1178 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { in mv_start_edma()
1179 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); in mv_start_edma()
1183 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { in mv_start_edma()
1188 mv_set_edma_ptrs(port_mmio, hpriv, pp); in mv_start_edma()
1192 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; in mv_start_edma()
1246 struct mv_port_priv *pp = ap->private_data; in mv_stop_edma() local
1249 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) in mv_stop_edma()
1251 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; in mv_stop_edma()
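The mv_start_edma()/mv_stop_edma() references show the MV_PP_FLAG_EDMA_EN lifecycle: start is idempotent unless the NCQ mode changed, and stop is a no-op when the engine was never enabled. A sketch of that state machine with illustrative flag values; the real driver also reconfigures the EDMA registers where the comments sit:

    #include <stdint.h>
    #include <stdbool.h>

    #define PP_FLAG_EDMA_EN (1u << 0)  /* illustrative bit positions */
    #define PP_FLAG_NCQ_EN  (1u << 1)

    static void stop_edma(uint32_t *pp_flags)
    {
            if (!(*pp_flags & PP_FLAG_EDMA_EN))
                    return;                 /* never started: nothing to do */
            /* ... write EDMA_DS to the command register, wait for idle ... */
            *pp_flags &= ~PP_FLAG_EDMA_EN;
    }

    static void start_edma(uint32_t *pp_flags, bool want_ncq)
    {
            if (*pp_flags & PP_FLAG_EDMA_EN) {
                    bool using_ncq = (*pp_flags & PP_FLAG_NCQ_EN) != 0;
                    if (using_ncq == want_ncq)
                            return;         /* already running in the right mode */
                    stop_edma(pp_flags);    /* mode change forces a restart */
            }
            /* ... mv_edma_cfg(), mv_set_edma_ptrs(), write EDMA_EN ... */
            *pp_flags &= ~PP_FLAG_NCQ_EN;
            *pp_flags |= PP_FLAG_EDMA_EN | (want_ncq ? PP_FLAG_NCQ_EN : 0);
    }
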
1439 struct mv_port_priv *pp = ap->private_data; in mv_qc_defer() local
1445 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) in mv_qc_defer()
1478 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && in mv_qc_defer()
1479 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { in mv_qc_defer()
1493 struct mv_port_priv *pp = ap->private_data; in mv_config_fbs() local
1496 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; in mv_config_fbs()
1497 u32 ltmode, *old_ltmode = &pp->cached.ltmode; in mv_config_fbs()
1498 u32 haltcond, *old_haltcond = &pp->cached.haltcond; in mv_config_fbs()
1549 struct mv_port_priv *pp = ap->private_data; in mv_bmdma_enable_iie() local
1550 u32 new, *old = &pp->cached.unknown_rsvd; in mv_bmdma_enable_iie()
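mv_config_fbs() and mv_bmdma_enable_iie() pair each new register value with a pointer into pp->cached, which is the write half of the caching scheme set up in mv_save_cached_regs(): touch the register only if the value differs from the cached copy. A minimal model of that helper:

    #include <stdint.h>

    /* Write-if-changed: the cached copy doubles as the dirty check,
     * so unchanged values cost no MMIO.  In the kernel the store
     * would be a writel()/writelfl(); here it is a volatile store. */
    static void write_cached_reg(volatile uint32_t *reg,
                                 uint32_t *cached, uint32_t new_val)
    {
            if (new_val != *cached) {
                    *cached = new_val;
                    *reg = new_val;
            }
    }
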
1602 struct mv_port_priv *pp = this_ap->private_data; in mv_soc_led_blink_disable() local
1604 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) in mv_soc_led_blink_disable()
1617 struct mv_port_priv *pp = ap->private_data; in mv_edma_cfg() local
1623 pp->pp_flags &= in mv_edma_cfg()
1648 pp->pp_flags |= MV_PP_FLAG_FBS_EN; in mv_edma_cfg()
1672 pp->pp_flags |= MV_PP_FLAG_NCQ_EN; in mv_edma_cfg()
1681 struct mv_port_priv *pp = ap->private_data; in mv_port_free_dma_mem() local
1684 if (pp->crqb) { in mv_port_free_dma_mem()
1685 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); in mv_port_free_dma_mem()
1686 pp->crqb = NULL; in mv_port_free_dma_mem()
1688 if (pp->crpb) { in mv_port_free_dma_mem()
1689 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); in mv_port_free_dma_mem()
1690 pp->crpb = NULL; in mv_port_free_dma_mem()
1697 if (pp->sg_tbl[tag]) { in mv_port_free_dma_mem()
1700 pp->sg_tbl[tag], in mv_port_free_dma_mem()
1701 pp->sg_tbl_dma[tag]); in mv_port_free_dma_mem()
1702 pp->sg_tbl[tag] = NULL; in mv_port_free_dma_mem()
1721 struct mv_port_priv *pp; in mv_port_start() local
1725 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); in mv_port_start()
1726 if (!pp) in mv_port_start()
1728 ap->private_data = pp; in mv_port_start()
1730 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); in mv_port_start()
1731 if (!pp->crqb) in mv_port_start()
1733 memset(pp->crqb, 0, MV_CRQB_Q_SZ); in mv_port_start()
1735 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); in mv_port_start()
1736 if (!pp->crpb) in mv_port_start()
1738 memset(pp->crpb, 0, MV_CRPB_Q_SZ); in mv_port_start()
1749 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, in mv_port_start()
1750 GFP_KERNEL, &pp->sg_tbl_dma[tag]); in mv_port_start()
1751 if (!pp->sg_tbl[tag]) in mv_port_start()
1754 pp->sg_tbl[tag] = pp->sg_tbl[0]; in mv_port_start()
1755 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; in mv_port_start()
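The mv_port_start() lines show per-tag SG tables drawn from a dma_pool, with lines 1754-1755 aliasing some tags to entry 0. The gate that decides which tags get a private table is not visible in the listing; a plausible reconstruction, assuming NCQ-capable chip generations need one table per tag while older ones share a single table, and noting that mv_port_free_dma_mem() (lines 1697-1702) must free only the non-aliased entries:

    /* Fragment sketch, kernel style; per_tag_sg_tables is an assumed
     * predicate standing in for the driver's real generation check. */
    for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
            if (tag == 0 || per_tag_sg_tables) {
                    pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
                                                     GFP_KERNEL,
                                                     &pp->sg_tbl_dma[tag]);
                    if (!pp->sg_tbl[tag])
                            goto out_port_free_dma_mem; /* unwind partial allocs */
            } else {
                    /* no per-tag tables: every tag shares tag 0's */
                    pp->sg_tbl[tag] = pp->sg_tbl[0];
                    pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
            }
    }
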
1802 struct mv_port_priv *pp = qc->ap->private_data; in mv_fill_sg() local
1807 mv_sg = pp->sg_tbl[qc->tag]; in mv_fill_sg()
1900 struct mv_port_priv *pp = ap->private_data; in mv_bmdma_setup() local
1908 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, in mv_bmdma_setup()
1910 writelfl(pp->sg_tbl_dma[qc->tag], in mv_bmdma_setup()
2052 struct mv_port_priv *pp = ap->private_data; in mv_qc_prep() local
2081 in_index = pp->req_idx; in mv_qc_prep()
2083 pp->crqb[in_index].sg_addr = in mv_qc_prep()
2084 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); in mv_qc_prep()
2085 pp->crqb[in_index].sg_addr_hi = in mv_qc_prep()
2086 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); in mv_qc_prep()
2087 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); in mv_qc_prep()
2089 cw = &pp->crqb[in_index].ata_cmd[0]; in mv_qc_prep()
2153 struct mv_port_priv *pp = ap->private_data; in mv_qc_prep_iie() local
2175 in_index = pp->req_idx; in mv_qc_prep_iie()
2177 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; in mv_qc_prep_iie()
2178 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); in mv_qc_prep_iie()
2179 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); in mv_qc_prep_iie()
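Both mv_qc_prep() variants fill the request descriptor through cpu_to_le32()/cpu_to_le16(), because the EDMA engine reads the CRQB as little-endian regardless of host byte order. A user-space model of the fill using htole32()/htole16(); field widths beyond what the listing shows are assumed:

    #include <endian.h>
    #include <stdint.h>

    struct crqb_model {
            uint32_t sg_addr;       /* SG table DMA address, low 32 bits */
            uint32_t sg_addr_hi;    /* ... high 32 bits */
            uint16_t ctrl_flags;
    };

    static void crqb_fill(struct crqb_model *q, uint64_t sg_dma,
                          uint16_t flags)
    {
            q->sg_addr    = htole32((uint32_t)(sg_dma & 0xffffffff));
            q->sg_addr_hi = htole32((uint32_t)((sg_dma >> 16) >> 16));
            q->ctrl_flags = htole16(flags);
    }
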
2224 struct mv_port_priv *pp = ap->private_data; in mv_sff_check_status() local
2226 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { in mv_sff_check_status()
2228 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; in mv_sff_check_status()
2299 struct mv_port_priv *pp = ap->private_data; in mv_qc_issue_fis() local
2311 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; in mv_qc_issue_fis()
2317 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; in mv_qc_issue_fis()
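The MV_PP_FLAG_FAKE_ATA_BUSY references implement a small handshake: mv_qc_issue_fis() sets the flag after pushing a FIS out, and mv_sff_check_status() then substitutes ATA_BUSY for the real status so libata's SFF polling does not race the hardware. A simplified model; the driver's exact condition for clearing the flag is not visible in the listing, so a single forced-BUSY read is assumed:

    #include <stdint.h>

    #define ATA_BUSY                0x80        /* standard ATA status bit */
    #define PP_FLAG_FAKE_ATA_BUSY   (1u << 2)   /* illustrative value */

    static uint8_t sff_check_status(uint32_t *pp_flags, uint8_t hw_status)
    {
            if (*pp_flags & PP_FLAG_FAKE_ATA_BUSY) {
                    *pp_flags &= ~PP_FLAG_FAKE_ATA_BUSY;
                    return ATA_BUSY;    /* pretend busy once after FIS issue */
            }
            return hw_status;
    }
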
2350 struct mv_port_priv *pp = ap->private_data; in mv_qc_issue() local
2354 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ in mv_qc_issue()
2365 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); in mv_qc_issue()
2366 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; in mv_qc_issue()
2367 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; in mv_qc_issue()
2370 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, in mv_qc_issue()
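mv_qc_issue() advances the producer side of the request ring: req_idx wraps with MV_MAX_Q_DEPTH_MASK and is shifted into the IN-pointer field of the same register that holds the ring base's low bits (line 2370). A model of that arithmetic, assuming a queue depth of 32 and illustrative shift/mask values:

    #include <stdint.h>

    #define Q_DEPTH             32u
    #define Q_DEPTH_MASK        (Q_DEPTH - 1)   /* power of two, so & wraps */
    #define REQ_Q_PTR_SHIFT     5               /* illustrative field offset */
    #define REQ_Q_BASE_LO_MASK  0xfffffc00u     /* 1 KB-aligned base bits */

    /* Advance the producer index and rebuild the doorbell word that
     * the driver writes back with writelfl() on line 2370. */
    static uint32_t advance_and_doorbell(uint32_t *req_idx,
                                         uint32_t crqb_dma_lo)
    {
            *req_idx = (*req_idx + 1) & Q_DEPTH_MASK;
            return (crqb_dma_lo & REQ_Q_BASE_LO_MASK)
                 | (*req_idx << REQ_Q_PTR_SHIFT);
    }
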
2436 struct mv_port_priv *pp = ap->private_data; in mv_get_active_qc() local
2439 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) in mv_get_active_qc()
2450 struct mv_port_priv *pp = ap->private_data; in mv_pmp_error_handler() local
2452 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { in mv_pmp_error_handler()
2459 pmp_map = pp->delayed_eh_pmp_map; in mv_pmp_error_handler()
2460 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; in mv_pmp_error_handler()
2520 struct mv_port_priv *pp = ap->private_data; in mv_handle_fbs_ncq_dev_err() local
2532 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { in mv_handle_fbs_ncq_dev_err()
2533 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; in mv_handle_fbs_ncq_dev_err()
2534 pp->delayed_eh_pmp_map = 0; in mv_handle_fbs_ncq_dev_err()
2536 old_map = pp->delayed_eh_pmp_map; in mv_handle_fbs_ncq_dev_err()
2540 pp->delayed_eh_pmp_map = new_map; in mv_handle_fbs_ncq_dev_err()
2547 __func__, pp->delayed_eh_pmp_map, in mv_handle_fbs_ncq_dev_err()
2552 mv_process_crpb_entries(ap, pp); in mv_handle_fbs_ncq_dev_err()
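The delayed-EH references form a two-phase scheme: mv_handle_fbs_ncq_dev_err() arms MV_PP_FLAG_DELAYED_EH on the first NCQ device error and accumulates failing port-multiplier bits, and mv_pmp_error_handler() (lines 2452-2460) later consumes the map and clears the flag. A sketch of the accumulation step, with an illustrative flag value:

    #include <stdint.h>

    #define PP_FLAG_DELAYED_EH (1u << 3)    /* illustrative value */

    /* First error arms delayed EH and resets the PMP map; later
     * errors OR in more bits until error handling runs. */
    static void note_fbs_ncq_dev_err(uint32_t *pp_flags,
                                     uint16_t *delayed_eh_pmp_map,
                                     uint16_t new_pmp_bits)
    {
            if (!(*pp_flags & PP_FLAG_DELAYED_EH)) {
                    *pp_flags |= PP_FLAG_DELAYED_EH;
                    *delayed_eh_pmp_map = 0;
            }
            *delayed_eh_pmp_map |= new_pmp_bits;
    }
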
2580 struct mv_port_priv *pp = ap->private_data; in mv_handle_dev_err() local
2582 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) in mv_handle_dev_err()
2584 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) in mv_handle_dev_err()
2593 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { in mv_handle_dev_err()
2601 __func__, edma_err_cause, pp->pp_flags); in mv_handle_dev_err()
2613 __func__, edma_err_cause, pp->pp_flags); in mv_handle_dev_err()
2656 struct mv_port_priv *pp = ap->private_data; in mv_err_intr() local
2690 edma_err_cause, pp->pp_flags); in mv_err_intr()
2732 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; in mv_err_intr()
2738 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; in mv_err_intr()
2814 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) in mv_process_crpb_entries() argument
2821 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); in mv_process_crpb_entries()
2828 while (in_index != pp->resp_idx) { in mv_process_crpb_entries()
2830 struct mv_crpb *response = &pp->crpb[pp->resp_idx]; in mv_process_crpb_entries()
2832 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; in mv_process_crpb_entries()
2850 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | in mv_process_crpb_entries()
2851 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), in mv_process_crpb_entries()
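mv_process_crpb_entries() is the consumer side of the response ring: resp_idx chases the hardware's IN index one CRPB at a time, wrapping with the same depth mask as the request ring, and the final writelfl() (lines 2850-2851) publishes resp_idx as the new OUT pointer so the slots can be reused. A model of the loop; the CRPB layout beyond the listing is assumed:

    #include <stdint.h>

    #define Q_DEPTH_MASK 31u    /* matches the assumed depth of 32 */

    struct crpb_model {
            uint16_t id;        /* completed command's tag */
            uint16_t flags;
    };

    /* Consume completed responses until we catch up with the
     * hardware's producer index; the caller then republishes
     * resp_idx as the OUT pointer. */
    static void process_crpb_entries(const struct crpb_model *ring,
                                     uint32_t hw_in_index,
                                     uint32_t *resp_idx)
    {
            while (hw_in_index != *resp_idx) {
                    const struct crpb_model *response = &ring[*resp_idx];
                    *resp_idx = (*resp_idx + 1) & Q_DEPTH_MASK;
                    /* ... complete the command named by response->id ... */
                    (void)response;
            }
    }
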
2858 struct mv_port_priv *pp; in mv_port_intr() local
2866 pp = ap->private_data; in mv_port_intr()
2867 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); in mv_port_intr()
2872 mv_process_crpb_entries(ap, pp); in mv_port_intr()
2873 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) in mv_port_intr()
3639 struct mv_port_priv *pp = ap->private_data; in mv_hardreset() local
3646 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; in mv_hardreset()
3647 pp->pp_flags &= in mv_hardreset()