Lines matching refs: qc
698 static void ata_pio_sector(struct ata_queued_cmd *qc) in ata_pio_sector() argument
700 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); in ata_pio_sector()
701 struct ata_port *ap = qc->ap; in ata_pio_sector()
706 if (qc->curbytes == qc->nbytes - qc->sect_size) in ata_pio_sector()
709 page = sg_page(qc->cursg); in ata_pio_sector()
710 offset = qc->cursg->offset + qc->cursg_ofs; in ata_pio_sector()
716 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); in ata_pio_sector()
726 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, in ata_pio_sector()
733 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, in ata_pio_sector()
740 qc->curbytes += qc->sect_size; in ata_pio_sector()
741 qc->cursg_ofs += qc->sect_size; in ata_pio_sector()
743 if (qc->cursg_ofs == qc->cursg->length) { in ata_pio_sector()
744 qc->cursg = sg_next(qc->cursg); in ata_pio_sector()
745 qc->cursg_ofs = 0; in ata_pio_sector()
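
The fragments above (kernel lines 698-745) are ata_pio_sector()'s bookkeeping: one sect_size chunk is copied at the current scatter/gather position, the byte counters advance, and when the current entry is exhausted the cursor moves to sg_next(). The equality test at line 743 works because libata data buffers are sector-aligned, so a sector never straddles two entries. A minimal user-space model of that cursor logic (struct sg_entry, xfer_cursor and pio_one_sector are illustrative names, not libata types):

    #include <stdio.h>

    struct sg_entry { unsigned int length; struct sg_entry *next; };

    /* Cursor state mirroring qc->cursg / qc->cursg_ofs / qc->curbytes. */
    struct xfer_cursor {
        struct sg_entry *cursg;
        unsigned int cursg_ofs;
        unsigned int curbytes;
    };

    /* Advance past one fixed-size sector, as ata_pio_sector() does
     * after the actual sff_data_xfer() call. */
    static void pio_one_sector(struct xfer_cursor *c, unsigned int sect_size)
    {
        c->curbytes += sect_size;
        c->cursg_ofs += sect_size;
        if (c->cursg_ofs == c->cursg->length) {   /* entry exhausted */
            c->cursg = c->cursg->next;            /* sg_next() */
            c->cursg_ofs = 0;
        }
    }

    int main(void)
    {
        struct sg_entry b = { 512, NULL }, a = { 1024, &b };
        struct xfer_cursor c = { &a, 0, 0 };
        for (int i = 0; i < 3; i++) {
            pio_one_sector(&c, 512);
            printf("after sector %d: curbytes=%u ofs=%u\n",
                   i, c.curbytes, c.cursg_ofs);
        }
        return 0;
    }
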
759 static void ata_pio_sectors(struct ata_queued_cmd *qc) in ata_pio_sectors() argument
761 if (is_multi_taskfile(&qc->tf)) { in ata_pio_sectors()
765 WARN_ON_ONCE(qc->dev->multi_count == 0); in ata_pio_sectors()
767 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, in ata_pio_sectors()
768 qc->dev->multi_count); in ata_pio_sectors()
770 ata_pio_sector(qc); in ata_pio_sectors()
772 ata_pio_sector(qc); in ata_pio_sectors()
774 ata_sff_sync(qc->ap); /* flush */ in ata_pio_sectors()
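
ata_pio_sectors() handles READ/WRITE MULTIPLE: is_multi_taskfile() commands move up to dev->multi_count sectors per DRQ block, and the min() at lines 767-768 caps the final, possibly short, block. A worked example with illustrative values: nbytes = 24576 (48 sectors of 512 bytes), curbytes = 22528 and multi_count = 16 give nsect = min((24576 - 22528) / 512, 16) = min(4, 16) = 4, so the last DRQ block carries only four sectors before ata_sff_sync() flushes the posted writes.
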
788 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) in atapi_send_cdb() argument
792 WARN_ON_ONCE(qc->dev->cdb_len < 12); in atapi_send_cdb()
794 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); in atapi_send_cdb()
798 switch (qc->tf.protocol) { in atapi_send_cdb()
809 ap->ops->bmdma_start(qc); in atapi_send_cdb()
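
atapi_send_cdb() clocks the packet command out through sff_data_xfer() (the WARN at line 792 enforces the 12-byte minimum CDB), after which the protocol decides the next HSM step; only ATAPI_PROT_DMA kicks bmdma_start() here. A toy dispatch showing the shape (the enum below is illustrative, not the real ata_prot values):

    #include <stdio.h>

    enum atapi_prot { PROT_PIO, PROT_NODATA, PROT_DMA };

    /* After the CDB bytes are clocked out, the next HSM step depends
     * on the protocol, as in atapi_send_cdb(). */
    static const char *next_step(enum atapi_prot prot)
    {
        switch (prot) {
        case PROT_PIO:    return "wait for DRQ, then PIO data phase";
        case PROT_NODATA: return "wait for command completion interrupt";
        case PROT_DMA:    return "kick the engine: ap->ops->bmdma_start(qc)";
        }
        return "?";
    }

    int main(void)
    {
        printf("%s\n", next_step(PROT_DMA));
        return 0;
    }
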
828 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) in __atapi_pio_bytes() argument
830 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; in __atapi_pio_bytes()
831 struct ata_port *ap = qc->ap; in __atapi_pio_bytes()
832 struct ata_device *dev = qc->dev; in __atapi_pio_bytes()
840 sg = qc->cursg; in __atapi_pio_bytes()
844 qc->nbytes, qc->curbytes, bytes); in __atapi_pio_bytes()
849 offset = sg->offset + qc->cursg_ofs; in __atapi_pio_bytes()
856 count = min(sg->length - qc->cursg_ofs, bytes); in __atapi_pio_bytes()
861 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); in __atapi_pio_bytes()
883 qc->curbytes += count; in __atapi_pio_bytes()
884 qc->cursg_ofs += count; in __atapi_pio_bytes()
886 if (qc->cursg_ofs == sg->length) { in __atapi_pio_bytes()
887 qc->cursg = sg_next(qc->cursg); in __atapi_pio_bytes()
888 qc->cursg_ofs = 0; in __atapi_pio_bytes()
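
__atapi_pio_bytes() must place however many bytes the device chose to send, so each iteration consumes count = min(sg->length - qc->cursg_ofs, bytes) from the current entry and advances the cursor exactly as ata_pio_sector() does; running out of s/g entries while bytes remain is the trailing-data error reported around line 844. A self-contained model of that consume loop (consume() and struct sg_entry are illustrative names):

    #include <stdio.h>

    struct sg_entry { unsigned int length; struct sg_entry *next; };

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* Consume 'bytes' device-reported bytes across the s/g list,
     * chunk by chunk. Returns the bytes it could not place (the
     * drive sent more data than the command mapped). */
    static unsigned int consume(struct sg_entry **sgp, unsigned int *ofs,
                                unsigned int bytes)
    {
        while (bytes) {
            struct sg_entry *sg = *sgp;
            unsigned int count;

            if (!sg)
                return bytes;                    /* buffer overrun */
            count = min_u(sg->length - *ofs, bytes);
            /* real code copies 'count' bytes to/from page memory here */
            bytes -= count;
            *ofs += count;
            if (*ofs == sg->length) {            /* entry exhausted */
                *sgp = sg->next;
                *ofs = 0;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct sg_entry b = { 200, NULL }, a = { 100, &b };
        struct sg_entry *sg = &a;
        unsigned int ofs = 0;
        printf("left over: %u\n", consume(&sg, &ofs, 250)); /* 0 */
        printf("left over: %u\n", consume(&sg, &ofs, 100)); /* 50 */
        return 0;
    }
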
911 static void atapi_pio_bytes(struct ata_queued_cmd *qc) in atapi_pio_bytes() argument
913 struct ata_port *ap = qc->ap; in atapi_pio_bytes()
914 struct ata_device *dev = qc->dev; in atapi_pio_bytes()
917 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; in atapi_pio_bytes()
925 ap->ops->sff_tf_read(ap, &qc->result_tf); in atapi_pio_bytes()
926 ireason = qc->result_tf.nsect; in atapi_pio_bytes()
927 bc_lo = qc->result_tf.lbam; in atapi_pio_bytes()
928 bc_hi = qc->result_tf.lbah; in atapi_pio_bytes()
945 if (unlikely(__atapi_pio_bytes(qc, bytes))) in atapi_pio_bytes()
955 qc->err_mask |= AC_ERR_HSM; in atapi_pio_bytes()
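
atapi_pio_bytes() recovers the transfer parameters the device reported: the interrupt reason from the nsect register and the byte count from the two cylinder registers, bytes = (bc_hi << 8) | bc_lo. The CoD bit must indicate a data phase and the I/O bit must match the host's expected direction, otherwise the AC_ERR_HSM path at line 955 is taken. A sketch of those checks (atapi_drq_bytes() is an illustrative helper; the ATAPI_COD/ATAPI_IO bit values match the kernel's definitions):

    #include <stdio.h>

    #define ATAPI_COD (1 << 0)  /* command (1) vs data (0) phase */
    #define ATAPI_IO  (1 << 1)  /* device->host (1) vs host->device (0) */

    /* Validate the interrupt-reason byte and recover the DRQ byte
     * count. do_write is the host's expected direction. Returns the
     * byte count, or -1 on a phase/direction mismatch. */
    static int atapi_drq_bytes(unsigned char ireason, unsigned char bc_lo,
                               unsigned char bc_hi, int do_write)
    {
        int i_write;

        if (ireason & ATAPI_COD)            /* must be a data phase */
            return -1;
        i_write = (ireason & ATAPI_IO) == 0;
        if (do_write != i_write)            /* direction mismatch */
            return -1;
        return (bc_hi << 8) | bc_lo;
    }

    int main(void)
    {
        /* device->host, 0x0800 = 2048 bytes: a typical CD sector */
        printf("%d\n", atapi_drq_bytes(ATAPI_IO, 0x00, 0x08, 0));
        return 0;
    }
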
968 struct ata_queued_cmd *qc) in ata_hsm_ok_in_wq() argument
970 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_hsm_ok_in_wq()
974 if (qc->tf.protocol == ATA_PROT_PIO && in ata_hsm_ok_in_wq()
975 (qc->tf.flags & ATA_TFLAG_WRITE)) in ata_hsm_ok_in_wq()
978 if (ata_is_atapi(qc->tf.protocol) && in ata_hsm_ok_in_wq()
979 !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in ata_hsm_ok_in_wq()
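
ata_hsm_ok_in_wq() is a sanity predicate: workqueue context is legal when the command is polled, or at HSM_ST_FIRST when the host itself must perform the first transfer, i.e. a PIO write or an ATAPI command on a device without ATA_DFLAG_CDB_INTR. A condensed restatement (toy booleans, not the libata types):

    #include <stdbool.h>
    #include <stdio.h>

    /* Condensed form of the ata_hsm_ok_in_wq() tests above. */
    static bool hsm_ok_in_wq(bool polling, bool st_first, bool pio_write,
                             bool atapi_no_cdb_intr)
    {
        if (polling)
            return true;
        /* at HSM_ST_FIRST the host drives the first transfer itself */
        return st_first && (pio_write || atapi_no_cdb_intr);
    }

    int main(void)
    {
        printf("%d\n", hsm_ok_in_wq(false, true, true, false)); /* 1 */
        return 0;
    }
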
997 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) in ata_hsm_qc_complete() argument
999 struct ata_port *ap = qc->ap; in ata_hsm_qc_complete()
1006 qc = ata_qc_from_tag(ap, qc->tag); in ata_hsm_qc_complete()
1007 if (qc) { in ata_hsm_qc_complete()
1008 if (likely(!(qc->err_mask & AC_ERR_HSM))) { in ata_hsm_qc_complete()
1010 ata_qc_complete(qc); in ata_hsm_qc_complete()
1015 if (likely(!(qc->err_mask & AC_ERR_HSM))) in ata_hsm_qc_complete()
1016 ata_qc_complete(qc); in ata_hsm_qc_complete()
1023 ata_qc_complete(qc); in ata_hsm_qc_complete()
1025 ata_qc_complete(qc); in ata_hsm_qc_complete()
1039 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, in ata_sff_hsm_move() argument
1042 struct ata_link *link = qc->dev->link; in ata_sff_hsm_move()
1048 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); in ata_sff_hsm_move()
1054 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); in ata_sff_hsm_move()
1058 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); in ata_sff_hsm_move()
1068 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); in ata_sff_hsm_move()
1075 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
1080 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1099 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { in ata_sff_hsm_move()
1103 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1109 if (qc->tf.protocol == ATA_PROT_PIO) { in ata_sff_hsm_move()
1119 ata_pio_sectors(qc); in ata_sff_hsm_move()
1122 atapi_send_cdb(ap, qc); in ata_sff_hsm_move()
1131 if (qc->tf.protocol == ATAPI_PROT_PIO) { in ata_sff_hsm_move()
1151 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1156 atapi_pio_bytes(qc); in ata_sff_hsm_move()
1168 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
1174 if (qc->dev->horkage & in ata_sff_hsm_move()
1176 qc->err_mask |= in ata_sff_hsm_move()
1186 qc->err_mask |= AC_ERR_HSM | in ata_sff_hsm_move()
1206 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
1208 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { in ata_sff_hsm_move()
1209 ata_pio_sectors(qc); in ata_sff_hsm_move()
1217 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1228 qc->err_mask |= AC_ERR_NODEV_HINT; in ata_sff_hsm_move()
1238 ata_pio_sectors(qc); in ata_sff_hsm_move()
1241 (!(qc->tf.flags & ATA_TFLAG_WRITE))) { in ata_sff_hsm_move()
1253 qc->err_mask |= __ac_err_mask(status); in ata_sff_hsm_move()
1260 ap->print_id, qc->dev->devno, status); in ata_sff_hsm_move()
1262 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); in ata_sff_hsm_move()
1267 ata_hsm_qc_complete(qc, in_wq); in ata_sff_hsm_move()
1276 ata_hsm_qc_complete(qc, in_wq); in ata_sff_hsm_move()
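
Everything from line 1039 to 1276 is ata_sff_hsm_move(), the core of the SFF host state machine: HSM_ST_FIRST (host sends the first data block or the CDB), HSM_ST (one DRQ data block per interrupt or poll), HSM_ST_LAST (read final status and complete the qc) and HSM_ST_ERR. A skeletal model of the transitions; the real function also handles BSY waits, the polling handoff, and the HORKAGE cases visible above (hsm_move() and its toy arguments are illustrative):

    #include <stdio.h>

    enum hsm_state { HSM_ST_FIRST, HSM_ST, HSM_ST_LAST, HSM_ST_IDLE,
                     HSM_ST_ERR };

    /* One device interrupt (or poll) drives one move of the machine. */
    static enum hsm_state hsm_move(enum hsm_state st, int drq, int err)
    {
        if (err)
            return HSM_ST_ERR;       /* qc->err_mask |= AC_ERR_DEV/HSM */
        switch (st) {
        case HSM_ST_FIRST:           /* push first block / CDB */
            return drq ? HSM_ST : HSM_ST_ERR;
        case HSM_ST:                 /* transfer while device asserts DRQ */
            return drq ? HSM_ST : HSM_ST_LAST;
        case HSM_ST_LAST:            /* read status, complete the qc */
            return HSM_ST_IDLE;
        default:
            return st;
        }
    }

    int main(void)
    {
        enum hsm_state st = HSM_ST_FIRST;
        int drq_seq[] = { 1, 1, 0, 0 };
        for (int i = 0; i < 4; i++) {
            st = hsm_move(st, drq_seq[i], 0);
            printf("state=%d\n", st);
        }
        return 0;
    }
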
1343 struct ata_queued_cmd *qc; in ata_sff_pio_task() local
1351 qc = ata_qc_from_tag(ap, link->active_tag); in ata_sff_pio_task()
1352 if (!qc) { in ata_sff_pio_task()
1386 poll_next = ata_sff_hsm_move(ap, qc, status, 1); in ata_sff_pio_task()
1410 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) in ata_sff_qc_issue() argument
1412 struct ata_port *ap = qc->ap; in ata_sff_qc_issue()
1413 struct ata_link *link = qc->dev->link; in ata_sff_qc_issue()
1419 qc->tf.flags |= ATA_TFLAG_POLLING; in ata_sff_qc_issue()
1422 ata_dev_select(ap, qc->dev->devno, 1, 0); in ata_sff_qc_issue()
1425 switch (qc->tf.protocol) { in ata_sff_qc_issue()
1427 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1428 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1430 ata_tf_to_host(ap, &qc->tf); in ata_sff_qc_issue()
1433 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1439 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1440 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1442 ata_tf_to_host(ap, &qc->tf); in ata_sff_qc_issue()
1444 if (qc->tf.flags & ATA_TFLAG_WRITE) { in ata_sff_qc_issue()
1456 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1469 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1470 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1472 ata_tf_to_host(ap, &qc->tf); in ata_sff_qc_issue()
1477 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || in ata_sff_qc_issue()
1478 (qc->tf.flags & ATA_TFLAG_POLLING)) in ata_sff_qc_issue()
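
ata_sff_qc_issue() follows one pattern per protocol: write the taskfile with ata_tf_to_host(), set the initial HSM state, and schedule the polling pio task only when the next HSM step must come from the host rather than from a device interrupt (PIO writes, host-fed ATAPI CDBs, or any polled command). Condensed into a predicate (toy enum, illustrative names):

    #include <stdio.h>
    #include <stdbool.h>

    enum prot { NODATA, PIO_READ, PIO_WRITE, ATAPI_CDB_BY_HOST,
                ATAPI_CDB_INTR };

    /* Who performs the next HSM step after the taskfile is written?
     * Mirrors the queue-pio-task decisions in ata_sff_qc_issue(). */
    static bool host_drives_next_step(enum prot p, bool polling)
    {
        switch (p) {
        case PIO_WRITE:         return true;  /* host feeds first block */
        case ATAPI_CDB_BY_HOST: return true;  /* host must write the CDB */
        default:                return polling;
        }
    }

    int main(void)
    {
        printf("%d\n", host_drives_next_step(PIO_WRITE, false)); /* 1 */
        return 0;
    }
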
1504 bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) in ata_sff_qc_fill_rtf() argument
1506 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); in ata_sff_qc_fill_rtf()
1528 struct ata_queued_cmd *qc, in __ata_sff_port_intr() argument
1534 ap->print_id, qc->tf.protocol, ap->hsm_task_state); in __ata_sff_port_intr()
1547 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in __ata_sff_port_intr()
1561 qc->err_mask |= AC_ERR_HSM; in __ata_sff_port_intr()
1571 ata_sff_hsm_move(ap, qc, status, 0); in __ata_sff_port_intr()
1589 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) in ata_sff_port_intr() argument
1591 return __ata_sff_port_intr(ap, qc, false); in ata_sff_port_intr()
1611 struct ata_queued_cmd *qc; in __ata_sff_interrupt() local
1613 qc = ata_qc_from_tag(ap, ap->link.active_tag); in __ata_sff_interrupt()
1614 if (qc) { in __ata_sff_interrupt()
1615 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) in __ata_sff_interrupt()
1616 handled |= port_intr(ap, qc); in __ata_sff_interrupt()
1703 struct ata_queued_cmd *qc; in ata_sff_lost_interrupt() local
1706 qc = ata_qc_from_tag(ap, ap->link.active_tag); in ata_sff_lost_interrupt()
1708 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_lost_interrupt()
1722 ata_sff_port_intr(ap, qc); in ata_sff_lost_interrupt()
2163 void ata_sff_drain_fifo(struct ata_queued_cmd *qc) in ata_sff_drain_fifo() argument
2169 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) in ata_sff_drain_fifo()
2172 ap = qc->ap; in ata_sff_drain_fifo()
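
ata_sff_drain_fifo() runs in EH after a failed command, and only for reads (the guard at line 2169 skips a NULL qc and DMA_TO_DEVICE): it reads and discards 16-bit words from the data register while the device still asserts DRQ, bounded so a wedged device cannot hang error handling. A sketch of the loop shape (read_status() and read_data16() stand in for sff_check_status() and ioread16() on ioaddr.data_addr):

    #include <stdio.h>

    #define ATA_DRQ 0x08      /* data request bit in the status register */

    /* Stand-ins for the port's status and data register accessors. */
    static int fifo_left = 5;
    static unsigned char read_status(void) { return fifo_left ? ATA_DRQ : 0; }
    static unsigned short read_data16(void) { fifo_left--; return 0; }

    /* Discard leftover FIFO words, bounded like ata_sff_drain_fifo(). */
    static unsigned int drain_fifo(void)
    {
        unsigned int count;

        for (count = 0; (read_status() & ATA_DRQ) && count < 65536;
             count += 2)
            (void)read_data16();
        return count;         /* bytes drained */
    }

    int main(void)
    {
        printf("drained %u bytes\n", drain_fifo());   /* 10 */
        return 0;
    }
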
2201 struct ata_queued_cmd *qc; in ata_sff_error_handler() local
2204 qc = __ata_qc_from_tag(ap, ap->link.active_tag); in ata_sff_error_handler()
2205 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) in ata_sff_error_handler()
2206 qc = NULL; in ata_sff_error_handler()
2218 ap->ops->sff_drain_fifo(qc); in ata_sff_error_handler()
2632 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) in ata_bmdma_fill_sg() argument
2634 struct ata_port *ap = qc->ap; in ata_bmdma_fill_sg()
2640 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ata_bmdma_fill_sg()
2683 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) in ata_bmdma_fill_sg_dumb() argument
2685 struct ata_port *ap = qc->ap; in ata_bmdma_fill_sg_dumb()
2691 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ata_bmdma_fill_sg_dumb()
2739 void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) in ata_bmdma_qc_prep() argument
2741 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in ata_bmdma_qc_prep()
2744 ata_bmdma_fill_sg(qc); in ata_bmdma_qc_prep()
2757 void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) in ata_bmdma_dumb_qc_prep() argument
2759 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in ata_bmdma_dumb_qc_prep()
2762 ata_bmdma_fill_sg_dumb(qc); in ata_bmdma_dumb_qc_prep()
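
ata_bmdma_fill_sg() turns qc->sg into the SFF-8038i PRD table. The rule enforced in the inner loop is that no PRD entry may cross a 64 KiB boundary, with a length field of 0 encoding a full 64 KiB; the _dumb variant exists because chips such as the CS5530 mishandle that 0 encoding, so it emits two 32 KiB entries instead. A self-contained model of the boundary split (fill_prd() and struct prd_entry are illustrative names):

    #include <stdio.h>
    #include <stdint.h>

    struct prd_entry { uint32_t addr; uint32_t flags_len; };

    /* Split one DMA segment into PRD entries, none crossing a 64 KiB
     * boundary. Returns the number of entries written. */
    static int fill_prd(struct prd_entry *prd, uint32_t addr,
                        uint32_t sg_len)
    {
        int pi = 0;

        while (sg_len) {
            uint32_t offset = addr & 0xffff;
            uint32_t len = sg_len;

            if (offset + sg_len > 0x10000)    /* would cross 64 KiB */
                len = 0x10000 - offset;

            prd[pi].addr = addr;
            prd[pi].flags_len = len & 0xffff; /* 0 encodes 64 KiB */
            pi++;
            sg_len -= len;
            addr += len;
        }
        return pi;
    }

    int main(void)
    {
        struct prd_entry prd[4];
        /* 40 KiB segment starting 8 KiB below a boundary: 2 entries */
        int n = fill_prd(prd, 0x0000e000, 40 * 1024);
        for (int i = 0; i < n; i++)
            printf("prd[%d]: addr=0x%08x len=0x%04x\n", i,
                   (unsigned)prd[i].addr, (unsigned)prd[i].flags_len);
        return 0;
    }
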
2780 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) in ata_bmdma_qc_issue() argument
2782 struct ata_port *ap = qc->ap; in ata_bmdma_qc_issue()
2783 struct ata_link *link = qc->dev->link; in ata_bmdma_qc_issue()
2786 if (!ata_is_dma(qc->tf.protocol)) in ata_bmdma_qc_issue()
2787 return ata_sff_qc_issue(qc); in ata_bmdma_qc_issue()
2790 ata_dev_select(ap, qc->dev->devno, 1, 0); in ata_bmdma_qc_issue()
2793 switch (qc->tf.protocol) { in ata_bmdma_qc_issue()
2795 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); in ata_bmdma_qc_issue()
2797 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in ata_bmdma_qc_issue()
2798 ap->ops->bmdma_setup(qc); /* set up bmdma */ in ata_bmdma_qc_issue()
2799 ap->ops->bmdma_start(qc); /* initiate bmdma */ in ata_bmdma_qc_issue()
2804 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); in ata_bmdma_qc_issue()
2806 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in ata_bmdma_qc_issue()
2807 ap->ops->bmdma_setup(qc); /* set up bmdma */ in ata_bmdma_qc_issue()
2811 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in ata_bmdma_qc_issue()
2837 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) in ata_bmdma_port_intr() argument
2844 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { in ata_bmdma_port_intr()
2854 ap->ops->bmdma_stop(qc); in ata_bmdma_port_intr()
2859 qc->err_mask |= AC_ERR_HOST_BUS; in ata_bmdma_port_intr()
2864 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); in ata_bmdma_port_intr()
2866 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) in ata_bmdma_port_intr()
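
At HSM_ST_LAST for a DMA command, ata_bmdma_port_intr() consults the engine before trusting the device: no ATA_DMA_INTR in the host status means the (possibly shared) IRQ was not ours, otherwise the engine is stopped before device status is read, and ATA_DMA_ERR becomes AC_ERR_HOST_BUS at line 2859. A sketch of that gate (read_bmdma_status() is a stand-in for reading ioaddr.bmdma_addr + ATA_DMA_STATUS):

    #include <stdio.h>

    #define ATA_DMA_INTR 0x04   /* engine raised an interrupt */
    #define ATA_DMA_ERR  0x02   /* engine saw a bus error */

    static unsigned char read_bmdma_status(void) { return ATA_DMA_INTR; }

    /* Returns 0 if the IRQ is not ours, else 1; *err set on bus error. */
    static int bmdma_irq_gate(int *err)
    {
        unsigned char host_stat = read_bmdma_status();

        if (!(host_stat & ATA_DMA_INTR))
            return 0;           /* spurious / shared IRQ: not handled */
        /* bmdma_stop(qc) would go here, before reading device status */
        *err = !!(host_stat & ATA_DMA_ERR);   /* -> AC_ERR_HOST_BUS */
        return 1;
    }

    int main(void)
    {
        int err;
        printf("handled=%d err=%d\n", bmdma_irq_gate(&err), err);
        return 0;
    }
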
2907 struct ata_queued_cmd *qc; in ata_bmdma_error_handler() local
2911 qc = __ata_qc_from_tag(ap, ap->link.active_tag); in ata_bmdma_error_handler()
2912 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) in ata_bmdma_error_handler()
2913 qc = NULL; in ata_bmdma_error_handler()
2918 if (qc && ata_is_dma(qc->tf.protocol)) { in ata_bmdma_error_handler()
2928 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { in ata_bmdma_error_handler()
2929 qc->err_mask = AC_ERR_HOST_BUS; in ata_bmdma_error_handler()
2933 ap->ops->bmdma_stop(qc); in ata_bmdma_error_handler()
2959 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) in ata_bmdma_post_internal_cmd() argument
2961 struct ata_port *ap = qc->ap; in ata_bmdma_post_internal_cmd()
2964 if (ata_is_dma(qc->tf.protocol)) { in ata_bmdma_post_internal_cmd()
2966 ap->ops->bmdma_stop(qc); in ata_bmdma_post_internal_cmd()
3001 void ata_bmdma_setup(struct ata_queued_cmd *qc) in ata_bmdma_setup() argument
3003 struct ata_port *ap = qc->ap; in ata_bmdma_setup()
3004 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); in ata_bmdma_setup()
3019 ap->ops->sff_exec_command(ap, &qc->tf); in ata_bmdma_setup()
3030 void ata_bmdma_start(struct ata_queued_cmd *qc) in ata_bmdma_start() argument
3032 struct ata_port *ap = qc->ap; in ata_bmdma_start()
3067 void ata_bmdma_stop(struct ata_queued_cmd *qc) in ata_bmdma_stop() argument
3069 struct ata_port *ap = qc->ap; in ata_bmdma_stop()
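
The three helpers at the end program the standard SFF-8038i register block: a 32-bit PRD table pointer at offset 4, a command byte at offset 0 (bit 0 start/stop, bit 3 direction, set when the engine writes to memory, i.e. a device read) and a status byte at offset 2. ata_bmdma_setup() loads the PRD pointer and direction and then issues the taskfile, ata_bmdma_start() sets the start bit, and ata_bmdma_stop() clears it and flushes. A condensed model over a fake register window (bmdma_reg[] stands in for the mapped ioaddr.bmdma_addr; the offsets and bits match the kernel's definitions):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define ATA_DMA_CMD       0      /* command register offset */
    #define ATA_DMA_STATUS    2      /* status register offset */
    #define ATA_DMA_TABLE_OFS 4      /* PRD table pointer offset */
    #define ATA_DMA_START     (1 << 0)
    #define ATA_DMA_WR        (1 << 3)  /* engine writes memory */

    static uint8_t bmdma_reg[8];     /* fake mapped BMDMA window */

    static void bmdma_setup(uint32_t prd_dma, int device_read)
    {
        uint8_t dmactl;

        /* load the PRD table address */
        memcpy(&bmdma_reg[ATA_DMA_TABLE_OFS], &prd_dma, sizeof(prd_dma));
        /* set direction, make sure the start bit is off */
        dmactl = bmdma_reg[ATA_DMA_CMD] & ~(ATA_DMA_WR | ATA_DMA_START);
        if (device_read)
            dmactl |= ATA_DMA_WR;    /* controller -> memory */
        bmdma_reg[ATA_DMA_CMD] = dmactl;
        /* real code now issues the taskfile: sff_exec_command() */
    }

    static void bmdma_start(void)
    {
        bmdma_reg[ATA_DMA_CMD] |= ATA_DMA_START;
    }

    static void bmdma_stop(void)
    {
        bmdma_reg[ATA_DMA_CMD] &= ~ATA_DMA_START;
        /* real code then reads altstatus to flush posted writes */
    }

    int main(void)
    {
        bmdma_setup(0x1000, 1);
        bmdma_start();
        printf("cmd=0x%02x\n", bmdma_reg[ATA_DMA_CMD]);  /* 0x09 */
        bmdma_stop();
        printf("cmd=0x%02x\n", bmdma_reg[ATA_DMA_CMD]);  /* 0x08 */
        return 0;
    }
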