Lines matching refs: k (occurrences of the local variable k; the function names indicate drivers/scsi/scsi_debug.c)
813 int k; in check_readiness() local
816 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); in check_readiness()
817 if (k != SDEBUG_NUM_UAS) { in check_readiness()
820 switch (k) { in check_readiness()
877 __func__, k); in check_readiness()
882 clear_bit(k, devip->uas_bm); in check_readiness()
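
The check_readiness() lines above show a pending-unit-attention pattern: find the first set bit in the device's UA bitmap, act on it, then clear it. Below is a minimal userspace C sketch of that pattern; find_first_bit_word() is a plain stand-in for the kernel's find_first_bit(), and the UA_* names and values are illustrative, not the driver's SDEBUG_UA_* constants.

#include <stdio.h>

#define SDEBUG_NUM_UAS 8                 /* assumption: fits in one word */
#define UA_POR              0            /* illustrative: power on / reset */
#define UA_CAPACITY_CHANGED 2            /* illustrative */

static unsigned long uas_bm = 1UL << UA_CAPACITY_CHANGED;

static int find_first_bit_word(unsigned long bm, int nbits)
{
	for (int k = 0; k < nbits; ++k)
		if (bm & (1UL << k))
			return k;
	return nbits;                    /* "none set" == size, as in the kernel */
}

int main(void)
{
	int k = find_first_bit_word(uas_bm, SDEBUG_NUM_UAS);

	if (k != SDEBUG_NUM_UAS) {
		switch (k) {
		case UA_POR:
			printf("reporting UA: power on, reset\n");
			break;
		case UA_CAPACITY_CHANGED:
			printf("reporting UA: capacity data changed\n");
			break;
		default:
			printf("unexpected UA index %d\n", k);
			break;
		}
		uas_bm &= ~(1UL << k);   /* clear_bit(k, devip->uas_bm) analogue */
	}
	return 0;
}
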
1546 int k, alloc_len; in resp_readcap16() local
1554 for (k = 0; k < 8; ++k, capac >>= 8) in resp_readcap16()
1555 arr[7 - k] = capac & 0xff; in resp_readcap16()
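
Lines 1554-1555 (and the same loop again at 2069-2070 in resp_mode_sense()) pack a 64-bit capacity into an 8-byte field MSB first: the value is consumed one byte at a time from the least significant end, but stored from the end of the field backwards. A self-contained sketch of that packing, with an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t capac = 0x123456789abcdef0ULL;  /* e.g. a last-LBA value */
	unsigned char arr[8];

	for (int k = 0; k < 8; ++k, capac >>= 8)
		arr[7 - k] = capac & 0xff;       /* big-endian on the wire */

	for (int k = 0; k < 8; ++k)
		printf("%02x ", arr[k]);         /* prints 12 34 56 ... f0 */
	printf("\n");
	return 0;
}
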
1670 int k, offset, len, errsts, count, bump, na; in resp_rsup_opcodes() local
1721 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { in resp_rsup_opcodes()
1769 for (k = 0, oip = oip->arrp; k < na; in resp_rsup_opcodes()
1770 ++k, ++oip) { in resp_rsup_opcodes()
1774 supp = (k >= na) ? 1 : 3; in resp_rsup_opcodes()
1777 for (k = 0, oip = oip->arrp; k < na; in resp_rsup_opcodes()
1778 ++k, ++oip) { in resp_rsup_opcodes()
1782 supp = (k >= na) ? 1 : 3; in resp_rsup_opcodes()
1789 for (k = 1; k < u; ++k) in resp_rsup_opcodes()
1790 arr[4 + k] = (k < 16) ? in resp_rsup_opcodes()
1791 oip->len_mask[k] : 0xff; in resp_rsup_opcodes()
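
The resp_rsup_opcodes() lines above copy a per-opcode CDB usage mask (len_mask, byte 0 holding the CDB length) into the "one command" descriptor of a REPORT SUPPORTED OPERATION CODES response, padding with 0xff past the 16 stored mask bytes. The sketch below is an assumption-laden userspace reconstruction: struct opcode_info and fill_one_cmd_desc() are simplified stand-ins for the driver's opcode table entry and response building, and the header byte layout follows my reading of the SPC one_command format rather than the driver verbatim.

#include <stdio.h>

struct opcode_info {
	unsigned char opcode;            /* operation code being reported */
	unsigned char len_mask[16];      /* [0] = CDB length, [1..] = usage bits */
};

static int fill_one_cmd_desc(unsigned char *arr, const struct opcode_info *oip,
			     int supp)
{
	int u = oip->len_mask[0];        /* CDB size in bytes */
	int k;

	arr[0] = 0;
	arr[1] = supp & 0x7;             /* 3 = supported, 1 = not supported */
	arr[2] = (u >> 8) & 0xff;        /* CDB size, big endian */
	arr[3] = u & 0xff;
	arr[4] = oip->opcode;            /* usage data byte 0: the opcode */
	for (k = 1; k < u; ++k)
		arr[4 + k] = (k < 16) ? oip->len_mask[k] : 0xff;
	return 4 + u;                    /* descriptor length written */
}

int main(void)
{
	/* hypothetical 10-byte CDB whose byte 2 only allows its low 5 bits */
	struct opcode_info oi = {
		.opcode = 0x28,
		.len_mask = { 10, 0xff, 0x1f, 0xff, 0xff,
			      0xff, 0xff, 0xff, 0xff, 0xc7 },
	};
	unsigned char arr[4 + 16] = { 0 };
	int len = fill_one_cmd_desc(arr, &oi, 3);

	for (int k = 0; k < len; ++k)
		printf("%02x ", arr[k]);
	printf("\n");
	return 0;
}
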
2006 int k, alloc_len, msense_6, offset, len, target_dev_id; in resp_mode_sense() local
2069 for (k = 0; k < 8; ++k, capac >>= 8) in resp_mode_sense()
2070 ap[7 - k] = capac & 0xff; in resp_mode_sense()
3483 int k, retval; in sdebug_q_cmd_complete() local
3491 k = find_last_bit(queued_in_use_bm, retval); in sdebug_q_cmd_complete()
3492 if ((k < scsi_debug_max_queue) || (k == retval)) in sdebug_q_cmd_complete()
3495 atomic_set(&retired_max_queue, k + 1); in sdebug_q_cmd_complete()
3543 int k, retval; in sdebug_q_cmd_hrt_complete() local
3551 k = find_last_bit(queued_in_use_bm, retval); in sdebug_q_cmd_hrt_complete()
3552 if ((k < scsi_debug_max_queue) || (k == retval)) in sdebug_q_cmd_hrt_complete()
3555 atomic_set(&retired_max_queue, k + 1); in sdebug_q_cmd_hrt_complete()
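
Both completion paths above shrink retired_max_queue after a slot is freed: find the highest still-busy slot with find_last_bit(); if nothing remains above the (lowered) max_queue, retirement is over, otherwise keep the ceiling just above the highest busy slot. A plain C sketch of that decision, with find_last_bit_word() standing in for find_last_bit() and illustrative values:

#include <stdio.h>

static int find_last_bit_word(unsigned long bm, int size)
{
	for (int k = size - 1; k >= 0; --k)
		if (bm & (1UL << k))
			return k;
	return size;                     /* "none set" == size */
}

int main(void)
{
	unsigned long queued_in_use_bm = (1UL << 2) | (1UL << 9);
	int scsi_debug_max_queue = 4;    /* lowered at runtime */
	int retired_max_queue = 10;      /* old ceiling still draining */
	int retval = retired_max_queue;
	int k;

	k = find_last_bit_word(queued_in_use_bm, retval);
	if (k < scsi_debug_max_queue || k == retval)
		retired_max_queue = 0;       /* nothing left above the new max */
	else
		retired_max_queue = k + 1;   /* slot k is still in flight */

	printf("retired_max_queue = %d\n", retired_max_queue);  /* 10 here */
	return 0;
}
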
3666 int k, qmax, r_qmax; in stop_queued_cmnd() local
3675 for (k = 0; k < qmax; ++k) { in stop_queued_cmnd()
3676 if (test_bit(k, queued_in_use_bm)) { in stop_queued_cmnd()
3677 sqcp = &queued_arr[k]; in stop_queued_cmnd()
3698 clear_bit(k, queued_in_use_bm); in stop_queued_cmnd()
3711 int k; in stop_all_queued() local
3716 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { in stop_all_queued()
3717 if (test_bit(k, queued_in_use_bm)) { in stop_all_queued()
3718 sqcp = &queued_arr[k]; in stop_all_queued()
3739 clear_bit(k, queued_in_use_bm); in stop_all_queued()
3751 int k; in free_all_queued() local
3755 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { in free_all_queued()
3756 sqcp = &queued_arr[k]; in free_all_queued()
3803 int k = 0; in scsi_debug_target_reset() local
3823 ++k; in scsi_debug_target_reset()
3828 "%s: %d device(s) found in target\n", __func__, k); in scsi_debug_target_reset()
3839 int k = 0; in scsi_debug_bus_reset() local
3855 ++k; in scsi_debug_bus_reset()
3861 "%s: %d device(s) found in host\n", __func__, k); in scsi_debug_bus_reset()
3870 int k = 0; in scsi_debug_host_reset() local
3880 ++k; in scsi_debug_host_reset()
3887 "%s: %d device(s) found\n", __func__, k); in scsi_debug_host_reset()
3896 int sectors_per_part, num_sectors, k; in sdebug_build_parts() local
3912 for (k = 1; k < scsi_debug_num_parts; ++k) in sdebug_build_parts()
3913 starts[k] = ((k * sectors_per_part) / heads_by_sects) in sdebug_build_parts()
3921 for (k = 0; starts[k + 1]; ++k, ++pp) { in sdebug_build_parts()
3922 start_sec = starts[k]; in sdebug_build_parts()
3923 end_sec = starts[k + 1] - 1; in sdebug_build_parts()
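
The sdebug_build_parts() lines compute partition start sectors by rounding each partition's nominal offset down to a cylinder boundary (heads * sectors per track), then emit [start, end] pairs from consecutive entries of the starts[] array. A sketch of that geometry math, assuming illustrative (not default) heads/sectors/partition values:

#include <stdio.h>

int main(void)
{
	int sdebug_heads = 8, sdebug_sectors_per = 32;
	int scsi_debug_num_parts = 4;
	int num_sectors = 65536;
	int heads_by_sects = sdebug_heads * sdebug_sectors_per;
	int sectors_per_part = num_sectors / scsi_debug_num_parts;
	int starts[4 + 2], k;

	starts[0] = sdebug_sectors_per;           /* skip the first track */
	for (k = 1; k < scsi_debug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;     /* cylinder-align each start */
	starts[scsi_debug_num_parts] = num_sectors;
	starts[scsi_debug_num_parts + 1] = 0;     /* terminator */

	for (k = 0; starts[k + 1]; ++k) {
		int start_sec = starts[k];
		int end_sec = starts[k + 1] - 1;
		printf("part %d: sectors %d..%d\n", k, start_sec, end_sec);
	}
	return 0;
}
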
3947 int k, num_in_q, qdepth, inject; in schedule_resp() local
3986 k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue); in schedule_resp()
3987 if (k >= scsi_debug_max_queue) { in schedule_resp()
4004 __set_bit(k, queued_in_use_bm); in schedule_resp()
4006 sqcp = &queued_arr[k]; in schedule_resp()
4019 sqcp->cmnd_timerp->data = k; in schedule_resp()
4034 sd_hp->qa_indx = k; in schedule_resp()
4044 sdebug_q_cmd_complete, k); in schedule_resp()
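
schedule_resp() allocates a queue slot by scanning the in-use bitmap for the first free index below max_queue, marking it busy, and stashing the index where the timer/hrtimer completion can retrieve it. Below is a minimal userspace sketch of that slot allocation; alloc_queue_slot() and in_use_index are hypothetical names, and plain bit operations stand in for find_first_zero_bit()/__set_bit().

#include <stdio.h>

#define SCSI_DEBUG_CANQUEUE 32

struct sdebug_queued_cmd {
	int in_use_index;                /* stand-in for cmnd_timerp->data / qa_indx */
};

static unsigned long queued_in_use_bm;
static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];

static int alloc_queue_slot(int scsi_debug_max_queue)
{
	int k;

	for (k = 0; k < scsi_debug_max_queue; ++k)   /* find_first_zero_bit */
		if (!(queued_in_use_bm & (1UL << k)))
			break;
	if (k >= scsi_debug_max_queue)
		return -1;                           /* queue full */

	queued_in_use_bm |= 1UL << k;                /* __set_bit(k, ...) */
	queued_arr[k].in_use_index = k;              /* completion finds us by k */
	return k;
}

int main(void)
{
	int k = alloc_queue_slot(4);

	printf("allocated slot %d\n", k);            /* prints 0 */
	if (k >= 0)
		queued_in_use_bm &= ~(1UL << k);     /* completion: clear_bit */
	return 0;
}
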
4244 int k; in delay_store() local
4247 k = find_first_bit(queued_in_use_bm, in delay_store()
4249 if (k != scsi_debug_max_queue) in delay_store()
4273 int ndelay, res, k; in ndelay_store() local
4280 k = find_first_bit(queued_in_use_bm, in ndelay_store()
4282 if (k != scsi_debug_max_queue) in ndelay_store()
4514 int n, k; in max_queue_store() local
4519 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE); in max_queue_store()
4521 if (SCSI_DEBUG_CANQUEUE == k) in max_queue_store()
4523 else if (k >= n) in max_queue_store()
4524 atomic_set(&retired_max_queue, k + 1); in max_queue_store()
4703 int k; in host_lock_store() local
4706 k = find_first_bit(queued_in_use_bm, in host_lock_store()
4708 if (k != scsi_debug_max_queue) in host_lock_store()
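
delay_store(), ndelay_store() and host_lock_store() all use the same guard: find_first_bit() over the in-use bitmap returns scsi_debug_max_queue only when no command is queued, and only then is the parameter changed. A plain C sketch of that "is the queue idle?" check; queue_is_idle() and find_first_bit_word() are stand-in names.

#include <stdbool.h>
#include <stdio.h>

static int find_first_bit_word(unsigned long bm, int nbits)
{
	for (int k = 0; k < nbits; ++k)
		if (bm & (1UL << k))
			return k;
	return nbits;
}

static bool queue_is_idle(unsigned long queued_in_use_bm,
			  int scsi_debug_max_queue)
{
	int k = find_first_bit_word(queued_in_use_bm, scsi_debug_max_queue);

	return k == scsi_debug_max_queue;   /* no busy slot found */
}

int main(void)
{
	printf("idle: %d\n", queue_is_idle(0UL, 16));        /* 1: safe to change */
	printf("idle: %d\n", queue_is_idle(1UL << 3, 16));   /* 0: commands queued */
	return 0;
}
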
4783 int k; in scsi_debug_init() local
4953 for (k = 0; k < host_to_add; k++) { in scsi_debug_init()
4956 __func__, k); in scsi_debug_init()
4983 int k = scsi_debug_add_host; in scsi_debug_exit() local
4987 for (; k; k--) in scsi_debug_exit()
5012 int k, devs_per_host; in sdebug_add_adapter() local
5027 for (k = 0; k < devs_per_host; k++) { in sdebug_add_adapter()
5160 int k, na; in scsi_debug_queuecommand() local
5179 for (k = 0, n = 0; k < len && n < sb; ++k) in scsi_debug_queuecommand()
5181 (u32)cmd[k]); in scsi_debug_queuecommand()
5206 for (k = 0; k <= na; oip = r_oip->arrp + k++) { in scsi_debug_queuecommand()
5211 for (k = 0; k <= na; oip = r_oip->arrp + k++) { in scsi_debug_queuecommand()
5216 if (k > na) { in scsi_debug_queuecommand()
5242 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) { in scsi_debug_queuecommand()
5243 rem = ~oip->len_mask[k] & cmd[k]; in scsi_debug_queuecommand()
5249 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j); in scsi_debug_queuecommand()
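
The final scsi_debug_queuecommand() lines validate the incoming CDB against the opcode's per-byte usage mask: any bit set in cmd[k] outside len_mask[k] yields ILLEGAL REQUEST / invalid field in CDB, pointing at byte k and the most significant offending bit j. A self-contained sketch of that check; the mask and CDB bytes are illustrative, not a real opcode table entry.

#include <stdio.h>

int main(void)
{
	/* len_mask[0] is the CDB length, the rest are allowed-bit masks */
	unsigned char len_mask[16] = { 6, 0x01, 0x1f, 0xff, 0xff, 0xc7 };
	unsigned char cmd[6] = { 0x12, 0x40, 0x00, 0x00, 0x24, 0x00 };
	unsigned char rem;
	int k, j;

	for (k = 1; k < len_mask[0] && k < 16; ++k) {
		rem = ~len_mask[k] & cmd[k];     /* bits set outside the mask */
		if (rem) {
			/* locate the most significant offending bit, as the
			 * driver does before mk_sense_invalid_fld() */
			for (j = 7; j >= 0; --j)
				if (rem & (1 << j))
					break;
			printf("invalid field in CDB: byte %d, bit %d\n", k, j);
			return 1;
		}
	}
	printf("CDB passes the usage-mask check\n");
	return 0;
}
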