queue_depth 928 arch/um/drivers/ubd_kern.c ubd_dev->tag_set.queue_depth = 64;
queue_depth 226 arch/um/drivers/vector_kern.c int queue_depth;
queue_depth 234 arch/um/drivers/vector_kern.c qi->queue_depth -= advance;
queue_depth 240 arch/um/drivers/vector_kern.c if (qi->queue_depth == 0) {
queue_depth 244 arch/um/drivers/vector_kern.c queue_depth = qi->queue_depth;
queue_depth 246 arch/um/drivers/vector_kern.c return queue_depth;
queue_depth 256 arch/um/drivers/vector_kern.c int queue_depth;
queue_depth 262 arch/um/drivers/vector_kern.c qi->queue_depth += advance;
queue_depth 263 arch/um/drivers/vector_kern.c queue_depth = qi->queue_depth;
queue_depth 265 arch/um/drivers/vector_kern.c return queue_depth;
queue_depth 312 arch/um/drivers/vector_kern.c int queue_depth;
queue_depth 319 arch/um/drivers/vector_kern.c queue_depth = qi->queue_depth;
queue_depth 325 arch/um/drivers/vector_kern.c if (queue_depth < qi->max_depth) {
queue_depth 339 arch/um/drivers/vector_kern.c queue_depth = vector_advancetail(qi, 1);
queue_depth 343 arch/um/drivers/vector_kern.c return queue_depth;
queue_depth 352 arch/um/drivers/vector_kern.c return queue_depth;
queue_depth 387 arch/um/drivers/vector_kern.c int result = 0, send_len, queue_depth = qi->max_depth;
queue_depth 392 arch/um/drivers/vector_kern.c queue_depth = qi->queue_depth;
queue_depth 394 arch/um/drivers/vector_kern.c while (queue_depth > 0) {
queue_depth 396 arch/um/drivers/vector_kern.c send_len = queue_depth;
queue_depth 426 arch/um/drivers/vector_kern.c queue_depth =
queue_depth 452 arch/um/drivers/vector_kern.c return queue_depth;
queue_depth 571 arch/um/drivers/vector_kern.c result->queue_depth = 0;
queue_depth 659 arch/um/drivers/vector_kern.c if (qi->queue_depth == 0)
queue_depth 661 arch/um/drivers/vector_kern.c for (i = 0; i < qi->queue_depth; i++) {
queue_depth 671 arch/um/drivers/vector_kern.c qi->queue_depth = 0;
queue_depth 965 arch/um/drivers/vector_kern.c qi->queue_depth = packet_count;
queue_depth 1043 arch/um/drivers/vector_kern.c int queue_depth = 0;
queue_depth 1062 arch/um/drivers/vector_kern.c queue_depth = vector_enqueue(vp->tx_queue, skb);
queue_depth 1068 arch/um/drivers/vector_kern.c if (queue_depth >= vp->tx_queue->max_depth - 1) {
queue_depth 1214 arch/um/drivers/vector_kern.c vp->rx_queue->queue_depth = get_depth(vp->parsed);
queue_depth 43 arch/um/drivers/vector_kern.h int queue_depth, head, tail, max_depth, max_iov_frags;
queue_depth 978 block/blk-iolatency.c iolat->rq_depth.queue_depth = blkg->q->nr_requests;
queue_depth 980 block/blk-iolatency.c iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
queue_depth 532 block/blk-mq-sched.c q->nr_requests = q->tag_set->queue_depth;
queue_depth 541 block/blk-mq-sched.c q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
queue_depth 2453 block/blk-mq.c set->queue_depth, set->reserved_tags);
queue_depth 2458 block/blk-mq.c set->queue_depth);
queue_depth 2730 block/blk-mq.c unsigned int queue_depth,
queue_depth 2740 block/blk-mq.c set->queue_depth = queue_depth;
queue_depth 2919 block/blk-mq.c q->nr_requests = set->queue_depth;
queue_depth 2985 block/blk-mq.c depth = set->queue_depth;
queue_depth 2991 block/blk-mq.c set->queue_depth >>= 1;
queue_depth 2992 block/blk-mq.c if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
queue_depth 2996 block/blk-mq.c } while (set->queue_depth);
queue_depth 2998 block/blk-mq.c if (!set->queue_depth || err) {
queue_depth 3003 block/blk-mq.c if (depth != set->queue_depth)
queue_depth 3005 block/blk-mq.c depth, set->queue_depth);
queue_depth 3061 block/blk-mq.c if (!set->queue_depth)
queue_depth 3063 block/blk-mq.c if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
queue_depth 3072 block/blk-mq.c if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
queue_depth 3075 block/blk-mq.c set->queue_depth = BLK_MQ_MAX_DEPTH;
queue_depth 3091 block/blk-mq.c set->queue_depth = min(64U, set->queue_depth);
queue_depth 128 block/blk-rq-qos.c if (rqd->queue_depth == 1) {
queue_depth 144 block/blk-rq-qos.c rqd->queue_depth);
queue_depth 148 block/blk-rq-qos.c unsigned int maxd = 3 * rqd->queue_depth / 4;
queue_depth 56 block/blk-rq-qos.h unsigned int queue_depth;
queue_depth 811 block/blk-settings.c q->queue_depth = depth;
queue_depth 687 block/blk-wbt.c RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
queue_depth 380 block/bsg-lib.c set->queue_depth = 128;
queue_depth 1389 drivers/ata/libata-scsi.c int queue_depth)
queue_depth 1394 drivers/ata/libata-scsi.c if (queue_depth < 1 || queue_depth == sdev->queue_depth)
queue_depth 1395 drivers/ata/libata-scsi.c return sdev->queue_depth;
queue_depth 1399 drivers/ata/libata-scsi.c return sdev->queue_depth;
queue_depth 1404 drivers/ata/libata-scsi.c if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
queue_depth 1406 drivers/ata/libata-scsi.c queue_depth = 1;
queue_depth 1411 drivers/ata/libata-scsi.c queue_depth = min(queue_depth, sdev->host->can_queue);
queue_depth 1412 drivers/ata/libata-scsi.c queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
queue_depth 1413 drivers/ata/libata-scsi.c queue_depth = min(queue_depth, ATA_MAX_QUEUE);
queue_depth 1415 drivers/ata/libata-scsi.c if (sdev->queue_depth == queue_depth)
queue_depth 1418 drivers/ata/libata-scsi.c return scsi_change_queue_depth(sdev, queue_depth);
queue_depth 1436 drivers/ata/libata-scsi.c int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
queue_depth 1440 drivers/ata/libata-scsi.c return __ata_change_queue_depth(ap, sdev, queue_depth);
queue_depth 1918 drivers/ata/sata_nv.c sdev->queue_depth);
queue_depth 384 drivers/block/aoe/aoeblk.c set->queue_depth = 128;
queue_depth 2029 drivers/block/loop.c lo->tag_set.queue_depth = 128;
queue_depth 3623 drivers/block/mtip32xx/mtip32xx.c dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
queue_depth 1690 drivers/block/nbd.c nbd->tag_set.queue_depth = 128;
queue_depth 31 drivers/block/null_blk.h unsigned int queue_depth;
queue_depth 80 drivers/block/null_blk.h unsigned int queue_depth;
queue_depth 558 drivers/block/null_blk_main.c tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
queue_depth 559 drivers/block/null_blk_main.c if (tag >= nq->queue_depth)
queue_depth 1450 drivers/block/null_blk_main.c nq->queue_depth = nullb->queue_depth;
queue_depth 1476 drivers/block/null_blk_main.c nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
queue_depth 1480 drivers/block/null_blk_main.c tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
queue_depth 1487 drivers/block/null_blk_main.c for (i = 0; i < nq->queue_depth; i++) {
queue_depth 1505 drivers/block/null_blk_main.c nullb->queue_depth = nullb->dev->hw_queue_depth;
queue_depth 1563 drivers/block/null_blk_main.c set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
queue_depth 909 drivers/block/paride/pd.c disk->tag_set.queue_depth = 2;
queue_depth 857 drivers/block/rbd.c int queue_depth;
queue_depth 905 drivers/block/rbd.c pctx->opts->queue_depth = intval;
queue_depth 5146 drivers/block/rbd.c rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
queue_depth 6583 drivers/block/rbd.c pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
queue_depth 2838 drivers/block/skd_main.c skdev->tag_set.queue_depth = skd_max_queue_depth;
queue_depth 1464 drivers/block/sx8.c host->tag_set.queue_depth = max_queue;
queue_depth 794 drivers/block/virtio_blk.c module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
queue_depth 865 drivers/block/virtio_blk.c vblk->tag_set.queue_depth = virtblk_queue_depth;
queue_depth 977 drivers/block/xen-blkfront.c info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
queue_depth 979 drivers/block/xen-blkfront.c info->tag_set.queue_depth = BLK_RING_SIZE(info);
queue_depth 782 drivers/ide/ide-probe.c set->queue_depth = 32;
queue_depth 548 drivers/md/dm-rq.c md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
queue_depth 4078 drivers/message/fusion/mptsas.c if (current_depth > sdev->queue_depth) {
queue_depth 4082 drivers/message/fusion/mptsas.c "depth (%d)\n", sdev->queue_depth,
queue_depth 4087 drivers/message/fusion/mptsas.c sdev->queue_depth - 1);
queue_depth 2377 drivers/message/fusion/mptscsih.c ioc->name, sdev->queue_depth, vtarget->tflags));
queue_depth 428 drivers/mmc/core/queue.c mq->tag_set.queue_depth =
queue_depth 431 drivers/mmc/core/queue.c mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
queue_depth 420 drivers/mtd/ubi/block.c dev->tag_set.queue_depth = 64;
queue_depth 2462 drivers/nvme/host/fc.c ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
queue_depth 3121 drivers/nvme/host/fc.c ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
queue_depth 1627 drivers/nvme/host/pci.c dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
queue_depth 2280 drivers/nvme/host/pci.c dev->tagset.queue_depth =
queue_depth 730 drivers/nvme/host/rdma.c set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
queue_depth 743 drivers/nvme/host/rdma.c set->queue_depth = nctrl->sqsize + 1;
queue_depth 1474 drivers/nvme/host/tcp.c set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
queue_depth 1485 drivers/nvme/host/tcp.c set->queue_depth = nctrl->sqsize + 1;
queue_depth 342 drivers/nvme/target/loop.c ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
queue_depth 515 drivers/nvme/target/loop.c ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
queue_depth 44 drivers/s390/block/dasd.c static unsigned int queue_depth = 32;
queue_depth 47 drivers/s390/block/dasd.c module_param(queue_depth, uint, 0444);
queue_depth 48 drivers/s390/block/dasd.c MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
queue_depth 3275 drivers/s390/block/dasd.c block->tag_set.queue_depth = queue_depth;
queue_depth 456 drivers/s390/block/scm_blk.c bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
queue_depth 295 drivers/s390/crypto/ap_bus.c static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
queue_depth 308 drivers/s390/crypto/ap_bus.c *queue_depth = (int)(info & 0xff);
queue_depth 163 drivers/s390/crypto/ap_bus.h int queue_depth; /* AP queue depth.*/
queue_depth 46 drivers/s390/crypto/ap_card.c return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
queue_depth 173 drivers/s390/crypto/ap_card.c struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
queue_depth 187 drivers/s390/crypto/ap_card.c ac->queue_depth = queue_depth;
queue_depth 253 drivers/s390/crypto/ap_queue.c if (aq->queue_count < aq->card->queue_depth) {
queue_depth 25 drivers/s390/scsi/zfcp_scsi.c module_param_named(queue_depth, default_depth, uint, 0600);
queue_depth 26 drivers/s390/scsi/zfcp_scsi.c MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
queue_depth 600 drivers/scsi/53c700.c NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
queue_depth 1764 drivers/scsi/53c700.c if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
queue_depth 1279 drivers/scsi/aacraid/aacraid.h u16 queue_depth;
queue_depth 549 drivers/scsi/aacraid/linit.c return sdev->queue_depth;
queue_depth 142 drivers/scsi/arcmsr/arcmsr_hba.c static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
queue_depth 144 drivers/scsi/arcmsr/arcmsr_hba.c if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
queue_depth 145 drivers/scsi/arcmsr/arcmsr_hba.c queue_depth = ARCMSR_MAX_CMD_PERLUN;
queue_depth 146 drivers/scsi/arcmsr/arcmsr_hba.c return scsi_change_queue_depth(sdev, queue_depth);
queue_depth 90 drivers/scsi/bfa/bfad_im.c (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
queue_depth 118 drivers/scsi/bfa/bfad_im.c if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
queue_depth 878 drivers/scsi/bfa/bfad_im.c if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
queue_depth 882 drivers/scsi/bfa/bfad_im.c tmp_sdev->queue_depth + 1);
queue_depth 900 drivers/scsi/bfa/bfad_im.c scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
queue_depth 122 drivers/scsi/bnx2fc/bnx2fc_fcoe.c module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
queue_depth 123 drivers/scsi/bnx2fc/bnx2fc_fcoe.c MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
queue_depth 2572 drivers/scsi/cxlflash/main.c return sdev->queue_depth;
queue_depth 1379 drivers/scsi/hpsa.c h->dev[entry]->queue_depth = new_entry->queue_depth;
queue_depth 1514 drivers/scsi/hpsa.c if (dev1->queue_depth != dev2->queue_depth)
queue_depth 1724 drivers/scsi/hpsa.c logical_drive->phys_disk[i]->queue_depth);
queue_depth 1743 drivers/scsi/hpsa.c logical_drive->queue_depth = 8;
queue_depth 1751 drivers/scsi/hpsa.c logical_drive->queue_depth = qdepth;
queue_depth 1754 drivers/scsi/hpsa.c logical_drive->queue_depth = EXTERNAL_QD;
queue_depth 1756 drivers/scsi/hpsa.c logical_drive->queue_depth = h->nr_cmds;
queue_depth 2131 drivers/scsi/hpsa.c int queue_depth;
queue_depth 2139 drivers/scsi/hpsa.c queue_depth = EXTERNAL_QD;
queue_depth 2144 drivers/scsi/hpsa.c queue_depth = sd->queue_depth != 0 ?
queue_depth 2145 drivers/scsi/hpsa.c sd->queue_depth : sdev->host->can_queue;
queue_depth 2148 drivers/scsi/hpsa.c queue_depth = sdev->host->can_queue;
queue_depth 2150 drivers/scsi/hpsa.c scsi_change_queue_depth(sdev, queue_depth);
queue_depth 4003 drivers/scsi/hpsa.c this_device->queue_depth = h->nr_cmds;
queue_depth 4007 drivers/scsi/hpsa.c this_device->queue_depth = EXTERNAL_QD;
queue_depth 4176 drivers/scsi/hpsa.c dev->queue_depth =
queue_depth 4180 drivers/scsi/hpsa.c dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
queue_depth 5029 drivers/scsi/hpsa.c phys_disk->queue_depth) {
queue_depth 5769 drivers/scsi/hpsa.c else if (qdepth > logical_drive->queue_depth)
queue_depth 5770 drivers/scsi/hpsa.c qdepth = logical_drive->queue_depth;
queue_depth 78 drivers/scsi/hpsa.h u16 queue_depth; /* max queue_depth for this device */
queue_depth 1110 drivers/scsi/hptiop.c int queue_depth)
queue_depth 1114 drivers/scsi/hptiop.c if (queue_depth > hba->max_requests)
queue_depth 1115 drivers/scsi/hptiop.c queue_depth = hba->max_requests;
queue_depth 1116 drivers/scsi/hptiop.c return scsi_change_queue_depth(sdev, queue_depth);
queue_depth 4502 drivers/scsi/ipr.c return sdev->queue_depth;
queue_depth 269 drivers/scsi/lpfc/lpfc_scsi.c sdev->queue_depth * num_rsrc_err /
queue_depth 272 drivers/scsi/lpfc/lpfc_scsi.c new_queue_depth = sdev->queue_depth - 1;
queue_depth 274 drivers/scsi/lpfc/lpfc_scsi.c new_queue_depth = sdev->queue_depth -
queue_depth 66 drivers/scsi/qedf/qedf_main.c module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
queue_depth 67 drivers/scsi/qedf/qedf_main.c MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
queue_depth 3919 drivers/scsi/qla1280.c printk(", Tagged queuing: depth %d", device->queue_depth);
queue_depth 9038 drivers/scsi/qla4xxx/ql4_os.c int queue_depth = QL4_DEF_QDEPTH;
queue_depth 9047 drivers/scsi/qla4xxx/ql4_os.c queue_depth = ql4xmaxqdepth;
queue_depth 9049 drivers/scsi/qla4xxx/ql4_os.c scsi_change_queue_depth(sdev, queue_depth);
queue_depth 241 drivers/scsi/scsi.c sdev->queue_depth = depth;
queue_depth 248 drivers/scsi/scsi.c return sdev->queue_depth;
queue_depth 4285 drivers/scsi/scsi_debug.c qdepth = cmnd->device->queue_depth;
queue_depth 5566 drivers/scsi/scsi_debug.c return sdev->queue_depth;
queue_depth 649 drivers/scsi/scsi_error.c sdev->queue_depth >= sdev->max_queue_depth)
queue_depth 667 drivers/scsi/scsi_error.c tmp_sdev->queue_depth == sdev->max_queue_depth)
queue_depth 670 drivers/scsi/scsi_error.c scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
queue_depth 692 drivers/scsi/scsi_error.c scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
queue_depth 413 drivers/scsi/scsi_lib.c if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
queue_depth 1300 drivers/scsi/scsi_lib.c if (busy >= sdev->queue_depth)
queue_depth 1899 drivers/scsi/scsi_lib.c shost->tag_set.queue_depth = shost->can_queue;
queue_depth 982 drivers/scsi/scsi_scan.c sdev->max_queue_depth = sdev->queue_depth;
queue_depth 988 drivers/scsi/scsi_sysfs.c sdev->max_queue_depth = sdev->queue_depth;
queue_depth 992 drivers/scsi/scsi_sysfs.c sdev_show_function(queue_depth, "%d\n");
queue_depth 994 drivers/scsi/scsi_sysfs.c static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
queue_depth 2697 drivers/scsi/scsi_transport_iscsi.c uint16_t queue_depth)
queue_depth 2703 drivers/scsi/scsi_transport_iscsi.c session = transport->create_session(ep, cmds_max, queue_depth,
queue_depth 3530 drivers/scsi/scsi_transport_iscsi.c ev->u.c_session.queue_depth);
queue_depth 3543 drivers/scsi/scsi_transport_iscsi.c ev->u.c_bound_session.queue_depth);
queue_depth 986 drivers/scsi/sg.c __put_user((short) sdp->device->queue_depth,
queue_depth 2489 drivers/scsi/sg.c (int) scsidp->queue_depth,
queue_depth 926 drivers/scsi/smartpqi/smartpqi.h u16 queue_depth; /* max. queue_depth for this device */
queue_depth 1352 drivers/scsi/smartpqi/smartpqi_init.c device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
queue_depth 1358 drivers/scsi/smartpqi/smartpqi_init.c device->queue_depth =
queue_depth 1644 drivers/scsi/smartpqi/smartpqi_init.c " qd=%-6d", device->queue_depth);
queue_depth 1678 drivers/scsi/smartpqi/smartpqi_init.c existing_device->queue_depth = new_device->queue_depth;
queue_depth 1848 drivers/scsi/smartpqi/smartpqi_init.c if (device->sdev && device->queue_depth !=
queue_depth 1850 drivers/scsi/smartpqi/smartpqi_init.c device->advertised_queue_depth = device->queue_depth;
queue_depth 5794 drivers/scsi/smartpqi/smartpqi_init.c if (device->queue_depth) {
queue_depth 5795 drivers/scsi/smartpqi/smartpqi_init.c device->advertised_queue_depth = device->queue_depth;
queue_depth 105 drivers/scsi/snic/snic_main.c if (qsz < sdev->queue_depth)
queue_depth 107 drivers/scsi/snic/snic_main.c else if (qsz > sdev->queue_depth)
queue_depth 110 drivers/scsi/snic/snic_main.c atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
queue_depth 114 drivers/scsi/snic/snic_main.c return sdev->queue_depth;
queue_depth 378 drivers/scsi/storvsc_drv.c static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
queue_depth 1911 drivers/scsi/storvsc_drv.c static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth)
queue_depth 1913 drivers/scsi/storvsc_drv.c if (queue_depth > scsi_driver.can_queue)
queue_depth 1914 drivers/scsi/storvsc_drv.c queue_depth = scsi_driver.can_queue;
queue_depth 1916 drivers/scsi/storvsc_drv.c return scsi_change_queue_depth(sdev, queue_depth);
queue_depth 292 drivers/scsi/ufs/ufs-sysfs.c UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
queue_depth 619 drivers/target/iscsi/iscsi_target_configfs.c return sprintf(page, "%u\n", acl_to_nacl(item)->queue_depth);
queue_depth 41 drivers/target/iscsi/iscsi_target_device.c sess->cmdsn_window = se_nacl->queue_depth;
queue_depth 42 drivers/target/iscsi/iscsi_target_device.c atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
queue_depth 1050 drivers/target/iscsi/iscsi_target_nego.c u32 payload_length, queue_depth = 0;
queue_depth 1248 drivers/target/iscsi/iscsi_target_nego.c queue_depth = se_nacl->queue_depth;
queue_depth 1258 drivers/target/iscsi/iscsi_target_nego.c tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
queue_depth 542 drivers/target/target_core_configfs.c DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
queue_depth 1019 drivers/target/target_core_configfs.c if (val > dev->dev_attrib.queue_depth) {
queue_depth 1028 drivers/target/target_core_configfs.c da->queue_depth = dev->queue_depth = val;
queue_depth 1140 drivers/target/target_core_configfs.c CONFIGFS_ATTR(, queue_depth);
queue_depth 948 drivers/target/target_core_device.c dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
queue_depth 202 drivers/target/target_core_file.c fd_dev->fd_queue_depth = dev->queue_depth;
queue_depth 295 drivers/target/target_core_pscsi.c if (!sd->queue_depth) {
queue_depth 296 drivers/target/target_core_pscsi.c sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
queue_depth 300 drivers/target/target_core_pscsi.c sd->lun, sd->queue_depth);
queue_depth 307 drivers/target/target_core_pscsi.c dev->dev_attrib.hw_queue_depth = sd->queue_depth;
queue_depth 160 drivers/target/target_core_tpg.c struct se_node_acl *acl, u32 queue_depth)
queue_depth 162 drivers/target/target_core_tpg.c acl->queue_depth = queue_depth;
queue_depth 164 drivers/target/target_core_tpg.c if (!acl->queue_depth) {
queue_depth 168 drivers/target/target_core_tpg.c acl->queue_depth = 1;
queue_depth 176 drivers/target/target_core_tpg.c u32 queue_depth;
queue_depth 193 drivers/target/target_core_tpg.c queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
queue_depth 195 drivers/target/target_core_tpg.c queue_depth = 1;
queue_depth 196 drivers/target/target_core_tpg.c target_set_nacl_queue_depth(tpg, acl, queue_depth);
queue_depth 220 drivers/target/target_core_tpg.c acl->queue_depth,
queue_depth 371 drivers/target/target_core_tpg.c tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
queue_depth 383 drivers/target/target_core_tpg.c u32 queue_depth)
queue_depth 392 drivers/target/target_core_tpg.c if (acl->queue_depth == queue_depth)
queue_depth 399 drivers/target/target_core_tpg.c target_set_nacl_queue_depth(tpg, acl, queue_depth);
queue_depth 407 drivers/target/target_core_tpg.c " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
queue_depth 967 drivers/target/target_core_transport.c *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
queue_depth 104 include/linux/blk-mq.h unsigned int queue_depth; /* max hw supported */
queue_depth 416 include/linux/blkdev.h unsigned int queue_depth;
queue_depth 774 include/linux/blkdev.h if (q->queue_depth)
queue_depth 775 include/linux/blkdev.h return q->queue_depth;
queue_depth 1186 include/linux/libata.h int queue_depth);
queue_depth 1188 include/linux/libata.h int queue_depth);
queue_depth 102 include/scsi/iscsi_if.h uint16_t queue_depth;
queue_depth 108 include/scsi/iscsi_if.h uint16_t queue_depth;
queue_depth 115 include/scsi/scsi_device.h unsigned short queue_depth; /* How deep of a queue we want */
queue_depth 552 include/target/target_core_base.h u32 queue_depth;
queue_depth 691 include/target/target_core_base.h u32 queue_depth;
queue_depth 771 include/target/target_core_base.h u32 queue_depth;
queue_depth 459 include/uapi/linux/hdreg.h unsigned short queue_depth; /* (word 75)
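For orientation, here is a minimal sketch of the recurring pattern visible in the loop, nbd, rbd, virtio_blk and similar hits above: a blk-mq driver fills in tag_set.queue_depth before calling blk_mq_alloc_tag_set(). This is not taken from any file indexed above; the example_* names are illustrative placeholders, not kernel symbols.

#include <linux/blk-mq.h>
#include <linux/numa.h>

/* Hypothetical driver ops; a real driver must at least provide .queue_rq. */
static const struct blk_mq_ops example_mq_ops;

static struct blk_mq_tag_set example_tag_set;

static int example_init_tag_set(void)
{
	example_tag_set.ops          = &example_mq_ops;
	example_tag_set.nr_hw_queues = 1;
	example_tag_set.queue_depth  = 128;	/* requests per hardware queue */
	example_tag_set.numa_node    = NUMA_NO_NODE;
	example_tag_set.flags        = BLK_MQ_F_SHOULD_MERGE;

	/*
	 * blk_mq_alloc_tag_set() validates queue_depth (it must leave room
	 * for reserved tags and is clamped to BLK_MQ_MAX_DEPTH, see the
	 * block/blk-mq.c hits around lines 3061-3091 above) and allocates
	 * the per-hw-queue tag maps.
	 */
	return blk_mq_alloc_tag_set(&example_tag_set);
}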