sbq 1095 block/blk-mq.c struct sbitmap_queue *sbq;
sbq 1098 block/blk-mq.c sbq = &hctx->tags->bitmap_tags;
sbq 1099 block/blk-mq.c atomic_dec(&sbq->ws_active);
sbq 1116 block/blk-mq.c struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
sbq 1139 block/blk-mq.c wq = &bt_wait_ptr(sbq, hctx)->wait;
sbq 1149 block/blk-mq.c atomic_inc(&sbq->ws_active);
sbq 1170 block/blk-mq.c atomic_dec(&sbq->ws_active);
sbq 493 block/kyber-iosched.c khd->domain_wait[i].sbq = NULL;
sbq 1458 drivers/staging/qlge/qlge.h struct bq_desc *sbq; /* array of control blocks */
sbq 1786 drivers/staging/qlge/qlge_dbg.c pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
sbq 1066 drivers/staging/qlge/qlge_main.c struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
sbq 1202 drivers/staging/qlge/qlge_main.c sbq_desc = &rx_ring->sbq[clean_idx];
sbq 2861 drivers/staging/qlge/qlge_main.c sbq_desc = &rx_ring->sbq[i];
sbq 2890 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq)
sbq 2931 drivers/staging/qlge/qlge_main.c memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
sbq 2933 drivers/staging/qlge/qlge_main.c sbq_desc = &rx_ring->sbq[i];
sbq 2953 drivers/staging/qlge/qlge_main.c kfree(rx_ring->sbq);
sbq 2954 drivers/staging/qlge/qlge_main.c rx_ring->sbq = NULL;
sbq 3012 drivers/staging/qlge/qlge_main.c rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
sbq 3015 drivers/staging/qlge/qlge_main.c if (rx_ring->sbq == NULL)
sbq 147 drivers/target/iscsi/iscsi_target_util.c struct sbitmap_queue *sbq;
sbq 152 drivers/target/iscsi/iscsi_target_util.c sbq = &se_sess->sess_tag_pool;
sbq 153 drivers/target/iscsi/iscsi_target_util.c ws = &sbq->ws[0];
sbq 155 drivers/target/iscsi/iscsi_target_util.c sbitmap_prepare_to_wait(sbq, ws, &wait, state);
sbq 158 drivers/target/iscsi/iscsi_target_util.c tag = sbitmap_queue_get(sbq, cpup);
sbq 164 drivers/target/iscsi/iscsi_target_util.c sbitmap_finish_wait(sbq, ws, &wait);
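The iscsi_target_util.c hits above all belong to one blocking tag-allocation loop. A minimal sketch of that pattern follows; example_get_tag() and its parameters are illustrative names, not the driver's actual function:

#include <linux/sbitmap.h>
#include <linux/sched/signal.h>

static int example_get_tag(struct sbitmap_queue *sbq, int state,
			   unsigned int *cpup)
{
	struct sbq_wait_state *ws = &sbq->ws[0];
	DEFINE_SBQ_WAIT(wait);
	int tag = -1;

	for (;;) {
		/* Queues us on ws and accounts the wait in sbq->ws_active. */
		sbitmap_prepare_to_wait(sbq, ws, &wait, state);
		if (signal_pending_state(state, current))
			break;
		tag = sbitmap_queue_get(sbq, cpup);
		if (tag >= 0)
			break;
		schedule();	/* woken when enough tags are freed */
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}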
sbq 375 include/linux/sbitmap.h int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq 383 include/linux/sbitmap.h static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
sbq 385 include/linux/sbitmap.h kfree(sbq->ws);
sbq 386 include/linux/sbitmap.h free_percpu(sbq->alloc_hint);
sbq 387 include/linux/sbitmap.h sbitmap_free(&sbq->sb);
sbq 399 include/linux/sbitmap.h void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
sbq 408 include/linux/sbitmap.h int __sbitmap_queue_get(struct sbitmap_queue *sbq);
sbq 423 include/linux/sbitmap.h int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
sbq 435 include/linux/sbitmap.h static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
sbq 441 include/linux/sbitmap.h nr = __sbitmap_queue_get(sbq);
sbq 460 include/linux/sbitmap.h static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
sbq 467 include/linux/sbitmap.h nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
sbq 486 include/linux/sbitmap.h void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
sbq 496 include/linux/sbitmap.h void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
sbq 517 include/linux/sbitmap.h static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
sbq 522 include/linux/sbitmap.h ws = &sbq->ws[atomic_read(wait_index)];
sbq 532 include/linux/sbitmap.h void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
sbq 539 include/linux/sbitmap.h void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
sbq 549 include/linux/sbitmap.h void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
sbq 552 include/linux/sbitmap.h struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */
sbq 558 include/linux/sbitmap.h .sbq = NULL, \
sbq 570 include/linux/sbitmap.h void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
sbq 577 include/linux/sbitmap.h void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
sbq 583 include/linux/sbitmap.h void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
sbq 338 lib/sbitmap.c static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
sbq 360 lib/sbitmap.c shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
sbq 361 lib/sbitmap.c depth = ((depth >> sbq->sb.shift) * shallow_depth +
sbq 362 lib/sbitmap.c min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
sbq 369 lib/sbitmap.c int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq 375 lib/sbitmap.c ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
sbq 379 lib/sbitmap.c sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
sbq 380 lib/sbitmap.c if (!sbq->alloc_hint) {
sbq 381 lib/sbitmap.c sbitmap_free(&sbq->sb);
sbq 387 lib/sbitmap.c *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
sbq 390 lib/sbitmap.c sbq->min_shallow_depth = UINT_MAX;
sbq 391 lib/sbitmap.c sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
sbq 392 lib/sbitmap.c atomic_set(&sbq->wake_index, 0);
sbq 393 lib/sbitmap.c atomic_set(&sbq->ws_active, 0);
sbq 395 lib/sbitmap.c sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
sbq 396 lib/sbitmap.c if (!sbq->ws) {
sbq 397 lib/sbitmap.c free_percpu(sbq->alloc_hint);
sbq 398 lib/sbitmap.c sbitmap_free(&sbq->sb);
sbq 403 lib/sbitmap.c init_waitqueue_head(&sbq->ws[i].wait);
sbq 404 lib/sbitmap.c atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
sbq 407 lib/sbitmap.c sbq->round_robin = round_robin;
sbq 412 lib/sbitmap.c static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
sbq 415 lib/sbitmap.c unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
sbq 418 lib/sbitmap.c if (sbq->wake_batch != wake_batch) {
sbq 419 lib/sbitmap.c WRITE_ONCE(sbq->wake_batch, wake_batch);
sbq 427 lib/sbitmap.c atomic_set(&sbq->ws[i].wait_cnt, 1);
sbq 431 lib/sbitmap.c void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
sbq 433 lib/sbitmap.c sbitmap_queue_update_wake_batch(sbq, depth);
sbq 434 lib/sbitmap.c sbitmap_resize(&sbq->sb, depth);
sbq 438 lib/sbitmap.c int __sbitmap_queue_get(struct sbitmap_queue *sbq)
sbq 443 lib/sbitmap.c hint = this_cpu_read(*sbq->alloc_hint);
sbq 444 lib/sbitmap.c depth = READ_ONCE(sbq->sb.depth);
sbq 447 lib/sbitmap.c this_cpu_write(*sbq->alloc_hint, hint);
sbq 449 lib/sbitmap.c nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
sbq 453 lib/sbitmap.c this_cpu_write(*sbq->alloc_hint, 0);
sbq 454 lib/sbitmap.c } else if (nr == hint || unlikely(sbq->round_robin)) {
sbq 459 lib/sbitmap.c this_cpu_write(*sbq->alloc_hint, hint);
sbq 466 lib/sbitmap.c int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
sbq 472 lib/sbitmap.c WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
sbq 474 lib/sbitmap.c hint = this_cpu_read(*sbq->alloc_hint);
sbq 475 lib/sbitmap.c depth = READ_ONCE(sbq->sb.depth);
sbq 478 lib/sbitmap.c this_cpu_write(*sbq->alloc_hint, hint);
sbq 480 lib/sbitmap.c nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
sbq 484 lib/sbitmap.c this_cpu_write(*sbq->alloc_hint, 0);
sbq 485 lib/sbitmap.c } else if (nr == hint || unlikely(sbq->round_robin)) {
sbq 490 lib/sbitmap.c this_cpu_write(*sbq->alloc_hint, hint);
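The declarations and the init/get implementations above combine into a simple lifecycle. A minimal sketch of the init/get/clear/free round trip, with an illustrative depth and function name:

#include <linux/sbitmap.h>

static int example_round_trip(void)
{
	struct sbitmap_queue sbq;
	unsigned int cpu;
	int nr, ret;

	/* 128 tags, default bits per word (shift == -1), first-fit. */
	ret = sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
				      NUMA_NO_NODE);
	if (ret)
		return ret;

	nr = sbitmap_queue_get(&sbq, &cpu);	/* -1 when no tag is free */
	if (nr >= 0)
		/* Frees the tag and wakes waiters once a batch has cleared. */
		sbitmap_queue_clear(&sbq, nr, cpu);

	sbitmap_queue_free(&sbq);
	return 0;
}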
sbq 497 lib/sbitmap.c void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
sbq 500 lib/sbitmap.c sbq->min_shallow_depth = min_shallow_depth;
sbq 501 lib/sbitmap.c sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
sbq 505 lib/sbitmap.c static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
sbq 509 lib/sbitmap.c if (!atomic_read(&sbq->ws_active))
sbq 512 lib/sbitmap.c wake_index = atomic_read(&sbq->wake_index);
sbq 514 lib/sbitmap.c struct sbq_wait_state *ws = &sbq->ws[wake_index];
sbq 517 lib/sbitmap.c if (wake_index != atomic_read(&sbq->wake_index))
sbq 518 lib/sbitmap.c atomic_set(&sbq->wake_index, wake_index);
sbq 528 lib/sbitmap.c static bool __sbq_wake_up(struct sbitmap_queue *sbq)
sbq 534 lib/sbitmap.c ws = sbq_wake_ptr(sbq);
sbq 542 lib/sbitmap.c wake_batch = READ_ONCE(sbq->wake_batch);
sbq 558 lib/sbitmap.c sbq_index_atomic_inc(&sbq->wake_index);
sbq 569 lib/sbitmap.c void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
sbq 571 lib/sbitmap.c while (__sbq_wake_up(sbq))
sbq 576 lib/sbitmap.c void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
sbq 590 lib/sbitmap.c sbitmap_deferred_clear_bit(&sbq->sb, nr);
sbq 599 lib/sbitmap.c sbitmap_queue_wake_up(sbq);
sbq 601 lib/sbitmap.c if (likely(!sbq->round_robin && nr < sbq->sb.depth))
sbq 602 lib/sbitmap.c *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
sbq 606 lib/sbitmap.c void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
sbq 615 lib/sbitmap.c wake_index = atomic_read(&sbq->wake_index);
sbq 617 lib/sbitmap.c struct sbq_wait_state *ws = &sbq->ws[wake_index];
sbq 627 lib/sbitmap.c void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
sbq 632 lib/sbitmap.c sbitmap_show(&sbq->sb, m);
sbq 640 lib/sbitmap.c seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
sbq 644 lib/sbitmap.c seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
sbq 645 lib/sbitmap.c seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
sbq 646 lib/sbitmap.c seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
sbq 650 lib/sbitmap.c struct sbq_wait_state *ws = &sbq->ws[i];
sbq 658 lib/sbitmap.c seq_printf(m, "round_robin=%d\n", sbq->round_robin);
sbq 659 lib/sbitmap.c seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
sbq 663 lib/sbitmap.c void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
sbq 667 lib/sbitmap.c if (!sbq_wait->sbq) {
sbq 668 lib/sbitmap.c sbq_wait->sbq = sbq;
sbq 669 lib/sbitmap.c atomic_inc(&sbq->ws_active);
sbq 678 lib/sbitmap.c if (sbq_wait->sbq) {
sbq 679 lib/sbitmap.c atomic_dec(&sbq_wait->sbq->ws_active);
sbq 680 lib/sbitmap.c sbq_wait->sbq = NULL;
sbq 685 lib/sbitmap.c void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
sbq 689 lib/sbitmap.c if (!sbq_wait->sbq) {
sbq 690 lib/sbitmap.c atomic_inc(&sbq->ws_active);
sbq 691 lib/sbitmap.c sbq_wait->sbq = sbq;
sbq 697 lib/sbitmap.c void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
sbq 701 lib/sbitmap.c if (sbq_wait->sbq) {
sbq 702 lib/sbitmap.c atomic_dec(&sbq->ws_active);
sbq 703 lib/sbitmap.c sbq_wait->sbq = NULL;
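The hits in sbq_wake_ptr() and the wait helpers above show the ws_active invariant: wakeups are skipped entirely while ws_active is zero, so any wait entry managed outside sbitmap_prepare_to_wait() must be registered with sbitmap_add_wait_queue(). A minimal sketch, with a hypothetical function name and caller-managed struct sbq_wait:

#include <linux/sbitmap.h>

static void example_wait_entry(struct sbitmap_queue *sbq,
			       struct sbq_wait *sbq_wait)
{
	struct sbq_wait_state *ws = &sbq->ws[0];

	/* First add sets sbq_wait->sbq and increments sbq->ws_active. */
	sbitmap_add_wait_queue(sbq, ws, sbq_wait);

	/*
	 * ... sleep; sbitmap_queue_clear() -> sbitmap_queue_wake_up()
	 * wakes this entry once wake_batch clears have accumulated ...
	 */

	/* Drops ws_active and clears sbq_wait->sbq again. */
	sbitmap_del_wait_queue(sbq_wait);
}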