hb 2782 drivers/atm/fore200e.c u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
hb 2788 drivers/atm/fore200e.c if (hb >> 16 != 0xDEAD)
hb 2789 drivers/atm/fore200e.c len += sprintf(page + len, "0x%08x\n", hb);
hb 2791 drivers/atm/fore200e.c len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
hb 200 drivers/atm/nicstar.c struct sk_buff *hb;
hb 225 drivers/atm/nicstar.c while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
hb 226 drivers/atm/nicstar.c dev_kfree_skb_any(hb);
hb 646 drivers/atm/nicstar.c struct sk_buff *hb;
hb 647 drivers/atm/nicstar.c hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
hb 648 drivers/atm/nicstar.c if (hb == NULL) {
hb 656 drivers/atm/nicstar.c NS_PRV_BUFTYPE(hb) = BUF_NONE;
hb 657 drivers/atm/nicstar.c skb_queue_tail(&card->hbpool.queue, hb);
hb 835 drivers/atm/nicstar.c struct sk_buff *hb;
hb 836 drivers/atm/nicstar.c while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
hb 837 drivers/atm/nicstar.c dev_kfree_skb_any(hb);
hb 2215 drivers/atm/nicstar.c struct sk_buff *hb, *sb, *lb;
hb 2219 drivers/atm/nicstar.c hb = skb_dequeue(&(card->hbpool.queue));
hb 2220 drivers/atm/nicstar.c if (hb == NULL) { /* No buffers in the queue */
hb 2222 drivers/atm/nicstar.c hb = dev_alloc_skb(NS_HBUFSIZE);
hb 2223 drivers/atm/nicstar.c if (hb == NULL) {
hb 2245 drivers/atm/nicstar.c NS_PRV_BUFTYPE(hb) = BUF_NONE;
hb 2270 drivers/atm/nicstar.c if (!atm_charge(vcc, hb->truesize)) {
hb 2274 drivers/atm/nicstar.c skb_queue_tail(&card->hbpool.queue, hb);
hb 2277 drivers/atm/nicstar.c dev_kfree_skb_any(hb);
hb 2282 drivers/atm/nicstar.c skb_copy_from_linear_data(sb, hb->data,
hb 2284 drivers/atm/nicstar.c skb_put(hb, iov->iov_len);
hb 2297 drivers/atm/nicstar.c (hb), tocopy);
hb 2298 drivers/atm/nicstar.c skb_put(hb, tocopy);
hb 2304 drivers/atm/nicstar.c if (remaining != 0 || hb->len != len)
hb 2309 drivers/atm/nicstar.c ATM_SKB(hb)->vcc = vcc;
hb 2310 drivers/atm/nicstar.c __net_timestamp(hb);
hb 2311 drivers/atm/nicstar.c vcc->push(vcc, hb);
hb 2601 drivers/atm/nicstar.c struct sk_buff *hb;
hb 2604 drivers/atm/nicstar.c hb = skb_dequeue(&card->hbpool.queue);
hb 2607 drivers/atm/nicstar.c if (hb == NULL)
hb 2612 drivers/atm/nicstar.c dev_kfree_skb_any(hb);
hb 2616 drivers/atm/nicstar.c struct sk_buff *hb;
hb 2618 drivers/atm/nicstar.c hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
hb 2619 drivers/atm/nicstar.c if (hb == NULL)
hb 2621 drivers/atm/nicstar.c NS_PRV_BUFTYPE(hb) = BUF_NONE;
hb 2623 drivers/atm/nicstar.c skb_queue_tail(&card->hbpool.queue, hb);
hb 146 drivers/gpu/drm/vmwgfx/vmwgfx_msg.c const char *msg, bool hb)
hb 151 drivers/gpu/drm/vmwgfx/vmwgfx_msg.c if (hb) {
hb 201 drivers/gpu/drm/vmwgfx/vmwgfx_msg.c unsigned long reply_len, bool hb)
hb 205 drivers/gpu/drm/vmwgfx/vmwgfx_msg.c if (hb) {
hb 56 drivers/hwmon/w83773g.c static inline long temp_of_remote(s8 hb, u8 lb)
hb 58 drivers/hwmon/w83773g.c return (hb << 3 | lb >> 5) * 125;
hb 1302 drivers/md/dm-cache-policy-smq.c dm_oblock_t hb = to_hblock(mq, b);
hb 1303 drivers/md/dm-cache-policy-smq.c struct entry *e = h_lookup(&mq->hotspot_table, hb);
hb 1329 drivers/md/dm-cache-policy-smq.c e->oblock = hb;
hb 461 drivers/media/i2c/mt9v111.c unsigned int hb;
hb 492 drivers/media/i2c/mt9v111.c best_fps = vb = hb = 0;
hb 500 drivers/media/i2c/mt9v111.c for (hb = MT9V111_CORE_R05_MIN_HBLANK;
hb 501 drivers/media/i2c/mt9v111.c hb < MT9V111_CORE_R05_MAX_HBLANK; hb += 10) {
hb 502 drivers/media/i2c/mt9v111.c unsigned int t_frame = (row_pclk + hb) *
hb 520 drivers/media/i2c/mt9v111.c ret = v4l2_ctrl_s_ctrl_int64(mt9v111->hblank, hb);
hb 141 drivers/net/ethernet/sun/sunhme.c struct hmeal_init_block *hb = hp->happy_block;
hb 142 drivers/net/ethernet/sun/sunhme.c struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
hb 1252 drivers/net/ethernet/sun/sunhme.c struct hmeal_init_block *hb = hp->happy_block;
hb 1270 drivers/net/ethernet/sun/sunhme.c hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
hb 1281 drivers/net/ethernet/sun/sunhme.c hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
hb 1284 drivers/net/ethernet/sun/sunhme.c hme_write_rxd(hp, &hb->happy_meal_rxd[i],
hb 1292 drivers/net/ethernet/sun/sunhme.c hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
hb 4158 drivers/net/wireless/atmel/atmel.c u8 hb = *src++;
hb 4159 drivers/net/wireless/atmel/atmel.c atmel_write16(dev, DR, lb | (hb << 8));
hb 419 drivers/net/wireless/intel/iwlwifi/fw/api/power.h struct iwl_per_chain_offset hb;
hb 416 drivers/scsi/constants.c int hb = host_byte(result);
hb 418 drivers/scsi/constants.c if (hb < ARRAY_SIZE(hostbyte_table))
hb 419 drivers/scsi/constants.c hb_string = hostbyte_table[hb];
hb 240 drivers/scsi/cxlflash/common.h u64 hb;
hb 1887 drivers/scsi/cxlflash/main.c afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
hb 248 drivers/staging/comedi/drivers/dt2801.c int hb = 0;
hb 254 drivers/staging/comedi/drivers/dt2801.c ret = dt2801_readdata(dev, &hb);
hb 258 drivers/staging/comedi/drivers/dt2801.c *data = (hb << 8) + lb;
hb 39 drivers/staging/vt6655/tmacro.h #define MAKEWORD(lb, hb) ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
hb 1050 drivers/video/fbdev/intelfb/intelfbhw.c u32 *vs, *vb, *vt, *hs, *hb, *ht, *ss, *pipe_conf;
hb 1064 drivers/video/fbdev/intelfb/intelfbhw.c hb = &hw->hblank_b;
hb 1076 drivers/video/fbdev/intelfb/intelfbhw.c hb = &hw->hblank_a;
hb 1237 drivers/video/fbdev/intelfb/intelfbhw.c *hb = (hblank_start << HBLANKSTART_SHIFT) |
hb 1282 drivers/video/fbdev/intelfb/intelfbhw.c const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss;
hb 1308 drivers/video/fbdev/intelfb/intelfbhw.c hb = &hw->hblank_b;
hb 1332 drivers/video/fbdev/intelfb/intelfbhw.c hb = &hw->hblank_a;
hb 1422 drivers/video/fbdev/intelfb/intelfbhw.c OUTREG(hblank_reg, *hb);
hb 158 include/linux/hdlcdrv.h static inline int hdlcdrv_hbuf_full(struct hdlcdrv_hdlcbuffer *hb)
hb 163 include/linux/hdlcdrv.h spin_lock_irqsave(&hb->lock, flags);
hb 164 include/linux/hdlcdrv.h ret = !((HDLCDRV_HDLCBUFFER - 1 + hb->rd - hb->wr) % HDLCDRV_HDLCBUFFER);
hb 165 include/linux/hdlcdrv.h spin_unlock_irqrestore(&hb->lock, flags);
hb 171 include/linux/hdlcdrv.h static inline int hdlcdrv_hbuf_empty(struct hdlcdrv_hdlcbuffer *hb)
hb 176 include/linux/hdlcdrv.h spin_lock_irqsave(&hb->lock, flags);
hb 177 include/linux/hdlcdrv.h ret = (hb->rd == hb->wr);
hb 178 include/linux/hdlcdrv.h spin_unlock_irqrestore(&hb->lock, flags);
hb 184 include/linux/hdlcdrv.h static inline unsigned short hdlcdrv_hbuf_get(struct hdlcdrv_hdlcbuffer *hb)
hb 190 include/linux/hdlcdrv.h spin_lock_irqsave(&hb->lock, flags);
hb 191 include/linux/hdlcdrv.h if (hb->rd == hb->wr)
hb 194 include/linux/hdlcdrv.h newr = (hb->rd+1) % HDLCDRV_HDLCBUFFER;
hb 195 include/linux/hdlcdrv.h val = hb->buf[hb->rd];
hb 196 include/linux/hdlcdrv.h hb->rd = newr;
hb 198 include/linux/hdlcdrv.h spin_unlock_irqrestore(&hb->lock, flags);
hb 204 include/linux/hdlcdrv.h static inline void hdlcdrv_hbuf_put(struct hdlcdrv_hdlcbuffer *hb,
hb 210 include/linux/hdlcdrv.h spin_lock_irqsave(&hb->lock, flags);
hb 211 include/linux/hdlcdrv.h newp = (hb->wr+1) % HDLCDRV_HDLCBUFFER;
hb 212 include/linux/hdlcdrv.h if (newp != hb->rd) {
hb 213 include/linux/hdlcdrv.h hb->buf[hb->wr] = val & 0xffff;
hb 214 include/linux/hdlcdrv.h hb->wr = newp;
hb 216 include/linux/hdlcdrv.h spin_unlock_irqrestore(&hb->lock, flags);
hb 139 include/net/rose.h unsigned long t1, t2, t3, hb, idle;
hb 348 kernel/futex.c static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
hb 351 kernel/futex.c atomic_inc(&hb->waiters);
hb 363 kernel/futex.c static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
hb 366 kernel/futex.c atomic_dec(&hb->waiters);
hb 370 kernel/futex.c static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
hb 373 kernel/futex.c return atomic_read(&hb->waiters);
hb 788 kernel/futex.c static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
hb 793 kernel/futex.c plist_for_each_entry(this, &hb->chain, list) {
hb 920 kernel/futex.c struct futex_hash_bucket *hb;
hb 935 kernel/futex.c hb = hash_futex(&key);
hb 955 kernel/futex.c spin_lock(&hb->lock);
hb 965 kernel/futex.c spin_unlock(&hb->lock);
hb 977 kernel/futex.c spin_unlock(&hb->lock);
hb 1377 kernel/futex.c struct futex_hash_bucket *hb,
hb 1381 kernel/futex.c struct futex_q *top_waiter = futex_top_waiter(hb, key);
hb 1437 kernel/futex.c static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
hb 1471 kernel/futex.c top_waiter = futex_top_waiter(hb, key);
hb 1523 kernel/futex.c struct futex_hash_bucket *hb;
hb 1529 kernel/futex.c hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
hb 1530 kernel/futex.c plist_del(&q->list, &hb->chain);
hb 1531 kernel/futex.c hb_waiters_dec(hb);
hb 1674 kernel/futex.c struct futex_hash_bucket *hb;
hb 1687 kernel/futex.c hb = hash_futex(&key);
hb 1690 kernel/futex.c if (!hb_waiters_pending(hb))
hb 1693 kernel/futex.c spin_lock(&hb->lock);
hb 1695 kernel/futex.c plist_for_each_entry_safe(this, next, &hb->chain, list) {
hb 1712 kernel/futex.c spin_unlock(&hb->lock);
hb 1907 kernel/futex.c struct futex_hash_bucket *hb)
hb 1917 kernel/futex.c q->lock_ptr = &hb->lock;
hb 2316 kernel/futex.c __acquires(&hb->lock)
hb 2318 kernel/futex.c struct futex_hash_bucket *hb;
hb 2320 kernel/futex.c hb = hash_futex(&q->key);
hb 2330 kernel/futex.c hb_waiters_inc(hb); /* implies smp_mb(); (A) */
hb 2332 kernel/futex.c q->lock_ptr = &hb->lock;
hb 2334 kernel/futex.c spin_lock(&hb->lock);
hb 2335 kernel/futex.c return hb;
hb 2339 kernel/futex.c queue_unlock(struct futex_hash_bucket *hb)
hb 2340 kernel/futex.c __releases(&hb->lock)
hb 2342 kernel/futex.c spin_unlock(&hb->lock);
hb 2343 kernel/futex.c hb_waiters_dec(hb);
hb 2346 kernel/futex.c static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
hb 2361 kernel/futex.c plist_add(&q->list, &hb->chain);
hb 2377 kernel/futex.c static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
hb 2378 kernel/futex.c __releases(&hb->lock)
hb 2380 kernel/futex.c __queue_me(q, hb);
hb 2381 kernel/futex.c spin_unlock(&hb->lock);
hb 2694 kernel/futex.c static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
hb 2704 kernel/futex.c queue_me(q, hb);
hb 2744 kernel/futex.c struct futex_q *q, struct futex_hash_bucket **hb)
hb 2773 kernel/futex.c *hb = queue_lock(q);
hb 2778 kernel/futex.c queue_unlock(*hb);
hb 2792 kernel/futex.c queue_unlock(*hb);
hb 2807 kernel/futex.c struct futex_hash_bucket *hb;
hb 2822 kernel/futex.c ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
hb 2827 kernel/futex.c futex_wait_queue_me(hb, &q, to);
hb 2900 kernel/futex.c struct futex_hash_bucket *hb;
hb 2918 kernel/futex.c hb = queue_lock(&q);
hb 2920 kernel/futex.c ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
hb 2942 kernel/futex.c queue_unlock(hb);
hb 2962 kernel/futex.c __queue_me(&q, hb);
hb 3054 kernel/futex.c queue_unlock(hb);
hb 3066 kernel/futex.c queue_unlock(hb);
hb 3088 kernel/futex.c struct futex_hash_bucket *hb;
hb 3108 kernel/futex.c hb = hash_futex(&key);
hb 3109 kernel/futex.c spin_lock(&hb->lock);
hb 3116 kernel/futex.c top_waiter = futex_top_waiter(hb, &key);
hb 3143 kernel/futex.c spin_unlock(&hb->lock);
hb 3182 kernel/futex.c spin_unlock(&hb->lock);
hb 3202 kernel/futex.c spin_unlock(&hb->lock);
hb 3239 kernel/futex.c int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
hb 3253 kernel/futex.c WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
hb 3258 kernel/futex.c plist_del(&q->list, &hb->chain);
hb 3259 kernel/futex.c hb_waiters_dec(hb);
hb 3318 kernel/futex.c struct futex_hash_bucket *hb;
hb 3353 kernel/futex.c ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
hb 3362 kernel/futex.c queue_unlock(hb);
hb 3368 kernel/futex.c futex_wait_queue_me(hb, &q, to);
hb 3370 kernel/futex.c spin_lock(&hb->lock);
hb 3371 kernel/futex.c ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
hb 3372 kernel/futex.c spin_unlock(&hb->lock);
hb 231 kernel/power/swap.c static void hib_init_batch(struct hib_bio_batch *hb)
hb 233 kernel/power/swap.c atomic_set(&hb->count, 0);
hb 234 kernel/power/swap.c init_waitqueue_head(&hb->wait);
hb 235 kernel/power/swap.c hb->error = BLK_STS_OK;
hb 240 kernel/power/swap.c struct hib_bio_batch *hb = bio->bi_private;
hb 255 kernel/power/swap.c if (bio->bi_status && !hb->error)
hb 256 kernel/power/swap.c hb->error = bio->bi_status;
hb 257 kernel/power/swap.c if (atomic_dec_and_test(&hb->count))
hb 258 kernel/power/swap.c wake_up(&hb->wait);
hb 264 kernel/power/swap.c struct hib_bio_batch *hb)
hb 282 kernel/power/swap.c if (hb) {
hb 284 kernel/power/swap.c bio->bi_private = hb;
hb 285 kernel/power/swap.c atomic_inc(&hb->count);
hb 295 kernel/power/swap.c static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
hb 297 kernel/power/swap.c wait_event(hb->wait, atomic_read(&hb->count) == 0);
hb 298 kernel/power/swap.c return blk_status_to_errno(hb->error);
hb 368 kernel/power/swap.c static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
hb 376 kernel/power/swap.c if (hb) {
hb 382 kernel/power/swap.c ret = hib_wait_io(hb); /* Free pages */
hb 392 kernel/power/swap.c hb = NULL; /* Go synchronous */
hb 399 kernel/power/swap.c return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
hb 441 kernel/power/swap.c struct hib_bio_batch *hb)
hb 449 kernel/power/swap.c error = write_page(buf, offset, hb);
hb 458 kernel/power/swap.c error = write_page(handle->cur, handle->cur_swap, hb);
hb 465 kernel/power/swap.c if (hb && low_free_pages() <= handle->reqd_free_pages) {
hb 466 kernel/power/swap.c error = hib_wait_io(hb);
hb 538 kernel/power/swap.c struct hib_bio_batch hb;
hb 542 kernel/power/swap.c hib_init_batch(&hb);
hb 555 kernel/power/swap.c ret = swap_write_page(handle, data_of(*snapshot), &hb);
hb 563 kernel/power/swap.c err2 = hib_wait_io(&hb);
hb 674 kernel/power/swap.c struct hib_bio_batch hb;
hb 683 kernel/power/swap.c hib_init_batch(&hb);
hb 838 kernel/power/swap.c ret = swap_write_page(handle, page, &hb);
hb 849 kernel/power/swap.c err2 = hib_wait_io(&hb);
hb 1008 kernel/power/swap.c struct hib_bio_batch *hb)
hb 1019 kernel/power/swap.c error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
hb 1057 kernel/power/swap.c struct hib_bio_batch hb;
hb 1061 kernel/power/swap.c hib_init_batch(&hb);
hb 1074 kernel/power/swap.c ret = swap_read_page(handle, data_of(*snapshot), &hb);
hb 1078 kernel/power/swap.c ret = hib_wait_io(&hb);
hb 1086 kernel/power/swap.c err2 = hib_wait_io(&hb);
hb 1161 kernel/power/swap.c struct hib_bio_batch hb;
hb 1174 kernel/power/swap.c hib_init_batch(&hb);
hb 1293 kernel/power/swap.c ret = swap_read_page(handle, page[ring], &hb);
hb 1320 kernel/power/swap.c ret = hib_wait_io(&hb);
hb 1374 kernel/power/swap.c ret = hib_wait_io(&hb);
hb 83 lib/zstd/huf.h U32 name##hb[maxSymbolValue + 1]; \
hb 84 lib/zstd/huf.h void *name##hv = &(name##hb); \
hb 749 net/ipv4/tcp_metrics.c struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;
hb 752 net/ipv4/tcp_metrics.c for (col = 0, tm = rcu_dereference(hb->chain); tm;
hb 873 net/ipv4/tcp_metrics.c struct tcpm_hash_bucket *hb = tcp_metrics_hash;
hb 877 net/ipv4/tcp_metrics.c for (row = 0; row < max_rows; row++, hb++) {
hb 882 net/ipv4/tcp_metrics.c pp = &hb->chain;
hb 899 net/ipv4/tcp_metrics.c struct tcpm_hash_bucket *hb;
hb 921 net/ipv4/tcp_metrics.c hb = tcp_metrics_hash + hash;
hb 922 net/ipv4/tcp_metrics.c pp = &hb->chain;
hb 387 net/rose/af_rose.c rose->hb = opt * HZ;
hb 440 net/rose/af_rose.c val = rose->hb / HZ;
hb 523 net/rose/af_rose.c rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
hb 568 net/rose/af_rose.c rose->hb = orose->hb;
hb 1410 net/rose/af_rose.c rose->hb / HZ,
hb 85 net/rose/rose_timer.c rose->timer.expires = jiffies + rose->hb;
hb 2298 tools/perf/builtin-c2c.c static void c2c_browser__update_nr_entries(struct hist_browser *hb)
hb 2301 tools/perf/builtin-c2c.c struct rb_node *nd = rb_first_cached(&hb->hists->entries);
hb 2312 tools/perf/builtin-c2c.c hb->nr_non_filtered_entries = nr_entries;
hb 2316 tools/perf/builtin-c2c.c struct hist_browser hb;
hb 2328 tools/perf/builtin-c2c.c cl_browser = container_of(browser, struct c2c_cacheline_browser, hb);
hb 2345 tools/perf/builtin-c2c.c hist_browser__init(&browser->hb, hists);
hb 2346 tools/perf/builtin-c2c.c browser->hb.c2c_filter = true;
hb 2347 tools/perf/builtin-c2c.c browser->hb.title = perf_c2c_cacheline_browser__title;
hb 2380 tools/perf/builtin-c2c.c browser = &cl_browser->hb;
hb 48 tools/perf/ui/browsers/hists.c static void hist_browser__update_nr_entries(struct hist_browser *hb);
hb 53 tools/perf/ui/browsers/hists.c static bool hist_browser__has_filter(struct hist_browser *hb)
hb 55 tools/perf/ui/browsers/hists.c return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter || hb->c2c_filter;
hb 76 tools/perf/ui/browsers/hists.c static void hist_browser__set_title_space(struct hist_browser *hb)
hb 78 tools/perf/ui/browsers/hists.c struct ui_browser *browser = &hb->b;
hb 79 tools/perf/ui/browsers/hists.c struct hists *hists = hb->hists;
hb 82 tools/perf/ui/browsers/hists.c browser->extra_title_lines = hb->show_headers ? hpp_list->nr_header_lines : 0;
hb 85 tools/perf/ui/browsers/hists.c static u32 hist_browser__nr_entries(struct hist_browser *hb)
hb 90 tools/perf/ui/browsers/hists.c nr_entries = hb->nr_hierarchy_entries;
hb 91 tools/perf/ui/browsers/hists.c else if (hist_browser__has_filter(hb))
hb 92 tools/perf/ui/browsers/hists.c nr_entries = hb->nr_non_filtered_entries;
hb 94 tools/perf/ui/browsers/hists.c nr_entries = hb->hists->nr_entries;
hb 96 tools/perf/ui/browsers/hists.c hb->nr_callchain_rows = hist_browser__get_folding(hb);
hb 97 tools/perf/ui/browsers/hists.c return nr_entries + hb->nr_callchain_rows;
hb 100 tools/perf/ui/browsers/hists.c static void hist_browser__update_rows(struct hist_browser *hb)
hb 102 tools/perf/ui/browsers/hists.c struct ui_browser *browser = &hb->b;
hb 103 tools/perf/ui/browsers/hists.c struct hists *hists = hb->hists;
hb 107 tools/perf/ui/browsers/hists.c if (!hb->show_headers) {
hb 126 tools/perf/ui/browsers/hists.c struct hist_browser *hb = container_of(browser, struct hist_browser, b);
hb 129 tools/perf/ui/browsers/hists.c browser->width = 3 + (hists__sort_list_width(hb->hists) + sizeof("[k]"));
hb 269 tools/perf/ui/browsers/hists.c static int hierarchy_count_rows(struct hist_browser *hb, struct hist_entry *he,
hb 289 tools/perf/ui/browsers/hists.c if (!child->filtered && percent >= hb->min_pcnt) {
hb 293 tools/perf/ui/browsers/hists.c count += hierarchy_count_rows(hb, child, true);
hb 515 tools/perf/ui/browsers/hists.c static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
hb 526 tools/perf/ui/browsers/hists.c if (!child->filtered && percent >= hb->min_pcnt)
hb 534 tools/perf/ui/browsers/hists.c struct hist_browser *hb, bool unfold)
hb 545 tools/perf/ui/browsers/hists.c n = hierarchy_set_folding(hb, he, unfold);
hb 1756 tools/perf/ui/browsers/hists.c struct hist_browser *hb;
hb 1758 tools/perf/ui/browsers/hists.c hb = container_of(browser, struct hist_browser, b);
hb 1759 tools/perf/ui/browsers/hists.c browser->top = rb_first_cached(&hb->hists->entries);
hb 1767 tools/perf/ui/browsers/hists.c struct hist_browser *hb = container_of(browser, struct hist_browser, b);
hb 1769 tools/perf/ui/browsers/hists.c if (hb->show_headers)
hb 1770 tools/perf/ui/browsers/hists.c hist_browser__show_headers(hb);
hb 1773 tools/perf/ui/browsers/hists.c hb->he_selection = NULL;
hb 1774 tools/perf/ui/browsers/hists.c hb->selection = NULL;
hb 1787 tools/perf/ui/browsers/hists.c if (percent < hb->min_pcnt)
hb 1791 tools/perf/ui/browsers/hists.c row += hist_browser__show_hierarchy_entry(hb, h, row,
hb 1797 tools/perf/ui/browsers/hists.c hist_browser__show_no_entry(hb, row, h->depth + 1);
hb 1801 tools/perf/ui/browsers/hists.c row += hist_browser__show_entry(hb, h, row);
hb 1856 tools/perf/ui/browsers/hists.c struct hist_browser *hb;
hb 1858 tools/perf/ui/browsers/hists.c hb = container_of(browser, struct hist_browser, b);
hb 1868 tools/perf/ui/browsers/hists.c hb->min_pcnt);
hb 1875 tools/perf/ui/browsers/hists.c nd = hists__filter_prev_entries(nd, hb->min_pcnt);
hb 1922 tools/perf/ui/browsers/hists.c hb->min_pcnt);
hb 1956 tools/perf/ui/browsers/hists.c hb->min_pcnt);
hb 2762 tools/perf/ui/browsers/hists.c static void hist_browser__update_nr_entries(struct hist_browser *hb)
hb 2765 tools/perf/ui/browsers/hists.c struct rb_node *nd = rb_first_cached(&hb->hists->entries);
hb 2767 tools/perf/ui/browsers/hists.c if (hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) {
hb 2768 tools/perf/ui/browsers/hists.c hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
hb 2772 tools/perf/ui/browsers/hists.c while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
hb 2777 tools/perf/ui/browsers/hists.c hb->nr_non_filtered_entries = nr_entries;
hb 2778 tools/perf/ui/browsers/hists.c hb->nr_hierarchy_entries = nr_entries;
hb 2781 tools/perf/ui/browsers/hists.c static void hist_browser__update_percent_limit(struct hist_browser *hb,
hb 2785 tools/perf/ui/browsers/hists.c struct rb_node *nd = rb_first_cached(&hb->hists->entries);
hb 2786 tools/perf/ui/browsers/hists.c u64 total = hists__total_period(hb->hists);
hb 2789 tools/perf/ui/browsers/hists.c hb->min_pcnt = callchain_param.min_percent = percent;
hb 2791 tools/perf/ui/browsers/hists.c while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
hb 2819 tools/perf/ui/browsers/hists.c hist_entry__set_folding(he, hb, false);
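
Two of the one-line decodes in the listing, temp_of_remote() at drivers/hwmon/w83773g.c:58 and MAKEWORD() at drivers/staging/vt6655/tmacro.h:39, assemble a value from a high byte ("hb") and a low byte. A minimal stand-alone sketch of that arithmetic follows; it is user-space C, not kernel code, and the input values are hypothetical, chosen only to make the bit layout visible.

#include <stdio.h>

/* mirrors drivers/hwmon/w83773g.c:56-58: 11-bit remote temperature built
 * from the high byte plus the top three bits of the low byte, 0.125 degC
 * per LSB, returned in millidegrees Celsius */
static long temp_of_remote(signed char hb, unsigned char lb)
{
	return (hb << 3 | lb >> 5) * 125;
}

/* mirrors drivers/staging/vt6655/tmacro.h:39: combine a low byte and a
 * high byte into one 16-bit word */
#define MAKEWORD(lb, hb) ((unsigned short)(((unsigned char)(lb)) | \
		(((unsigned short)((unsigned char)(hb))) << 8)))

int main(void)
{
	/* hypothetical register values: hb = 0x19 (25 degC), lb = 0x80 (+0.5 degC) */
	printf("%ld\n", temp_of_remote(0x19, 0x80));	/* prints 25500 */
	printf("0x%04x\n", MAKEWORD(0x34, 0x12));	/* prints 0x1234 */
	return 0;
}

In temp_of_remote() the three fraction bits sit in the top of the low byte, so the shift and OR produce an 11-bit reading that the factor 125 scales from 0.125 degC steps to millidegrees; MAKEWORD() is the same high/low composition without any scaling.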