congested  994   drivers/block/drbd/drbd_req.c  bool congested = false;
congested  1018  drivers/block/drbd/drbd_req.c  congested = true;
congested  1023  drivers/block/drbd/drbd_req.c  congested = true;
congested  1026  drivers/block/drbd/drbd_req.c  if (congested) {
congested  526   drivers/crypto/caam/qi.c  static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
congested  528   drivers/crypto/caam/qi.c  caam_congested = congested;
congested  530   drivers/crypto/caam/qi.c  if (congested) {
congested  696   drivers/md/bcache/bcache.h  atomic_t congested;
congested  150   drivers/md/bcache/io.c  int congested = atomic_read(&c->congested);
congested  157   drivers/md/bcache/io.c  ms = min(ms, CONGESTED_MAX + congested);
congested  158   drivers/md/bcache/io.c  atomic_sub(ms, &c->congested);
congested  159   drivers/md/bcache/io.c  } else if (congested < 0)
congested  160   drivers/md/bcache/io.c  atomic_inc(&c->congested);
congested  348   drivers/md/bcache/request.c  i += atomic_read(&c->congested);
congested  379   drivers/md/bcache/request.c  unsigned int sectors, congested;
congested  422   drivers/md/bcache/request.c  congested = bch_get_congested(c);
congested  423   drivers/md/bcache/request.c  if (!congested && !dc->sequential_cutoff)
congested  460   drivers/md/bcache/request.c  if (congested && sectors >= congested) {
congested  100   drivers/md/bcache/sysfs.c  read_attribute(congested);
congested  754   drivers/md/bcache/sysfs.c  sysfs_hprint(congested,
congested  1814  drivers/md/dm.c  r = bdi->wb.congested->state & bdi_bits;
congested  325   drivers/md/md-linear.c  .congested = linear_congested,
congested  481   drivers/md/md-multipath.c  .congested = multipath_congested,
congested  462   drivers/md/md.c  else if (pers && pers->congested)
congested  463   drivers/md/md.c  ret = pers->congested(mddev, bits);
congested  594   drivers/md/md.h  int (*congested)(struct mddev *mddev, int bits);
congested  821   drivers/md/raid0.c  .congested = raid0_congested,
congested  3386  drivers/md/raid1.c  .congested = raid1_congested,
congested  4932  drivers/md/raid10.c  .congested = raid10_congested,
congested  8431  drivers/md/raid5.c  .congested = raid5_congested,
congested  8456  drivers/md/raid5.c  .congested = raid5_congested,
congested  8482  drivers/md/raid5.c  .congested = raid5_congested,
congested  795   drivers/net/ethernet/freescale/dpaa/dpaa_eth.c  int congested)
congested  800   drivers/net/ethernet/freescale/dpaa/dpaa_eth.c  if (congested) {
congested  788   fs/fs-writeback.c  bool congested;
congested  791   fs/fs-writeback.c  congested = wb_congested(wb, cong_bits);
congested  793   fs/fs-writeback.c  return congested;
congested  144   include/linux/backing-dev-defs.h  struct bdi_writeback_congested *congested;
congested  238   include/linux/backing-dev-defs.h  void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
congested  239   include/linux/backing-dev-defs.h  void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
congested  243   include/linux/backing-dev-defs.h  clear_wb_congested(bdi->wb.congested, sync);
congested  248   include/linux/backing-dev-defs.h  set_wb_congested(bdi->wb.congested, sync);
congested  180   include/linux/backing-dev.h  return wb->congested->state & cong_bits;
congested  233   include/linux/backing-dev.h  void wb_congested_put(struct bdi_writeback_congested *congested);
congested  418   include/linux/backing-dev.h  static inline void wb_congested_put(struct bdi_writeback_congested *congested)
congested  420   include/linux/backing-dev.h  if (refcount_dec_and_test(&congested->refcnt))
congested  421   include/linux/backing-dev.h  kfree(congested);
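The include/linux/backing-dev.h hits above (header lines 180 and 418-421) outline the simple form of the API: wb_congested() tests bits in wb->congested->state, and an inline wb_congested_put() drops a reference and frees the object on the last put. A minimal user-space approximation of that pattern, with plain C stand-ins for refcount_t and kfree() and a simplified structure (field names follow the listing; everything else is illustrative, not the kernel definitions), might look like:

    #include <stdbool.h>
    #include <stdlib.h>

    /* Illustrative stand-in for struct bdi_writeback_congested; the real
     * structure lives in include/linux/backing-dev-defs.h and uses
     * refcount_t rather than a plain counter. */
    struct wb_congested {
            unsigned long state;    /* congestion state bits */
            int refcnt;             /* reference count */
    };

    /* Mirrors the wb_congested() fragment above: report whether any of
     * the requested congestion bits are currently set. */
    static bool wb_congested(const struct wb_congested *congested, int cong_bits)
    {
            return congested->state & cong_bits;
    }

    /* Mirrors the inline wb_congested_put() fragment above: drop one
     * reference and free the object when the last reference goes away. */
    static void wb_congested_put(struct wb_congested *congested)
    {
            if (--congested->refcnt == 0)
                    free(congested);
    }

The heavier variant in mm/backing-dev.c (next group of hits) takes cgwb_lock via refcount_dec_and_lock_irqsave() and unlinks the node from the per-bdi cgwb_congested_tree rbtree before freeing it.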
congested  137   include/linux/memcontrol.h  bool congested; /* memcg has many dirty pages */
congested  312   mm/backing-dev.c  wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
congested  313   mm/backing-dev.c  if (!wb->congested) {
congested  335   mm/backing-dev.c  wb_congested_put(wb->congested);
congested  378   mm/backing-dev.c  wb_congested_put(wb->congested);
congested  408   mm/backing-dev.c  struct bdi_writeback_congested *new_congested = NULL, *congested;
congested  419   mm/backing-dev.c  congested = rb_entry(parent, struct bdi_writeback_congested,
congested  421   mm/backing-dev.c  if (congested->blkcg_id < blkcg_id)
congested  423   mm/backing-dev.c  else if (congested->blkcg_id > blkcg_id)
congested  431   mm/backing-dev.c  congested = new_congested;
congested  432   mm/backing-dev.c  rb_link_node(&congested->rb_node, parent, node);
congested  433   mm/backing-dev.c  rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
congested  435   mm/backing-dev.c  return congested;
congested  451   mm/backing-dev.c  refcount_inc(&congested->refcnt);
congested  454   mm/backing-dev.c  return congested;
congested  463   mm/backing-dev.c  void wb_congested_put(struct bdi_writeback_congested *congested)
congested  467   mm/backing-dev.c  if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
congested  471   mm/backing-dev.c  if (congested->__bdi) {
congested  472   mm/backing-dev.c  rb_erase(&congested->rb_node,
congested  473   mm/backing-dev.c  &congested->__bdi->cgwb_congested_tree);
congested  474   mm/backing-dev.c  congested->__bdi = NULL;
congested  478   mm/backing-dev.c  kfree(congested);
congested  779   mm/backing-dev.c  struct bdi_writeback_congested *congested =
congested  783   mm/backing-dev.c  congested->__bdi = NULL; /* mark @congested unlinked */
congested  1061  mm/backing-dev.c  void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
congested  1067  mm/backing-dev.c  if (test_and_clear_bit(bit, &congested->state))
congested  1075  mm/backing-dev.c  void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
congested  1080  mm/backing-dev.c  if (!test_and_set_bit(bit, &congested->state))
congested  125   mm/vmscan.c  unsigned int congested;
congested  275   mm/vmscan.c  bool congested)
congested  283   mm/vmscan.c  WRITE_ONCE(mn->congested, congested);
congested  292   mm/vmscan.c  return READ_ONCE(mn->congested);
congested  316   mm/vmscan.c  struct mem_cgroup *memcg, bool congested)
congested  2030  mm/vmscan.c  sc->nr.congested += stat.nr_congested;
congested  2863  mm/vmscan.c  if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
congested  2885  mm/vmscan.c  sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
congested  1395  net/netlink/af_netlink.c  int congested;
congested  1473  net/netlink/af_netlink.c  p->congested |= val;
congested  1498  net/netlink/af_netlink.c  info.congested = 0;
congested  1524  net/netlink/af_netlink.c  if (info.congested && gfpflags_allow_blocking(allocation))
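The mm/backing-dev.c hits at source lines 1061-1080 show set_wb_congested() and clear_wb_congested() flipping a per-direction bit in the shared state word with test_and_set_bit()/test_and_clear_bit(), where the sync argument selects which bit is touched. A rough user-space analogue of just that bit-flipping pattern, written with C11 atomics and my_-prefixed names to mark it as an illustration (the kernel helpers also update congestion accounting and, on clear, wake tasks waiting on congestion, all of which is omitted here), could look like:

    #include <stdatomic.h>

    /* Illustrative bit positions; the kernel derives the bit from the
     * sync argument via enum values in backing-dev-defs.h. */
    enum { MY_WB_ASYNC_CONGESTED = 0, MY_WB_SYNC_CONGESTED = 1 };

    struct my_wb_congested {
            _Atomic unsigned long state;
    };

    /* Rough analogue of set_wb_congested(): atomically set the
     * congestion bit for the given direction. */
    static void my_set_wb_congested(struct my_wb_congested *c, int sync)
    {
            unsigned long bit =
                    1UL << (sync ? MY_WB_SYNC_CONGESTED : MY_WB_ASYNC_CONGESTED);
            atomic_fetch_or(&c->state, bit);
    }

    /* Rough analogue of clear_wb_congested(): atomically clear the
     * same bit again. */
    static void my_clear_wb_congested(struct my_wb_congested *c, int sync)
    {
            unsigned long bit =
                    1UL << (sync ? MY_WB_SYNC_CONGESTED : MY_WB_ASYNC_CONGESTED);
            atomic_fetch_and(&c->state, ~bit);
    }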