collector   159  drivers/md/bcache/stats.c   unsigned int t = atomic_xchg(&acc->collector.name, 0);	\
collector   208  drivers/md/bcache/stats.c   mark_cache_stats(&dc->accounting.collector, hit, bypass);
collector   209  drivers/md/bcache/stats.c   mark_cache_stats(&c->accounting.collector, hit, bypass);
collector   216  drivers/md/bcache/stats.c   atomic_inc(&dc->accounting.collector.cache_readaheads);
collector   217  drivers/md/bcache/stats.c   atomic_inc(&c->accounting.collector.cache_readaheads);
collector   224  drivers/md/bcache/stats.c   atomic_inc(&dc->accounting.collector.cache_miss_collisions);
collector   225  drivers/md/bcache/stats.c   atomic_inc(&c->accounting.collector.cache_miss_collisions);
collector   231  drivers/md/bcache/stats.c   atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
collector   232  drivers/md/bcache/stats.c   atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
collector    34  drivers/md/bcache/stats.h   struct cache_stat_collector collector;
collector   302  net/rxrpc/peer_event.c      struct list_head *collector,
collector   313  net/rxrpc/peer_event.c      while (!list_empty(collector)) {
collector   314  net/rxrpc/peer_event.c      peer = list_entry(collector->next,
collector   362  net/rxrpc/peer_event.c      LIST_HEAD(collector);
collector   380  net/rxrpc/peer_event.c      list_splice_init(&rxnet->peer_keepalive_new, &collector);
collector   385  net/rxrpc/peer_event.c      &collector);
collector   395  net/rxrpc/peer_event.c      rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
collector   396  net/rxrpc/peer_event.c      ASSERT(list_empty(&collector));
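
The drivers/md/bcache entries above all touch struct cache_stat_collector: the I/O paths bump per-event atomic counters (atomic_inc(), atomic_add()), and a later accounting pass drains each counter with atomic_xchg(..., 0) into accumulated totals. Below is a minimal userspace sketch of that collect-then-drain pattern, assuming C11 <stdatomic.h>; the *_sketch struct names and the helper functions are illustrative stand-ins, not the kernel code.

	/*
	 * Sketch of the bcache collector pattern: fast paths bump atomic
	 * counters, a periodic drain exchanges each counter to zero and
	 * folds the value into accumulated statistics.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	struct cache_stat_collector_sketch {	/* analogue of struct cache_stat_collector */
		atomic_uint cache_readaheads;
		atomic_uint cache_miss_collisions;
		atomic_uint sectors_bypassed;
	};

	struct cache_stats_sketch {		/* accumulated totals */
		unsigned int cache_readaheads;
		unsigned int cache_miss_collisions;
		unsigned int sectors_bypassed;
	};

	/* Fast-path updates, mirroring the atomic_inc()/atomic_add() calls listed. */
	static void mark_readahead(struct cache_stat_collector_sketch *c)
	{
		atomic_fetch_add(&c->cache_readaheads, 1);
	}

	static void mark_sectors_bypassed(struct cache_stat_collector_sketch *c,
					  unsigned int sectors)
	{
		atomic_fetch_add(&c->sectors_bypassed, sectors);
	}

	/* Periodic drain, mirroring the atomic_xchg(&acc->collector.name, 0) macro. */
	static void drain_collector(struct cache_stat_collector_sketch *c,
				    struct cache_stats_sketch *total)
	{
		total->cache_readaheads      += atomic_exchange(&c->cache_readaheads, 0);
		total->cache_miss_collisions += atomic_exchange(&c->cache_miss_collisions, 0);
		total->sectors_bypassed      += atomic_exchange(&c->sectors_bypassed, 0);
	}

	int main(void)
	{
		struct cache_stat_collector_sketch col = {0};
		struct cache_stats_sketch total = {0};

		mark_readahead(&col);
		mark_sectors_bypassed(&col, 8);
		drain_collector(&col, &total);
		printf("readaheads=%u bypassed=%u\n",
		       total.cache_readaheads, total.sectors_bypassed);
		return 0;
	}

Exchanging each counter to zero, rather than reading and then resetting it, keeps the read-and-clear step a single atomic operation, so updates racing with the drain are never lost.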
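
The net/rxrpc entries use collector as a local list head in the keepalive worker: due peer buckets are spliced onto it with list_splice_init(), each peer is popped with list_entry() and dispatched, and the list is asserted empty afterwards. Below is a minimal userspace sketch of that splice-and-drain idiom, assuming a simplified re-implementation of the <linux/list.h> helpers; struct peer and dispatch() are hypothetical stand-ins for struct rxrpc_peer and rxrpc_peer_keepalive_dispatch().

	/* Sketch of the rxrpc keepalive collector: splice buckets in, drain peers out. */
	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD(name)	struct list_head name = { &(name), &(name) }
	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static int list_empty(const struct list_head *h) { return h->next == h; }

	static void list_del_init(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
		e->next = e->prev = e;
	}

	static void list_add_tail(struct list_head *e, struct list_head *h)
	{
		e->prev = h->prev;
		e->next = h;
		h->prev->next = e;
		h->prev = e;
	}

	/* Move all entries of @from onto the front of @to and re-init @from. */
	static void list_splice_init(struct list_head *from, struct list_head *to)
	{
		if (!list_empty(from)) {
			struct list_head *first = from->next, *last = from->prev;

			first->prev = to;
			last->next = to->next;
			to->next->prev = last;
			to->next = first;
			from->next = from->prev = from;
		}
	}

	struct peer {				/* stand-in for struct rxrpc_peer */
		struct list_head keepalive_link;
		int id;
	};

	/* Drain the collector one peer at a time, as the dispatch loop above does. */
	static void dispatch(struct list_head *collector)
	{
		while (!list_empty(collector)) {
			struct peer *peer = list_entry(collector->next,
						       struct peer, keepalive_link);

			list_del_init(&peer->keepalive_link);
			printf("keepalive for peer %d\n", peer->id);
		}
	}

	int main(void)
	{
		LIST_HEAD(bucket);		/* e.g. a peer_keepalive bucket */
		LIST_HEAD(collector);
		struct peer a = { .id = 1 }, b = { .id = 2 };

		list_add_tail(&a.keepalive_link, &bucket);
		list_add_tail(&b.keepalive_link, &bucket);

		list_splice_init(&bucket, &collector);	/* gather due peers */
		dispatch(&collector);
		assert(list_empty(&collector));		/* mirrors the ASSERT() above */
		return 0;
	}

Splicing whole buckets onto a private list and then draining it means the per-peer work happens off the shared lists, so the shared locks only need to be held for the cheap splice step.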