backlogs           69 include/net/fq.h 	struct list_head backlogs;
backlogs           36 include/net/fq_impl.h 		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
backlogs          145 include/net/fq_impl.h 		list_add_tail(&flow->backlogchain, &fq->backlogs);
backlogs          148 include/net/fq_impl.h 	list_for_each_entry_continue_reverse(i, &fq->backlogs,
backlogs          187 include/net/fq_impl.h 		flow = list_first_entry_or_null(&fq->backlogs,
backlogs          308 include/net/fq_impl.h 	INIT_LIST_HEAD(&fq->backlogs);
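
The cluster above is the generic fair-queueing core used by mac80211: struct fq keeps a single backlogs list holding every active flow, sorted by how many bytes each has queued, so the heaviest flow is always at the head (line 187 pulls it out with list_first_entry_or_null() when the qdisc is over limit, and line 36 re-sorts on dequeue as a flow shrinks). The sketch below reconstructs the sorting step from the fq_impl.h lines quoted above; the structs are trimmed to the fields involved, and fq_sort_backlog() is a paraphrase of the kernel's fq_recalc_backlog(), not a drop-in copy.

#include <linux/list.h>
#include <linux/types.h>

/* Trimmed view of include/net/fq.h: only the backlog-sorting fields. */
struct fq_flow {
	struct list_head backlogchain;	/* node on fq->backlogs */
	u32 backlog;			/* bytes queued in this flow */
};

struct fq {
	struct list_head backlogs;	/* all flows, heaviest first */
};

/* After flow->backlog grows, restore the sort order: walk backwards
 * from the flow's current position until a heavier flow is found,
 * then re-insert the flow right after it (cf. lines 145-148 above). */
static void fq_sort_backlog(struct fq *fq, struct fq_flow *flow)
{
	struct fq_flow *i;

	if (list_empty(&flow->backlogchain))
		list_add_tail(&flow->backlogchain, &fq->backlogs);

	i = flow;
	list_for_each_entry_continue_reverse(i, &fq->backlogs,
					     backlogchain)
		if (i->backlog > flow->backlog)
			break;

	list_move(&flow->backlogchain, &i->backlogchain);
}

Keeping the list sorted on every enqueue makes the over-limit drop decision O(1): the victim is simply the list head.
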
backlogs          151 net/sched/sch_cake.c 	u32	backlogs[CAKE_QUEUES];
backlogs         1372 net/sched/sch_cake.c 	return q->tins[ii.t].backlogs[ii.b];
backlogs         1498 net/sched/sch_cake.c 	b->backlogs[idx]    -= len;
backlogs         1705 net/sched/sch_cake.c 		b->backlogs[idx]    += slen;
backlogs         1740 net/sched/sch_cake.c 		b->backlogs[idx]    += len;
backlogs         1859 net/sched/sch_cake.c 		b->backlogs[q->cur_flow] -= len;
backlogs         2970 net/sched/sch_cake.c 		qs.backlog = b->backlogs[idx % CAKE_QUEUES];
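
sch_cake.c takes the array route instead: each tin owns a fixed u32 backlogs[CAKE_QUEUES] table indexed by flow slot, bumped on enqueue (lines 1705 and 1740), decremented on dequeue and drop (1498, 1859), and read back both for the drop decision via the heap (1372) and for the per-class stats dump (2970, where idx % CAKE_QUEUES strips the tin part of the class id). A minimal sketch of that accounting follows; cake_backlog_add()/cake_backlog_sub() are illustrative names rather than functions in sch_cake.c, and the struct is cut down to the two counters involved.

#include <linux/types.h>

#define CAKE_QUEUES 1024	/* as defined in net/sched/sch_cake.c */

/* Cut-down view of struct cake_tin_data: per-slot byte counts plus
 * the per-tin total they feed. */
struct cake_tin_data {
	u32 backlogs[CAKE_QUEUES];
	u32 tin_backlog;
};

/* Hypothetical helpers; in sch_cake.c these updates are inlined at
 * the enqueue/dequeue/drop sites quoted above. */
static void cake_backlog_add(struct cake_tin_data *b, u16 idx, u32 len)
{
	b->backlogs[idx] += len;	/* cf. lines 1705/1740 */
	b->tin_backlog  += len;
}

static void cake_backlog_sub(struct cake_tin_data *b, u16 idx, u32 len)
{
	b->backlogs[idx] -= len;	/* cf. lines 1498/1859 */
	b->tin_backlog  -= len;
}

The fixed array trades memory for speed: no allocation on the fast path, and the heap built over these counters lets cake_drop() find the largest queue without a linear scan.
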
backlogs           55 net/sched/sch_fq_codel.c 	u32		*backlogs;	/* backlog table [flows_cnt] */
backlogs          156 net/sched/sch_fq_codel.c 		if (q->backlogs[i] > maxbacklog) {
backlogs          157 net/sched/sch_fq_codel.c 			maxbacklog = q->backlogs[i];
backlogs          177 net/sched/sch_fq_codel.c 	q->backlogs[idx] -= len;
backlogs          207 net/sched/sch_fq_codel.c 	q->backlogs[idx] += qdisc_pkt_len(skb);
backlogs          266 net/sched/sch_fq_codel.c 		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
backlogs          350 net/sched/sch_fq_codel.c 	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
backlogs          446 net/sched/sch_fq_codel.c 	kvfree(q->backlogs);
backlogs          487 net/sched/sch_fq_codel.c 		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
backlogs          488 net/sched/sch_fq_codel.c 		if (!q->backlogs) {
backlogs          652 net/sched/sch_fq_codel.c 		qs.backlog = q->backlogs[idx];
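
fq_codel lands in between: the per-flow byte counter lives in a separate kvcalloc()'d table of flows_cnt entries (lines 487-488, freed at 446, zeroed on reset at 350) rather than inside each flow struct, which keeps the fattest-flow scan in fq_codel_drop() (lines 156-157) compact and cache-friendly. The sketch below pulls that scan into a standalone function for illustration; fq_codel_pick_fattest() is a hypothetical name, as the loop sits inline in fq_codel_drop() in the kernel.

#include <linux/types.h>

/* Walk the whole backlog table and return the index of the flow
 * holding the most bytes; fq_codel_drop() then frees packets from
 * the head of that flow until enough space is reclaimed. */
static u32 fq_codel_pick_fattest(const u32 *backlogs, u32 flows_cnt)
{
	u32 i, idx = 0, maxbacklog = 0;

	for (i = 0; i < flows_cnt; i++) {
		if (backlogs[i] > maxbacklog) {
			maxbacklog = backlogs[i];
			idx = i;
		}
	}
	return idx;
}

The O(flows_cnt) scan is acceptable here because it only runs when the qdisc is already over limit; the common enqueue/dequeue paths touch just one table entry (lines 177, 207, 266).
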