Lines matching references to "q" in net/sched/sch_hhf.c, the Heavy-Hitter Filter (HHF) qdisc. Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark the lines where q is declared as a parameter or a local variable.

179 static unsigned int skb_hash(const struct hhf_sched_data *q,  in skb_hash()  argument
191 (__force u32)keys.ports, q->perturbation); in skb_hash()
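
The two references above (lines 179 and 191) are from skb_hash(), which mixes the flow's addresses, protocol and ports with the per-qdisc random perturbation so a sender cannot predict its filter positions. A minimal userspace sketch of the same idea follows; the struct, the mix3() helper and its constants are illustrative stand-ins, not the kernel's jhash_3words()-based code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct flow_keys_model {          /* hypothetical reduced flow key */
	uint32_t src, dst;
	uint32_t ports;               /* sport << 16 | dport */
	uint8_t  ip_proto;
};

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	/* Simple avalanche mix; the kernel uses jhash_3words() instead. */
	uint32_t h = seed;

	h ^= a; h *= 0x9e3779b1u; h = (h << 13) | (h >> 19);
	h ^= b; h *= 0x85ebca77u; h = (h << 13) | (h >> 19);
	h ^= c; h *= 0xc2b2ae3du; h = (h << 13) | (h >> 19);
	return h;
}

static uint32_t flow_hash(const struct flow_keys_model *k, uint32_t perturbation)
{
	return mix3(k->dst, k->src ^ k->ip_proto, k->ports, perturbation);
}

int main(void)
{
	uint32_t perturbation = (uint32_t)random(); /* kernel: prandom_u32() at init */
	struct flow_keys_model k = {
		.src = 0x0a000001, .dst = 0x0a000002,
		.ports = (12345u << 16) | 80u, .ip_proto = 6,
	};

	printf("flow hash: 0x%08x\n", flow_hash(&k, perturbation));
	return 0;
}
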
198 struct hhf_sched_data *q) in seek_list() argument
207 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in seek_list()
217 q->hh_flows_current_cnt--; in seek_list()
229 struct hhf_sched_data *q) in alloc_new_hh() argument
237 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in alloc_new_hh()
244 if (q->hh_flows_current_cnt >= q->hh_flows_limit) { in alloc_new_hh()
245 q->hh_flows_overlimit++; in alloc_new_hh()
253 q->hh_flows_current_cnt++; in alloc_new_hh()
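
seek_list() (lines 198-217) walks one hash bucket of the exact-matching heavy-hitter table and evicts entries whose hit_timestamp has aged past hhf_evict_timeout, while alloc_new_hh() (lines 229-253) recycles a stale entry or allocates a new one, bumping hh_flows_overlimit once hh_flows_limit is reached. Below is a small sketch of the aging test those two share; hhf_time_t and the reduced entry struct are assumptions, not the kernel's types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t hhf_time_t;

static bool hhf_time_before(hhf_time_t a, hhf_time_t b)
{
	/* True if a is earlier than b, tolerant of counter wraparound,
	 * like the kernel's time_before() on jiffies. */
	return (int32_t)(a - b) < 0;
}

struct hh_flow_state_model {      /* reduced model of the kernel entry */
	uint32_t   hash_id;
	hhf_time_t hit_timestamp;
};

/* An entry is stale once "now" has passed hit_timestamp + evict_timeout;
 * seek_list() unlinks such entries and alloc_new_hh() may recycle one. */
static bool hh_entry_stale(const struct hh_flow_state_model *flow,
			   hhf_time_t now, hhf_time_t evict_timeout)
{
	hhf_time_t prev = flow->hit_timestamp + evict_timeout;

	return hhf_time_before(prev, now);
}

int main(void)
{
	struct hh_flow_state_model f = { .hash_id = 42, .hit_timestamp = 1000 };

	printf("stale at t=1100 (timeout 250)? %d\n", hh_entry_stale(&f, 1100, 250));
	printf("stale at t=1300 (timeout 250)? %d\n", hh_entry_stale(&f, 1300, 250));
	return 0;
}
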
265 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_classify() local
275 prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout; in hhf_classify()
278 bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN); in hhf_classify()
279 q->hhf_arrays_reset_timestamp = now; in hhf_classify()
283 hash = skb_hash(q, skb); in hhf_classify()
287 flow = seek_list(hash, &q->hh_flows[flow_pos], q); in hhf_classify()
310 if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) { in hhf_classify()
311 q->hhf_arrays[i][filter_pos[i]] = 0; in hhf_classify()
312 __set_bit(filter_pos[i], q->hhf_valid_bits[i]); in hhf_classify()
315 val = q->hhf_arrays[i][filter_pos[i]] + pkt_len; in hhf_classify()
321 if (min_hhf_val > q->hhf_admit_bytes) { in hhf_classify()
323 flow = alloc_new_hh(&q->hh_flows[flow_pos], q); in hhf_classify()
328 q->hh_flows_total_cnt++; in hhf_classify()
338 if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val) in hhf_classify()
339 q->hhf_arrays[i][filter_pos[i]] = min_hhf_val; in hhf_classify()
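
On a miss in the exact-matching table, hhf_classify() (lines 265-339) updates the multi-stage filter: stale counters are lazily zeroed via hhf_valid_bits, the packet length is added to one counter per stage, and the flow is promoted to heavy-hitter status only if the minimum counter exceeds hhf_admit_bytes; otherwise the counters are raised only up to that minimum (the "conservative update" at lines 338-339). The compilable model below keeps that admission logic; the stage count, stage length and per-stage indexing are assumptions chosen for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STAGES      4        /* stands in for HHF_ARRAYS_CNT */
#define STAGE_LEN   1024     /* stands in for HHF_ARRAYS_LEN */
#define ADMIT_BYTES 131072u  /* default hhf_admit_bytes: 128 KB */

static uint32_t filters[STAGES][STAGE_LEN];

/* Account one packet of pkt_len bytes for flow_hash in every stage and
 * return true once the flow qualifies as a heavy hitter. */
static bool msf_update(uint32_t flow_hash, uint32_t pkt_len)
{
	uint32_t pos[STAGES];
	uint32_t min_val = UINT32_MAX;
	int i;

	for (i = 0; i < STAGES; i++) {
		/* Hypothetical per-stage index; the kernel derives each
		 * stage's position from a different slice of the hash. */
		pos[i] = (flow_hash >> (i * 8)) % STAGE_LEN;

		uint32_t val = filters[i][pos[i]] + pkt_len;

		if (val < min_val)
			min_val = val;
	}

	/* New heavy hitter iff even the smallest stage counter is large. */
	if (min_val > ADMIT_BYTES)
		return true;

	/* Conservative update: raise each counter only up to the minimum,
	 * limiting over-counting caused by hash collisions. */
	for (i = 0; i < STAGES; i++) {
		if (filters[i][pos[i]] < min_val)
			filters[i][pos[i]] = min_val;
	}
	return false;
}

int main(void)
{
	int pkts = 1;

	while (!msf_update(0xdeadbeef, 1500))
		pkts++;
	printf("flow admitted as a heavy hitter after %d packets\n", pkts);
	return 0;
}
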
367 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_drop() local
371 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
373 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; in hhf_drop()
378 sch->q.qlen--; in hhf_drop()
385 return bucket - q->buckets; in hhf_drop()
390 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_enqueue() local
396 bucket = &q->buckets[idx]; in hhf_enqueue()
410 list_add_tail(&bucket->bucketchain, &q->old_buckets); in hhf_enqueue()
412 weight = q->hhf_non_hh_weight; in hhf_enqueue()
413 list_add_tail(&bucket->bucketchain, &q->new_buckets); in hhf_enqueue()
415 bucket->deficit = weight * q->quantum; in hhf_enqueue()
417 if (++sch->q.qlen <= sch->limit) in hhf_enqueue()
420 q->drop_overlimit++; in hhf_enqueue()
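
hhf_enqueue() (lines 390-420) places the packet in the WDRR bucket chosen by hhf_classify() and, if that bucket was idle, links it back onto a scheduling list with a fresh deficit of weight * quantum: the heavy-hitter bucket goes straight onto old_buckets with weight 1, while the non-heavy-hitter bucket joins new_buckets with the larger hhf_non_hh_weight. Packets beyond sch->limit are dropped and counted in drop_overlimit. A reduced model of the activation step follows; list membership is collapsed to an enum here, unlike the kernel's list_head chains.

#include <stdio.h>

enum sched_list { LIST_NONE, LIST_NEW, LIST_OLD };

struct wdrr_bucket_model {
	enum sched_list on_list;  /* stand-in for bucketchain membership */
	int deficit;
	int is_heavy_hitter;
};

/* Activation of an idle bucket, as in hhf_enqueue(): heavy hitters are
 * appended to old_buckets with weight 1; everything else joins
 * new_buckets with the (larger) non-heavy-hitter weight. */
static void bucket_activate(struct wdrr_bucket_model *b,
			    unsigned int quantum,
			    unsigned int non_hh_weight)
{
	unsigned int weight = 1;

	if (b->is_heavy_hitter) {
		b->on_list = LIST_OLD;
	} else {
		weight = non_hh_weight;
		b->on_list = LIST_NEW;
	}
	b->deficit = (int)(weight * quantum);
}

int main(void)
{
	struct wdrr_bucket_model hh    = { .is_heavy_hitter = 1 };
	struct wdrr_bucket_model other = { .is_heavy_hitter = 0 };

	bucket_activate(&hh, 1514, 2);
	bucket_activate(&other, 1514, 2);
	printf("HH deficit: %d, non-HH deficit: %d\n", hh.deficit, other.deficit);
	return 0;
}
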
434 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_dequeue() local
440 head = &q->new_buckets; in hhf_dequeue()
442 head = &q->old_buckets; in hhf_dequeue()
449 int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ? in hhf_dequeue()
450 1 : q->hhf_non_hh_weight; in hhf_dequeue()
452 bucket->deficit += weight * q->quantum; in hhf_dequeue()
453 list_move_tail(&bucket->bucketchain, &q->old_buckets); in hhf_dequeue()
459 sch->q.qlen--; in hhf_dequeue()
465 if ((head == &q->new_buckets) && !list_empty(&q->old_buckets)) in hhf_dequeue()
466 list_move_tail(&bucket->bucketchain, &q->old_buckets); in hhf_dequeue()
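
hhf_dequeue() (lines 434-466) serves new_buckets before old_buckets and applies deficit round-robin: a bucket whose deficit is exhausted is recharged by weight * quantum and rotated to the tail of old_buckets before it may send again, which is what throttles the heavy-hitter bucket relative to everything else. The sketch below keeps the deficit arithmetic but collapses the new/old list rotation into a fixed service order, so it is a simplification of the kernel's scheduling, not a faithful copy.

#include <stdio.h>

#define QUANTUM       1514
#define NON_HH_WEIGHT 2

struct bucket_model {
	const char *name;
	int weight;
	int deficit;
	const int *pkt_len;   /* queued packet sizes */
	int head, cnt;
};

/* Try to dequeue one packet from b under the DRR deficit rule.
 * Returns the packet length, 0 if the deficit had to be recharged,
 * or -1 if the bucket is empty. */
static int drr_try_dequeue(struct bucket_model *b)
{
	if (b->head >= b->cnt)
		return -1;
	if (b->deficit <= 0) {
		b->deficit += b->weight * QUANTUM; /* recharge, yield the turn */
		return 0;
	}
	b->deficit -= b->pkt_len[b->head];
	return b->pkt_len[b->head++];
}

int main(void)
{
	static const int hh_pkts[]  = { 1514, 1514, 1514, 1514, 1514, 1514 };
	static const int web_pkts[] = { 1514, 200, 1514, 200 };
	struct bucket_model hh  = { "hh",  1, QUANTUM, hh_pkts, 0, 6 };
	struct bucket_model web = { "web", NON_HH_WEIGHT,
				    NON_HH_WEIGHT * QUANTUM, web_pkts, 0, 4 };
	struct bucket_model *order[] = { &web, &hh };   /* non-HH served first */
	int progress = 1;

	while (progress) {
		progress = 0;
		for (int i = 0; i < 2; i++) {
			int len = drr_try_dequeue(order[i]);

			if (len > 0) {
				printf("sent %4d bytes from %s\n", len, order[i]->name);
				progress = 1;
			} else if (len == 0) {
				progress = 1;  /* recharged; try again next pass */
			}
		}
	}
	return 0;
}
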
503 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_destroy() local
506 hhf_free(q->hhf_arrays[i]); in hhf_destroy()
507 hhf_free(q->hhf_valid_bits[i]); in hhf_destroy()
512 struct list_head *head = &q->hh_flows[i]; in hhf_destroy()
521 hhf_free(q->hh_flows); in hhf_destroy()
536 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_change() local
541 u32 new_quantum = q->quantum; in hhf_change()
542 u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight; in hhf_change()
566 q->quantum = new_quantum; in hhf_change()
567 q->hhf_non_hh_weight = new_hhf_non_hh_weight; in hhf_change()
570 q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]); in hhf_change()
575 q->hhf_reset_timeout = usecs_to_jiffies(us); in hhf_change()
579 q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]); in hhf_change()
584 q->hhf_evict_timeout = usecs_to_jiffies(us); in hhf_change()
587 qlen = sch->q.qlen; in hhf_change()
588 while (sch->q.qlen > sch->limit) { in hhf_change()
593 qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); in hhf_change()
601 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_init() local
605 q->quantum = psched_mtu(qdisc_dev(sch)); in hhf_init()
606 q->perturbation = prandom_u32(); in hhf_init()
607 INIT_LIST_HEAD(&q->new_buckets); in hhf_init()
608 INIT_LIST_HEAD(&q->old_buckets); in hhf_init()
611 q->hhf_reset_timeout = HZ / 25; /* 40 ms */ in hhf_init()
612 q->hhf_admit_bytes = 131072; /* 128 KB */ in hhf_init()
613 q->hhf_evict_timeout = HZ; /* 1 sec */ in hhf_init()
614 q->hhf_non_hh_weight = 2; in hhf_init()
623 if (!q->hh_flows) { in hhf_init()
625 q->hh_flows = hhf_zalloc(HH_FLOWS_CNT * in hhf_init()
627 if (!q->hh_flows) in hhf_init()
630 INIT_LIST_HEAD(&q->hh_flows[i]); in hhf_init()
633 q->hh_flows_limit = 2 * HH_FLOWS_CNT; in hhf_init()
634 q->hh_flows_overlimit = 0; in hhf_init()
635 q->hh_flows_total_cnt = 0; in hhf_init()
636 q->hh_flows_current_cnt = 0; in hhf_init()
640 q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN * in hhf_init()
642 if (!q->hhf_arrays[i]) { in hhf_init()
647 q->hhf_arrays_reset_timestamp = hhf_time_stamp(); in hhf_init()
651 q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN / in hhf_init()
653 if (!q->hhf_valid_bits[i]) { in hhf_init()
661 struct wdrr_bucket *bucket = q->buckets + i; in hhf_init()
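
hhf_init() (lines 601-661) seeds the hash perturbation, sets the quantum to the device MTU, allocates the flow table, counter arrays and valid-bit bitmaps, and installs the defaults visible above: 40 ms filter reset timeout, 128 KB admission threshold, 1 s eviction timeout and a non-heavy-hitter weight of 2. A reduced summary of those tunables as a plain C initializer; the struct, the MTU value and the HH_FLOWS_CNT figure are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

struct hhf_defaults {
	uint32_t quantum_bytes;       /* psched_mtu(dev): one MTU */
	uint32_t reset_timeout_ms;    /* HZ / 25 -> 40 ms */
	uint32_t admit_bytes;         /* 131072 -> 128 KB */
	uint32_t evict_timeout_ms;    /* HZ -> 1000 ms */
	uint32_t non_hh_weight;       /* 2 */
	uint32_t hh_flows_limit;      /* 2 * HH_FLOWS_CNT */
};

int main(void)
{
	const struct hhf_defaults d = {
		.quantum_bytes    = 1514,   /* assumes a standard Ethernet MTU */
		.reset_timeout_ms = 40,
		.admit_bytes      = 131072,
		.evict_timeout_ms = 1000,
		.non_hh_weight    = 2,
		.hh_flows_limit   = 2 * 64, /* assumes HH_FLOWS_CNT == 64 */
	};

	printf("admit threshold: %u bytes, non-HH weight: %u\n",
	       d.admit_bytes, d.non_hh_weight);
	return 0;
}
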
672 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_dump() local
680 nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) || in hhf_dump()
681 nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) || in hhf_dump()
683 jiffies_to_usecs(q->hhf_reset_timeout)) || in hhf_dump()
684 nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) || in hhf_dump()
686 jiffies_to_usecs(q->hhf_evict_timeout)) || in hhf_dump()
687 nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight)) in hhf_dump()
698 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_dump_stats() local
700 .drop_overlimit = q->drop_overlimit, in hhf_dump_stats()
701 .hh_overlimit = q->hh_flows_overlimit, in hhf_dump_stats()
702 .hh_tot_count = q->hh_flows_total_cnt, in hhf_dump_stats()
703 .hh_cur_count = q->hh_flows_current_cnt, in hhf_dump_stats()
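
hhf_dump_stats() copies these counters into the xstats structure reported to userspace. For reference, a model of that layout, mirroring struct tc_hhf_xstats from include/uapi/linux/pkt_sched.h (field comments paraphrased):

#include <linux/types.h>   /* __u32 */

struct tc_hhf_xstats_model {
	__u32 drop_overlimit;	/* times the qdisc packet limit was hit */
	__u32 hh_overlimit;	/* times hh_flows_limit was hit */
	__u32 hh_tot_count;	/* heavy hitters captured so far */
	__u32 hh_cur_count;	/* heavy hitters currently tracked */
};
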