Lines Matching refs: qp (IPv4 fragment reassembly, net/ipv4/ip_fragment.c)
98 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
125 const struct ipq *qp; in ip4_frag_match() local
128 qp = container_of(q, struct ipq, q); in ip4_frag_match()
129 return qp->id == arg->iph->id && in ip4_frag_match()
130 qp->saddr == arg->iph->saddr && in ip4_frag_match()
131 qp->daddr == arg->iph->daddr && in ip4_frag_match()
132 qp->protocol == arg->iph->protocol && in ip4_frag_match()
133 qp->user == arg->user && in ip4_frag_match()
134 qp->vif == arg->vif; in ip4_frag_match()
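The six comparisons in ip4_frag_match() define a fragment queue's identity: two fragments are merged only if they agree on IP id, source, destination, protocol, defrag user and VRF interface index. Below is a minimal, userspace-only sketch of that key comparison; the frag_key type and names are illustrative stand-ins, not the kernel's struct ipq or its lookup argument.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-in for the keys compared above; not the real struct ipq. */
    struct frag_key {
        uint16_t id;        /* IP header Identification field */
        uint32_t saddr;     /* source address (network byte order) */
        uint32_t daddr;     /* destination address (network byte order) */
        uint8_t  protocol;  /* IP protocol number */
        uint32_t user;      /* defrag "user" (who asked for defragmentation) */
        int      vif;       /* L3 master device index */
    };

    /* A fragment joins an existing queue only if every key matches. */
    bool frag_key_match(const struct frag_key *a, const struct frag_key *b)
    {
        return a->id == b->id &&
               a->saddr == b->saddr &&
               a->daddr == b->daddr &&
               a->protocol == b->protocol &&
               a->user == b->user &&
               a->vif == b->vif;
    }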
139 struct ipq *qp = container_of(q, struct ipq, q); in ip4_frag_init() local
146 qp->protocol = arg->iph->protocol; in ip4_frag_init()
147 qp->id = arg->iph->id; in ip4_frag_init()
148 qp->ecn = ip4_frag_ecn(arg->iph->tos); in ip4_frag_init()
149 qp->saddr = arg->iph->saddr; in ip4_frag_init()
150 qp->daddr = arg->iph->daddr; in ip4_frag_init()
151 qp->vif = arg->vif; in ip4_frag_init()
152 qp->user = arg->user; in ip4_frag_init()
153 qp->peer = sysctl_ipfrag_max_dist ? in ip4_frag_init()
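ip4_frag_init() copies the identity fields out of the first fragment's IP header and records an ECN summary for the queue; later fragments OR their own ECN bit into it (line 489 above) and reassembly consults a verdict table indexed by that mask (line 537). The sketch below shows that ECN bookkeeping, assuming the standard encoding of the ECN field in the low two TOS bits; the macro name and one-hot layout are illustrative, not the kernel's constants.

    #include <stdint.h>

    #define ECN_FIELD_MASK 0x03u   /* low two bits of the TOS byte carry the ECN codepoint */

    /* Map a TOS byte to a one-hot bit for its ECN codepoint
     * (bit 0 = Not-ECT, bit 1 = ECT(1), bit 2 = ECT(0), bit 3 = CE). */
    uint8_t ecn_bit(uint8_t tos)
    {
        return (uint8_t)(1u << (tos & ECN_FIELD_MASK));
    }

    /* Per-queue summary: OR in the bit for every fragment seen, so reassembly
     * can tell which codepoints were mixed within one datagram. */
    void ecn_accumulate(uint8_t *queue_ecn, uint8_t tos)
    {
        *queue_ecn |= ecn_bit(tos);
    }

Keeping one bit per observed codepoint lets the reassembly path decide, with a single table lookup, whether the fragments of one datagram carried conflicting ECN markings.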
160 struct ipq *qp; in ip4_frag_free() local
162 qp = container_of(q, struct ipq, q); in ip4_frag_free()
163 if (qp->peer) in ip4_frag_free()
164 inet_putpeer(qp->peer); in ip4_frag_free()
197 struct ipq *qp; in ip_expire() local
200 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); in ip_expire()
201 net = container_of(qp->q.net, struct net, ipv4.frags); in ip_expire()
203 spin_lock(&qp->q.lock); in ip_expire()
205 if (qp->q.flags & INET_FRAG_COMPLETE) in ip_expire()
208 ipq_kill(qp); in ip_expire()
211 if (!inet_frag_evicting(&qp->q)) { in ip_expire()
212 struct sk_buff *head = qp->q.fragments; in ip_expire()
218 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) in ip_expire()
222 head->dev = dev_get_by_index_rcu(net, qp->iif); in ip_expire()
236 if (frag_expire_skip_icmp(qp->user) && in ip_expire()
246 spin_unlock(&qp->q.lock); in ip_expire()
247 ipq_put(qp); in ip_expire()
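ip_expire() runs when a queue's reassembly timer fires: the queue is locked, killed via ipq_kill(), and an ICMP "fragment reassembly time exceeded" error is sent back only under the conditions visible above: the expiry is not mere memory-pressure eviction, the offset-0 fragment actually arrived, the arrival device can still be resolved, and the defrag user is one for which the error is appropriate. Below is a simplified sketch of that gatekeeping; the flag values, field names and the user check are illustrative stand-ins, and the real code applies a finer-grained test on the defrag user and the route type.

    #include <stdbool.h>

    /* Illustrative stand-ins; not the kernel's types or flag values. */
    #define FRAG_FIRST_IN  0x1   /* fragment at offset 0 has been queued */
    #define FRAG_COMPLETE  0x4   /* datagram already reassembled or dropped */

    struct frag_queue_state {
        unsigned int flags;
        bool has_fragments;      /* fragment list is non-empty */
        bool being_evicted;      /* expiring only due to memory pressure */
        bool dev_still_exists;   /* arrival interface can still be looked up */
        bool user_wants_icmp;    /* e.g. local delivery, not a conntrack/bridge hook */
    };

    /* Should the expiry path emit ICMP_TIME_EXCEEDED / ICMP_EXC_FRAGTIME? */
    bool should_send_frag_time_exceeded(const struct frag_queue_state *q)
    {
        if (q->flags & FRAG_COMPLETE)
            return false;               /* nothing left to report */
        if (q->being_evicted)
            return false;               /* eviction is silent */
        if (!(q->flags & FRAG_FIRST_IN) || !q->has_fragments)
            return false;               /* no first fragment to build the ICMP payload from */
        if (!q->dev_still_exists)
            return false;
        return q->user_wants_icmp;
    }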
275 static int ip_frag_too_far(struct ipq *qp) in ip_frag_too_far() argument
277 struct inet_peer *peer = qp->peer; in ip_frag_too_far()
286 start = qp->rid; in ip_frag_too_far()
288 qp->rid = end; in ip_frag_too_far()
290 rc = qp->q.fragments && (end - start) > max; in ip_frag_too_far()
295 net = container_of(qp->q.net, struct net, ipv4.frags); in ip_frag_too_far()
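ip_frag_too_far() is a per-source heuristic: each peer carries a counter bumped for every fragment it sends, and the queue remembers the counter value from its previous fragment (rid). If more than sysctl_ipfrag_max_dist unrelated fragments from the same peer arrived in between while this queue is still incomplete, the queue is treated as stale. A sketch under those assumptions, with simplified non-atomic types:

    #include <stdbool.h>

    struct peer_state {
        unsigned int rid;        /* per-peer fragment counter */
    };

    struct queue_state {
        unsigned int rid;        /* peer counter value when we last queued */
        bool has_fragments;      /* queue still holds unreassembled data */
    };

    bool frag_too_far(struct queue_state *q, struct peer_state *peer,
                      unsigned int max_dist)
    {
        unsigned int start, end;

        if (!peer || !max_dist)
            return false;        /* heuristic disabled */

        start = q->rid;
        end = ++peer->rid;       /* the kernel increments this atomically */
        q->rid = end;

        /* Unsigned subtraction keeps the distance check wrap-safe. */
        return q->has_fragments && (end - start) > max_dist;
    }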
302 static int ip_frag_reinit(struct ipq *qp) in ip_frag_reinit() argument
307 if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) { in ip_frag_reinit()
308 atomic_inc(&qp->q.refcnt); in ip_frag_reinit()
312 fp = qp->q.fragments; in ip_frag_reinit()
320 sub_frag_mem_limit(qp->q.net, sum_truesize); in ip_frag_reinit()
322 qp->q.flags = 0; in ip_frag_reinit()
323 qp->q.len = 0; in ip_frag_reinit()
324 qp->q.meat = 0; in ip_frag_reinit()
325 qp->q.fragments = NULL; in ip_frag_reinit()
326 qp->q.fragments_tail = NULL; in ip_frag_reinit()
327 qp->iif = 0; in ip_frag_reinit()
328 qp->ecn = 0; in ip_frag_reinit()
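When the distance check fires, ip_frag_reinit() recycles the queue in place: the timer is re-armed (lines 307-308), every queued buffer is freed while its charged memory is summed and returned to the per-netns budget (line 320), and the bookkeeping fields are cleared (lines 322-328). The sketch below shows that reset in simplified form, with a plain singly linked buffer list standing in for the sk_buff chain and a size_t counter standing in for the fragment memory limit.

    #include <stdlib.h>
    #include <stddef.h>

    /* Stand-ins for the kernel structures; names and fields are illustrative. */
    struct buf {
        struct buf *next;
        size_t truesize;         /* memory charged for this buffer */
    };

    struct frag_queue {
        unsigned int flags;
        size_t len;              /* expected total datagram length */
        size_t meat;             /* payload bytes received so far */
        struct buf *fragments;
        struct buf *fragments_tail;
        int iif;
        unsigned char ecn;
    };

    /* Free all queued buffers, give their memory back to the shared budget,
     * and return the queue to its freshly initialised state. */
    void frag_queue_reinit(struct frag_queue *q, size_t *mem_budget_used)
    {
        size_t sum_truesize = 0;
        struct buf *fp = q->fragments;

        while (fp) {
            struct buf *next = fp->next;

            sum_truesize += fp->truesize;
            free(fp);
            fp = next;
        }
        *mem_budget_used -= sum_truesize;

        q->flags = 0;
        q->len = 0;
        q->meat = 0;
        q->fragments = NULL;
        q->fragments_tail = NULL;
        q->iif = 0;
        q->ecn = 0;
    }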
334 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) in ip_frag_queue() argument
344 if (qp->q.flags & INET_FRAG_COMPLETE) in ip_frag_queue()
348 unlikely(ip_frag_too_far(qp)) && in ip_frag_queue()
349 unlikely(err = ip_frag_reinit(qp))) { in ip_frag_queue()
350 ipq_kill(qp); in ip_frag_queue()
370 if (end < qp->q.len || in ip_frag_queue()
371 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) in ip_frag_queue()
373 qp->q.flags |= INET_FRAG_LAST_IN; in ip_frag_queue()
374 qp->q.len = end; in ip_frag_queue()
381 if (end > qp->q.len) { in ip_frag_queue()
383 if (qp->q.flags & INET_FRAG_LAST_IN) in ip_frag_queue()
385 qp->q.len = end; in ip_frag_queue()
403 prev = qp->q.fragments_tail; in ip_frag_queue()
409 for (next = qp->q.fragments; next != NULL; next = next->next) { in ip_frag_queue()
448 qp->q.meat -= i; in ip_frag_queue()
463 qp->q.fragments = next; in ip_frag_queue()
465 qp->q.meat -= free_it->len; in ip_frag_queue()
466 sub_frag_mem_limit(qp->q.net, free_it->truesize); in ip_frag_queue()
476 qp->q.fragments_tail = skb; in ip_frag_queue()
480 qp->q.fragments = skb; in ip_frag_queue()
484 qp->iif = dev->ifindex; in ip_frag_queue()
487 qp->q.stamp = skb->tstamp; in ip_frag_queue()
488 qp->q.meat += skb->len; in ip_frag_queue()
489 qp->ecn |= ecn; in ip_frag_queue()
490 add_frag_mem_limit(qp->q.net, skb->truesize); in ip_frag_queue()
492 qp->q.flags |= INET_FRAG_FIRST_IN; in ip_frag_queue()
496 if (fragsize > qp->q.max_size) in ip_frag_queue()
497 qp->q.max_size = fragsize; in ip_frag_queue()
500 fragsize > qp->max_df_size) in ip_frag_queue()
501 qp->max_df_size = fragsize; in ip_frag_queue()
503 if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && in ip_frag_queue()
504 qp->q.meat == qp->q.len) { in ip_frag_queue()
508 err = ip_frag_reasm(qp, prev, dev); in ip_frag_queue()
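ip_frag_queue() accepts one fragment: it validates and trims it against what is already queued, links it at the right position, and tracks completion with two flag bits (first and last fragment seen) plus meat, the number of payload bytes queued so far. Lines 503-508 show the trigger for reassembly: both boundary fragments are present and meat equals the expected total length, i.e. there are no holes. A small sketch of that bookkeeping with illustrative flag names; it assumes overlaps have already been trimmed, as the real insertion code does before accounting.

    #include <stdbool.h>
    #include <stddef.h>

    #define FRAG_FIRST_IN 0x1    /* fragment at offset 0 queued */
    #define FRAG_LAST_IN  0x2    /* fragment with MF == 0 queued */

    struct frag_progress {
        unsigned int flags;
        size_t len;              /* total length, known once the last fragment arrives */
        size_t meat;             /* payload bytes queued so far */
    };

    /* Account for one accepted, already-trimmed fragment.  `end` is
     * offset + payload length; `more_fragments` is the IP header's MF bit. */
    void frag_account(struct frag_progress *p, size_t offset, size_t end,
                      bool more_fragments)
    {
        if (offset == 0)
            p->flags |= FRAG_FIRST_IN;
        if (!more_fragments) {
            p->flags |= FRAG_LAST_IN;
            p->len = end;        /* the last fragment fixes the datagram length */
        }
        p->meat += end - offset;
    }

    /* Reassembly can run once both ends are in and no bytes are missing. */
    bool frag_complete(const struct frag_progress *p)
    {
        return (p->flags & (FRAG_FIRST_IN | FRAG_LAST_IN)) ==
                   (FRAG_FIRST_IN | FRAG_LAST_IN) &&
               p->meat == p->len;
    }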
524 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, in ip_frag_reasm() argument
527 struct net *net = container_of(qp->q.net, struct net, ipv4.frags); in ip_frag_reasm()
529 struct sk_buff *fp, *head = qp->q.fragments; in ip_frag_reasm()
535 ipq_kill(qp); in ip_frag_reasm()
537 ecn = ip_frag_ecn_table[qp->ecn]; in ip_frag_reasm()
551 qp->q.fragments_tail = fp; in ip_frag_reasm()
554 skb_morph(head, qp->q.fragments); in ip_frag_reasm()
555 head->next = qp->q.fragments->next; in ip_frag_reasm()
557 consume_skb(qp->q.fragments); in ip_frag_reasm()
558 qp->q.fragments = head; in ip_frag_reasm()
566 len = ihlen + qp->q.len; in ip_frag_reasm()
597 add_frag_mem_limit(qp->q.net, clone->truesize); in ip_frag_reasm()
612 sub_frag_mem_limit(qp->q.net, head->truesize); in ip_frag_reasm()
616 head->tstamp = qp->q.stamp; in ip_frag_reasm()
617 IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); in ip_frag_reasm()
631 if (qp->max_df_size == qp->q.max_size) { in ip_frag_reasm()
641 qp->q.fragments = NULL; in ip_frag_reasm()
642 qp->q.fragments_tail = NULL; in ip_frag_reasm()
646 net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); in ip_frag_reasm()
650 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); in ip_frag_reasm()
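ip_frag_reasm() stitches the complete queue back into one packet: the offset-0 fragment becomes the head (morphing it if it was not the first to arrive, lines 554-558), the remaining payloads are appended in order, the charged memory is released, and the rebuilt header gets the queue's timestamp, the ECN verdict and frag_max_size. The sketch below is a deliberately simplified userspace version of just the gluing step, assuming a sorted, overlap-free fragment list.

    #include <stdlib.h>
    #include <string.h>
    #include <stddef.h>

    /* Illustrative fragment record: payload bytes plus their offset in the
     * original datagram.  The list is assumed sorted and free of overlaps,
     * which the insertion path guarantees before reassembly runs. */
    struct frag {
        struct frag *next;
        size_t offset;
        size_t len;
        const unsigned char *data;
    };

    /* Glue a complete fragment list into one contiguous buffer of total_len
     * bytes.  Returns NULL on allocation failure (the analogous kernel path
     * logs "no memory for gluing queue", line 646 above). */
    unsigned char *frag_glue(const struct frag *list, size_t total_len)
    {
        unsigned char *out = malloc(total_len);
        const struct frag *fp;

        if (!out)
            return NULL;

        for (fp = list; fp; fp = fp->next)
            memcpy(out + fp->offset, fp->data, fp->len);

        return out;
    }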
661 struct ipq *qp; in ip_defrag() local
667 qp = ip_find(net, ip_hdr(skb), user, vif); in ip_defrag()
668 if (qp) { in ip_defrag()
671 spin_lock(&qp->q.lock); in ip_defrag()
673 ret = ip_frag_queue(qp, skb); in ip_defrag()
675 spin_unlock(&qp->q.lock); in ip_defrag()
676 ipq_put(qp); in ip_defrag()
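ip_defrag() itself is the locking and lifetime discipline around all of the above: ip_find() returns the queue with a reference held, the fragment is queued under the queue's spinlock, then the lock and the reference are dropped (the same spin_lock/ipq_put pairing appears in ip_expire() above). A sketch of that pattern, with a pthread mutex and a plain reference count standing in for the kernel's spinlock and atomic refcount; all names here are illustrative.

    #include <pthread.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Illustrative queue object; the kernel uses a spinlock, an atomic
     * reference count, and a dedicated destructor. */
    struct defrag_queue {
        pthread_mutex_t lock;
        int refcnt;
        /* ... fragment list, flags, len, meat ... */
    };

    void queue_put(struct defrag_queue *q)
    {
        /* The real code decrements atomically; the last reference frees the queue. */
        if (--q->refcnt == 0) {
            pthread_mutex_destroy(&q->lock);
            free(q);
        }
    }

    /* The ip_defrag()-style pattern: find takes a reference, the caller
     * queues the fragment under the lock, then releases both. */
    int defrag_one(struct defrag_queue *(*find)(void *key),
                   int (*queue_fragment)(struct defrag_queue *q, void *pkt),
                   void *key, void *pkt)
    {
        struct defrag_queue *q = find(key);   /* returns with a reference held */
        int ret;

        if (!q)
            return -ENOMEM;

        pthread_mutex_lock(&q->lock);
        ret = queue_fragment(q, pkt);
        pthread_mutex_unlock(&q->lock);

        queue_put(q);
        return ret;
    }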