Lines matching refs:req — references to the request_sock pointer req across the accept path, SYN-queue hashing and lookup, SYN-ACK retransmission and its per-request timer, child-socket cloning, and listener shutdown.

298 	struct request_sock *req;  in inet_csk_accept()  local
324 req = reqsk_queue_remove(queue); in inet_csk_accept()
325 newsk = req->sk; in inet_csk_accept()
329 tcp_rsk(req)->tfo_listener && in inet_csk_accept()
332 if (tcp_rsk(req)->tfo_listener) { in inet_csk_accept()
339 req->sk = NULL; in inet_csk_accept()
340 req = NULL; in inet_csk_accept()
346 if (req) in inet_csk_accept()
347 reqsk_put(req); in inet_csk_accept()
351 req = NULL; in inet_csk_accept()
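
The hits above (lines 298-351) are the accept path: a queued request is removed from the accept queue, its child socket is detached, and the request's reference is dropped unless it is a TCP Fast Open request that must outlive the accept call. Below is a minimal userspace sketch of that ownership pattern only; the fake_* types and helpers are hypothetical stand-ins for request_sock and reqsk_put(), not the kernel code itself.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a request_sock and its reference count. */
struct fake_request {
    int refcnt;
    int tfo_listener;          /* set for TCP Fast Open requests */
    const char *child;         /* stands in for req->sk */
    struct fake_request *next; /* accept-queue link (req->dl_next) */
};

static void fake_reqsk_put(struct fake_request *req)
{
    if (--req->refcnt == 0)
        free(req);
}

/* Pop one request and hand its child back, mirroring the shape of the
 * accept path above: TFO requests keep the request alive, others are
 * released once the child has been detached. */
static const char *fake_accept(struct fake_request **queue)
{
    struct fake_request *req = *queue;
    const char *child;

    if (!req)
        return NULL;
    *queue = req->next;
    child = req->child;
    if (req->tfo_listener)
        req->child = NULL;   /* request stays around for the TFO child */
    else
        fake_reqsk_put(req); /* normal case: drop the queue's reference */
    return child;
}

int main(void)
{
    struct fake_request *req = calloc(1, sizeof(*req));
    struct fake_request *queue;

    if (!req)
        return 1;
    req->refcnt = 1;
    req->child = "child-socket";
    queue = req;
    printf("accepted: %s\n", fake_accept(&queue));
    return 0;
}
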
404 const struct request_sock *req) in inet_csk_route_req() argument
406 const struct inet_request_sock *ireq = inet_rsk(req); in inet_csk_route_req()
417 security_req_classify_flow(req, flowi4_to_flowi(fl4)); in inet_csk_route_req()
435 const struct request_sock *req) in inet_csk_route_child_sock() argument
437 const struct inet_request_sock *ireq = inet_rsk(req); in inet_csk_route_child_sock()
454 security_req_classify_flow(req, flowi4_to_flowi(fl4)); in inet_csk_route_child_sock()
494 struct request_sock *req; in inet_csk_search_req() local
499 for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) { in inet_csk_search_req()
500 const struct inet_request_sock *ireq = inet_rsk(req); in inet_csk_search_req()
505 AF_INET_FAMILY(req->rsk_ops->family)) { in inet_csk_search_req()
506 atomic_inc(&req->rsk_refcnt); in inet_csk_search_req()
507 WARN_ON(req->sk); in inet_csk_search_req()
513 return req; in inet_csk_search_req()
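
Lines 494-513 are the SYN-queue lookup: walk the hash chain for the bucket, compare the remote endpoint, and take a reference on a match before returning it. A self-contained sketch of that chained-hash walk follows; the fake_req fields are hypothetical simplifications of inet_request_sock, and the real lookup also matches on address family, which is elided here.

#include <stddef.h>
#include <stdint.h>

#define FAKE_SYNQ_SIZE 16 /* hypothetical table size */

/* Simplified stand-in for a queued connection request. */
struct fake_req {
    uint32_t rmt_addr;
    uint16_t rmt_port;
    int refcnt;
    struct fake_req *dl_next; /* next entry on the same hash chain */
};

/* Walk the chain for this bucket; on a match, take a reference and
 * return the entry, mirroring the lookup pattern in the listing. */
static struct fake_req *fake_search_req(struct fake_req *table[FAKE_SYNQ_SIZE],
                                        unsigned int hash,
                                        uint32_t rmt_addr, uint16_t rmt_port)
{
    struct fake_req *req;

    for (req = table[hash % FAKE_SYNQ_SIZE]; req != NULL; req = req->dl_next) {
        if (req->rmt_addr == rmt_addr && req->rmt_port == rmt_port) {
            req->refcnt++; /* caller now owns a reference */
            return req;
        }
    }
    return NULL;
}

int main(void)
{
    struct fake_req *table[FAKE_SYNQ_SIZE] = { NULL };
    struct fake_req r = { .rmt_addr = 0x0a000001, .rmt_port = 4242,
                          .refcnt = 1, .dl_next = NULL };

    table[3] = &r;
    return fake_search_req(table, 3, 0x0a000001, 4242) ? 0 : 1;
}
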
517 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, in inet_csk_reqsk_queue_hash_add() argument
522 const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr, in inet_csk_reqsk_queue_hash_add()
523 inet_rsk(req)->ir_rmt_port, in inet_csk_reqsk_queue_hash_add()
526 reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); in inet_csk_reqsk_queue_hash_add()
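
Lines 517-526 hash the request by its remote address and port before queueing it. The real inet_synq_hash() is a jhash-based mix salted with a per-listener random value; the sketch below only illustrates the shape (the same inputs plus a masked bucket index), and the mixing constants, FAKE_SYNQ_BUCKETS, and the hash_rnd parameter are assumptions.

#include <stdint.h>
#include <stdio.h>

#define FAKE_SYNQ_BUCKETS 256 /* hypothetical table size (a power of two) */

/* Toy mix of remote address, remote port and a per-listener random
 * value into a bucket index. Only the inputs and the final masking
 * step correspond to the listing; the mixing itself is illustrative. */
static unsigned int fake_synq_hash(uint32_t rmt_addr, uint16_t rmt_port,
                                   uint32_t hash_rnd)
{
    uint32_t h = rmt_addr ^ ((uint32_t)rmt_port << 16) ^ hash_rnd;

    h ^= h >> 16;
    h *= 0x45d9f3bu; /* arbitrary odd constant, illustration only */
    h ^= h >> 16;
    return h & (FAKE_SYNQ_BUCKETS - 1);
}

int main(void)
{
    printf("bucket = %u\n", fake_synq_hash(0x0a000001, 54321, 0x12345678u));
    return 0;
}
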
536 static inline void syn_ack_recalc(struct request_sock *req, const int thresh, in syn_ack_recalc() argument
542 *expire = req->num_timeout >= thresh; in syn_ack_recalc()
546 *expire = req->num_timeout >= thresh && in syn_ack_recalc()
547 (!inet_rsk(req)->acked || req->num_timeout >= max_retries); in syn_ack_recalc()
553 *resend = !inet_rsk(req)->acked || in syn_ack_recalc()
554 req->num_timeout >= rskq_defer_accept - 1; in syn_ack_recalc()
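
Lines 536-554 show the two branches of the SYN-ACK retry decision: a plain retry limit when TCP_DEFER_ACCEPT is off, and an acked/deferred check when it is on. A standalone restatement of that logic, with a hypothetical fake_req_state in place of the request_sock fields; the unconditional resend in the non-deferred branch is inferred from the surrounding code rather than visible in the hits above.

#include <stdbool.h>

/* Hypothetical flattened view of the state syn_ack_recalc() looks at. */
struct fake_req_state {
    int num_timeout; /* how many times the SYN-ACK timer has fired */
    bool acked;      /* peer's final ACK already seen (defer-accept case) */
};

/* Decide whether to give up on the request (*expire) and whether to
 * retransmit the SYN-ACK (*resend), following the two branches shown
 * in the listing: plain retry limit vs. TCP_DEFER_ACCEPT handling. */
static void fake_syn_ack_recalc(const struct fake_req_state *req, int thresh,
                                int max_retries, int defer_accept,
                                bool *expire, bool *resend)
{
    if (!defer_accept) {
        *expire = req->num_timeout >= thresh;
        *resend = true;
        return;
    }
    *expire = req->num_timeout >= thresh &&
              (!req->acked || req->num_timeout >= max_retries);
    /* keep retransmitting until the deferred-accept window runs out */
    *resend = !req->acked || req->num_timeout >= defer_accept - 1;
}

int main(void)
{
    struct fake_req_state req = { .num_timeout = 3, .acked = false };
    bool expire, resend;

    fake_syn_ack_recalc(&req, 5, 5, 8, &expire, &resend);
    return expire ? 1 : 0;
}
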
557 int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) in inet_rtx_syn_ack() argument
559 int err = req->rsk_ops->rtx_syn_ack(parent, req); in inet_rtx_syn_ack()
562 req->num_retrans++; in inet_rtx_syn_ack()
569 struct request_sock *req) in reqsk_queue_unlink() argument
578 for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; in reqsk_queue_unlink()
580 if (*prev == req) { in reqsk_queue_unlink()
581 *prev = req->dl_next; in reqsk_queue_unlink()
588 if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer)) in reqsk_queue_unlink()
589 reqsk_put(req); in reqsk_queue_unlink()
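
Lines 569-589 unlink a request from its SYN-table chain by walking a pointer to the previous next-slot. That pointer-to-pointer unlink is the part worth isolating; below is a minimal sketch with a hypothetical fake_node type.

#include <stdbool.h>
#include <stddef.h>

/* Minimal node standing in for a hashed request_sock. */
struct fake_node {
    struct fake_node *dl_next;
};

/* Unlink req from the chain headed at *head by walking a pointer to
 * each "next" slot, the same prev-pointer pattern the listing shows
 * for reqsk_queue_unlink(). Returns true if req was found and removed. */
static bool fake_unlink(struct fake_node **head, struct fake_node *req)
{
    struct fake_node **prev;

    for (prev = head; *prev != NULL; prev = &(*prev)->dl_next) {
        if (*prev == req) {
            *prev = req->dl_next;
            return true;
        }
    }
    return false;
}

int main(void)
{
    struct fake_node a = { NULL }, b = { NULL };
    struct fake_node *head = &a;

    a.dl_next = &b;
    return (fake_unlink(&head, &b) && a.dl_next == NULL) ? 0 : 1;
}

In the listing, a successful unlink is followed by del_timer_sync() on the pending rsk_timer plus a reqsk_put() (lines 588-589), and inet_csk_reqsk_queue_drop() (lines 593-597) then updates the queue accounting and drops the remaining reference.
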
593 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) in inet_csk_reqsk_queue_drop() argument
595 if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) { in inet_csk_reqsk_queue_drop()
596 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in inet_csk_reqsk_queue_drop()
597 reqsk_put(req); in inet_csk_reqsk_queue_drop()
604 struct request_sock *req = (struct request_sock *)data; in reqsk_timer_handler() local
605 struct sock *sk_listener = req->rsk_listener; in reqsk_timer_handler()
614 reqsk_put(req); in reqsk_timer_handler()
651 syn_ack_recalc(req, thresh, max_retries, defer_accept, in reqsk_timer_handler()
653 req->rsk_ops->syn_ack_timeout(req); in reqsk_timer_handler()
656 !inet_rtx_syn_ack(sk_listener, req) || in reqsk_timer_handler()
657 inet_rsk(req)->acked)) { in reqsk_timer_handler()
660 if (req->num_timeout++ == 0) in reqsk_timer_handler()
662 timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX); in reqsk_timer_handler()
663 mod_timer_pinned(&req->rsk_timer, jiffies + timeo); in reqsk_timer_handler()
666 inet_csk_reqsk_queue_drop(sk_listener, req); in reqsk_timer_handler()
667 reqsk_put(req); in reqsk_timer_handler()
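
Lines 604-667 are the per-request timer: on each expiry the handler either retransmits the SYN-ACK and re-arms itself with a doubled timeout capped at TCP_RTO_MAX, or drops the request. The capped doubling is easy to show on its own; the millisecond constants below are stand-ins for the jiffies-based TCP_TIMEOUT_INIT and TCP_RTO_MAX, so treat the absolute values as assumptions.

#include <stdio.h>

#define FAKE_TIMEOUT_INIT_MS 1000   /* stand-in initial SYN-ACK timeout */
#define FAKE_RTO_MAX_MS      120000 /* stand-in upper bound on the backoff */

/* Reproduce the doubling-with-cap schedule the timer handler re-arms
 * with: timeout = min(INIT << num_timeout, RTO_MAX). */
static unsigned int fake_synack_timeout_ms(unsigned int num_timeout)
{
    unsigned long t = (unsigned long)FAKE_TIMEOUT_INIT_MS << num_timeout;

    return t > FAKE_RTO_MAX_MS ? FAKE_RTO_MAX_MS : (unsigned int)t;
}

int main(void)
{
    for (unsigned int n = 0; n <= 8; n++)
        printf("timeout after %u expiries: %u ms\n",
               n, fake_synack_timeout_ms(n));
    return 0;
}
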
671 u32 hash, struct request_sock *req, in reqsk_queue_hash_req() argument
676 req->num_retrans = 0; in reqsk_queue_hash_req()
677 req->num_timeout = 0; in reqsk_queue_hash_req()
678 req->sk = NULL; in reqsk_queue_hash_req()
680 setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); in reqsk_queue_hash_req()
681 mod_timer_pinned(&req->rsk_timer, jiffies + timeout); in reqsk_queue_hash_req()
682 req->rsk_hash = hash; in reqsk_queue_hash_req()
688 atomic_set(&req->rsk_refcnt, 2); in reqsk_queue_hash_req()
691 req->dl_next = lopt->syn_table[hash]; in reqsk_queue_hash_req()
692 lopt->syn_table[hash] = req; in reqsk_queue_hash_req()
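
Lines 671-692 queue a fresh request into the SYN table: retry counters are reset, the bucket is recorded, the entry is published at the head of its chain, and the reference count starts at 2 (line 688). Arming rsk_timer with mod_timer_pinned() is part of the same step but omitted from the sketch; the fake_req2 type and the fixed-size table in main() are hypothetical.

#include <stddef.h>

/* Minimal stand-in for a request being queued into the SYN table. */
struct fake_req2 {
    int refcnt;
    int num_retrans;
    int num_timeout;
    unsigned int rsk_hash;
    struct fake_req2 *dl_next;
};

/* Mirror the queueing pattern in the listing: reset the retry counters,
 * record the bucket, set the reference count to 2 as at line 688, and
 * publish the entry at the head of its hash chain. The caller is
 * responsible for passing an in-range bucket index. */
static void fake_queue_hash_req(struct fake_req2 **table, unsigned int hash,
                                struct fake_req2 *req)
{
    req->num_retrans = 0;
    req->num_timeout = 0;
    req->rsk_hash = hash;
    req->refcnt = 2;

    req->dl_next = table[hash];
    table[hash] = req;
}

int main(void)
{
    struct fake_req2 *table[64] = { NULL };
    struct fake_req2 req = { 0 };

    fake_queue_hash_req(table, 7, &req);
    return table[7] == &req ? 0 : 1;
}
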
706 const struct request_sock *req, in inet_csk_clone_lock() argument
717 inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port; in inet_csk_clone_lock()
718 inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num; in inet_csk_clone_lock()
719 inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num); in inet_csk_clone_lock()
722 newsk->sk_mark = inet_rsk(req)->ir_mark; in inet_csk_clone_lock()
724 atomic64_read(&inet_rsk(req)->ir_cookie)); in inet_csk_clone_lock()
733 security_inet_csk_clone(newsk, req); in inet_csk_clone_lock()
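
Lines 706-733 show the clone path copying the request's addressing into the new socket; the detail worth calling out is byte order: inet_num keeps the local port in host order while inet_sport stores the same port via htons(). A small sketch with hypothetical flattened structs in place of inet_sock and inet_request_sock:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical flattened views of the fields copied in the listing. */
struct fake_ireq {
    uint16_t ir_rmt_port; /* remote port, network byte order */
    uint16_t ir_num;      /* local port, host byte order */
};

struct fake_newsk {
    uint16_t inet_dport;  /* network byte order */
    uint16_t inet_num;    /* host byte order */
    uint16_t inet_sport;  /* network byte order */
};

/* Copy the port fields the way the clone path does: the local port is
 * kept in host order in inet_num and converted with htons() for
 * inet_sport, while the remote port is already in network order. */
static void fake_clone_ports(struct fake_newsk *newsk,
                             const struct fake_ireq *ireq)
{
    newsk->inet_dport = ireq->ir_rmt_port;
    newsk->inet_num   = ireq->ir_num;
    newsk->inet_sport = htons(ireq->ir_num);
}

int main(void)
{
    struct fake_ireq ireq = { .ir_rmt_port = htons(54321), .ir_num = 80 };
    struct fake_newsk child;

    fake_clone_ports(&child, &ireq);
    printf("dport=%u sport=%u (local port %u)\n",
           (unsigned)ntohs(child.inet_dport),
           (unsigned)ntohs(child.inet_sport),
           (unsigned)child.inet_num);
    return 0;
}
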
829 struct request_sock *req; in inet_csk_listen_stop() local
844 while ((req = acc_req) != NULL) { in inet_csk_listen_stop()
845 struct sock *child = req->sk; in inet_csk_listen_stop()
847 acc_req = req->dl_next; in inet_csk_listen_stop()
860 if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { in inet_csk_listen_stop()
861 BUG_ON(tcp_sk(child)->fastopen_rsk != req); in inet_csk_listen_stop()
862 BUG_ON(sk != req->rsk_listener); in inet_csk_listen_stop()
879 reqsk_put(req); in inet_csk_listen_stop()
887 while ((req = acc_req) != NULL) { in inet_csk_listen_stop()
888 acc_req = req->dl_next; in inet_csk_listen_stop()
889 reqsk_put(req); in inet_csk_listen_stop()
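
Lines 829-889 drain the accept queue when the listener is torn down: one pass walks the queued requests and disposes of their children, with extra care for Fast Open requests still tied to a child (lines 860-862), and a final pass at lines 887-889 just puts whatever is left. The sketch below collapses both passes into one list drain; fake_acc_req is a hypothetical node type and free() stands in for the final reqsk_put().

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for an entry sitting on the accept queue. */
struct fake_acc_req {
    const char *child;            /* req->sk in the listing */
    struct fake_acc_req *dl_next; /* next queued request */
};

/* Drain the queue the way the listen-stop loops do: detach each request,
 * deal with its child, then release the request itself. */
static void fake_drain_accept_queue(struct fake_acc_req *acc_req)
{
    struct fake_acc_req *req;

    while ((req = acc_req) != NULL) {
        acc_req = req->dl_next;
        printf("closing child %s\n", req->child);
        free(req); /* stands in for the final reqsk_put() */
    }
}

int main(void)
{
    struct fake_acc_req *b = calloc(1, sizeof(*b));
    struct fake_acc_req *a = calloc(1, sizeof(*a));

    if (!a || !b)
        return 1;
    b->child = "child-2";
    a->child = "child-1";
    a->dl_next = b;
    fake_drain_accept_queue(a);
    return 0;
}
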