Excerpts from the linux-4.1.27 networking stack: the lines matching `sk` (the `struct sock` pointer) in each file, regrouped by function. Gaps left by the original line-oriented listing are marked "...".

/linux-4.1.27/net/netrom/
nr_timer.c — NET/ROM timer setup and expiry handlers:

void nr_init_timers(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);

    setup_timer(&nr->t1timer, nr_t1timer_expiry, (unsigned long)sk);
    setup_timer(&nr->t2timer, nr_t2timer_expiry, (unsigned long)sk);
    setup_timer(&nr->t4timer, nr_t4timer_expiry, (unsigned long)sk);
    setup_timer(&nr->idletimer, nr_idletimer_expiry, (unsigned long)sk);

    /* initialized by sock_init_data */
    sk->sk_timer.data     = (unsigned long)sk;
    sk->sk_timer.function = &nr_heartbeat_expiry;
}

void nr_start_t1timer(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);

    mod_timer(&nr->t1timer, jiffies + nr->t1);
}

void nr_start_t2timer(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);

    mod_timer(&nr->t2timer, jiffies + nr->t2);
}

void nr_start_t4timer(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);

    mod_timer(&nr->t4timer, jiffies + nr->t4);
}

void nr_start_idletimer(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);

    if (nr->idle > 0)
        mod_timer(&nr->idletimer, jiffies + nr->idle);
}

void nr_start_heartbeat(struct sock *sk)
{
    mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
}

void nr_stop_t1timer(struct sock *sk)   { del_timer(&nr_sk(sk)->t1timer); }
void nr_stop_t2timer(struct sock *sk)   { del_timer(&nr_sk(sk)->t2timer); }
void nr_stop_t4timer(struct sock *sk)   { del_timer(&nr_sk(sk)->t4timer); }
void nr_stop_idletimer(struct sock *sk) { del_timer(&nr_sk(sk)->idletimer); }
void nr_stop_heartbeat(struct sock *sk) { del_timer(&sk->sk_timer); }

int nr_t1timer_running(struct sock *sk)
{
    return timer_pending(&nr_sk(sk)->t1timer);
}

static void nr_heartbeat_expiry(unsigned long param)
{
    struct sock *sk = (struct sock *)param;
    struct nr_sock *nr = nr_sk(sk);

    bh_lock_sock(sk);
    switch (nr->state) {
    case NR_STATE_0:
        /* a destroyed or dead-but-unaccepted socket is reaped here */
        if (sock_flag(sk, SOCK_DESTROY) ||
            (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
            sock_hold(sk);
            bh_unlock_sock(sk);
            nr_destroy_socket(sk);
            sock_put(sk);
            return;
        }
        break;

    case NR_STATE_3:
        /* receive buffer drained below half: leave the busy condition */
        if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
            (nr->condition & NR_COND_OWN_RX_BUSY)) {
            nr->condition &= ~NR_COND_OWN_RX_BUSY;
            nr->condition &= ~NR_COND_ACK_PENDING;
            nr->vl = nr->vr;
            nr_write_internal(sk, NR_INFOACK);
        }
        break;
    }

    nr_start_heartbeat(sk);
    bh_unlock_sock(sk);
}

static void nr_t2timer_expiry(unsigned long param)
{
    struct sock *sk = (struct sock *)param;
    struct nr_sock *nr = nr_sk(sk);

    bh_lock_sock(sk);
    if (nr->condition & NR_COND_ACK_PENDING) {
        nr->condition &= ~NR_COND_ACK_PENDING;
        nr_enquiry_response(sk);
    }
    bh_unlock_sock(sk);
}

static void nr_t4timer_expiry(unsigned long param)
{
    struct sock *sk = (struct sock *)param;

    bh_lock_sock(sk);
    nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY;
    bh_unlock_sock(sk);
}

static void nr_idletimer_expiry(unsigned long param)
{
    struct sock *sk = (struct sock *)param;
    struct nr_sock *nr = nr_sk(sk);

    bh_lock_sock(sk);

    nr_clear_queues(sk);

    nr->n2count = 0;
    nr_write_internal(sk, NR_DISCREQ);
    nr->state = NR_STATE_2;

    nr_start_t1timer(sk);
    nr_stop_t2timer(sk);
    nr_stop_t4timer(sk);

    sk->sk_state     = TCP_CLOSE;
    sk->sk_err       = 0;
    sk->sk_shutdown |= SEND_SHUTDOWN;

    if (!sock_flag(sk, SOCK_DEAD)) {
        sk->sk_state_change(sk);
        sock_set_flag(sk, SOCK_DEAD);
    }
    bh_unlock_sock(sk);
}

static void nr_t1timer_expiry(unsigned long param)
{
    struct sock *sk = (struct sock *)param;
    struct nr_sock *nr = nr_sk(sk);

    bh_lock_sock(sk);
    switch (nr->state) {
    case NR_STATE_1:        /* connection request: retry or give up */
        if (nr->n2count == nr->n2) {
            nr_disconnect(sk, ETIMEDOUT);
            bh_unlock_sock(sk);
            return;
        } else {
            nr->n2count++;
            nr_write_internal(sk, NR_CONNREQ);
        }
        break;

    case NR_STATE_2:        /* disconnect request: retry or give up */
        if (nr->n2count == nr->n2) {
            nr_disconnect(sk, ETIMEDOUT);
            bh_unlock_sock(sk);
            return;
        } else {
            nr->n2count++;
            nr_write_internal(sk, NR_DISCREQ);
        }
        break;

    case NR_STATE_3:        /* data: requeue unacked frames or give up */
        if (nr->n2count == nr->n2) {
            nr_disconnect(sk, ETIMEDOUT);
            bh_unlock_sock(sk);
            return;
        } else {
            nr->n2count++;
            nr_requeue_frames(sk);
        }
        break;
    }

    nr_start_t1timer(sk);
    bh_unlock_sock(sk);
}
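These handlers use the pre-4.15 timer API: setup_timer() stores the socket as an opaque unsigned long and each handler casts it back. On kernels >= 4.15 the same wiring is written with timer_setup()/from_timer(), which recovers the containing structure type-safely. A minimal sketch of the t1 timer under that API (not the 4.1.27 source; it relies on nr_sock embedding struct sock as its first member):

static void nr_t1timer_expiry(struct timer_list *t)
{
    /* recover the nr_sock that embeds this timer_list */
    struct nr_sock *nr = from_timer(nr, t, t1timer);
    struct sock *sk = &nr->sock;

    bh_lock_sock(sk);
    /* ... same retransmission logic as above ... */
    bh_unlock_sock(sk);
}

void nr_init_timers(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);

    timer_setup(&nr->t1timer, nr_t1timer_expiry, 0);
    /* t2timer, t4timer, idletimer and sk->sk_timer: same pattern */
}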
af_netrom.c — NET/ROM socket layer (matched lines, regrouped by function):

static void nr_remove_socket(struct sock *sk)
{
    ...
    sk_del_node_init(sk);
    ...
}

static void nr_insert_socket(struct sock *sk)
{
    ...
    sk_add_node(sk, &nr_list);
    ...
}

nr_find_next_circuit():
    struct sock *sk;
    ...
    if ((sk = nr_find_socket(i, j)) == NULL)
        break;
    bh_unlock_sock(sk);
    ...

nr_destroy_timer():
    struct sock *sk = (struct sock *)data;

    bh_lock_sock(sk);
    sock_hold(sk);
    nr_destroy_socket(sk);
    bh_unlock_sock(sk);
    sock_put(sk);

void nr_destroy_socket(struct sock *sk)
{
    struct sk_buff *skb;

    nr_remove_socket(sk);

    nr_stop_heartbeat(sk);
    nr_stop_t1timer(sk);
    nr_stop_t2timer(sk);
    nr_stop_t4timer(sk);
    nr_stop_idletimer(sk);

    nr_clear_queues(sk);        /* Flush the queues */

    while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
        if (skb->sk != sk) {    /* A pending connection */
            /* queue the unaccepted socket for death */
            sock_set_flag(skb->sk, SOCK_DEAD);
            nr_start_heartbeat(skb->sk);
            nr_sk(skb->sk)->state = NR_STATE_0;
        }
        kfree_skb(skb);
    }

    if (sk_has_allocations(sk)) {
        /* buffers still outstanding: defer destruction */
        sk->sk_timer.function = nr_destroy_timer;
        sk->sk_timer.expires  = jiffies + 2 * HZ;
        add_timer(&sk->sk_timer);
    } else
        sock_put(sk);
}

nr_setsockopt() / nr_getsockopt():
    struct sock *sk = sock->sk;
    struct nr_sock *nr = nr_sk(sk);
    ...

nr_listen():
    struct sock *sk = sock->sk;

    lock_sock(sk);
    if (sk->sk_state != TCP_LISTEN) {
        memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
        sk->sk_max_ack_backlog = backlog;
        sk->sk_state           = TCP_LISTEN;
        release_sock(sk);
        return 0;
    }
    release_sock(sk);

    return -EOPNOTSUPP;

nr_create():
    struct sock *sk;
    struct nr_sock *nr;
    ...
    sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
    if (sk == NULL)
        return -ENOMEM;

    nr = nr_sk(sk);

    sock_init_data(sock, sk);
    ...
    sk->sk_protocol = protocol;
    ...
    nr_init_timers(sk);
    ...   /* default window, timer and ttl values come from sysctls */

nr_make_new(osk):
    struct sock *sk;
    ...
    sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
    if (sk == NULL)
        return NULL;

    nr = nr_sk(sk);

    sock_init_data(NULL, sk);

    sk->sk_type     = osk->sk_type;
    sk->sk_priority = osk->sk_priority;
    sk->sk_protocol = osk->sk_protocol;
    sk->sk_rcvbuf   = osk->sk_rcvbuf;
    sk->sk_sndbuf   = osk->sk_sndbuf;
    sk->sk_state    = TCP_ESTABLISHED;
    sock_copy_flags(sk, osk);
    ...
    nr_init_timers(sk);
    ...
    return sk;

nr_release():
    struct sock *sk = sock->sk;
    struct nr_sock *nr;

    if (sk == NULL) return 0;

    sock_hold(sk);
    sock_orphan(sk);
    lock_sock(sk);
    nr = nr_sk(sk);

    switch (nr->state) {
    ...         /* states 0/1/2: tear down immediately */
        nr_disconnect(sk, 0);
        nr_destroy_socket(sk);
        break;
    ...         /* state 3: send DISCREQ and linger in state 2 */
        nr_clear_queues(sk);
        nr->n2count = 0;
        nr_write_internal(sk, NR_DISCREQ);
        nr_start_t1timer(sk);
        nr_stop_t2timer(sk);
        nr_stop_t4timer(sk);
        nr_stop_idletimer(sk);
        nr->state        = NR_STATE_2;
        sk->sk_state     = TCP_CLOSE;
        sk->sk_shutdown |= SEND_SHUTDOWN;
        sk->sk_state_change(sk);
        sock_set_flag(sk, SOCK_DESTROY);
        break;
    }

    sock->sk = NULL;
    release_sock(sk);
    sock_put(sk);

    return 0;

nr_bind():
    struct sock *sk = sock->sk;
    struct nr_sock *nr = nr_sk(sk);
    ...
    lock_sock(sk);
    if (!sock_flag(sk, SOCK_ZAPPED)) {
        release_sock(sk);
        return -EINVAL;
    }
    ...   /* each address/device validation failure calls release_sock(sk)
             and returns; the listing shows five such exits */
    nr_insert_socket(sk);

    sock_reset_flag(sk, SOCK_ZAPPED);
    release_sock(sk);
    return 0;

nr_connect():
    struct sock *sk = sock->sk;
    struct nr_sock *nr = nr_sk(sk);
    ...
    lock_sock(sk);
    if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
        ...   /* connect completed during sleep */
    }
    if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
        ...   /* connection refused */
    }
    if (sk->sk_state == TCP_ESTABLISHED) {
        ...   /* no reconnect on a seqpacket socket */
    }

    sk->sk_state = TCP_CLOSE;
    ...
    if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
        sock_reset_flag(sk, SOCK_ZAPPED);
        ...
        nr_insert_socket(sk);       /* Finish the bind */
    }
    ...
    release_sock(sk);
    ...   /* route lookup */
    lock_sock(sk);
    ...
    sk->sk_state = TCP_SYN_SENT;

    nr_establish_data_link(sk);
    ...
    nr_start_heartbeat(sk);

    /* Now the loop */
    if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
        ...   /* -EINPROGRESS */
    }

    if (sk->sk_state == TCP_SYN_SENT) {
        DEFINE_WAIT(wait);

        for (;;) {
            prepare_to_wait(sk_sleep(sk), &wait,
                            TASK_INTERRUPTIBLE);
            if (sk->sk_state != TCP_SYN_SENT)
                break;
            if (!signal_pending(current)) {
                release_sock(sk);
                schedule();
                lock_sock(sk);
                continue;
            }
            ...   /* -ERESTARTSYS */
        }
        finish_wait(sk_sleep(sk), &wait);
    }

    if (sk->sk_state != TCP_ESTABLISHED) {
        ...
        err = sock_error(sk);   /* Always set at this point */
    }
    ...
    release_sock(sk);
    return err;

nr_accept():
    struct sock *sk;
    ...
    if ((sk = sock->sk) == NULL)
        return -EINVAL;

    lock_sock(sk);
    if (sk->sk_type != SOCK_SEQPACKET) {
        ...   /* -EOPNOTSUPP */
    }
    if (sk->sk_state != TCP_LISTEN) {
        ...   /* -EINVAL */
    }

    /* the receive queue holds the pending connections */
    for (;;) {
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        skb = skb_dequeue(&sk->sk_receive_queue);
        if (skb)
            break;
        ...
        release_sock(sk);
        schedule();
        lock_sock(sk);
        ...   /* -ERESTARTSYS on signals */
    }
    finish_wait(sk_sleep(sk), &wait);
    ...
    newsk = skb->sk;
    ...
    sk_acceptq_removed(sk);
    ...
    release_sock(sk);
    return err;

nr_getname():
    struct sock *sk = sock->sk;
    struct nr_sock *nr = nr_sk(sk);
    ...
    lock_sock(sk);
    ...   /* peer addresses only exist on established sockets */
    if (sk->sk_state != TCP_ESTABLISHED) {
        release_sock(sk);
        return -ENOTCONN;
    }
    ...
    release_sock(sk);

nr_rx_frame():
    struct sock *sk;
    ...
    skb->sk = NULL;     /* Initially we don't know who it's for */
    ...
    sk = NULL;
    ...   /* look the circuit up by the identifiers in the header */
    sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src);
    ...
    sk = nr_find_peer(circuit_index, circuit_id, src);
    ...
    sk = nr_find_socket(circuit_index, circuit_id);
    ...
    if (sk != NULL) {
        ...
            nr_sk(sk)->bpqext = 1;
        else
            nr_sk(sk)->bpqext = 0;

        ret = nr_process_rx_frame(sk, skb);
        bh_unlock_sock(sk);
        return ret;
    }
    ...   /* a new connection request: find a listener and clone it */
    sk = nr_find_listener(dest);
    ...
    if (sk == NULL || sk_acceptq_is_full(sk) ||
        (make = nr_make_new(sk)) == NULL) {
        ...
        if (sk)
            bh_unlock_sock(sk);
        return 0;
    }
    ...
    skb->sk = make;
    ...
    bh_unlock_sock(sk);
    ...
    bh_lock_sock(sk);
    ...
    sk_acceptq_added(sk);
    skb_queue_head(&sk->sk_receive_queue, skb);

    if (!sock_flag(sk, SOCK_DEAD))
        sk->sk_data_ready(sk);

    bh_unlock_sock(sk);
    ...

nr_sendmsg():
    struct sock *sk = sock->sk;
    struct nr_sock *nr = nr_sk(sk);
    ...
    lock_sock(sk);
    if (sock_flag(sk, SOCK_ZAPPED)) {
        ...   /* -EADDRNOTAVAIL */
    }
    if (sk->sk_shutdown & SEND_SHUTDOWN) {
        ...   /* SIGPIPE / -EPIPE */
    }
    ...
    if (sk->sk_state != TCP_ESTABLISHED) {
        ...   /* -ENOTCONN */
    }
    ...
    if ((skb = sock_alloc_send_skb(sk, size,
                    msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
        goto out;
    ...
    if (sk->sk_state != TCP_ESTABLISHED) {
        ...   /* raced with a disconnect */
    }

    nr_output(sk, skb);     /* Shove it onto the queue */
    ...
out:
    release_sock(sk);

nr_recvmsg():
    struct sock *sk = sock->sk;
    ...
    lock_sock(sk);
    if (sk->sk_state != TCP_ESTABLISHED) {
        release_sock(sk);
        return -ENOTCONN;
    }

    if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                 flags & MSG_DONTWAIT, &er)) == NULL) {
        release_sock(sk);
        return er;
    }
    ...
    skb_free_datagram(sk, skb);
    release_sock(sk);
    ...
    skb_free_datagram(sk, skb);
    ...
    release_sock(sk);

nr_ioctl():
    struct sock *sk = sock->sk;
    ...
    case TIOCOUTQ:
        lock_sock(sk);
        amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
        ...
        release_sock(sk);
        ...
    case TIOCINQ:
        lock_sock(sk);
        if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
            amount = skb->len;
        release_sock(sk);
        ...
    case SIOCGSTAMP:
        lock_sock(sk);
        ret = sock_get_timestamp(sk, argp);
        release_sock(sk);
        ...
    case SIOCGSTAMPNS:
        lock_sock(sk);
        ret = sock_get_timestampns(sk, argp);
        release_sock(sk);
        ...
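For orientation, nr_create() above is what runs when user space opens a NET/ROM socket. A minimal userspace sketch (it assumes the netrom module is loaded; AF_NETROM is protocol family 6 and is defined by glibc's socket headers):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    /* NET/ROM sockets are SOCK_SEQPACKET; this socket() call lands in
     * nr_create(), which allocates the nr_sock and wires up the four
     * protocol timers via nr_init_timers(). */
    int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);

    if (s < 0) {
        perror("socket(AF_NETROM, SOCK_SEQPACKET, 0)");
        return 1;
    }
    printf("NET/ROM socket created: fd %d\n", s);
    close(s);
    return 0;
}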
nr_in.c — NET/ROM receive-side state machines:

static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
    struct sk_buff *skbo, *skbn = skb;
    struct nr_sock *nr = nr_sk(sk);
    ...
    nr_start_idletimer(sk);

    if (more) {
        ...   /* fragment: park it until the final piece arrives */
    }
    ...   /* last piece: pull the queued fragments into one skb (skbn) */
    return sock_queue_rcv_skb(sk, skbn);
}

static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
                             int frametype)
{
    struct nr_sock *nr = nr_sk(sk);

    switch (frametype) {
    case NR_CONNACK:
        nr_stop_t1timer(sk);
        nr_start_idletimer(sk);
        ...   /* record the peer's circuit ids and window, then: */
        sk->sk_state = TCP_ESTABLISHED;
        if (!sock_flag(sk, SOCK_DEAD))
            sk->sk_state_change(sk);
        break;

    case NR_CONNACK | NR_CHOKE_FLAG:
        nr_disconnect(sk, ECONNREFUSED);
        break;

    case NR_RESET:
        ...
        nr_disconnect(sk, ECONNRESET);
        break;
    }
    return 0;
}

static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
                             int frametype)
{
    switch (frametype) {
    case NR_CONNACK | NR_CHOKE_FLAG:
        nr_disconnect(sk, ECONNRESET);
        break;

    case NR_DISCREQ:
        nr_write_internal(sk, NR_DISCACK);
        /* fall through */
    case NR_DISCACK:
        nr_disconnect(sk, 0);
        break;

    case NR_RESET:
        ...
        nr_disconnect(sk, ECONNRESET);
        break;
    }
    return 0;
}

static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
    struct nr_sock *nrom = nr_sk(sk);
    ...
    switch (frametype) {
    case NR_CONNREQ:
        nr_write_internal(sk, NR_CONNACK);
        break;

    case NR_DISCREQ:
        nr_write_internal(sk, NR_DISCACK);
        nr_disconnect(sk, 0);
        break;

    case NR_DISCACK:
        nr_disconnect(sk, ECONNRESET);
        break;

    /* NR_INFOACK, optionally with NR_CHOKE_FLAG and/or NR_NAK_FLAG: */
        if (frametype & NR_CHOKE_FLAG) {
            nrom->condition |= NR_COND_PEER_RX_BUSY;
            nr_start_t4timer(sk);
        } else {
            nrom->condition &= ~NR_COND_PEER_RX_BUSY;
            nr_stop_t4timer(sk);
        }
        if (!nr_validate_nr(sk, nr))
            break;
        if (frametype & NR_NAK_FLAG) {
            nr_frames_acked(sk, nr);
            nr_send_nak_frame(sk);
        } else {
            if (nrom->condition & NR_COND_PEER_RX_BUSY)
                nr_frames_acked(sk, nr);
            else
                nr_check_iframes_acked(sk, nr);
        }
        break;

    /* NR_INFO: the same choke/ACK handling as above, then the data: */
        ...
        if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) {
            ...   /* in sequence: advance vr */
        } else if (nr_in_rx_window(sk, ns)) {
            ...   /* out of order but in window: hold for resequencing */
        }
        ...
        nr_enquiry_response(sk);
        ...
        nr_start_t2timer(sk);
        break;

    case NR_RESET:
        ...
        nr_disconnect(sk, ECONNRESET);
        break;
    }
    return queued;
}

/* Higher level upcall for a LAPB frame - called with sk locked */
int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
    struct nr_sock *nr = nr_sk(sk);
    int queued = 0;
    ...
    switch (nr->state) {
    case NR_STATE_1:
        queued = nr_state1_machine(sk, skb, frametype);
        break;
    case NR_STATE_2:
        queued = nr_state2_machine(sk, skb, frametype);
        break;
    case NR_STATE_3:
        queued = nr_state3_machine(sk, skb, frametype);
        break;
    }

    nr_kick(sk);

    return queued;
}
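nr_queue_rx_frame() accumulates fragments flagged NR_MORE_FLAG and delivers the message when a frame without the flag arrives. A self-contained toy version of that reassembly rule (hypothetical buffer size and names, not kernel code):

#include <stdio.h>
#include <string.h>

#define MAX_MSG 1024

struct reasm {
    char buf[MAX_MSG];
    size_t len;
};

/* Returns 1 and prints the message when reassembly completes. */
static int rx_fragment(struct reasm *r, const char *data, size_t len, int more)
{
    if (r->len + len > MAX_MSG)
        return -1;                      /* overflow: drop the message */
    memcpy(r->buf + r->len, data, len);
    r->len += len;
    if (more)
        return 0;                       /* MORE set: wait for the rest */
    printf("delivered %zu bytes: %.*s\n", r->len, (int)r->len, r->buf);
    r->len = 0;
    return 1;
}

int main(void)
{
    struct reasm r = { .len = 0 };

    rx_fragment(&r, "hello ", 6, 1);    /* MORE flag set */
    rx_fragment(&r, "world", 5, 0);     /* final fragment: deliver */
    return 0;
}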
nr_out.c — NET/ROM transmit path:

void nr_output(struct sock *sk, struct sk_buff *skb)
{
    struct sk_buff *skbn;
    ...
    if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) {
        /* fragment: copy the transport header into each piece */
        ...
        if ((skbn = sock_alloc_send_skb(sk,
                        frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
            return;
        ...
        skb_queue_tail(&sk->sk_write_queue, skbn);  /* Throw it on the queue */
        ...
    } else {
        skb_queue_tail(&sk->sk_write_queue, skb);   /* Throw it on the queue */
    }

    nr_kick(sk);
}

static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
{
    struct nr_sock *nr = nr_sk(sk);
    ...   /* stamp sequence numbers into the header, then: */
    nr_start_idletimer(sk);

    nr_transmit_buffer(sk, skb);
}

void nr_send_nak_frame(struct sock *sk)
{
    struct sk_buff *skb, *skbn;
    struct nr_sock *nr = nr_sk(sk);
    ...   /* clone the oldest unacked frame and resend it */
    nr_transmit_buffer(sk, skbn);
    ...
    nr_stop_t1timer(sk);
}

void nr_kick(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);
    ...
    if (!skb_peek(&sk->sk_write_queue))
        return;
    ...
    /* transmit frames while the window is open */
    do {
        skb = skb_dequeue(&sk->sk_write_queue);

        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
            skb_queue_head(&sk->sk_write_queue, skb);
            break;
        }

        skb_set_owner_w(skbn, sk);

        /* transmit the frame copy; the original goes to the ack queue */
        nr_send_iframe(sk, skbn);
        ...
    } while (... &&
             (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
    ...
    if (!nr_t1timer_running(sk))
        nr_start_t1timer(sk);
}

void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
{
    struct nr_sock *nr = nr_sk(sk);
    ...   /* build the network header; if no route exists: */
        nr_disconnect(sk, ENETUNREACH);
}

void nr_establish_data_link(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);

    nr->condition = 0x00;
    nr->n2count   = 0;

    nr_write_internal(sk, NR_CONNREQ);

    nr_stop_t2timer(sk);
    nr_stop_t4timer(sk);
    nr_stop_idletimer(sk);
    nr_start_t1timer(sk);
}

void nr_enquiry_response(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);
    int frametype = NR_INFOACK;
    ...   /* add NR_CHOKE_FLAG or NR_NAK_FLAG as conditions require */
    nr_write_internal(sk, frametype);
    ...

void nr_check_iframes_acked(struct sock *sk, unsigned short nr)
{
    struct nr_sock *nrom = nr_sk(sk);

    if (nrom->vs == nr) {
        nr_frames_acked(sk, nr);
        nr_stop_t1timer(sk);
        nrom->n2count = 0;
    } else {
        if (nrom->va != nr) {
            nr_frames_acked(sk, nr);
            nr_start_t1timer(sk);
        }
    }
}
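nr_output() is the transmit-side mirror of the reassembly rule shown for nr_in.c: an oversized payload is cut into NR_MAX_PACKET_SIZE pieces and every piece but the last carries the MORE flag. A runnable sketch with a toy packet size standing in for the real constant:

#include <stdio.h>

#define MAX_PKT 8   /* toy stand-in for NR_MAX_PACKET_SIZE */

/* Split a payload into MAX_PKT-sized pieces, flagging all but the last. */
static void tx_message(const char *data, size_t len)
{
    size_t off = 0;

    while (off < len) {
        size_t chunk = (len - off < MAX_PKT) ? len - off : MAX_PKT;
        int more = (off + chunk < len);     /* pieces still to come? */

        printf("frame: %.*s%s\n", (int)chunk, data + off,
               more ? "  [MORE]" : "");
        off += chunk;
    }
}

int main(void)
{
    tx_message("the quick brown fox", 19);
    return 0;
}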
nr_subr.c — NET/ROM support routines:

void nr_clear_queues(struct sock *sk)
{
    struct nr_sock *nr = nr_sk(sk);

    skb_queue_purge(&sk->sk_write_queue);
    skb_queue_purge(&nr->ack_queue);
    skb_queue_purge(&nr->reseq_queue);
    skb_queue_purge(&nr->frag_queue);
}

/* Drop the frames that the peer's N(R) acknowledges: V(a) <- N(r). */
void nr_frames_acked(struct sock *sk, unsigned short nr)
{
    struct nr_sock *nrom = nr_sk(sk);
    struct sk_buff *skb;

    if (nrom->va != nr) {
        while (skb_peek(&nrom->ack_queue) != NULL && nrom->va != nr) {
            skb = skb_dequeue(&nrom->ack_queue);
            kfree_skb(skb);
            nrom->va = (nrom->va + 1) % NR_MODULUS;
        }
    }
}

/* Put the unacked frames back on the write queue for nr_kick(). */
void nr_requeue_frames(struct sock *sk)
{
    struct sk_buff *skb, *skb_prev = NULL;

    while ((skb = skb_dequeue(&nr_sk(sk)->ack_queue)) != NULL) {
        if (skb_prev == NULL)
            skb_queue_head(&sk->sk_write_queue, skb);
        else
            skb_append(skb_prev, skb, &sk->sk_write_queue);
        skb_prev = skb;
    }
}

/* Validate that nr lies between va and vs, stepping modulo NR_MODULUS. */
int nr_validate_nr(struct sock *sk, unsigned short nr)
{
    struct nr_sock *nrom = nr_sk(sk);
    unsigned short vc = nrom->va;

    while (vc != nrom->vs) {
        if (nr == vc)
            return 1;
        vc = (vc + 1) % NR_MODULUS;
    }

    return nr == nrom->vs;
}

/* Is ns within the receive window [vr, vl + window)? */
int nr_in_rx_window(struct sock *sk, unsigned short ns)
{
    struct nr_sock *nr = nr_sk(sk);
    unsigned short vc = nr->vr;
    unsigned short vt = (nr->vl + nr->window) % NR_MODULUS;

    while (vc != vt) {
        if (ns == vc)
            return 1;
        vc = (vc + 1) % NR_MODULUS;
    }

    return 0;
}

void nr_write_internal(struct sock *sk, int frametype)
{
    struct nr_sock *nr = nr_sk(sk);
    struct sk_buff *skb;
    ...   /* build the transport header and per-frametype fields, then: */
    nr_transmit_buffer(sk, skb);
}

void nr_disconnect(struct sock *sk, int reason)
{
    nr_stop_t1timer(sk);
    nr_stop_t2timer(sk);
    nr_stop_t4timer(sk);
    nr_stop_idletimer(sk);

    nr_clear_queues(sk);

    nr_sk(sk)->state = NR_STATE_0;

    sk->sk_state     = TCP_CLOSE;
    sk->sk_err       = reason;
    sk->sk_shutdown |= SEND_SHUTDOWN;

    if (!sock_flag(sk, SOCK_DEAD)) {
        sk->sk_state_change(sk);
        sock_set_flag(sk, SOCK_DEAD);
    }
}
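nr_validate_nr() and nr_in_rx_window() both walk a window of mod-256 sequence numbers, so wraparound is handled by stepping forward rather than by numeric comparison. The same walk, extracted into a standalone test (MODULUS mirrors NR_MODULUS):

#include <assert.h>

#define MODULUS 256   /* sequence numbers are mod-256, like NR_MODULUS */

/* An ACK value is valid iff it lies on the path va -> vs taken forward
 * modulo MODULUS (inclusive of vs). */
static int validate_nr(unsigned short va, unsigned short vs, unsigned short nr)
{
    unsigned short vc = va;

    while (vc != vs) {
        if (nr == vc)
            return 1;
        vc = (vc + 1) % MODULUS;
    }
    return nr == vs;
}

int main(void)
{
    /* wrapped window: va=250, vs=3 covers 250..255 and 0..3 */
    assert(validate_nr(250, 3, 255));
    assert(validate_nr(250, 3, 0));
    assert(!validate_nr(250, 3, 100));
    return 0;
}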
/linux-4.1.27/net/bluetooth/rfcomm/
sock.c — RFCOMM socket layer (matched lines, regrouped by function):

static void rfcomm_sock_close(struct sock *sk);
static void rfcomm_sock_kill(struct sock *sk);

rfcomm_sk_data_ready():
    struct sock *sk = d->owner;

    if (!sk)
        return;

    atomic_add(skb->len, &sk->sk_rmem_alloc);
    skb_queue_tail(&sk->sk_receive_queue, skb);
    sk->sk_data_ready(sk);

    if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
        rfcomm_dlc_throttle(d);

rfcomm_sk_state_change():
    struct sock *sk = d->owner, *parent;

    if (!sk)
        return;
    ...
    bh_lock_sock(sk);
    ...
    sk->sk_err   = err;
    ...
    sk->sk_state = d->state;

    parent = bt_sk(sk)->parent;
    if (parent) {
        ...   /* dead unaccepted child: unlink it */
        sock_set_flag(sk, SOCK_ZAPPED);
        bt_accept_unlink(sk);
        ...
    } else {
        ...   /* connected: fetch our address from the session */
        ...                    &rfcomm_pi(sk)->src, NULL);
        sk->sk_state_change(sk);
    }

    bh_unlock_sock(sk);

    if (parent && sock_flag(sk, SOCK_ZAPPED)) {
        ...
        rfcomm_sock_kill(sk);
    }

__rfcomm_get_listen_sock_by_addr():
    struct sock *sk = NULL;

    sk_for_each(sk, &rfcomm_sk_list.head) {
        if (rfcomm_pi(sk)->channel != channel)
            continue;

        if (bacmp(&rfcomm_pi(sk)->src, src))
            continue;

        if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN)
            break;
    }

    return sk ? sk : NULL;

rfcomm_get_sock_by_channel():
    struct sock *sk = NULL, *sk1 = NULL;
    ...
    sk_for_each(sk, &rfcomm_sk_list.head) {
        if (state && sk->sk_state != state)
            continue;

        if (rfcomm_pi(sk)->channel == channel) {
            /* Exact match. */
            if (!bacmp(&rfcomm_pi(sk)->src, src))
                break;

            /* Closest match */
            if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))
                sk1 = sk;
        }
    }
    ...
    return sk ? sk : sk1;

static void rfcomm_sock_destruct(struct sock *sk)
{
    struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;

    BT_DBG("sk %p dlc %p", sk, d);

    skb_queue_purge(&sk->sk_receive_queue);
    skb_queue_purge(&sk->sk_write_queue);
    ...
    rfcomm_pi(sk)->dlc = NULL;

    /* Detach DLC if it's owned by this socket */
    if (d->owner == sk)
        d->owner = NULL;
    ...
}

rfcomm_sock_cleanup_listen(parent):
    struct sock *sk;
    ...
    /* Close not yet accepted dlcs */
    while ((sk = bt_accept_dequeue(parent, NULL))) {
        rfcomm_sock_close(sk);
        rfcomm_sock_kill(sk);
    }
    ...

/* Kill socket (only if zapped and orphan). */
static void rfcomm_sock_kill(struct sock *sk)
{
    if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
        return;

    BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state,
           atomic_read(&sk->sk_refcnt));

    /* Kill poor orphan */
    bt_sock_unlink(&rfcomm_sk_list, sk);
    sock_set_flag(sk, SOCK_DEAD);
    sock_put(sk);
}

static void __rfcomm_sock_close(struct sock *sk)
{
    struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;

    BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

    switch (sk->sk_state) {
    case BT_LISTEN:
        rfcomm_sock_cleanup_listen(sk);
        break;
    ...   /* connected states: close the DLC, then: */
        sock_set_flag(sk, SOCK_ZAPPED);
        break;
    }
}

static void rfcomm_sock_close(struct sock *sk)
{
    lock_sock(sk);
    __rfcomm_sock_close(sk);
    release_sock(sk);
}

rfcomm_sock_init(sk, parent):
    struct rfcomm_pinfo *pi = rfcomm_pi(sk);

    BT_DBG("sk %p", sk);

    if (parent) {
        sk->sk_type = parent->sk_type;
        ...   /* inherit security level, role switch, defer-setup */
        security_sk_clone(parent, sk);
    }
    ...

rfcomm_sock_alloc():
    struct sock *sk;

    sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
    if (!sk)
        return NULL;

    sock_init_data(sock, sk);
    INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
    ...   /* allocate the DLC; on failure: */
        sk_free(sk);
    ...
    rfcomm_pi(sk)->dlc = d;
    d->owner = sk;

    sk->sk_destruct = rfcomm_sock_destruct;
    sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;

    sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
    sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;

    sock_reset_flag(sk, SOCK_ZAPPED);

    sk->sk_protocol = proto;
    sk->sk_state    = BT_OPEN;

    bt_sock_link(&rfcomm_sk_list, sk);

    BT_DBG("sk %p", sk);
    return sk;

rfcomm_sock_create():
    struct sock *sk;
    ...
    sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
    if (!sk)
        return -ENOMEM;

    rfcomm_sock_init(sk, NULL);
    return 0;

rfcomm_sock_bind():
    struct sock *sk = sock->sk;
    ...
    BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
    ...
    lock_sock(sk);

    if (sk->sk_state != BT_OPEN) {
        ...   /* -EBADFD */
    }
    if (sk->sk_type != SOCK_STREAM) {
        ...   /* -EINVAL */
    }
    ...   /* reject channels already bound on this source address */
    bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
    rfcomm_pi(sk)->channel = chan;
    sk->sk_state = BT_BOUND;
    ...
    release_sock(sk);
    return err;

rfcomm_sock_connect():
    struct sock *sk = sock->sk;
    struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
    ...
    BT_DBG("sk %p", sk);
    ...
    lock_sock(sk);

    if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
        ...   /* -EBADFD */
    }
    if (sk->sk_type != SOCK_STREAM) {
        ...   /* -EINVAL */
    }

    sk->sk_state = BT_CONNECT;
    bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);
    rfcomm_pi(sk)->channel = sa->rc_channel;

    d->sec_level   = rfcomm_pi(sk)->sec_level;
    d->role_switch = rfcomm_pi(sk)->role_switch;

    err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
                          sa->rc_channel);
    if (!err)
        err = bt_sock_wait_state(sk, BT_CONNECTED,
                                 sock_sndtimeo(sk, flags & O_NONBLOCK));

    release_sock(sk);
    return err;

rfcomm_sock_listen():
    struct sock *sk = sock->sk;
    ...
    BT_DBG("sk %p backlog %d", sk, backlog);

    lock_sock(sk);

    if (sk->sk_state != BT_BOUND) {
        ...   /* -EBADFD */
    }
    if (sk->sk_type != SOCK_STREAM) {
        ...   /* -EINVAL */
    }

    if (!rfcomm_pi(sk)->channel) {
        bdaddr_t *src = &rfcomm_pi(sk)->src;
        ...   /* autobind: pick the first free channel on src */
        rfcomm_pi(sk)->channel = channel;
        ...
    }

    sk->sk_max_ack_backlog = backlog;
    sk->sk_ack_backlog     = 0;
    sk->sk_state           = BT_LISTEN;
    ...
    release_sock(sk);
    return err;

rfcomm_sock_accept():
    struct sock *sk = sock->sk, *nsk;
    ...
    lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

    if (sk->sk_type != SOCK_STREAM) {
        ...   /* -EINVAL */
    }

    timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

    BT_DBG("sk %p timeo %ld", sk, timeo);

    /* Wait for an incoming connection. (wake-one). */
    add_wait_queue_exclusive(sk_sleep(sk), &wait);
    while (1) {
        if (sk->sk_state != BT_LISTEN) {
            ...   /* -EBADFD */
        }

        nsk = bt_accept_dequeue(sk, newsock);
        if (nsk)
            break;
        ...   /* bail out on timeout or signal */
        release_sock(sk);
        ...
        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
    }
    remove_wait_queue(sk_sleep(sk), &wait);
    ...
    release_sock(sk);
    return err;

rfcomm_sock_getname():
    struct sock *sk = sock->sk;

    BT_DBG("sock %p, sk %p", sock, sk);

    if (peer && sk->sk_state != BT_CONNECTED &&
        sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
        return -ENOTCONN;
    ...
    sa->rc_channel = rfcomm_pi(sk)->channel;
    if (peer)
        bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst);
    else
        bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src);
    ...

rfcomm_sock_sendmsg():
    struct sock *sk = sock->sk;
    struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
    ...
    if (sk->sk_shutdown & SEND_SHUTDOWN)
        return -EPIPE;

    BT_DBG("sock %p, sk %p", sock, sk);

    lock_sock(sk);

    sent = bt_sock_wait_ready(sk, msg->msg_flags);
    ...
    while (len) {
        ...
        skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        ...
        skb->priority = sk->sk_priority;
        ...   /* copy user data and hand the skb to the DLC */
    }
    ...
    release_sock(sk);
    return sent;

rfcomm_sock_recvmsg():
    struct sock *sk = sock->sk;
    struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
    ...
    lock_sock(sk);
    ...
    atomic_sub(len, &sk->sk_rmem_alloc);

    if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
        rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
    release_sock(sk);
    ...

rfcomm_sock_setsockopt_old():
    struct sock *sk = sock->sk;
    ...
    BT_DBG("sk %p", sk);

    lock_sock(sk);

    switch (optname) {
    case RFCOMM_LM:
        ...
        if (opt & RFCOMM_LM_AUTH)
            rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
        if (opt & RFCOMM_LM_ENCRYPT)
            rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
        if (opt & RFCOMM_LM_SECURE)
            rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;

        rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
        break;
    ...
    }

    release_sock(sk);
    return err;

rfcomm_sock_setsockopt():
    struct sock *sk = sock->sk;
    ...
    BT_DBG("sk %p", sk);
    ...
    lock_sock(sk);

    switch (optname) {
    case BT_SECURITY:
        if (sk->sk_type != SOCK_STREAM) {
            err = -EINVAL;
            break;
        }
        ...
        rfcomm_pi(sk)->sec_level = sec.level;
        break;

    case BT_DEFER_SETUP:
        if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
            err = -EINVAL;
            break;
        }
        ...
        if (opt)
            set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
        else
            clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
        break;
    ...
    }

    release_sock(sk);
    return err;

rfcomm_sock_getsockopt_old():
    struct sock *sk = sock->sk;
    ...
    BT_DBG("sk %p", sk);
    ...
    lock_sock(sk);

    switch (optname) {
    case RFCOMM_LM:
        switch (rfcomm_pi(sk)->sec_level) {
        ...   /* map sec_level back to RFCOMM_LM_* bits */
        }
        if (rfcomm_pi(sk)->role_switch)
            opt |= RFCOMM_LM_MASTER;
        ...
        break;

    case RFCOMM_CONNINFO:
        if (sk->sk_state != BT_CONNECTED &&
            !rfcomm_pi(sk)->dlc->defer_setup) {
            err = -ENOTCONN;
            break;
        }
        ...
        l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
        ...
        break;
    ...
    }

    release_sock(sk);
    return err;

rfcomm_sock_getsockopt():
    struct sock *sk = sock->sk;
    ...
    BT_DBG("sk %p", sk);
    ...
    lock_sock(sk);

    switch (optname) {
    case BT_SECURITY:
        if (sk->sk_type != SOCK_STREAM) {
            err = -EINVAL;
            break;
        }
        sec.level = rfcomm_pi(sk)->sec_level;
        ...
        break;

    case BT_DEFER_SETUP:
        if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
            err = -EINVAL;
            break;
        }
        if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
                     (u32 __user *)optval))
            err = -EFAULT;
        break;
    ...
    }

    release_sock(sk);
    return err;

rfcomm_sock_ioctl():
    struct sock *sk __maybe_unused = sock->sk;

    BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
    ...
    lock_sock(sk);
    err = rfcomm_dev_ioctl(sk, cmd, (void __user *)arg);
    release_sock(sk);
    ...

rfcomm_sock_shutdown():
    struct sock *sk = sock->sk;
    ...
    BT_DBG("sock %p, sk %p", sock, sk);

    if (!sk)
        return 0;

    lock_sock(sk);
    if (!sk->sk_shutdown) {
        sk->sk_shutdown = SHUTDOWN_MASK;
        __rfcomm_sock_close(sk);

        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
            ...)
            err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
    }
    release_sock(sk);
    return err;

rfcomm_sock_release():
    struct sock *sk = sock->sk;
    ...
    BT_DBG("sock %p, sk %p", sock, sk);

    if (!sk)
        return 0;
    ...   /* shutdown, then: */
    sock_orphan(sk);
    rfcomm_sock_kill(sk);
    return err;

rfcomm_connect_ind():
    struct sock *sk, *parent;
    ...   /* find a listener on this channel and source address */
    sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM,
                           GFP_ATOMIC);
    if (!sk)
        goto done;

    bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);

    rfcomm_sock_init(sk, parent);
    bacpy(&rfcomm_pi(sk)->src, &src);
    bacpy(&rfcomm_pi(sk)->dst, &dst);
    rfcomm_pi(sk)->channel = channel;

    sk->sk_state = BT_CONFIG;
    bt_accept_enqueue(parent, sk);

    /* Accept connection and return socket DLC */
    *d = rfcomm_pi(sk)->dlc;
    ...

rfcomm_sock_debugfs_show():
    struct sock *sk;
    ...
    sk_for_each(sk, &rfcomm_sk_list.head) {
        seq_printf(f, "%pMR %pMR %d %d\n",
                   &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,
                   sk->sk_state, rfcomm_pi(sk)->channel);
    }
    ...
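rfcomm_sk_data_ready() and rfcomm_sock_recvmsg() together implement watermark flow control: the DLC is throttled once buffered receive bytes reach sk_rcvbuf and unthrottled only after the reader drains them to a quarter of it. The gap between the two thresholds is the hysteresis that prevents throttle/unthrottle oscillation. A toy model of the accounting (plain ints in place of the kernel's atomic sk_rmem_alloc):

#include <stdio.h>

struct rxbuf {
    int used, size, throttled;
};

static void on_data(struct rxbuf *b, int len)      /* producer side */
{
    b->used += len;
    if (b->used >= b->size && !b->throttled) {
        b->throttled = 1;               /* rfcomm_dlc_throttle() */
        printf("throttle at %d/%d\n", b->used, b->size);
    }
}

static void on_read(struct rxbuf *b, int len)      /* consumer side */
{
    b->used -= len;
    if (b->throttled && b->used <= b->size / 4) {
        b->throttled = 0;               /* rfcomm_dlc_unthrottle() */
        printf("unthrottle at %d/%d\n", b->used, b->size);
    }
}

int main(void)
{
    struct rxbuf b = { 0, 100, 0 };

    on_data(&b, 100);   /* hits the high watermark: throttle */
    on_read(&b, 80);    /* drains below size/4: resume */
    return 0;
}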
/linux-4.1.27/net/x25/
x25_timer.c — X.25 timer setup and expiry handlers:

void x25_init_timers(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    setup_timer(&x25->timer, x25_timer_expiry, (unsigned long)sk);

    /* initialized by sock_init_data */
    sk->sk_timer.data     = (unsigned long)sk;
    sk->sk_timer.function = &x25_heartbeat_expiry;
}

void x25_start_heartbeat(struct sock *sk)
{
    mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
}

void x25_stop_heartbeat(struct sock *sk)
{
    del_timer(&sk->sk_timer);
}

/* One timer_list serves T2, T21, T22 and T23; only the timeout differs. */
void x25_start_t2timer(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    mod_timer(&x25->timer, jiffies + x25->t2);
}

void x25_start_t21timer(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    mod_timer(&x25->timer, jiffies + x25->t21);
}

void x25_start_t22timer(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    mod_timer(&x25->timer, jiffies + x25->t22);
}

void x25_start_t23timer(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    mod_timer(&x25->timer, jiffies + x25->t23);
}

void x25_stop_timer(struct sock *sk)
{
    del_timer(&x25_sk(sk)->timer);
}

unsigned long x25_display_timer(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    if (!timer_pending(&x25->timer))
        return 0;

    return x25->timer.expires - jiffies;
}

static void x25_heartbeat_expiry(unsigned long param)
{
    struct sock *sk = (struct sock *)param;

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */
        goto restart_heartbeat;

    switch (x25_sk(sk)->state) {
    case X25_STATE_0:
        /* a destroyed or dead-but-unaccepted socket is reaped here */
        if (sock_flag(sk, SOCK_DESTROY) ||
            (sk->sk_state == TCP_LISTEN &&
             sock_flag(sk, SOCK_DEAD))) {
            bh_unlock_sock(sk);
            x25_destroy_socket_from_timer(sk);
            return;
        }
        break;

    case X25_STATE_3:
        /* Check for the state of the receive buffer. */
        x25_check_rbuf(sk);
        break;
    }
restart_heartbeat:
    x25_start_heartbeat(sk);
    bh_unlock_sock(sk);
}

/* The timer may have been T2, T21, T22 or T23: the state tells which. */
static inline void x25_do_timer_expiry(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    switch (x25->state) {
    case X25_STATE_3:   /* T2 */
        if (x25->condition & X25_COND_ACK_PENDING) {
            x25->condition &= ~X25_COND_ACK_PENDING;
            x25_enquiry_response(sk);
        }
        break;

    case X25_STATE_1:   /* T21 */
    case X25_STATE_4:   /* T22 */
        x25_write_internal(sk, X25_CLEAR_REQUEST);
        x25->state = X25_STATE_2;
        x25_start_t23timer(sk);
        break;

    case X25_STATE_2:   /* T23 */
        x25_disconnect(sk, ETIMEDOUT, 0, 0);
        break;
    }
}

static void x25_timer_expiry(unsigned long param)
{
    struct sock *sk = (struct sock *)param;

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk)) { /* can currently only occur in state 3 */
        if (x25_sk(sk)->state == X25_STATE_3)
            x25_start_t2timer(sk);
    } else
        x25_do_timer_expiry(sk);
    bh_unlock_sock(sk);
}
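Both expiry handlers check sock_owned_by_user() and, rather than spinning or sleeping in timer context, simply re-arm the timer when a process currently owns the socket. A loose userspace analogue of that defer-if-busy shape, using trylock (the kernel's bh_lock_sock()/owned flag is not literally a trylock; this only illustrates the pattern):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

static void timer_tick(void)
{
    if (pthread_mutex_trylock(&sock_lock) != 0) {
        /* socket busy: the kernel would x25_start_t2timer(sk) here */
        printf("socket busy: re-arm the timer and retry later\n");
        return;
    }
    printf("run the expiry work\n");
    pthread_mutex_unlock(&sock_lock);
}

int main(void)
{
    pthread_mutex_lock(&sock_lock);   /* simulate a user holding the sock */
    timer_tick();                     /* deferred */
    pthread_mutex_unlock(&sock_lock);
    timer_tick();                     /* runs */
    return 0;
}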
x25_in.c — X.25 receive-side state machines:

static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
    struct sk_buff *skbo, *skbn = skb;
    struct x25_sock *x25 = x25_sk(sk);

    if (more) {
        /* M-bit set: park the fragment until the final piece arrives */
        ...
        skb_set_owner_r(skb, sk);
        ...
        return 0;
    }
    ...   /* last piece: pull the queued fragments into one skb (skbn) */
    skb_set_owner_r(skbn, sk);
    skb_queue_tail(&sk->sk_receive_queue, skbn);
    if (!sock_flag(sk, SOCK_DEAD))
        sk->sk_data_ready(sk);

    return 0;
}

x25_state1_machine():   /* awaiting Call Accepted */
    struct x25_sock *x25 = x25_sk(sk);

    switch (frametype) {
    case X25_CALL_ACCEPTED:
        x25_stop_timer(sk);
        ...
        sk->sk_state = TCP_ESTABLISHED;
        ...   /* parse the address block, facilities and call user data */
        if (!sock_flag(sk, SOCK_DEAD))
            sk->sk_state_change(sk);
        break;

    case X25_CLEAR_REQUEST:
        ...
        x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
        x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
        break;

    default:
        ...
        x25_write_internal(sk, X25_CLEAR_REQUEST);
        ...
        x25_start_t23timer(sk);
        break;
    }
    return 0;

x25_state2_machine():   /* awaiting Clear Confirmation */
    switch (frametype) {
    case X25_CLEAR_REQUEST:
        x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
        x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
        break;

    case X25_CLEAR_CONFIRMATION:
        x25_disconnect(sk, 0, 0, 0);
        break;

    default:
        ...
        x25_write_internal(sk, X25_CLEAR_REQUEST);
        x25_start_t23timer(sk);
        break;
    }
    return 0;

x25_state3_machine():   /* connected: data-transfer state */
    struct x25_sock *x25 = x25_sk(sk);

    switch (frametype) {
    case X25_RESET_REQUEST:
        x25_write_internal(sk, X25_RESET_CONFIRMATION);
        x25_stop_timer(sk);
        ...   /* zero the sequence variables, then: */
        x25_requeue_frames(sk);
        break;

    case X25_CLEAR_REQUEST:
        x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
        x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
        break;

    case X25_RR:
    case X25_RNR:
        ...
        if (!x25_validate_nr(sk, nr)) {
            x25_clear_queues(sk);
            x25_write_internal(sk, X25_RESET_REQUEST);
            x25_start_t22timer(sk);
            ...
        } else {
            x25_frames_acked(sk, nr);
            ...
        }
        break;

    case X25_DATA:
        ...
        if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
            x25_clear_queues(sk);
            x25_write_internal(sk, X25_RESET_REQUEST);
            x25_start_t22timer(sk);
            ...
            break;
        }
        ...
        x25_frames_acked(sk, nr);
        ...
        if (x25_queue_rx_frame(sk, skb, m) == 0) {
            ...   /* accepted: advance vr */
        } else {
            /* queueing failed: reset the circuit */
            x25_clear_queues(sk);
            x25_write_internal(sk, X25_RESET_REQUEST);
            x25_start_t22timer(sk);
            ...
        }
        if (atomic_read(&sk->sk_rmem_alloc) >
            (sk->sk_rcvbuf >> 1))
            ...   /* own receiver busy */
        ...
        /* ack straight away if the window closed, else start T2 */
        x25_stop_timer(sk);
        x25_enquiry_response(sk);
        ...
        x25_start_t2timer(sk);
        ...
        break;

    case X25_INTERRUPT_CONFIRMATION:
        ...
        break;

    case X25_INTERRUPT:
        if (sock_flag(sk, SOCK_URGINLINE))
            queued = !sock_queue_rcv_skb(sk, skb);
        else {
            skb_set_owner_r(skb, sk);
            ...
        }
        ...
        sk_send_sigurg(sk);
        x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
        ...
        break;

    default:
        ...
        x25_write_internal(sk, X25_CLEAR_REQUEST);
        ...
        x25_start_t23timer(sk);
        break;
    }
    return queued;

static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
    struct x25_sock *x25 = x25_sk(sk);

    switch (frametype) {
    case X25_RESET_REQUEST:
        x25_write_internal(sk, X25_RESET_CONFIRMATION);
        /* fall through */
    case X25_RESET_CONFIRMATION:
        x25_stop_timer(sk);
        ...   /* zero the sequence variables, then: */
        x25_requeue_frames(sk);
        break;

    case X25_CLEAR_REQUEST:
        x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
        x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
        break;

    default:
        ...
        x25_write_internal(sk, X25_CLEAR_REQUEST);
        ...
        x25_start_t23timer(sk);
        break;
    }
    return 0;
}

int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
    struct x25_sock *x25 = x25_sk(sk);
    int queued = 0, frametype, ns, nr, q, d, m;

    if (x25->state == X25_STATE_0)
        return 0;

    frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);

    switch (x25->state) {
    case X25_STATE_1:
        queued = x25_state1_machine(sk, skb, frametype);
        break;
    case X25_STATE_2:
        queued = x25_state2_machine(sk, skb, frametype);
        break;
    case X25_STATE_3:
        queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
        break;
    case X25_STATE_4:
        queued = x25_state4_machine(sk, skb, frametype);
        break;
    }

    x25_kick(sk);

    return queued;
}

int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
    int queued = x25_process_rx_frame(sk, skb);

    if (!queued)
        kfree_skb(skb);

    return 0;
}
af_x25.c — X.25 socket layer (matched lines, regrouped by function):

static void x25_remove_socket(struct sock *sk)
{
    ...
    sk_del_node_init(sk);
    ...
}

static void x25_insert_socket(struct sock *sk)
{
    ...
    sk_add_node(sk, &x25_list);
    ...
}

x25_new_lci():
    struct sock *sk;
    ...
    while ((sk = __x25_find_socket(lci, nb)) != NULL) {
        sock_put(sk);
        ...
    }

static void __x25_destroy_socket(struct sock *sk)
{
    struct sk_buff *skb;

    x25_stop_heartbeat(sk);
    x25_stop_timer(sk);

    x25_remove_socket(sk);
    x25_clear_queues(sk);       /* Flush the queues */

    while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
        if (skb->sk != sk) {    /* A pending connection */
            /* queue the unaccepted socket for death */
            skb->sk->sk_state = TCP_LISTEN;
            sock_set_flag(skb->sk, SOCK_DEAD);
            x25_start_heartbeat(skb->sk);
            x25_sk(skb->sk)->state = X25_STATE_0;
        }

        kfree_skb(skb);
    }

    if (sk_has_allocations(sk)) {
        /* buffers still outstanding: defer destruction */
        sk->sk_timer.expires  = jiffies + 10 * HZ;
        sk->sk_timer.function = x25_destroy_timer;
        sk->sk_timer.data     = (unsigned long)sk;
        add_timer(&sk->sk_timer);
    } else
        __sock_put(sk);
}

void x25_destroy_socket_from_timer(struct sock *sk)
{
    sock_hold(sk);
    bh_lock_sock(sk);
    __x25_destroy_socket(sk);
    bh_unlock_sock(sk);
    sock_put(sk);
}

x25_setsockopt():
    struct sock *sk = sock->sk;
    ...
    if (opt)
        set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
    else
        clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
    ...

x25_getsockopt():
    struct sock *sk = sock->sk;
    ...
    val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
    ...

x25_listen():
    struct sock *sk = sock->sk;
    int rc = -EOPNOTSUPP;

    lock_sock(sk);
    if (sk->sk_state != TCP_LISTEN) {
        memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
        sk->sk_max_ack_backlog = backlog;
        sk->sk_state           = TCP_LISTEN;
        rc = 0;
    }
    release_sock(sk);

    return rc;

x25_alloc_socket():
    struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);

    if (!sk)
        goto out;

    sock_init_data(NULL, sk);

    x25 = x25_sk(sk);
    ...
    return sk;

x25_create():
    struct sock *sk;
    ...
    if ((sk = x25_alloc_socket(net)) == NULL)
        goto out;

    x25 = x25_sk(sk);

    sock_init_data(sock, sk);

    x25_init_timers(sk);
    ...
    sk->sk_protocol    = protocol;
    sk->sk_backlog_rcv = x25_backlog_rcv;
    ...

x25_make_new(osk):
    struct sock *sk = NULL;
    ...
    if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
        goto out;

    x25 = x25_sk(sk);

    sk->sk_type        = osk->sk_type;
    sk->sk_priority    = osk->sk_priority;
    sk->sk_protocol    = osk->sk_protocol;
    sk->sk_rcvbuf      = osk->sk_rcvbuf;
    sk->sk_sndbuf      = osk->sk_sndbuf;
    sk->sk_state       = TCP_ESTABLISHED;
    sk->sk_backlog_rcv = osk->sk_backlog_rcv;
    sock_copy_flags(sk, osk);
    ...
    x25_init_timers(sk);
    ...
    return sk;

x25_release():
    struct sock *sk = sock->sk;
    struct x25_sock *x25;

    if (!sk)
        goto out;

    x25 = x25_sk(sk);

    sock_hold(sk);
    lock_sock(sk);
    switch (x25->state) {
    ...         /* states 0/2: tear down immediately */
        x25_disconnect(sk, 0, 0, 0);
        __x25_destroy_socket(sk);
        goto out;
    ...         /* states 1/3/4: send CLEAR REQUEST and linger */
        x25_clear_queues(sk);
        x25_write_internal(sk, X25_CLEAR_REQUEST);
        x25_start_t23timer(sk);
        ...
        sk->sk_state     = TCP_CLOSE;
        sk->sk_shutdown |= SEND_SHUTDOWN;
        sk->sk_state_change(sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_set_flag(sk, SOCK_DESTROY);
        break;
    }

    sock_orphan(sk);
out:
    release_sock(sk);
    sock_put(sk);
    return 0;

x25_bind():
    struct sock *sk = sock->sk;
    ...
    if (!sock_flag(sk, SOCK_ZAPPED) ||
        ...)   /* or a malformed sockaddr: -EINVAL */
    ...
    lock_sock(sk);
    x25_sk(sk)->source_addr = addr->sx25_addr;
    x25_insert_socket(sk);
    sock_reset_flag(sk, SOCK_ZAPPED);
    release_sock(sk);
    SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
    ...

static int x25_wait_for_connection_establishment(struct sock *sk)
{
    DECLARE_WAITQUEUE(wait, current);
    int rc;

    add_wait_queue_exclusive(sk_sleep(sk), &wait);
    for (;;) {
        __set_current_state(TASK_INTERRUPTIBLE);
        rc = -ERESTARTSYS;
        if (signal_pending(current))
            break;
        rc = sock_error(sk);
        if (rc) {
            sk->sk_socket->state = SS_UNCONNECTED;
            break;
        }
        rc = 0;
        if (sk->sk_state != TCP_ESTABLISHED) {
            release_sock(sk);
            schedule();
            lock_sock(sk);
        } else
            break;
    }
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(sk_sleep(sk), &wait);
    return rc;
}

x25_connect():
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    ...
    lock_sock(sk);
    if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
        ...   /* connect completed during a ERESTARTSYS event */
    }

    if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
        ...   /* -ECONNREFUSED */
    }

    if (sk->sk_state == TCP_ESTABLISHED)
        ...   /* -EISCONN: no reconnect on a seqpacket socket */

    sk->sk_state = TCP_CLOSE;
    ...
    if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
        goto out;
    ...
    sk->sk_state = TCP_SYN_SENT;
    ...
    x25_write_internal(sk, X25_CALL_REQUEST);

    x25_start_heartbeat(sk);
    x25_start_t21timer(sk);

    /* Now the loop */
    rc = -EINPROGRESS;
    if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
        goto out;

    rc = x25_wait_for_connection_establishment(sk);
    ...
out:
    release_sock(sk);
    return rc;

static int x25_wait_for_data(struct sock *sk, long timeout)
{
    DECLARE_WAITQUEUE(wait, current);
    int rc = 0;

    add_wait_queue_exclusive(sk_sleep(sk), &wait);
    for (;;) {
        __set_current_state(TASK_INTERRUPTIBLE);
        if (sk->sk_shutdown & RCV_SHUTDOWN)
            break;
        rc = -ERESTARTSYS;
        if (signal_pending(current))
            break;
        rc = -EAGAIN;
        if (!timeout)
            break;
        rc = 0;
        if (skb_queue_empty(&sk->sk_receive_queue)) {
            release_sock(sk);
            timeout = schedule_timeout(timeout);
            lock_sock(sk);
        } else
            break;
    }
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(sk_sleep(sk), &wait);
    return rc;
}

x25_accept():
    struct sock *sk = sock->sk;
    ...
    if (!sk)
        goto out;

    rc = -EOPNOTSUPP;
    if (sk->sk_type != SOCK_SEQPACKET)
        goto out;

    lock_sock(sk);
    rc = -EINVAL;
    if (sk->sk_state != TCP_LISTEN)
        goto out2;

    rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
    if (rc)
        goto out2;
    skb = skb_dequeue(&sk->sk_receive_queue);
    rc = -EINVAL;
    if (!skb->sk)
        goto out2;
    newsk = skb->sk;
    ...
    skb->sk = NULL;
    ...
    sk->sk_ack_backlog--;
    ...
out2:
    release_sock(sk);
out:
    return rc;

x25_getname():
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    ...
    if (peer) {
        if (sk->sk_state != TCP_ESTABLISHED) {
            ...   /* -ENOTCONN */
        }
        ...
    }
    ...

x25_rx_call_request():
    struct sock *sk;
    ...
    /* Find a listener for the particular address/cud pair. */
    sk = x25_find_listener(&source_addr, skb);
    ...
    /* We can't accept the Call Request: backlog full */
    if (sk != NULL && sk_acceptq_is_full(sk)) {
        ...
    }

    if (sk == NULL) {
        ...   /* try to forward the call, else clear it */
    }
    ...
    /* Try to reach a compromise on the requested facilities. */
    len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
    ...
    make = x25_make_new(sk);
    ...
    skb->sk = make;
    ...
    makex25->vc_facil_mask  = x25_sk(sk)->vc_facil_mask;
    ...
    makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
    ...
    sk->sk_ack_backlog++;
    ...
    skb_queue_head(&sk->sk_receive_queue, skb);
    ...
    if (!sock_flag(sk, SOCK_DEAD))
        sk->sk_data_ready(sk);
    ...
    sock_put(sk);
    ...
    sock_put(sk);
    ...

x25_sendmsg():
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    ...
    lock_sock(sk);
    ...
    if (sock_flag(sk, SOCK_ZAPPED))
        goto out;       /* -EADDRNOTAVAIL */

    if (sk->sk_shutdown & SEND_SHUTDOWN) {
        ...   /* SIGPIPE / -EPIPE */
    }
    ...
    if (sk->sk_state != TCP_ESTABLISHED)
        goto out;       /* -ENOTCONN */
    ...
    SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");

    /* Build a packet */
    SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");
    ...
    release_sock(sk);
    skb = sock_alloc_send_skb(sk, size, noblock, &rc);
    lock_sock(sk);
    ...
    SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");
    ...
    SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");
    ...
    SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
    SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
    ...
    if (sk->sk_state != TCP_ESTABLISHED)
        goto out_kfree_skb;     /* raced with a disconnect */
    ...
    rc = x25_output(sk, skb);
    ...
    x25_kick(sk);
    ...
out:
    release_sock(sk);
    return rc;

x25_recvmsg():
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    ...
    lock_sock(sk);
    ...
    if (sk->sk_state != TCP_ESTABLISHED)
        goto out;       /* -ENOTCONN */

    if (flags & MSG_OOB) {
        ...
        if (sock_flag(sk, SOCK_URGINLINE) ||
            ...)
            goto out;
        ...
    } else {
        /* Now we can treat all alike */
        release_sock(sk);
        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &rc);
        lock_sock(sk);
        ...
    }
    ...
    x25_check_rbuf(sk);
    ...
    skb_free_datagram(sk, skb);
out:
    release_sock(sk);
    return rc;

x25_ioctl():
    struct sock *sk = sock->sk;
    struct x25_sock *x25 = x25_sk(sk);
    ...
    case TIOCOUTQ:
        amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
        ...
    case TIOCINQ:
        lock_sock(sk);
        if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
            amount = skb->len;
        release_sock(sk);
        ...
    case SIOCGSTAMP:
        if (sk)
            rc = sock_get_timestamp(sk, argp);
        ...
    case SIOCGSTAMPNS:
        if (sk)
            rc = sock_get_timestampns(sk, argp);
        ...
    /* The X.25-specific cases (facilities, calling-address extensions,
       call user data, cause/diagnostics, ...) all follow the same
       shape: lock_sock(sk), validate sk->sk_state (several require a
       state other than TCP_LISTEN/TCP_CLOSE, others exactly TCP_CLOSE
       or TCP_ESTABLISHED), copy the structure in or out, then
       release_sock(sk). The last of them: */
    case SIOCX25SENDCALLACCPT:
        lock_sock(sk);
        if (sk->sk_state != TCP_ESTABLISHED)
            ...
        x25_write_internal(sk, X25_CALL_ACCEPTED);
        ...
        release_sock(sk);
        ...

compat_x25_ioctl():
    struct sock *sk = sock->sk;
    ...
    case SIOCGSTAMP:
        if (sk)
            rc = compat_sock_get_timestamp(sk, argp);
        ...
    case SIOCGSTAMPNS:
        if (sk)
            rc = compat_sock_get_timestampns(sk, argp);
        ...
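x25_wait_for_connection_establishment() above is the canonical sleep-until-predicate loop: park on the wait queue, re-check the state after every wakeup, and drop the socket lock while sleeping. In userspace the same discipline collapses onto a condition variable, which performs the unlock/sleep/relock step that release_sock()/schedule()/lock_sock() spell out in the kernel:

#include <pthread.h>
#include <stdio.h>

enum { ST_SYN_SENT, ST_ESTABLISHED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int state = ST_SYN_SENT;

static void *establish(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    state = ST_ESTABLISHED;          /* what the state machine does */
    pthread_cond_signal(&cond);      /* the sk->sk_state_change() analogue */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, establish, NULL);

    pthread_mutex_lock(&lock);
    while (state != ST_ESTABLISHED)  /* always re-test the predicate */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    puts("connected");
    return 0;
}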
x25_out.c — X.25 transmit path:

int x25_output(struct sock *sk, struct sk_buff *skb)
{
    struct sk_buff *skbn;
    ...
    struct x25_sock *x25 = x25_sk(sk);
    ...
    if (skb->len - header_len > max_len) {
        /* save a copy of the header, then fragment */
        ...
        release_sock(sk);
        skbn = sock_alloc_send_skb(sk, frontlen + max_len,
                                   noblock, &err);
        lock_sock(sk);

        if (!skbn) {
            ...
            SOCK_DEBUG(sk, "x25_output: fragment alloc"
                           " failed, err=%d, %d bytes "
                           "sent\n", err, sent);
            ...
        }
        ...
        skb_queue_tail(&sk->sk_write_queue, skbn);
        ...
    } else {
        skb_queue_tail(&sk->sk_write_queue, skb);
        ...
    }
    return sent;
}

static void x25_send_iframe(struct sock *sk, struct sk_buff *skb)
{
    struct x25_sock *x25 = x25_sk(sk);
    ...   /* stamp N(S)/N(R) into the header, then transmit */
}

void x25_kick(struct sock *sk)
{
    struct sk_buff *skb, *skbn;
    ...
    struct x25_sock *x25 = x25_sk(sk);
    ...
    if (!skb_peek(&sk->sk_write_queue))
        return;
    ...   /* compute the window edge "end" from va and the window size */
    do {
        skb = skb_dequeue(&sk->sk_write_queue);

        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
            skb_queue_head(&sk->sk_write_queue, skb);
            break;
        }

        skb_set_owner_w(skbn, sk);

        /* transmit the frame copy; the original goes to the ack queue */
        x25_send_iframe(sk, skbn);
        ...
    } while (x25->vs != end &&
             (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
    ...
    x25_stop_timer(sk);
}

void x25_enquiry_response(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    if (x25->condition & X25_COND_OWN_RX_BUSY)
        x25_write_internal(sk, X25_RNR);
    else
        x25_write_internal(sk, X25_RR);

    x25->vl         = x25->vr;
    x25->condition &= ~X25_COND_ACK_PENDING;

    x25_stop_timer(sk);
}
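x25_kick() transmits I-frames only while the send window starting at va has room, numbering frames modulo 8 (or 128 in extended mode). The loop's shape, reduced to a standalone sketch with hypothetical window constants:

#include <stdio.h>

#define MODULUS 8    /* standard X.25 modulo-8 sequencing */
#define WINDOW  2    /* hypothetical negotiated window size */

int main(void)
{
    unsigned va = 0;          /* oldest unacknowledged frame */
    unsigned vs = 0;          /* next frame to send */
    int queued = 5;           /* frames waiting on the write queue */

    unsigned end = (va + WINDOW) % MODULUS;   /* window edge */

    /* send while the window has room and the queue is non-empty;
     * sent-but-unacked frames would move to an ack queue so they can
     * be requeued on a reset (x25_requeue_frames) */
    while (queued > 0 && vs != end) {
        printf("send I-frame ns=%u\n", vs);
        vs = (vs + 1) % MODULUS;
        queued--;
    }
    printf("window full or queue empty: %d frame(s) left\n", queued);
    return 0;
}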
x25_subr.c — X.25 support routines:

void x25_clear_queues(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    skb_queue_purge(&sk->sk_write_queue);
    skb_queue_purge(&x25->ack_queue);
    skb_queue_purge(&x25->interrupt_in_queue);
    skb_queue_purge(&x25->interrupt_out_queue);
    skb_queue_purge(&x25->fragment_queue);
}

/* Drop the frames that the peer's N(R) acknowledges: V(a) <- N(r). */
void x25_frames_acked(struct sock *sk, unsigned short nr)
{
    struct sk_buff *skb;
    struct x25_sock *x25 = x25_sk(sk);
    int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

    if (x25->va != nr)
        while (skb_peek(&x25->ack_queue) && x25->va != nr) {
            skb = skb_dequeue(&x25->ack_queue);
            kfree_skb(skb);
            x25->va = (x25->va + 1) % modulus;
        }
}

/* Put the unacked frames back on the write queue for x25_kick(). */
void x25_requeue_frames(struct sock *sk)
{
    struct sk_buff *skb, *skb_prev = NULL;

    while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
        if (!skb_prev)
            skb_queue_head(&sk->sk_write_queue, skb);
        else
            skb_append(skb_prev, skb, &sk->sk_write_queue);
        skb_prev = skb;
    }
}

/* Validate that nr lies between va and vs, stepping modulo 8 or 128. */
int x25_validate_nr(struct sock *sk, unsigned short nr)
{
    struct x25_sock *x25 = x25_sk(sk);
    unsigned short vc = x25->va;
    int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

    while (vc != x25->vs) {
        if (nr == vc)
            return 1;
        vc = (vc + 1) % modulus;
    }

    return nr == x25->vs ? 1 : 0;
}

void x25_write_internal(struct sock *sk, int frametype)
{
    struct x25_sock *x25 = x25_sk(sk);
    ...   /* build the GFI/LCI header plus per-frametype fields, then
             hand the skb to the link layer */
}

int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
               int *d, int *m)
{
    struct x25_sock *x25 = x25_sk(sk);
    ...   /* classify the frame and extract N(S)/N(R) and the Q/D/M bits */
}

void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
                    unsigned char diagnostic)
{
    struct x25_sock *x25 = x25_sk(sk);

    x25_clear_queues(sk);
    x25_stop_timer(sk);
    ...
    sk->sk_state     = TCP_CLOSE;
    sk->sk_err       = reason;
    sk->sk_shutdown |= SEND_SHUTDOWN;

    if (!sock_flag(sk, SOCK_DEAD)) {
        sk->sk_state_change(sk);
        sock_set_flag(sk, SOCK_DEAD);
    }
}

/* If the receive buffer has drained below half while we were busy,
 * leave the busy condition and send RR. */
void x25_check_rbuf(struct sock *sk)
{
    struct x25_sock *x25 = x25_sk(sk);

    if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
        (x25->condition & X25_COND_OWN_RX_BUSY)) {
        x25->condition &= ~X25_COND_OWN_RX_BUSY;
        x25->condition &= ~X25_COND_ACK_PENDING;
        x25->vl         = x25->vr;
        x25_write_internal(sk, X25_RR);
        x25_stop_timer(sk);
    }
}
|
/linux-4.1.27/net/nfc/ |
H A D | rawsock.c | 34 static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk) nfc_sock_link() argument 37 sk_add_node(sk, &l->head); nfc_sock_link() 41 static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk) nfc_sock_unlink() argument 44 sk_del_node_init(sk); nfc_sock_unlink() 48 static void rawsock_write_queue_purge(struct sock *sk) rawsock_write_queue_purge() argument 50 pr_debug("sk=%p\n", sk); rawsock_write_queue_purge() 52 spin_lock_bh(&sk->sk_write_queue.lock); rawsock_write_queue_purge() 53 __skb_queue_purge(&sk->sk_write_queue); rawsock_write_queue_purge() 54 nfc_rawsock(sk)->tx_work_scheduled = false; rawsock_write_queue_purge() 55 spin_unlock_bh(&sk->sk_write_queue.lock); rawsock_write_queue_purge() 58 static void rawsock_report_error(struct sock *sk, int err) rawsock_report_error() argument 60 pr_debug("sk=%p err=%d\n", sk, err); rawsock_report_error() 62 sk->sk_shutdown = SHUTDOWN_MASK; rawsock_report_error() 63 sk->sk_err = -err; rawsock_report_error() 64 sk->sk_error_report(sk); rawsock_report_error() 66 rawsock_write_queue_purge(sk); rawsock_report_error() 71 struct sock *sk = sock->sk; rawsock_release() local 73 pr_debug("sock=%p sk=%p\n", sock, sk); rawsock_release() 75 if (!sk) rawsock_release() 79 nfc_sock_unlink(&raw_sk_list, sk); rawsock_release() 81 sock_orphan(sk); rawsock_release() 82 sock_put(sk); rawsock_release() 90 struct sock *sk = sock->sk; rawsock_connect() local 95 pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); rawsock_connect() 104 lock_sock(sk); rawsock_connect() 127 nfc_rawsock(sk)->dev = dev; rawsock_connect() 128 nfc_rawsock(sk)->target_idx = addr->target_idx; rawsock_connect() 130 sk->sk_state = TCP_ESTABLISHED; rawsock_connect() 131 sk->sk_state_change(sk); rawsock_connect() 133 release_sock(sk); rawsock_connect() 139 release_sock(sk); rawsock_connect() 153 struct sock *sk = (struct sock *) context; rawsock_data_exchange_complete() local 157 pr_debug("sk=%p err=%d\n", sk, err); rawsock_data_exchange_complete() 166 err = sock_queue_rcv_skb(sk, skb); rawsock_data_exchange_complete() 170 spin_lock_bh(&sk->sk_write_queue.lock); rawsock_data_exchange_complete() 171 if (!skb_queue_empty(&sk->sk_write_queue)) rawsock_data_exchange_complete() 172 schedule_work(&nfc_rawsock(sk)->tx_work); rawsock_data_exchange_complete() 174 nfc_rawsock(sk)->tx_work_scheduled = false; rawsock_data_exchange_complete() 175 spin_unlock_bh(&sk->sk_write_queue.lock); rawsock_data_exchange_complete() 177 sock_put(sk); rawsock_data_exchange_complete() 184 rawsock_report_error(sk, err); rawsock_data_exchange_complete() 185 sock_put(sk); rawsock_data_exchange_complete() 190 struct sock *sk = to_rawsock_sk(work); rawsock_tx_work() local 191 struct nfc_dev *dev = nfc_rawsock(sk)->dev; rawsock_tx_work() 192 u32 target_idx = nfc_rawsock(sk)->target_idx; rawsock_tx_work() 196 pr_debug("sk=%p target_idx=%u\n", sk, target_idx); rawsock_tx_work() 198 if (sk->sk_shutdown & SEND_SHUTDOWN) { rawsock_tx_work() 199 rawsock_write_queue_purge(sk); rawsock_tx_work() 203 skb = skb_dequeue(&sk->sk_write_queue); rawsock_tx_work() 205 sock_hold(sk); rawsock_tx_work() 207 rawsock_data_exchange_complete, sk); rawsock_tx_work() 209 rawsock_report_error(sk, rc); rawsock_tx_work() 210 sock_put(sk); rawsock_tx_work() 216 struct sock *sk = sock->sk; rawsock_sendmsg() local 217 struct nfc_dev *dev = nfc_rawsock(sk)->dev; rawsock_sendmsg() 221 pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len); rawsock_sendmsg() 229 skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc); 
rawsock_sendmsg() 239 spin_lock_bh(&sk->sk_write_queue.lock); rawsock_sendmsg() 240 __skb_queue_tail(&sk->sk_write_queue, skb); rawsock_sendmsg() 241 if (!nfc_rawsock(sk)->tx_work_scheduled) { rawsock_sendmsg() 242 schedule_work(&nfc_rawsock(sk)->tx_work); rawsock_sendmsg() 243 nfc_rawsock(sk)->tx_work_scheduled = true; rawsock_sendmsg() 245 spin_unlock_bh(&sk->sk_write_queue.lock); rawsock_sendmsg() 254 struct sock *sk = sock->sk; rawsock_recvmsg() local 259 pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); rawsock_recvmsg() 261 skb = skb_recv_datagram(sk, flags, noblock, &rc); rawsock_recvmsg() 273 skb_free_datagram(sk, skb); rawsock_recvmsg() 318 static void rawsock_destruct(struct sock *sk) rawsock_destruct() argument 320 pr_debug("sk=%p\n", sk); rawsock_destruct() 322 if (sk->sk_state == TCP_ESTABLISHED) { rawsock_destruct() 323 nfc_deactivate_target(nfc_rawsock(sk)->dev, rawsock_destruct() 324 nfc_rawsock(sk)->target_idx); rawsock_destruct() 325 nfc_put_device(nfc_rawsock(sk)->dev); rawsock_destruct() 328 skb_queue_purge(&sk->sk_receive_queue); rawsock_destruct() 330 if (!sock_flag(sk, SOCK_DEAD)) { rawsock_destruct() 331 pr_err("Freeing alive NFC raw socket %p\n", sk); rawsock_destruct() 339 struct sock *sk; rawsock_create() local 351 sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto); rawsock_create() 352 if (!sk) rawsock_create() 355 sock_init_data(sock, sk); rawsock_create() 356 sk->sk_protocol = nfc_proto->id; rawsock_create() 357 sk->sk_destruct = rawsock_destruct; rawsock_create() 360 nfc_sock_link(&raw_sk_list, sk); rawsock_create() 362 INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); rawsock_create() 363 nfc_rawsock(sk)->tx_work_scheduled = false; rawsock_create() 373 struct sock *sk; nfc_send_to_raw_sock() local 378 sk_for_each(sk, &raw_sk_list.head) { nfc_send_to_raw_sock() 396 if (sock_queue_rcv_skb(sk, nskb)) nfc_send_to_raw_sock()
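rawsock_sendmsg() above never transmits directly: it queues the skb under the write-queue spinlock and schedules tx_work only if it is not already pending, while the completion handler either reschedules the work or clears the flag under the same lock. A sketch of that schedule-at-most-once pattern; the helper name and the bool-by-pointer flag stand in for the tx_work_scheduled field:

    #include <linux/skbuff.h>
    #include <linux/workqueue.h>
    #include <net/sock.h>

    static void queue_for_tx(struct sock *sk, struct sk_buff *skb,
                             struct work_struct *tx_work, bool *scheduled)
    {
            spin_lock_bh(&sk->sk_write_queue.lock);
            __skb_queue_tail(&sk->sk_write_queue, skb);
            if (!*scheduled) {              /* flag guarded by the queue lock */
                    schedule_work(tx_work);
                    *scheduled = true;
            }
            spin_unlock_bh(&sk->sk_write_queue.lock);
    }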
|
H A D | llcp_sock.c | 28 static int sock_wait_state(struct sock *sk, int state, unsigned long timeo) sock_wait_state() argument 33 pr_debug("sk %p", sk); sock_wait_state() 35 add_wait_queue(sk_sleep(sk), &wait); sock_wait_state() 38 while (sk->sk_state != state) { sock_wait_state() 49 release_sock(sk); sock_wait_state() 51 lock_sock(sk); sock_wait_state() 54 err = sock_error(sk); sock_wait_state() 60 remove_wait_queue(sk_sleep(sk), &wait); sock_wait_state() 72 struct sock *sk = sock->sk; llcp_sock_bind() local 73 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); llcp_sock_bind() 82 pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); llcp_sock_bind() 92 lock_sock(sk); llcp_sock_bind() 94 if (sk->sk_state != LLCP_CLOSED) { llcp_sock_bind() 129 nfc_llcp_sock_link(&local->sockets, sk); llcp_sock_bind() 133 sk->sk_state = LLCP_BOUND; llcp_sock_bind() 139 release_sock(sk); llcp_sock_bind() 146 struct sock *sk = sock->sk; llcp_raw_sock_bind() local 147 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); llcp_raw_sock_bind() 156 pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); llcp_raw_sock_bind() 162 lock_sock(sk); llcp_raw_sock_bind() 164 if (sk->sk_state != LLCP_CLOSED) { llcp_raw_sock_bind() 185 nfc_llcp_sock_link(&local->raw_sockets, sk); llcp_raw_sock_bind() 187 sk->sk_state = LLCP_BOUND; llcp_raw_sock_bind() 193 release_sock(sk); llcp_raw_sock_bind() 199 struct sock *sk = sock->sk; llcp_sock_listen() local 202 pr_debug("sk %p backlog %d\n", sk, backlog); llcp_sock_listen() 204 lock_sock(sk); llcp_sock_listen() 207 sk->sk_state != LLCP_BOUND) { llcp_sock_listen() 212 sk->sk_max_ack_backlog = backlog; llcp_sock_listen() 213 sk->sk_ack_backlog = 0; llcp_sock_listen() 216 sk->sk_state = LLCP_LISTEN; llcp_sock_listen() 219 release_sock(sk); llcp_sock_listen() 227 struct sock *sk = sock->sk; nfc_llcp_setsockopt() local 228 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); nfc_llcp_setsockopt() 232 pr_debug("%p optname %d\n", sk, optname); nfc_llcp_setsockopt() 237 lock_sock(sk); nfc_llcp_setsockopt() 241 if (sk->sk_state == LLCP_CONNECTED || nfc_llcp_setsockopt() 242 sk->sk_state == LLCP_BOUND || nfc_llcp_setsockopt() 243 sk->sk_state == LLCP_LISTEN) { nfc_llcp_setsockopt() 263 if (sk->sk_state == LLCP_CONNECTED || nfc_llcp_setsockopt() 264 sk->sk_state == LLCP_BOUND || nfc_llcp_setsockopt() 265 sk->sk_state == LLCP_LISTEN) { nfc_llcp_setsockopt() 289 release_sock(sk); nfc_llcp_setsockopt() 301 struct sock *sk = sock->sk; nfc_llcp_getsockopt() local 302 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); nfc_llcp_getsockopt() 307 pr_debug("%p optname %d\n", sk, optname); nfc_llcp_getsockopt() 321 lock_sock(sk); nfc_llcp_getsockopt() 366 release_sock(sk); nfc_llcp_getsockopt() 374 void nfc_llcp_accept_unlink(struct sock *sk) nfc_llcp_accept_unlink() argument 376 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); nfc_llcp_accept_unlink() 378 pr_debug("state %d\n", sk->sk_state); nfc_llcp_accept_unlink() 384 sock_put(sk); nfc_llcp_accept_unlink() 387 void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk) nfc_llcp_accept_enqueue() argument 389 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); nfc_llcp_accept_enqueue() 393 sock_hold(sk); nfc_llcp_accept_enqueue() 405 struct sock *sk; nfc_llcp_accept_dequeue() local 411 sk = &lsk->sk; nfc_llcp_accept_dequeue() 412 lock_sock(sk); nfc_llcp_accept_dequeue() 414 if (sk->sk_state == LLCP_CLOSED) { nfc_llcp_accept_dequeue() 415 release_sock(sk); nfc_llcp_accept_dequeue() 416 nfc_llcp_accept_unlink(sk); 
nfc_llcp_accept_dequeue() 420 if (sk->sk_state == LLCP_CONNECTED || !newsock) { nfc_llcp_accept_dequeue() 422 sock_put(sk); nfc_llcp_accept_dequeue() 425 sock_graft(sk, newsock); nfc_llcp_accept_dequeue() 427 release_sock(sk); nfc_llcp_accept_dequeue() 429 pr_debug("Returning sk state %d\n", sk->sk_state); nfc_llcp_accept_dequeue() 433 return sk; nfc_llcp_accept_dequeue() 436 release_sock(sk); nfc_llcp_accept_dequeue() 446 struct sock *sk = sock->sk, *new_sk; llcp_sock_accept() local 450 pr_debug("parent %p\n", sk); llcp_sock_accept() 452 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); llcp_sock_accept() 454 if (sk->sk_state != LLCP_LISTEN) { llcp_sock_accept() 459 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); llcp_sock_accept() 462 add_wait_queue_exclusive(sk_sleep(sk), &wait); llcp_sock_accept() 463 while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) { llcp_sock_accept() 476 release_sock(sk); llcp_sock_accept() 478 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); llcp_sock_accept() 481 remove_wait_queue(sk_sleep(sk), &wait); llcp_sock_accept() 491 release_sock(sk); llcp_sock_accept() 499 struct sock *sk = sock->sk; llcp_sock_getname() local 500 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); llcp_sock_getname() 506 pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx, llcp_sock_getname() 528 struct sock *sk; llcp_accept_poll() local 534 sk = &llcp_sock->sk; llcp_accept_poll() 536 if (sk->sk_state == LLCP_CONNECTED) llcp_accept_poll() 546 struct sock *sk = sock->sk; llcp_sock_poll() local 549 pr_debug("%p\n", sk); llcp_sock_poll() 551 sock_poll_wait(file, sk_sleep(sk), wait); llcp_sock_poll() 553 if (sk->sk_state == LLCP_LISTEN) llcp_sock_poll() 554 return llcp_accept_poll(sk); llcp_sock_poll() 556 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) llcp_sock_poll() 558 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
POLLPRI : 0); llcp_sock_poll() 560 if (!skb_queue_empty(&sk->sk_receive_queue)) llcp_sock_poll() 563 if (sk->sk_state == LLCP_CLOSED) llcp_sock_poll() 566 if (sk->sk_shutdown & RCV_SHUTDOWN) llcp_sock_poll() 569 if (sk->sk_shutdown == SHUTDOWN_MASK) llcp_sock_poll() 572 if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) llcp_sock_poll() 575 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); llcp_sock_poll() 584 struct sock *sk = sock->sk; llcp_sock_release() local 586 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); llcp_sock_release() 589 if (!sk) llcp_sock_release() 592 pr_debug("%p\n", sk); llcp_sock_release() 600 lock_sock(sk); llcp_sock_release() 603 if (sk->sk_state == LLCP_CONNECTED) llcp_sock_release() 606 if (sk->sk_state == LLCP_LISTEN) { llcp_sock_release() 612 accept_sk = &lsk->sk; llcp_sock_release() 625 release_sock(sk); llcp_sock_release() 631 if (sk->sk_state == LLCP_DISCONNECTING) llcp_sock_release() 635 nfc_llcp_sock_unlink(&local->raw_sockets, sk); llcp_sock_release() 637 nfc_llcp_sock_unlink(&local->sockets, sk); llcp_sock_release() 640 sock_orphan(sk); llcp_sock_release() 641 sock_put(sk); llcp_sock_release() 649 struct sock *sk = sock->sk; llcp_sock_connect() local 650 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); llcp_sock_connect() 656 pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); llcp_sock_connect() 668 lock_sock(sk); llcp_sock_connect() 670 if (sk->sk_state == LLCP_CONNECTED) { llcp_sock_connect() 723 nfc_llcp_sock_link(&local->connecting_sockets, sk); llcp_sock_connect() 729 sk->sk_state = LLCP_CONNECTING; llcp_sock_connect() 731 ret = sock_wait_state(sk, LLCP_CONNECTED, llcp_sock_connect() 732 sock_sndtimeo(sk, flags & O_NONBLOCK)); llcp_sock_connect() 736 release_sock(sk); llcp_sock_connect() 743 nfc_llcp_sock_unlink(&local->connecting_sockets, sk); llcp_sock_connect() 749 release_sock(sk); llcp_sock_connect() 756 struct sock *sk = sock->sk; llcp_sock_sendmsg() local 757 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); llcp_sock_sendmsg() 760 pr_debug("sock %p sk %p", sock, sk); llcp_sock_sendmsg() 762 ret = sock_error(sk); llcp_sock_sendmsg() 769 lock_sock(sk); llcp_sock_sendmsg() 771 if (sk->sk_type == SOCK_DGRAM) { llcp_sock_sendmsg() 776 release_sock(sk); llcp_sock_sendmsg() 780 release_sock(sk); llcp_sock_sendmsg() 786 if (sk->sk_state != LLCP_CONNECTED) { llcp_sock_sendmsg() 787 release_sock(sk); llcp_sock_sendmsg() 791 release_sock(sk); llcp_sock_sendmsg() 800 struct sock *sk = sock->sk; llcp_sock_recvmsg() local 805 pr_debug("%p %zu\n", sk, len); llcp_sock_recvmsg() 807 lock_sock(sk); llcp_sock_recvmsg() 809 if (sk->sk_state == LLCP_CLOSED && llcp_sock_recvmsg() 810 skb_queue_empty(&sk->sk_receive_queue)) { llcp_sock_recvmsg() 811 release_sock(sk); llcp_sock_recvmsg() 815 release_sock(sk); llcp_sock_recvmsg() 820 skb = skb_recv_datagram(sk, flags, noblock, &err); llcp_sock_recvmsg() 823 sk->sk_state, err, sock_error(sk)); llcp_sock_recvmsg() 825 if (sk->sk_shutdown & RCV_SHUTDOWN) llcp_sock_recvmsg() 837 skb_queue_head(&sk->sk_receive_queue, skb); llcp_sock_recvmsg() 841 sock_recv_timestamp(msg, sk, skb); llcp_sock_recvmsg() 843 if (sk->sk_type == SOCK_DGRAM && msg->msg_name) { llcp_sock_recvmsg() 863 if (sk->sk_type == SOCK_STREAM || llcp_sock_recvmsg() 864 sk->sk_type == SOCK_DGRAM || llcp_sock_recvmsg() 865 sk->sk_type == SOCK_RAW) { llcp_sock_recvmsg() 868 skb_queue_head(&sk->sk_receive_queue, skb); llcp_sock_recvmsg() 880 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) llcp_sock_recvmsg() 926 static void 
llcp_sock_destruct(struct sock *sk) llcp_sock_destruct() argument 928 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); llcp_sock_destruct() 930 pr_debug("%p\n", sk); llcp_sock_destruct() 932 if (sk->sk_state == LLCP_CONNECTED) llcp_sock_destruct() 935 skb_queue_purge(&sk->sk_receive_queue); llcp_sock_destruct() 939 if (!sock_flag(sk, SOCK_DEAD)) { llcp_sock_destruct() 940 pr_err("Freeing alive NFC LLCP socket %p\n", sk); llcp_sock_destruct() 947 struct sock *sk; nfc_llcp_sock_alloc() local 950 sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto); nfc_llcp_sock_alloc() 951 if (!sk) nfc_llcp_sock_alloc() 954 llcp_sock = nfc_llcp_sock(sk); nfc_llcp_sock_alloc() 956 sock_init_data(sock, sk); nfc_llcp_sock_alloc() 957 sk->sk_state = LLCP_CLOSED; nfc_llcp_sock_alloc() 958 sk->sk_protocol = NFC_SOCKPROTO_LLCP; nfc_llcp_sock_alloc() 959 sk->sk_type = type; nfc_llcp_sock_alloc() 960 sk->sk_destruct = llcp_sock_destruct; nfc_llcp_sock_alloc() 978 return sk; nfc_llcp_sock_alloc() 998 struct sock *sk; llcp_sock_create() local 1012 sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC); llcp_sock_create() 1013 if (sk == NULL) llcp_sock_create()
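Both sock_wait_state() and llcp_sock_accept() above follow the same sleeping pattern: park on the socket's wait queue, drop the socket lock across schedule_timeout(), retake it, and re-check the condition. A minimal sketch of that loop, with error handling trimmed to the essentials:

    #include <linux/sched.h>
    #include <linux/wait.h>
    #include <net/sock.h>

    static int wait_for_state(struct sock *sk, int state, long timeo)
    {
            DEFINE_WAIT(wait);
            int err = 0;

            while (sk->sk_state != state) {
                    prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                    if (!timeo) {
                            err = -EAGAIN;
                            break;
                    }
                    if (signal_pending(current)) {
                            err = sock_intr_errno(timeo);
                            break;
                    }
                    release_sock(sk);       /* sleep without the lock */
                    timeo = schedule_timeout(timeo);
                    lock_sock(sk);
                    err = sock_error(sk);
                    if (err)
                            break;
            }
            finish_wait(sk_sleep(sk), &wait);
            return err;
    }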
|
H A D | llcp_core.c | 35 void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk) nfc_llcp_sock_link() argument 38 sk_add_node(sk, &l->head); nfc_llcp_sock_link() 42 void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk) nfc_llcp_sock_unlink() argument 45 sk_del_node_init(sk); nfc_llcp_sock_unlink() 60 pr_debug("%p\n", &sock->sk); nfc_llcp_socket_purge() 70 if (s->sk != &sock->sk) nfc_llcp_socket_purge() 81 struct sock *sk; nfc_llcp_socket_release() local 89 sk_for_each_safe(sk, tmp, &local->sockets.head) { nfc_llcp_socket_release() 90 llcp_sock = nfc_llcp_sock(sk); nfc_llcp_socket_release() 92 bh_lock_sock(sk); nfc_llcp_socket_release() 96 if (sk->sk_state == LLCP_CONNECTED) nfc_llcp_socket_release() 99 if (sk->sk_state == LLCP_LISTEN) { nfc_llcp_socket_release() 106 accept_sk = &lsk->sk; nfc_llcp_socket_release() 114 accept_sk->sk_state_change(sk); nfc_llcp_socket_release() 121 sk->sk_err = err; nfc_llcp_socket_release() 122 sk->sk_state = LLCP_CLOSED; nfc_llcp_socket_release() 123 sk->sk_state_change(sk); nfc_llcp_socket_release() 125 bh_unlock_sock(sk); nfc_llcp_socket_release() 127 sk_del_node_init(sk); nfc_llcp_socket_release() 138 sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { nfc_llcp_socket_release() 139 llcp_sock = nfc_llcp_sock(sk); nfc_llcp_socket_release() 141 bh_lock_sock(sk); nfc_llcp_socket_release() 146 sk->sk_err = err; nfc_llcp_socket_release() 147 sk->sk_state = LLCP_CLOSED; nfc_llcp_socket_release() 148 sk->sk_state_change(sk); nfc_llcp_socket_release() 150 bh_unlock_sock(sk); nfc_llcp_socket_release() 152 sk_del_node_init(sk); nfc_llcp_socket_release() 201 struct sock *sk; nfc_llcp_sock_get() local 213 sk_for_each(sk, &local->sockets.head) { nfc_llcp_sock_get() 214 tmp_sock = nfc_llcp_sock(sk); nfc_llcp_sock_get() 227 sock_hold(&llcp_sock->sk); nfc_llcp_sock_get() 234 sock_put(&sock->sk); nfc_llcp_sock_put() 342 struct sock *sk; nfc_llcp_sock_from_sn() local 354 sk_for_each(sk, &local->sockets.head) { nfc_llcp_sock_from_sn() 355 tmp_sock = nfc_llcp_sock(sk); nfc_llcp_sock_from_sn() 359 if (tmp_sock->sk.sk_type == SOCK_STREAM && nfc_llcp_sock_from_sn() 360 tmp_sock->sk.sk_state != LLCP_LISTEN) nfc_llcp_sock_from_sn() 363 if (tmp_sock->sk.sk_type == SOCK_DGRAM && nfc_llcp_sock_from_sn() 364 tmp_sock->sk.sk_state != LLCP_BOUND) nfc_llcp_sock_from_sn() 674 struct sock *sk; nfc_llcp_send_to_raw_sock() local 679 sk_for_each(sk, &local->raw_sockets.head) { nfc_llcp_send_to_raw_sock() 680 if (sk->sk_state != LLCP_BOUND) nfc_llcp_send_to_raw_sock() 701 if (sock_queue_rcv_skb(sk, nskb)) nfc_llcp_send_to_raw_sock() 715 struct sock *sk; nfc_llcp_tx_work() local 720 sk = skb->sk; nfc_llcp_tx_work() 721 llcp_sock = nfc_llcp_sock(sk); nfc_llcp_tx_work() 739 if (ptype == LLCP_PDU_DISC && sk != NULL && nfc_llcp_tx_work() 740 sk->sk_state == LLCP_DISCONNECTING) { nfc_llcp_tx_work() 741 nfc_llcp_sock_unlink(&local->sockets, sk); nfc_llcp_tx_work() 742 sock_orphan(sk); nfc_llcp_tx_work() 743 sock_put(sk); nfc_llcp_tx_work() 778 struct sock *sk; nfc_llcp_connecting_sock_get() local 783 sk_for_each(sk, &local->connecting_sockets.head) { nfc_llcp_connecting_sock_get() 784 llcp_sock = nfc_llcp_sock(sk); nfc_llcp_connecting_sock_get() 787 sock_hold(&llcp_sock->sk); nfc_llcp_connecting_sock_get() 810 sock_hold(&llcp_sock->sk); nfc_llcp_sock_get_sn() 856 if (llcp_sock == NULL || llcp_sock->sk.sk_type != SOCK_DGRAM) nfc_llcp_recv_ui() 861 if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) { nfc_llcp_recv_ui() 888 if (sock == NULL || sock->sk.sk_state != LLCP_LISTEN) { 
nfc_llcp_recv_connect() 911 lock_sock(&sock->sk); nfc_llcp_recv_connect() 913 parent = &sock->sk; nfc_llcp_recv_connect() 917 release_sock(&sock->sk); nfc_llcp_recv_connect() 918 sock_put(&sock->sk); nfc_llcp_recv_connect() 929 release_sock(&sock->sk); nfc_llcp_recv_connect() 930 sock_put(&sock->sk); nfc_llcp_recv_connect() 940 release_sock(&sock->sk); nfc_llcp_recv_connect() 941 sock_put(&sock->sk); nfc_llcp_recv_connect() 970 pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk); nfc_llcp_recv_connect() 974 nfc_llcp_accept_enqueue(&sock->sk, new_sk); nfc_llcp_recv_connect() 986 release_sock(&sock->sk); nfc_llcp_recv_connect() 987 sock_put(&sock->sk); nfc_llcp_recv_connect() 1028 struct sock *sk; nfc_llcp_recv_hdlc() local 1045 sk = &llcp_sock->sk; nfc_llcp_recv_hdlc() 1046 lock_sock(sk); nfc_llcp_recv_hdlc() 1047 if (sk->sk_state == LLCP_CLOSED) { nfc_llcp_recv_hdlc() 1048 release_sock(sk); nfc_llcp_recv_hdlc() 1054 pr_debug("I frame, queueing on %p\n", &llcp_sock->sk); nfc_llcp_recv_hdlc() 1062 if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) { nfc_llcp_recv_hdlc() 1107 release_sock(sk); nfc_llcp_recv_hdlc() 1115 struct sock *sk; nfc_llcp_recv_disc() local 1133 sk = &llcp_sock->sk; nfc_llcp_recv_disc() 1134 lock_sock(sk); nfc_llcp_recv_disc() 1138 if (sk->sk_state == LLCP_CLOSED) { nfc_llcp_recv_disc() 1139 release_sock(sk); nfc_llcp_recv_disc() 1143 if (sk->sk_state == LLCP_CONNECTED) { nfc_llcp_recv_disc() 1145 sk->sk_state = LLCP_CLOSED; nfc_llcp_recv_disc() 1146 sk->sk_state_change(sk); nfc_llcp_recv_disc() 1151 release_sock(sk); nfc_llcp_recv_disc() 1158 struct sock *sk; nfc_llcp_recv_cc() local 1172 sk = &llcp_sock->sk; nfc_llcp_recv_cc() 1175 nfc_llcp_sock_unlink(&local->connecting_sockets, sk); nfc_llcp_recv_cc() 1176 nfc_llcp_sock_link(&local->sockets, sk); nfc_llcp_recv_cc() 1182 sk->sk_state = LLCP_CONNECTED; nfc_llcp_recv_cc() 1183 sk->sk_state_change(sk); nfc_llcp_recv_cc() 1191 struct sock *sk; nfc_llcp_recv_dm() local 1216 sk = &llcp_sock->sk; nfc_llcp_recv_dm() 1218 sk->sk_err = ENXIO; nfc_llcp_recv_dm() 1219 sk->sk_state = LLCP_CLOSED; nfc_llcp_recv_dm() 1220 sk->sk_state_change(sk); nfc_llcp_recv_dm()
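nfc_llcp_send_to_raw_sock() above fans one frame out to every bound raw socket, giving each listener its own clone so receive-queue accounting stays per-socket. A sketch of that clone-per-listener broadcast; LLCP_BOUND and the hlist head mirror the excerpt (LLCP_BOUND lives in the NFC-private net/nfc/llcp.h):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void send_to_raw_socks(struct hlist_head *head, struct sk_buff *skb)
    {
            struct sock *sk;

            sk_for_each(sk, head) {
                    struct sk_buff *nskb;

                    if (sk->sk_state != LLCP_BOUND)
                            continue;
                    nskb = skb_clone(skb, GFP_ATOMIC);
                    if (!nskb)
                            continue;
                    if (sock_queue_rcv_skb(sk, nskb))
                            kfree_skb(nskb);        /* queue full: drop the clone */
            }
    }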
|
/linux-4.1.27/net/rose/ |
H A D | rose_timer.c | 35 void rose_start_heartbeat(struct sock *sk) rose_start_heartbeat() argument 37 del_timer(&sk->sk_timer); rose_start_heartbeat() 39 sk->sk_timer.data = (unsigned long)sk; rose_start_heartbeat() 40 sk->sk_timer.function = &rose_heartbeat_expiry; rose_start_heartbeat() 41 sk->sk_timer.expires = jiffies + 5 * HZ; rose_start_heartbeat() 43 add_timer(&sk->sk_timer); rose_start_heartbeat() 46 void rose_start_t1timer(struct sock *sk) rose_start_t1timer() argument 48 struct rose_sock *rose = rose_sk(sk); rose_start_t1timer() 52 rose->timer.data = (unsigned long)sk; rose_start_t1timer() 59 void rose_start_t2timer(struct sock *sk) rose_start_t2timer() argument 61 struct rose_sock *rose = rose_sk(sk); rose_start_t2timer() 65 rose->timer.data = (unsigned long)sk; rose_start_t2timer() 72 void rose_start_t3timer(struct sock *sk) rose_start_t3timer() argument 74 struct rose_sock *rose = rose_sk(sk); rose_start_t3timer() 78 rose->timer.data = (unsigned long)sk; rose_start_t3timer() 85 void rose_start_hbtimer(struct sock *sk) rose_start_hbtimer() argument 87 struct rose_sock *rose = rose_sk(sk); rose_start_hbtimer() 91 rose->timer.data = (unsigned long)sk; rose_start_hbtimer() 98 void rose_start_idletimer(struct sock *sk) rose_start_idletimer() argument 100 struct rose_sock *rose = rose_sk(sk); rose_start_idletimer() 105 rose->idletimer.data = (unsigned long)sk; rose_start_idletimer() 113 void rose_stop_heartbeat(struct sock *sk) rose_stop_heartbeat() argument 115 del_timer(&sk->sk_timer); rose_stop_heartbeat() 118 void rose_stop_timer(struct sock *sk) rose_stop_timer() argument 120 del_timer(&rose_sk(sk)->timer); rose_stop_timer() 123 void rose_stop_idletimer(struct sock *sk) rose_stop_idletimer() argument 125 del_timer(&rose_sk(sk)->idletimer); rose_stop_idletimer() 130 struct sock *sk = (struct sock *)param; rose_heartbeat_expiry() local 131 struct rose_sock *rose = rose_sk(sk); rose_heartbeat_expiry() 133 bh_lock_sock(sk); rose_heartbeat_expiry() 138 if (sock_flag(sk, SOCK_DESTROY) || rose_heartbeat_expiry() 139 (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { rose_heartbeat_expiry() 140 bh_unlock_sock(sk); rose_heartbeat_expiry() 141 rose_destroy_socket(sk); rose_heartbeat_expiry() 150 if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && rose_heartbeat_expiry() 155 rose_write_internal(sk, ROSE_RR); rose_heartbeat_expiry() 156 rose_stop_timer(sk); /* HB */ rose_heartbeat_expiry() 162 rose_start_heartbeat(sk); rose_heartbeat_expiry() 163 bh_unlock_sock(sk); rose_heartbeat_expiry() 168 struct sock *sk = (struct sock *)param; rose_timer_expiry() local 169 struct rose_sock *rose = rose_sk(sk); rose_timer_expiry() 171 bh_lock_sock(sk); rose_timer_expiry() 175 rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_timer_expiry() 177 rose_start_t3timer(sk); rose_timer_expiry() 182 rose_disconnect(sk, ETIMEDOUT, -1, -1); rose_timer_expiry() 188 rose_enquiry_response(sk); rose_timer_expiry() 192 bh_unlock_sock(sk); rose_timer_expiry() 197 struct sock *sk = (struct sock *)param; rose_idletimer_expiry() local 199 bh_lock_sock(sk); rose_idletimer_expiry() 200 rose_clear_queues(sk); rose_idletimer_expiry() 202 rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_idletimer_expiry() 203 rose_sk(sk)->state = ROSE_STATE_2; rose_idletimer_expiry() 205 rose_start_t3timer(sk); rose_idletimer_expiry() 207 sk->sk_state = TCP_CLOSE; rose_idletimer_expiry() 208 sk->sk_err = 0; rose_idletimer_expiry() 209 sk->sk_shutdown |= SEND_SHUTDOWN; rose_idletimer_expiry() 211 if (!sock_flag(sk, 
SOCK_DEAD)) { rose_idletimer_expiry() 212 sk->sk_state_change(sk); rose_idletimer_expiry() 213 sock_set_flag(sk, SOCK_DEAD); rose_idletimer_expiry() 215 bh_unlock_sock(sk); rose_idletimer_expiry()
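The ROSE timers use the 4.1-era timer API, in which the callback receives an unsigned long that smuggles in the sock pointer, and the heartbeat rearms itself on every tick. A condensed sketch of that self-rearming shape:

    #include <linux/timer.h>
    #include <net/sock.h>

    static void heartbeat_expiry(unsigned long param)
    {
            struct sock *sk = (struct sock *)param;

            bh_lock_sock(sk);
            /* ... per-tick housekeeping under the bottom-half lock ... */
            mod_timer(&sk->sk_timer, jiffies + 5 * HZ);     /* rearm */
            bh_unlock_sock(sk);
    }

    static void start_heartbeat(struct sock *sk)
    {
            setup_timer(&sk->sk_timer, heartbeat_expiry, (unsigned long)sk);
            mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
    }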
|
H A D | rose_in.c | 39 static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) rose_state1_machine() argument 41 struct rose_sock *rose = rose_sk(sk); rose_state1_machine() 45 rose_stop_timer(sk); rose_state1_machine() 46 rose_start_idletimer(sk); rose_state1_machine() 53 sk->sk_state = TCP_ESTABLISHED; rose_state1_machine() 54 if (!sock_flag(sk, SOCK_DEAD)) rose_state1_machine() 55 sk->sk_state_change(sk); rose_state1_machine() 59 rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); rose_state1_machine() 60 rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); rose_state1_machine() 76 static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) rose_state2_machine() argument 78 struct rose_sock *rose = rose_sk(sk); rose_state2_machine() 82 rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); rose_state2_machine() 83 rose_disconnect(sk, 0, skb->data[3], skb->data[4]); rose_state2_machine() 88 rose_disconnect(sk, 0, -1, -1); rose_state2_machine() 104 static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m) rose_state3_machine() argument 106 struct rose_sock *rose = rose_sk(sk); rose_state3_machine() 111 rose_stop_timer(sk); rose_state3_machine() 112 rose_start_idletimer(sk); rose_state3_machine() 113 rose_write_internal(sk, ROSE_RESET_CONFIRMATION); rose_state3_machine() 119 rose_requeue_frames(sk); rose_state3_machine() 123 rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); rose_state3_machine() 124 rose_disconnect(sk, 0, skb->data[3], skb->data[4]); rose_state3_machine() 130 if (!rose_validate_nr(sk, nr)) { rose_state3_machine() 131 rose_write_internal(sk, ROSE_RESET_REQUEST); rose_state3_machine() 138 rose_start_t2timer(sk); rose_state3_machine() 139 rose_stop_idletimer(sk); rose_state3_machine() 141 rose_frames_acked(sk, nr); rose_state3_machine() 152 if (!rose_validate_nr(sk, nr)) { rose_state3_machine() 153 rose_write_internal(sk, ROSE_RESET_REQUEST); rose_state3_machine() 160 rose_start_t2timer(sk); rose_state3_machine() 161 rose_stop_idletimer(sk); rose_state3_machine() 164 rose_frames_acked(sk, nr); rose_state3_machine() 166 rose_start_idletimer(sk); rose_state3_machine() 167 if (sock_queue_rcv_skb(sk, skb) == 0) { rose_state3_machine() 172 rose_write_internal(sk, ROSE_RESET_REQUEST); rose_state3_machine() 179 rose_start_t2timer(sk); rose_state3_machine() 180 rose_stop_idletimer(sk); rose_state3_machine() 183 if (atomic_read(&sk->sk_rmem_alloc) > rose_state3_machine() 184 (sk->sk_rcvbuf >> 1)) rose_state3_machine() 193 rose_stop_timer(sk); rose_state3_machine() 194 rose_enquiry_response(sk); rose_state3_machine() 197 rose_start_hbtimer(sk); rose_state3_machine() 214 static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype) rose_state4_machine() argument 216 struct rose_sock *rose = rose_sk(sk); rose_state4_machine() 220 rose_write_internal(sk, ROSE_RESET_CONFIRMATION); rose_state4_machine() 222 rose_stop_timer(sk); rose_state4_machine() 223 rose_start_idletimer(sk); rose_state4_machine() 230 rose_requeue_frames(sk); rose_state4_machine() 234 rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); rose_state4_machine() 235 rose_disconnect(sk, 0, skb->data[3], skb->data[4]); rose_state4_machine() 251 static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype) rose_state5_machine() argument 254 rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); rose_state5_machine() 255 rose_disconnect(sk, 0, skb->data[3], skb->data[4]); 
rose_state5_machine() 256 rose_sk(sk)->neighbour->use--; rose_state5_machine() 263 int rose_process_rx_frame(struct sock *sk, struct sk_buff *skb) rose_process_rx_frame() argument 265 struct rose_sock *rose = rose_sk(sk); rose_process_rx_frame() 275 queued = rose_state1_machine(sk, skb, frametype); rose_process_rx_frame() 278 queued = rose_state2_machine(sk, skb, frametype); rose_process_rx_frame() 281 queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m); rose_process_rx_frame() 284 queued = rose_state4_machine(sk, skb, frametype); rose_process_rx_frame() 287 queued = rose_state5_machine(sk, skb, frametype); rose_process_rx_frame() 291 rose_kick(sk); rose_process_rx_frame()
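rose_state3_machine() above also implements receive-side flow control: after an in-sequence I frame is queued, the socket marks itself busy once its receive allocation crosses half of sk_rcvbuf, so subsequent acknowledgements go out as RNR rather than RR. A sketch of just that decision, reshaped into a helper for clarity:

    #include <net/sock.h>

    static bool rx_busy_after_queue(struct sock *sk, struct sk_buff *skb)
    {
            if (sock_queue_rcv_skb(sk, skb) != 0)
                    return true;            /* could not even queue: busy now */
            return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
    }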
|
H A D | af_rose.c | 154 static void rose_remove_socket(struct sock *sk) rose_remove_socket() argument 157 sk_del_node_init(sk); rose_remove_socket() 232 static void rose_insert_socket(struct sock *sk) rose_insert_socket() argument 236 sk_add_node(sk, &rose_list); rose_insert_socket() 331 void rose_destroy_socket(struct sock *sk) rose_destroy_socket() argument 335 rose_remove_socket(sk); rose_destroy_socket() 336 rose_stop_heartbeat(sk); rose_destroy_socket() 337 rose_stop_idletimer(sk); rose_destroy_socket() 338 rose_stop_timer(sk); rose_destroy_socket() 340 rose_clear_queues(sk); /* Flush the queues */ rose_destroy_socket() 342 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { rose_destroy_socket() 343 if (skb->sk != sk) { /* A pending connection */ rose_destroy_socket() 345 sock_set_flag(skb->sk, SOCK_DEAD); rose_destroy_socket() 346 rose_start_heartbeat(skb->sk); rose_destroy_socket() 347 rose_sk(skb->sk)->state = ROSE_STATE_0; rose_destroy_socket() 353 if (sk_has_allocations(sk)) { rose_destroy_socket() 355 setup_timer(&sk->sk_timer, rose_destroy_timer, rose_destroy_socket() 356 (unsigned long)sk); rose_destroy_socket() 357 sk->sk_timer.expires = jiffies + 10 * HZ; rose_destroy_socket() 358 add_timer(&sk->sk_timer); rose_destroy_socket() 360 sock_put(sk); rose_destroy_socket() 371 struct sock *sk = sock->sk; rose_setsockopt() local 372 struct rose_sock *rose = rose_sk(sk); rose_setsockopt() 431 struct sock *sk = sock->sk; rose_getsockopt() local 432 struct rose_sock *rose = rose_sk(sk); rose_getsockopt() 488 struct sock *sk = sock->sk; rose_listen() local 490 if (sk->sk_state != TCP_LISTEN) { rose_listen() 491 struct rose_sock *rose = rose_sk(sk); rose_listen() 497 sk->sk_max_ack_backlog = backlog; rose_listen() 498 sk->sk_state = TCP_LISTEN; rose_listen() 514 struct sock *sk; rose_create() local 523 sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto); rose_create() 524 if (sk == NULL) rose_create() 527 rose = rose_sk(sk); rose_create() 529 sock_init_data(sock, sk); rose_create() 538 sk->sk_protocol = protocol; rose_create() 556 struct sock *sk; rose_make_new() local 562 sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto); rose_make_new() 563 if (sk == NULL) rose_make_new() 566 rose = rose_sk(sk); rose_make_new() 568 sock_init_data(NULL, sk); rose_make_new() 576 sk->sk_type = osk->sk_type; rose_make_new() 577 sk->sk_priority = osk->sk_priority; rose_make_new() 578 sk->sk_protocol = osk->sk_protocol; rose_make_new() 579 sk->sk_rcvbuf = osk->sk_rcvbuf; rose_make_new() 580 sk->sk_sndbuf = osk->sk_sndbuf; rose_make_new() 581 sk->sk_state = TCP_ESTABLISHED; rose_make_new() 582 sock_copy_flags(sk, osk); rose_make_new() 597 return sk; rose_make_new() 602 struct sock *sk = sock->sk; rose_release() local 605 if (sk == NULL) return 0; rose_release() 607 sock_hold(sk); rose_release() 608 sock_orphan(sk); rose_release() 609 lock_sock(sk); rose_release() 610 rose = rose_sk(sk); rose_release() 614 release_sock(sk); rose_release() 615 rose_disconnect(sk, 0, -1, -1); rose_release() 616 lock_sock(sk); rose_release() 617 rose_destroy_socket(sk); rose_release() 622 release_sock(sk); rose_release() 623 rose_disconnect(sk, 0, -1, -1); rose_release() 624 lock_sock(sk); rose_release() 625 rose_destroy_socket(sk); rose_release() 632 rose_clear_queues(sk); rose_release() 633 rose_stop_idletimer(sk); rose_release() 634 rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_release() 635 rose_start_t3timer(sk); rose_release() 637 sk->sk_state = TCP_CLOSE; rose_release() 638 sk->sk_shutdown |= 
SEND_SHUTDOWN; rose_release() 639 sk->sk_state_change(sk); rose_release() 640 sock_set_flag(sk, SOCK_DEAD); rose_release() 641 sock_set_flag(sk, SOCK_DESTROY); rose_release() 648 sock->sk = NULL; rose_release() 649 release_sock(sk); rose_release() 650 sock_put(sk); rose_release() 657 struct sock *sk = sock->sk; rose_bind() local 658 struct rose_sock *rose = rose_sk(sk); rose_bind() 665 if (!sock_flag(sk, SOCK_ZAPPED)) rose_bind() 709 rose_insert_socket(sk); rose_bind() 711 sock_reset_flag(sk, SOCK_ZAPPED); rose_bind() 718 struct sock *sk = sock->sk; rose_connect() local 719 struct rose_sock *rose = rose_sk(sk); rose_connect() 742 lock_sock(sk); rose_connect() 744 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { rose_connect() 750 if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { rose_connect() 756 if (sk->sk_state == TCP_ESTABLISHED) { rose_connect() 762 sk->sk_state = TCP_CLOSE; rose_connect() 778 if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ rose_connect() 779 sock_reset_flag(sk, SOCK_ZAPPED); rose_connect() 797 rose_insert_socket(sk); /* Finish the bind */ rose_connect() 816 sk->sk_state = TCP_SYN_SENT; rose_connect() 822 rose_write_internal(sk, ROSE_CALL_REQUEST); rose_connect() 823 rose_start_heartbeat(sk); rose_connect() 824 rose_start_t1timer(sk); rose_connect() 827 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { rose_connect() 836 if (sk->sk_state == TCP_SYN_SENT) { rose_connect() 840 prepare_to_wait(sk_sleep(sk), &wait, rose_connect() 842 if (sk->sk_state != TCP_SYN_SENT) rose_connect() 845 release_sock(sk); rose_connect() 847 lock_sock(sk); rose_connect() 853 finish_wait(sk_sleep(sk), &wait); rose_connect() 859 if (sk->sk_state != TCP_ESTABLISHED) { rose_connect() 861 err = sock_error(sk); /* Always set at this point */ rose_connect() 868 release_sock(sk); rose_connect() 878 struct sock *sk; rose_accept() local 881 if ((sk = sock->sk) == NULL) rose_accept() 884 lock_sock(sk); rose_accept() 885 if (sk->sk_type != SOCK_SEQPACKET) { rose_accept() 890 if (sk->sk_state != TCP_LISTEN) { rose_accept() 900 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); rose_accept() 902 skb = skb_dequeue(&sk->sk_receive_queue); rose_accept() 911 release_sock(sk); rose_accept() 913 lock_sock(sk); rose_accept() 919 finish_wait(sk_sleep(sk), &wait); rose_accept() 923 newsk = skb->sk; rose_accept() 927 skb->sk = NULL; rose_accept() 929 sk->sk_ack_backlog--; rose_accept() 932 release_sock(sk); rose_accept() 941 struct sock *sk = sock->sk; rose_getname() local 942 struct rose_sock *rose = rose_sk(sk); rose_getname() 947 if (sk->sk_state != TCP_ESTABLISHED) rose_getname() 970 struct sock *sk; rose_rx_call_request() local 976 skb->sk = NULL; /* Initially we don't know who it's for */ rose_rx_call_request() 990 sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); rose_rx_call_request() 995 if (sk == NULL || sk_acceptq_is_full(sk) || rose_rx_call_request() 996 (make = rose_make_new(sk)) == NULL) { rose_rx_call_request() 1001 skb->sk = make; rose_rx_call_request() 1022 if (rose_sk(sk)->defer) { rose_rx_call_request() 1035 sk->sk_ack_backlog++; rose_rx_call_request() 1039 skb_queue_head(&sk->sk_receive_queue, skb); rose_rx_call_request() 1043 if (!sock_flag(sk, SOCK_DEAD)) rose_rx_call_request() 1044 sk->sk_data_ready(sk); rose_rx_call_request() 1051 struct sock *sk = sock->sk; rose_sendmsg() local 1052 struct rose_sock *rose = rose_sk(sk); rose_sendmsg() 1063 if (sock_flag(sk, 
SOCK_ZAPPED)) rose_sendmsg() 1066 if (sk->sk_shutdown & SEND_SHUTDOWN) { rose_sendmsg() 1093 if (sk->sk_state != TCP_ESTABLISHED) rose_sendmsg() 1111 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) rose_sendmsg() 1151 if (sk->sk_state != TCP_ESTABLISHED) { rose_sendmsg() 1171 if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) { rose_sendmsg() 1176 skbn->sk = sk; rose_sendmsg() 1195 skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ rose_sendmsg() 1201 skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ rose_sendmsg() 1204 skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */ rose_sendmsg() 1207 rose_kick(sk); rose_sendmsg() 1216 struct sock *sk = sock->sk; rose_recvmsg() local 1217 struct rose_sock *rose = rose_sk(sk); rose_recvmsg() 1227 if (sk->sk_state != TCP_ESTABLISHED) rose_recvmsg() 1231 if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) rose_recvmsg() 1269 skb_free_datagram(sk, skb); rose_recvmsg() 1277 struct sock *sk = sock->sk; rose_ioctl() local 1278 struct rose_sock *rose = rose_sk(sk); rose_ioctl() 1285 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); rose_ioctl() 1295 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) rose_ioctl() 1301 return sock_get_timestamp(sk, (struct timeval __user *) argp); rose_ioctl() 1304 return sock_get_timestampns(sk, (struct timespec __user *) argp); rose_ioctl() 1357 rose_write_internal(sk, ROSE_CALL_ACCEPTED); rose_ioctl() 1358 rose_start_idletimer(sk); rose_ioctl()
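rose_destroy_socket() above cannot free a socket that still has buffers accounted to it, so it re-arms sk_timer and retries the destroy ten seconds later. A sketch of that defer-if-busy teardown; destroy_retry is an illustrative callback name:

    #include <linux/timer.h>
    #include <net/sock.h>

    static void destroy_or_defer(struct sock *sk,
                                 void (*destroy_retry)(unsigned long))
    {
            if (sk_has_allocations(sk)) {
                    setup_timer(&sk->sk_timer, destroy_retry, (unsigned long)sk);
                    sk->sk_timer.expires = jiffies + 10 * HZ;
                    add_timer(&sk->sk_timer);       /* try again later */
            } else {
                    sock_put(sk);                   /* drop the final reference */
            }
    }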
|
H A D | rose_out.c | 33 static void rose_send_iframe(struct sock *sk, struct sk_buff *skb) rose_send_iframe() argument 35 struct rose_sock *rose = rose_sk(sk); rose_send_iframe() 43 rose_start_idletimer(sk); rose_send_iframe() 48 void rose_kick(struct sock *sk) rose_kick() argument 50 struct rose_sock *rose = rose_sk(sk); rose_kick() 60 if (!skb_peek(&sk->sk_write_queue)) rose_kick() 76 skb = skb_dequeue(&sk->sk_write_queue); rose_kick() 80 skb_queue_head(&sk->sk_write_queue, skb); rose_kick() 84 skb_set_owner_w(skbn, sk); rose_kick() 89 rose_send_iframe(sk, skbn); rose_kick() 99 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); rose_kick() 104 rose_stop_timer(sk); rose_kick() 112 void rose_enquiry_response(struct sock *sk) rose_enquiry_response() argument 114 struct rose_sock *rose = rose_sk(sk); rose_enquiry_response() 117 rose_write_internal(sk, ROSE_RNR); rose_enquiry_response() 119 rose_write_internal(sk, ROSE_RR); rose_enquiry_response() 124 rose_stop_timer(sk); rose_enquiry_response()
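rose_kick() transmits a clone of each queued frame and keeps the original until the peer acknowledges it, which is what makes retransmission possible. A sketch of one iteration of that clone-and-send loop; the actual transmit call is elided, and moving the original to the ack queue reflects how rose_frames_acked()/rose_requeue_frames() consume it:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void kick_one(struct sock *sk, struct sk_buff_head *ack_queue)
    {
            struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);
            struct sk_buff *skbn;

            if (!skb)
                    return;
            skbn = skb_clone(skb, GFP_ATOMIC);
            if (!skbn) {
                    skb_queue_head(&sk->sk_write_queue, skb);  /* retry later */
                    return;
            }
            skb_set_owner_w(skbn, sk);
            /* ... hand skbn to the link layer here ... */
            skb_queue_tail(ack_queue, skb);         /* original awaits the ack */
    }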
|
H A D | rose_subr.c | 35 void rose_clear_queues(struct sock *sk) rose_clear_queues() argument 37 skb_queue_purge(&sk->sk_write_queue); rose_clear_queues() 38 skb_queue_purge(&rose_sk(sk)->ack_queue); rose_clear_queues() 46 void rose_frames_acked(struct sock *sk, unsigned short nr) rose_frames_acked() argument 49 struct rose_sock *rose = rose_sk(sk); rose_frames_acked() 63 void rose_requeue_frames(struct sock *sk) rose_requeue_frames() argument 72 while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) { rose_requeue_frames() 74 skb_queue_head(&sk->sk_write_queue, skb); rose_requeue_frames() 76 skb_append(skb_prev, skb, &sk->sk_write_queue); rose_requeue_frames() 85 int rose_validate_nr(struct sock *sk, unsigned short nr) rose_validate_nr() argument 87 struct rose_sock *rose = rose_sk(sk); rose_validate_nr() 102 void rose_write_internal(struct sock *sk, int frametype) rose_write_internal() argument 104 struct rose_sock *rose = rose_sk(sk); rose_write_internal() 530 void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic) rose_disconnect() argument 532 struct rose_sock *rose = rose_sk(sk); rose_disconnect() 534 rose_stop_timer(sk); rose_disconnect() 535 rose_stop_idletimer(sk); rose_disconnect() 537 rose_clear_queues(sk); rose_disconnect() 548 sk->sk_state = TCP_CLOSE; rose_disconnect() 549 sk->sk_err = reason; rose_disconnect() 550 sk->sk_shutdown |= SEND_SHUTDOWN; rose_disconnect() 552 if (!sock_flag(sk, SOCK_DEAD)) { rose_disconnect() 553 sk->sk_state_change(sk); rose_disconnect() 554 sock_set_flag(sk, SOCK_DEAD); rose_disconnect()
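rose_requeue_frames() has to put unacknowledged frames back at the front of the write queue without reversing their order, hence the first frame goes to the head and every later one is appended after its predecessor. The same shape, isolated:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void requeue_frames(struct sock *sk, struct sk_buff_head *ackq)
    {
            struct sk_buff *skb, *skb_prev = NULL;

            while ((skb = skb_dequeue(ackq)) != NULL) {
                    if (!skb_prev)
                            skb_queue_head(&sk->sk_write_queue, skb);
                    else
                            skb_append(skb_prev, skb, &sk->sk_write_queue);
                    skb_prev = skb;
            }
    }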
|
/linux-4.1.27/sound/usb/usx2y/ |
H A D | usb_stream.c | 27 static unsigned usb_stream_next_packet_size(struct usb_stream_kernel *sk) usb_stream_next_packet_size() argument 29 struct usb_stream *s = sk->s; usb_stream_next_packet_size() 30 sk->out_phase_peeked = (sk->out_phase & 0xffff) + sk->freqn; usb_stream_next_packet_size() 31 return (sk->out_phase_peeked >> 16) * s->cfg.frame_size; usb_stream_next_packet_size() 34 static void playback_prep_freqn(struct usb_stream_kernel *sk, struct urb *urb) playback_prep_freqn() argument 36 struct usb_stream *s = sk->s; playback_prep_freqn() 39 for (pack = 0; pack < sk->n_o_ps; pack++) { playback_prep_freqn() 40 int l = usb_stream_next_packet_size(sk); playback_prep_freqn() 44 sk->out_phase = sk->out_phase_peeked; playback_prep_freqn() 59 static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize, init_pipe_urbs() argument 66 int transfer_length = maxpacket * sk->n_o_ps; init_pipe_urbs() 75 urb->number_of_packets = sk->n_o_ps; init_pipe_urbs() 76 urb->context = sk; init_pipe_urbs() 85 for (p = 1; p < sk->n_o_ps; ++p) { init_pipe_urbs() 92 static void init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize, init_urbs() argument 95 struct usb_stream *s = sk->s; init_urbs() 102 sk->inurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL); init_urbs() 103 sk->outurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL); init_urbs() 106 init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe); init_urbs() 107 init_pipe_urbs(sk, use_packsize, sk->outurb, sk->write_page, dev, init_urbs() 130 void usb_stream_free(struct usb_stream_kernel *sk) usb_stream_free() argument 136 usb_free_urb(sk->inurb[u]); usb_stream_free() 137 sk->inurb[u] = NULL; usb_stream_free() 138 usb_free_urb(sk->outurb[u]); usb_stream_free() 139 sk->outurb[u] = NULL; usb_stream_free() 142 s = sk->s; usb_stream_free() 146 free_pages((unsigned long)sk->write_page, get_order(s->write_size)); usb_stream_free() 147 sk->write_page = NULL; usb_stream_free() 149 sk->s = NULL; usb_stream_free() 152 struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk, usb_stream_new() argument 194 sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); usb_stream_new() 195 if (!sk->s) { usb_stream_new() 199 sk->s->cfg.version = USB_STREAM_INTERFACE_VERSION; usb_stream_new() 201 sk->s->read_size = read_size; usb_stream_new() 203 sk->s->cfg.sample_rate = sample_rate; usb_stream_new() 204 sk->s->cfg.frame_size = frame_size; usb_stream_new() 205 sk->n_o_ps = packets; usb_stream_new() 206 sk->s->inpackets = packets * USB_STREAM_URBDEPTH; usb_stream_new() 207 sk->s->cfg.period_frames = period_frames; usb_stream_new() 208 sk->s->period_size = frame_size * period_frames; usb_stream_new() 210 sk->s->write_size = write_size; usb_stream_new() 213 sk->write_page = usb_stream_new() 215 if (!sk->write_page) { usb_stream_new() 217 usb_stream_free(sk); usb_stream_new() 223 sk->freqn = get_usb_full_speed_rate(sample_rate); usb_stream_new() 225 sk->freqn = get_usb_high_speed_rate(sample_rate); usb_stream_new() 227 init_urbs(sk, use_packsize, dev, in_pipe, out_pipe); usb_stream_new() 228 sk->s->state = usb_stream_stopped; usb_stream_new() 230 return sk->s; usb_stream_new() 236 static bool balance_check(struct usb_stream_kernel *sk, struct urb *urb) balance_check() argument 242 sk->iso_frame_balance = 0x7FFFFFFF; balance_check() 245 r = sk->iso_frame_balance == 0; balance_check() 247 sk->i_urb = urb; balance_check() 251 static bool balance_playback(struct usb_stream_kernel *sk, struct urb *urb) balance_playback() argument 253 
sk->iso_frame_balance += urb->number_of_packets; balance_playback() 254 return balance_check(sk, urb); balance_playback() 257 static bool balance_capture(struct usb_stream_kernel *sk, struct urb *urb) balance_capture() argument 259 sk->iso_frame_balance -= urb->number_of_packets; balance_capture() 260 return balance_check(sk, urb); balance_capture() 273 static int usb_stream_prepare_playback(struct usb_stream_kernel *sk, usb_stream_prepare_playback() argument 276 struct usb_stream *s = sk->s; usb_stream_prepare_playback() 281 io = sk->idle_outurb; usb_stream_prepare_playback() 285 struct urb *ii = sk->completed_inurb; usb_stream_prepare_playback() 296 s->sync_packet < inurb->number_of_packets && p < sk->n_o_ps; usb_stream_prepare_playback() 349 static int submit_urbs(struct usb_stream_kernel *sk, submit_urbs() argument 353 prepare_inurb(sk->idle_outurb->number_of_packets, sk->idle_inurb); submit_urbs() 354 err = usb_submit_urb(sk->idle_inurb, GFP_ATOMIC); submit_urbs() 359 sk->idle_inurb = sk->completed_inurb; submit_urbs() 360 sk->completed_inurb = inurb; submit_urbs() 361 err = usb_submit_urb(sk->idle_outurb, GFP_ATOMIC); submit_urbs() 366 sk->idle_outurb = sk->completed_outurb; submit_urbs() 367 sk->completed_outurb = outurb; submit_urbs() 387 iu = sk->idle_inurb; loop_back() 412 if (iu == sk->completed_inurb) { loop_back() 420 iu = sk->completed_inurb; loop_back() 432 static void stream_idle(struct usb_stream_kernel *sk, stream_idle() argument 435 struct usb_stream *s = sk->s; stream_idle() 478 s->outpacket[0].offset = (sk->idle_outurb->transfer_buffer - stream_idle() 479 sk->write_page) - l; stream_idle() 481 if (usb_stream_prepare_playback(sk, inurb) < 0) stream_idle() 484 s->outpacket[0].length = sk->idle_outurb->transfer_buffer_length + l; stream_idle() 485 s->outpacket[1].offset = sk->completed_outurb->transfer_buffer - stream_idle() 486 sk->write_page; stream_idle() 488 if (submit_urbs(sk, inurb, outurb) < 0) stream_idle() 493 wake_up_all(&sk->sleep); stream_idle() 497 wake_up_all(&sk->sleep); stream_idle() 502 struct usb_stream_kernel *sk = urb->context; i_capture_idle() local 503 if (balance_capture(sk, urb)) i_capture_idle() 504 stream_idle(sk, urb, sk->i_urb); i_capture_idle() 509 struct usb_stream_kernel *sk = urb->context; i_playback_idle() local 510 if (balance_playback(sk, urb)) i_playback_idle() 511 stream_idle(sk, sk->i_urb, urb); i_playback_idle() 514 static void stream_start(struct usb_stream_kernel *sk, stream_start() argument 517 struct usb_stream *s = sk->s; stream_start() 577 if (usb_stream_prepare_playback(sk, inurb) < 0) stream_start() 581 playback_prep_freqn(sk, sk->idle_outurb); stream_start() 583 if (submit_urbs(sk, inurb, outurb) < 0) stream_start() 589 subs_set_complete(sk->inurb, i_capture_idle); stream_start() 590 subs_set_complete(sk->outurb, i_playback_idle); stream_start() 597 struct usb_stream_kernel *sk = urb->context; i_capture_start() local 598 struct usb_stream *s = sk->s; i_capture_start() 636 if (balance_capture(sk, urb)) i_capture_start() 637 stream_start(sk, urb, sk->i_urb); i_capture_start() 642 struct usb_stream_kernel *sk = urb->context; i_playback_start() local 643 if (balance_playback(sk, urb)) i_playback_start() 644 stream_start(sk, sk->i_urb, urb); i_playback_start() 647 int usb_stream_start(struct usb_stream_kernel *sk) usb_stream_start() argument 649 struct usb_stream *s = sk->s; usb_stream_start() 657 subs_set_complete(sk->inurb, i_capture_start); usb_stream_start() 658 subs_set_complete(sk->outurb, i_playback_start); 
usb_stream_start() 659 memset(sk->write_page, 0, s->write_size); usb_stream_start() 666 sk->iso_frame_balance = 0; usb_stream_start() 669 struct urb *inurb = sk->inurb[u]; usb_stream_start() 670 struct urb *outurb = sk->outurb[u]; usb_stream_start() 671 playback_prep_freqn(sk, outurb); usb_stream_start() 688 snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])" usb_stream_start() 694 snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])" usb_stream_start() 710 usb_stream_stop(sk); usb_stream_start() 721 sk->idle_inurb = sk->inurb[USB_STREAM_NURBS - 2]; usb_stream_start() 722 sk->idle_outurb = sk->outurb[USB_STREAM_NURBS - 2]; usb_stream_start() 723 sk->completed_inurb = sk->inurb[USB_STREAM_NURBS - 1]; usb_stream_start() 724 sk->completed_outurb = sk->outurb[USB_STREAM_NURBS - 1]; usb_stream_start() 742 void usb_stream_stop(struct usb_stream_kernel *sk) usb_stream_stop() argument 745 if (!sk->s) usb_stream_stop() 748 usb_kill_urb(sk->inurb[u]); usb_stream_stop() 749 usb_kill_urb(sk->outurb[u]); usb_stream_stop() 751 sk->s->state = usb_stream_stopped; usb_stream_stop()
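The i_capture_*/i_playback_* completions above pair capture and playback URBs by keeping a signed frame balance: playback completions add their packet count, capture completions subtract theirs, and the stream advances only when the balance returns to zero (the error path that poisons the balance with 0x7FFFFFFF is glossed over here). A self-contained sketch of that pairing logic:

    #include <linux/types.h>

    struct frame_balance {
            int frames;                     /* signed in-flight difference */
    };

    static bool playback_completed(struct frame_balance *b, int packets)
    {
            b->frames += packets;
            return b->frames == 0;          /* capture side already done */
    }

    static bool capture_completed(struct frame_balance *b, int packets)
    {
            b->frames -= packets;
            return b->frames == 0;          /* playback side already done */
    }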
|
/linux-4.1.27/net/bluetooth/ |
H A D | sco.c | 48 struct sock *sk; member in struct:sco_conn 56 static void sco_sock_close(struct sock *sk); 57 static void sco_sock_kill(struct sock *sk); 60 #define sco_pi(sk) ((struct sco_pinfo *) sk) 77 struct sock *sk = (struct sock *) arg; sco_sock_timeout() local 79 BT_DBG("sock %p state %d", sk, sk->sk_state); sco_sock_timeout() 81 bh_lock_sock(sk); sco_sock_timeout() 82 sk->sk_err = ETIMEDOUT; sco_sock_timeout() 83 sk->sk_state_change(sk); sco_sock_timeout() 84 bh_unlock_sock(sk); sco_sock_timeout() 86 sco_sock_kill(sk); sco_sock_timeout() 87 sock_put(sk); sco_sock_timeout() 90 static void sco_sock_set_timer(struct sock *sk, long timeout) sco_sock_set_timer() argument 92 BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); sco_sock_set_timer() 93 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout); sco_sock_set_timer() 96 static void sco_sock_clear_timer(struct sock *sk) sco_sock_clear_timer() argument 98 BT_DBG("sock %p state %d", sk, sk->sk_state); sco_sock_clear_timer() 99 sk_stop_timer(sk, &sk->sk_timer); sco_sock_clear_timer() 132 static void sco_chan_del(struct sock *sk, int err) sco_chan_del() argument 136 conn = sco_pi(sk)->conn; sco_chan_del() 138 BT_DBG("sk %p, conn %p, err %d", sk, conn, err); sco_chan_del() 142 conn->sk = NULL; sco_chan_del() 143 sco_pi(sk)->conn = NULL; sco_chan_del() 150 sk->sk_state = BT_CLOSED; sco_chan_del() 151 sk->sk_err = err; sco_chan_del() 152 sk->sk_state_change(sk); sco_chan_del() 154 sock_set_flag(sk, SOCK_ZAPPED); sco_chan_del() 160 struct sock *sk; sco_conn_del() local 169 sk = conn->sk; sco_conn_del() 172 if (sk) { sco_conn_del() 173 bh_lock_sock(sk); sco_conn_del() 174 sco_sock_clear_timer(sk); sco_conn_del() 175 sco_chan_del(sk, err); sco_conn_del() 176 bh_unlock_sock(sk); sco_conn_del() 177 sco_sock_kill(sk); sco_conn_del() 185 static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) __sco_chan_add() argument 189 sco_pi(sk)->conn = conn; __sco_chan_add() 190 conn->sk = sk; __sco_chan_add() 193 bt_accept_enqueue(parent, sk); __sco_chan_add() 196 static int sco_chan_add(struct sco_conn *conn, struct sock *sk, sco_chan_add() argument 202 if (conn->sk) sco_chan_add() 205 __sco_chan_add(conn, sk, parent); sco_chan_add() 211 static int sco_connect(struct sock *sk) sco_connect() argument 218 BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); sco_connect() 220 hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src); sco_connect() 231 if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && sco_connect() 237 hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, sco_connect() 238 sco_pi(sk)->setting); sco_connect() 252 bacpy(&sco_pi(sk)->src, &hcon->src); sco_connect() 254 err = sco_chan_add(conn, sk, NULL); sco_connect() 259 sco_sock_clear_timer(sk); sco_connect() 260 sk->sk_state = BT_CONNECTED; sco_connect() 262 sk->sk_state = BT_CONNECT; sco_connect() 263 sco_sock_set_timer(sk, sk->sk_sndtimeo); sco_connect() 272 static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) sco_send_frame() argument 274 struct sco_conn *conn = sco_pi(sk)->conn; sco_send_frame() 282 BT_DBG("sk %p len %d", sk, len); sco_send_frame() 284 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); sco_send_frame() 300 struct sock *sk; sco_recv_frame() local 303 sk = conn->sk; sco_recv_frame() 306 if (!sk) sco_recv_frame() 309 BT_DBG("sk %p len %d", sk, skb->len); sco_recv_frame() 311 if (sk->sk_state != BT_CONNECTED) sco_recv_frame() 314 if (!sock_queue_rcv_skb(sk, skb)) sco_recv_frame() 
324 struct sock *sk; __sco_get_sock_listen_by_addr() local 326 sk_for_each(sk, &sco_sk_list.head) { __sco_get_sock_listen_by_addr() 327 if (sk->sk_state != BT_LISTEN) __sco_get_sock_listen_by_addr() 330 if (!bacmp(&sco_pi(sk)->src, ba)) __sco_get_sock_listen_by_addr() 331 return sk; __sco_get_sock_listen_by_addr() 342 struct sock *sk = NULL, *sk1 = NULL; sco_get_sock_listen() local 346 sk_for_each(sk, &sco_sk_list.head) { sco_get_sock_listen() 347 if (sk->sk_state != BT_LISTEN) sco_get_sock_listen() 351 if (!bacmp(&sco_pi(sk)->src, src)) sco_get_sock_listen() 355 if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY)) sco_get_sock_listen() 356 sk1 = sk; sco_get_sock_listen() 361 return sk ? sk : sk1; sco_get_sock_listen() 364 static void sco_sock_destruct(struct sock *sk) sco_sock_destruct() argument 366 BT_DBG("sk %p", sk); sco_sock_destruct() 368 skb_queue_purge(&sk->sk_receive_queue); sco_sock_destruct() 369 skb_queue_purge(&sk->sk_write_queue); sco_sock_destruct() 374 struct sock *sk; sco_sock_cleanup_listen() local 379 while ((sk = bt_accept_dequeue(parent, NULL))) { sco_sock_cleanup_listen() 380 sco_sock_close(sk); sco_sock_cleanup_listen() 381 sco_sock_kill(sk); sco_sock_cleanup_listen() 391 static void sco_sock_kill(struct sock *sk) sco_sock_kill() argument 393 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) sco_sock_kill() 396 BT_DBG("sk %p state %d", sk, sk->sk_state); sco_sock_kill() 399 bt_sock_unlink(&sco_sk_list, sk); sco_sock_kill() 400 sock_set_flag(sk, SOCK_DEAD); sco_sock_kill() 401 sock_put(sk); sco_sock_kill() 404 static void __sco_sock_close(struct sock *sk) __sco_sock_close() argument 406 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); __sco_sock_close() 408 switch (sk->sk_state) { __sco_sock_close() 410 sco_sock_cleanup_listen(sk); __sco_sock_close() 415 if (sco_pi(sk)->conn->hcon) { __sco_sock_close() 416 sk->sk_state = BT_DISCONN; __sco_sock_close() 417 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); __sco_sock_close() 418 hci_conn_drop(sco_pi(sk)->conn->hcon); __sco_sock_close() 419 sco_pi(sk)->conn->hcon = NULL; __sco_sock_close() 421 sco_chan_del(sk, ECONNRESET); __sco_sock_close() 427 sco_chan_del(sk, ECONNRESET); __sco_sock_close() 431 sock_set_flag(sk, SOCK_ZAPPED); __sco_sock_close() 437 static void sco_sock_close(struct sock *sk) sco_sock_close() argument 439 sco_sock_clear_timer(sk); sco_sock_close() 440 lock_sock(sk); sco_sock_close() 441 __sco_sock_close(sk); sco_sock_close() 442 release_sock(sk); sco_sock_close() 443 sco_sock_kill(sk); sco_sock_close() 446 static void sco_sock_init(struct sock *sk, struct sock *parent) sco_sock_init() argument 448 BT_DBG("sk %p", sk); sco_sock_init() 451 sk->sk_type = parent->sk_type; sco_sock_init() 452 bt_sk(sk)->flags = bt_sk(parent)->flags; sco_sock_init() 453 security_sk_clone(parent, sk); sco_sock_init() 465 struct sock *sk; sco_sock_alloc() local 467 sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto); sco_sock_alloc() 468 if (!sk) sco_sock_alloc() 471 sock_init_data(sock, sk); sco_sock_alloc() 472 INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sco_sock_alloc() 474 sk->sk_destruct = sco_sock_destruct; sco_sock_alloc() 475 sk->sk_sndtimeo = SCO_CONN_TIMEOUT; sco_sock_alloc() 477 sock_reset_flag(sk, SOCK_ZAPPED); sco_sock_alloc() 479 sk->sk_protocol = proto; sco_sock_alloc() 480 sk->sk_state = BT_OPEN; sco_sock_alloc() 482 sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; sco_sock_alloc() 484 setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk); sco_sock_alloc() 486 bt_sock_link(&sco_sk_list, sk); sco_sock_alloc() 
487 return sk; sco_sock_alloc() 493 struct sock *sk; sco_sock_create() local 504 sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC); sco_sock_create() 505 if (!sk) sco_sock_create() 508 sco_sock_init(sk, NULL); sco_sock_create() 515 struct sock *sk = sock->sk; sco_sock_bind() local 518 BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); sco_sock_bind() 526 lock_sock(sk); sco_sock_bind() 528 if (sk->sk_state != BT_OPEN) { sco_sock_bind() 533 if (sk->sk_type != SOCK_SEQPACKET) { sco_sock_bind() 538 bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr); sco_sock_bind() 540 sk->sk_state = BT_BOUND; sco_sock_bind() 543 release_sock(sk); sco_sock_bind() 550 struct sock *sk = sock->sk; sco_sock_connect() local 553 BT_DBG("sk %p", sk); sco_sock_connect() 559 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) sco_sock_connect() 562 if (sk->sk_type != SOCK_SEQPACKET) sco_sock_connect() 565 lock_sock(sk); sco_sock_connect() 568 bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr); sco_sock_connect() 570 err = sco_connect(sk); sco_sock_connect() 574 err = bt_sock_wait_state(sk, BT_CONNECTED, sco_sock_connect() 575 sock_sndtimeo(sk, flags & O_NONBLOCK)); sco_sock_connect() 578 release_sock(sk); sco_sock_connect() 584 struct sock *sk = sock->sk; sco_sock_listen() local 585 bdaddr_t *src = &sco_pi(sk)->src; sco_sock_listen() 588 BT_DBG("sk %p backlog %d", sk, backlog); sco_sock_listen() 590 lock_sock(sk); sco_sock_listen() 592 if (sk->sk_state != BT_BOUND) { sco_sock_listen() 597 if (sk->sk_type != SOCK_SEQPACKET) { sco_sock_listen() 609 sk->sk_max_ack_backlog = backlog; sco_sock_listen() 610 sk->sk_ack_backlog = 0; sco_sock_listen() 612 sk->sk_state = BT_LISTEN; sco_sock_listen() 618 release_sock(sk); sco_sock_listen() 625 struct sock *sk = sock->sk, *ch; sco_sock_accept() local 629 lock_sock(sk); sco_sock_accept() 631 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); sco_sock_accept() 633 BT_DBG("sk %p timeo %ld", sk, timeo); sco_sock_accept() 636 add_wait_queue_exclusive(sk_sleep(sk), &wait); sco_sock_accept() 638 if (sk->sk_state != BT_LISTEN) { sco_sock_accept() 643 ch = bt_accept_dequeue(sk, newsock); sco_sock_accept() 657 release_sock(sk); sco_sock_accept() 660 lock_sock(sk); sco_sock_accept() 662 remove_wait_queue(sk_sleep(sk), &wait); sco_sock_accept() 672 release_sock(sk); sco_sock_accept() 679 struct sock *sk = sock->sk; sco_sock_getname() local 681 BT_DBG("sock %p, sk %p", sock, sk); sco_sock_getname() 687 bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst); sco_sock_getname() 689 bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src); sco_sock_getname() 697 struct sock *sk = sock->sk; sco_sock_sendmsg() local 700 BT_DBG("sock %p, sk %p", sock, sk); sco_sock_sendmsg() 702 err = sock_error(sk); sco_sock_sendmsg() 709 lock_sock(sk); sco_sock_sendmsg() 711 if (sk->sk_state == BT_CONNECTED) sco_sock_sendmsg() 712 err = sco_send_frame(sk, msg, len); sco_sock_sendmsg() 716 release_sock(sk); sco_sock_sendmsg() 767 struct sock *sk = sock->sk; sco_sock_recvmsg() local 768 struct sco_pinfo *pi = sco_pi(sk); sco_sock_recvmsg() 770 lock_sock(sk); sco_sock_recvmsg() 772 if (sk->sk_state == BT_CONNECT2 && sco_sock_recvmsg() 773 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sco_sock_recvmsg() 775 sk->sk_state = BT_CONFIG; sco_sock_recvmsg() 777 release_sock(sk); sco_sock_recvmsg() 781 release_sock(sk); sco_sock_recvmsg() 788 struct sock *sk = sock->sk; sco_sock_setsockopt() local 793 BT_DBG("sk %p", sk); sco_sock_setsockopt() 795 lock_sock(sk); sco_sock_setsockopt() 800 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { sco_sock_setsockopt() 811 
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); sco_sock_setsockopt() 813 clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); sco_sock_setsockopt() 817 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sco_sock_setsockopt() 818 sk->sk_state != BT_CONNECT2) { sco_sock_setsockopt() 823 voice.setting = sco_pi(sk)->setting; sco_sock_setsockopt() 838 sco_pi(sk)->setting = voice.setting; sco_sock_setsockopt() 846 release_sock(sk); sco_sock_setsockopt() 852 struct sock *sk = sock->sk; sco_sock_getsockopt_old() local 857 BT_DBG("sk %p", sk); sco_sock_getsockopt_old() 862 lock_sock(sk); sco_sock_getsockopt_old() 866 if (sk->sk_state != BT_CONNECTED && sco_sock_getsockopt_old() 867 !(sk->sk_state == BT_CONNECT2 && sco_sock_getsockopt_old() 868 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { sco_sock_getsockopt_old() 873 opts.mtu = sco_pi(sk)->conn->mtu; sco_sock_getsockopt_old() 884 if (sk->sk_state != BT_CONNECTED && sco_sock_getsockopt_old() 885 !(sk->sk_state == BT_CONNECT2 && sco_sock_getsockopt_old() 886 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { sco_sock_getsockopt_old() 892 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; sco_sock_getsockopt_old() 893 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); sco_sock_getsockopt_old() 906 release_sock(sk); sco_sock_getsockopt_old() 912 struct sock *sk = sock->sk; sco_sock_getsockopt() local 916 BT_DBG("sk %p", sk); sco_sock_getsockopt() 924 lock_sock(sk); sco_sock_getsockopt() 929 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { sco_sock_getsockopt() 934 if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), sco_sock_getsockopt() 941 voice.setting = sco_pi(sk)->setting; sco_sock_getsockopt() 954 release_sock(sk); sco_sock_getsockopt() 960 struct sock *sk = sock->sk; sco_sock_shutdown() local 963 BT_DBG("sock %p, sk %p", sock, sk); sco_sock_shutdown() 965 if (!sk) sco_sock_shutdown() 968 lock_sock(sk); sco_sock_shutdown() 969 if (!sk->sk_shutdown) { sco_sock_shutdown() 970 sk->sk_shutdown = SHUTDOWN_MASK; sco_sock_shutdown() 971 sco_sock_clear_timer(sk); sco_sock_shutdown() 972 __sco_sock_close(sk); sco_sock_shutdown() 974 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && sco_sock_shutdown() 976 err = bt_sock_wait_state(sk, BT_CLOSED, sco_sock_shutdown() 977 sk->sk_lingertime); sco_sock_shutdown() 979 release_sock(sk); sco_sock_shutdown() 985 struct sock *sk = sock->sk; sco_sock_release() local 988 BT_DBG("sock %p, sk %p", sock, sk); sco_sock_release() 990 if (!sk) sco_sock_release() 993 sco_sock_close(sk); sco_sock_release() 995 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && sco_sock_release() 997 lock_sock(sk); sco_sock_release() 998 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); sco_sock_release() 999 release_sock(sk); sco_sock_release() 1002 sock_orphan(sk); sco_sock_release() 1003 sco_sock_kill(sk); sco_sock_release() 1010 struct sock *sk = conn->sk; sco_conn_ready() local 1014 if (sk) { sco_conn_ready() 1015 sco_sock_clear_timer(sk); sco_conn_ready() 1016 bh_lock_sock(sk); sco_conn_ready() 1017 sk->sk_state = BT_CONNECTED; sco_conn_ready() 1018 sk->sk_state_change(sk); sco_conn_ready() 1019 bh_unlock_sock(sk); sco_conn_ready() 1031 sk = sco_sock_alloc(sock_net(parent), NULL, sco_conn_ready() 1033 if (!sk) { sco_conn_ready() 1039 sco_sock_init(sk, parent); sco_conn_ready() 1041 bacpy(&sco_pi(sk)->src, &conn->hcon->src); sco_conn_ready() 1042 bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); sco_conn_ready() 1045 __sco_chan_add(conn, sk, parent); sco_conn_ready() 1048 sk->sk_state 
= BT_CONNECT2; sco_conn_ready() 1050 sk->sk_state = BT_CONNECTED; sco_conn_ready() 1064 struct sock *sk; sco_connect_ind() local 1071 sk_for_each(sk, &sco_sk_list.head) { sco_connect_ind() 1072 if (sk->sk_state != BT_LISTEN) sco_connect_ind() 1075 if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) || sco_connect_ind() 1076 !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) { sco_connect_ind() 1079 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) sco_connect_ind() 1143 struct sock *sk; sco_debugfs_show() local 1147 sk_for_each(sk, &sco_sk_list.head) { sco_debugfs_show() 1148 seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src, sco_debugfs_show() 1149 &sco_pi(sk)->dst, sk->sk_state); sco_debugfs_show()
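Note: the sco.c hits above cover the full SOCK_SEQPACKET lifecycle (sco_sock_alloc/sco_sock_init, bind, connect, listen/accept, sendmsg/recvmsg, shutdown, release). A minimal userspace sketch of the client path those handlers service, assuming BlueZ's <bluetooth/sco.h> header and a caller-supplied peer address (a placeholder, not taken from the listing):

        /* Minimal SCO client sketch: socket() -> bind(BDADDR_ANY) -> connect().
         * Mirrors sco_sock_bind() filling sco_pi(sk)->src and
         * sco_sock_connect() filling ->dst, then waiting for BT_CONNECTED. */
        #include <string.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <bluetooth/bluetooth.h>
        #include <bluetooth/sco.h>

        int sco_connect_demo(const bdaddr_t *peer)
        {
                struct sockaddr_sco addr;
                int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);

                if (sk < 0)
                        return -1;

                /* sco_sock_bind() requires BT_OPEN and SOCK_SEQPACKET */
                memset(&addr, 0, sizeof(addr));
                addr.sco_family = AF_BLUETOOTH;
                bacpy(&addr.sco_bdaddr, BDADDR_ANY);
                if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                        goto fail;

                /* connect() blocks in bt_sock_wait_state(sk, BT_CONNECTED, ...) */
                bacpy(&addr.sco_bdaddr, peer);
                if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                        goto fail;

                return sk;
        fail:
                close(sk);
                return -1;
        }

Binding to BDADDR_ANY first matches the kernel-side ordering: sco_sock_connect() only accepts sockets in BT_OPEN or BT_BOUND state.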
|
H A D | af_bluetooth.c | 67 void bt_sock_reclassify_lock(struct sock *sk, int proto) bt_sock_reclassify_lock() argument 69 BUG_ON(!sk); bt_sock_reclassify_lock() 70 BUG_ON(sock_owned_by_user(sk)); bt_sock_reclassify_lock() 72 sock_lock_init_class_and_name(sk, bt_sock_reclassify_lock() 130 bt_sock_reclassify_lock(sock->sk, proto); bt_sock_create() 139 void bt_sock_link(struct bt_sock_list *l, struct sock *sk) bt_sock_link() argument 142 sk_add_node(sk, &l->head); bt_sock_link() 147 void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) bt_sock_unlink() argument 150 sk_del_node_init(sk); bt_sock_unlink() 155 void bt_accept_enqueue(struct sock *parent, struct sock *sk) bt_accept_enqueue() argument 157 BT_DBG("parent %p, sk %p", parent, sk); bt_accept_enqueue() 159 sock_hold(sk); bt_accept_enqueue() 160 list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); bt_accept_enqueue() 161 bt_sk(sk)->parent = parent; bt_accept_enqueue() 166 void bt_accept_unlink(struct sock *sk) bt_accept_unlink() argument 168 BT_DBG("sk %p state %d", sk, sk->sk_state); bt_accept_unlink() 170 list_del_init(&bt_sk(sk)->accept_q); bt_accept_unlink() 171 bt_sk(sk)->parent->sk_ack_backlog--; bt_accept_unlink() 172 bt_sk(sk)->parent = NULL; bt_accept_unlink() 173 sock_put(sk); bt_accept_unlink() 180 struct sock *sk; bt_accept_dequeue() local 185 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); bt_accept_dequeue() 187 lock_sock(sk); bt_accept_dequeue() 190 if (sk->sk_state == BT_CLOSED) { bt_accept_dequeue() 191 release_sock(sk); bt_accept_dequeue() 192 bt_accept_unlink(sk); bt_accept_dequeue() 196 if (sk->sk_state == BT_CONNECTED || !newsock || bt_accept_dequeue() 198 bt_accept_unlink(sk); bt_accept_dequeue() 200 sock_graft(sk, newsock); bt_accept_dequeue() 202 release_sock(sk); bt_accept_dequeue() 203 return sk; bt_accept_dequeue() 206 release_sock(sk); bt_accept_dequeue() 217 struct sock *sk = sock->sk; bt_sock_recvmsg() local 222 BT_DBG("sock %p sk %p len %zu", sock, sk, len); bt_sock_recvmsg() 227 skb = skb_recv_datagram(sk, flags, noblock, &err); bt_sock_recvmsg() 229 if (sk->sk_shutdown & RCV_SHUTDOWN) bt_sock_recvmsg() 244 sock_recv_ts_and_drops(msg, sk, skb); bt_sock_recvmsg() 246 if (bt_sk(sk)->skb_msg_name) bt_sock_recvmsg() 247 bt_sk(sk)->skb_msg_name(skb, msg->msg_name, bt_sock_recvmsg() 251 skb_free_datagram(sk, skb); bt_sock_recvmsg() 257 static long bt_sock_data_wait(struct sock *sk, long timeo) bt_sock_data_wait() argument 261 add_wait_queue(sk_sleep(sk), &wait); bt_sock_data_wait() 265 if (!skb_queue_empty(&sk->sk_receive_queue)) bt_sock_data_wait() 268 if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN)) bt_sock_data_wait() 274 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); bt_sock_data_wait() 275 release_sock(sk); bt_sock_data_wait() 277 lock_sock(sk); bt_sock_data_wait() 278 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); bt_sock_data_wait() 282 remove_wait_queue(sk_sleep(sk), &wait); bt_sock_data_wait() 289 struct sock *sk = sock->sk; bt_sock_stream_recvmsg() local 297 BT_DBG("sk %p size %zu", sk, size); bt_sock_stream_recvmsg() 299 lock_sock(sk); bt_sock_stream_recvmsg() 301 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); bt_sock_stream_recvmsg() 302 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); bt_sock_stream_recvmsg() 308 skb = skb_dequeue(&sk->sk_receive_queue); bt_sock_stream_recvmsg() 313 err = sock_error(sk); bt_sock_stream_recvmsg() 316 if (sk->sk_shutdown & RCV_SHUTDOWN) bt_sock_stream_recvmsg() 323 timeo = bt_sock_data_wait(sk, timeo); 
bt_sock_stream_recvmsg() 334 skb_queue_head(&sk->sk_receive_queue, skb); bt_sock_stream_recvmsg() 342 sock_recv_ts_and_drops(msg, sk, skb); bt_sock_stream_recvmsg() 373 skb_queue_head(&sk->sk_receive_queue, skb); 380 skb_queue_head(&sk->sk_receive_queue, skb); 386 release_sock(sk); 394 struct sock *sk; bt_accept_poll() local 397 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); bt_accept_poll() 398 if (sk->sk_state == BT_CONNECTED || bt_accept_poll() 400 sk->sk_state == BT_CONNECT2)) bt_accept_poll() 410 struct sock *sk = sock->sk; bt_sock_poll() local 413 BT_DBG("sock %p, sk %p", sock, sk); bt_sock_poll() 415 poll_wait(file, sk_sleep(sk), wait); bt_sock_poll() 417 if (sk->sk_state == BT_LISTEN) bt_sock_poll() 418 return bt_accept_poll(sk); bt_sock_poll() 420 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) bt_sock_poll() 422 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); bt_sock_poll() 424 if (sk->sk_shutdown & RCV_SHUTDOWN) bt_sock_poll() 427 if (sk->sk_shutdown == SHUTDOWN_MASK) bt_sock_poll() 430 if (!skb_queue_empty(&sk->sk_receive_queue)) bt_sock_poll() 433 if (sk->sk_state == BT_CLOSED) bt_sock_poll() 436 if (sk->sk_state == BT_CONNECT || bt_sock_poll() 437 sk->sk_state == BT_CONNECT2 || bt_sock_poll() 438 sk->sk_state == BT_CONFIG) bt_sock_poll() 441 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) bt_sock_poll() 444 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); bt_sock_poll() 452 struct sock *sk = sock->sk; bt_sock_ioctl() local 457 BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); bt_sock_ioctl() 461 if (sk->sk_state == BT_LISTEN) bt_sock_ioctl() 464 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); bt_sock_ioctl() 471 if (sk->sk_state == BT_LISTEN) bt_sock_ioctl() 474 lock_sock(sk); bt_sock_ioctl() 475 skb = skb_peek(&sk->sk_receive_queue); bt_sock_ioctl() 477 release_sock(sk); bt_sock_ioctl() 482 err = sock_get_timestamp(sk, (struct timeval __user *) arg); bt_sock_ioctl() 486 err = sock_get_timestampns(sk, (struct timespec __user *) arg); bt_sock_ioctl() 498 /* This function expects the sk lock to be held when called */ bt_sock_wait_state() 499 int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) bt_sock_wait_state() argument 504 BT_DBG("sk %p", sk); bt_sock_wait_state() 506 add_wait_queue(sk_sleep(sk), &wait); bt_sock_wait_state() 508 while (sk->sk_state != state) { bt_sock_wait_state() 519 release_sock(sk); bt_sock_wait_state() 521 lock_sock(sk); bt_sock_wait_state() 524 err = sock_error(sk); bt_sock_wait_state() 529 remove_wait_queue(sk_sleep(sk), &wait); bt_sock_wait_state() 534 /* This function expects the sk lock to be held when called */ bt_sock_wait_ready() 535 int bt_sock_wait_ready(struct sock *sk, unsigned long flags) bt_sock_wait_ready() argument 541 BT_DBG("sk %p", sk); bt_sock_wait_ready() 543 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); bt_sock_wait_ready() 545 add_wait_queue(sk_sleep(sk), &wait); bt_sock_wait_ready() 547 while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) { bt_sock_wait_ready() 558 release_sock(sk); bt_sock_wait_ready() 560 lock_sock(sk); bt_sock_wait_ready() 563 err = sock_error(sk); bt_sock_wait_ready() 568 remove_wait_queue(sk_sleep(sk), &wait); bt_sock_wait_ready() 612 seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Parent"); bt_seq_show() 621 struct sock *sk = sk_entry(v); bt_seq_show() local 622 struct bt_sock *bt = bt_sk(sk); bt_seq_show() 626 sk, bt_seq_show() 627 atomic_read(&sk->sk_refcnt), bt_seq_show() 628 sk_rmem_alloc_get(sk), bt_seq_show() 629 
sk_wmem_alloc_get(sk), bt_seq_show() 630 from_kuid(seq_user_ns(seq), sock_i_uid(sk)), bt_seq_show() 631 sock_i_ino(sk), bt_seq_show()
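Note: bt_sock_poll() above maps socket state onto poll bits (POLLIN when sk_receive_queue is non-empty, POLLHUP/POLLRDHUP once sk_shutdown covers the receive side, POLLERR from sk_err or the error queue). A small userspace sketch of consuming those bits; the fd and timeout are the caller's, not anything from the listing:

        /* Sketch: wait for data or hangup on a connected Bluetooth socket,
         * consuming the events bt_sock_poll() reports. */
        #define _GNU_SOURCE             /* for POLLRDHUP */
        #include <poll.h>

        int bt_wait_readable(int fd, int timeout_ms)
        {
                struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
                int n = poll(&pfd, 1, timeout_ms);

                if (n <= 0)
                        return n;       /* 0: timeout, <0: poll error */
                if (pfd.revents & (POLLERR | POLLHUP | POLLRDHUP))
                        return -1;      /* peer closed or sk_err was set */
                return 1;               /* sk_receive_queue has data */
        }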
|
H A D | l2cap_sock.c | 44 static void l2cap_sock_init(struct sock *sk, struct sock *parent); 82 struct sock *sk = sock->sk; l2cap_sock_bind() local 83 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_bind() 87 BT_DBG("sk %p", sk); l2cap_sock_bind() 109 lock_sock(sk); l2cap_sock_bind() 111 if (sk->sk_state != BT_OPEN) { l2cap_sock_bind() 166 sk->sk_state = BT_BOUND; l2cap_sock_bind() 169 release_sock(sk); l2cap_sock_bind() 176 struct sock *sk = sock->sk; l2cap_sock_connect() local 177 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_connect() 181 BT_DBG("sk %p", sk); l2cap_sock_connect() 241 lock_sock(sk); l2cap_sock_connect() 243 err = bt_sock_wait_state(sk, BT_CONNECTED, l2cap_sock_connect() 244 sock_sndtimeo(sk, flags & O_NONBLOCK)); l2cap_sock_connect() 246 release_sock(sk); l2cap_sock_connect() 253 struct sock *sk = sock->sk; l2cap_sock_listen() local 254 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_listen() 257 BT_DBG("sk %p backlog %d", sk, backlog); l2cap_sock_listen() 259 lock_sock(sk); l2cap_sock_listen() 261 if (sk->sk_state != BT_BOUND) { l2cap_sock_listen() 266 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { l2cap_sock_listen() 285 sk->sk_max_ack_backlog = backlog; l2cap_sock_listen() 286 sk->sk_ack_backlog = 0; l2cap_sock_listen() 295 sk->sk_state = BT_LISTEN; l2cap_sock_listen() 298 release_sock(sk); l2cap_sock_listen() 306 struct sock *sk = sock->sk, *nsk; l2cap_sock_accept() local 310 lock_sock_nested(sk, L2CAP_NESTING_PARENT); l2cap_sock_accept() 312 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); l2cap_sock_accept() 314 BT_DBG("sk %p timeo %ld", sk, timeo); l2cap_sock_accept() 317 add_wait_queue_exclusive(sk_sleep(sk), &wait); l2cap_sock_accept() 319 if (sk->sk_state != BT_LISTEN) { l2cap_sock_accept() 324 nsk = bt_accept_dequeue(sk, newsock); l2cap_sock_accept() 338 release_sock(sk); l2cap_sock_accept() 342 lock_sock_nested(sk, L2CAP_NESTING_PARENT); l2cap_sock_accept() 344 remove_wait_queue(sk_sleep(sk), &wait); l2cap_sock_accept() 354 release_sock(sk); l2cap_sock_accept() 362 struct sock *sk = sock->sk; l2cap_sock_getname() local 363 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_getname() 365 BT_DBG("sock %p, sk %p", sock, sk); l2cap_sock_getname() 367 if (peer && sk->sk_state != BT_CONNECTED && l2cap_sock_getname() 368 sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 && l2cap_sock_getname() 369 sk->sk_state != BT_CONFIG) l2cap_sock_getname() 394 struct sock *sk = sock->sk; l2cap_sock_getsockopt_old() local 395 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_getsockopt_old() 401 BT_DBG("sk %p", sk); l2cap_sock_getsockopt_old() 406 lock_sock(sk); l2cap_sock_getsockopt_old() 468 if (sk->sk_state != BT_CONNECTED && l2cap_sock_getsockopt_old() 469 !(sk->sk_state == BT_CONNECT2 && l2cap_sock_getsockopt_old() 470 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { l2cap_sock_getsockopt_old() 490 release_sock(sk); l2cap_sock_getsockopt_old() 497 struct sock *sk = sock->sk; l2cap_sock_getsockopt() local 498 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_getsockopt() 503 BT_DBG("sk %p", sk); l2cap_sock_getsockopt() 514 lock_sock(sk); l2cap_sock_getsockopt() 529 if (sk->sk_state == BT_CONNECTED) l2cap_sock_getsockopt() 542 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { l2cap_sock_getsockopt() 547 if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), l2cap_sock_getsockopt() 561 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM l2cap_sock_getsockopt() 562 && 
sk->sk_type != SOCK_RAW) { l2cap_sock_getsockopt() 586 if (sk->sk_state != BT_CONNECTED) { l2cap_sock_getsockopt() 610 release_sock(sk); l2cap_sock_getsockopt() 633 struct sock *sk = sock->sk; l2cap_sock_setsockopt_old() local 634 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_setsockopt_old() 639 BT_DBG("sk %p", sk); l2cap_sock_setsockopt_old() 641 lock_sock(sk); l2cap_sock_setsockopt_old() 650 if (sk->sk_state == BT_CONNECTED) { l2cap_sock_setsockopt_old() 738 release_sock(sk); l2cap_sock_setsockopt_old() 745 struct sock *sk = sock->sk; l2cap_sock_setsockopt() local 746 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_setsockopt() 753 BT_DBG("sk %p", sk); l2cap_sock_setsockopt() 761 lock_sock(sk); l2cap_sock_setsockopt() 798 sk->sk_state = BT_CONFIG; l2cap_sock_setsockopt() 802 } else if ((sk->sk_state == BT_CONNECT2 && l2cap_sock_setsockopt() 803 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) || l2cap_sock_setsockopt() 804 sk->sk_state == BT_CONNECTED) { l2cap_sock_setsockopt() 806 set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); l2cap_sock_setsockopt() 808 sk->sk_state_change(sk); l2cap_sock_setsockopt() 815 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { l2cap_sock_setsockopt() 826 set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); l2cap_sock_setsockopt() 829 clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); l2cap_sock_setsockopt() 901 if (sk->sk_state == BT_CONNECTED && l2cap_sock_setsockopt() 925 if (sk->sk_state == BT_CONNECTED) { l2cap_sock_setsockopt() 943 release_sock(sk); l2cap_sock_setsockopt() 950 struct sock *sk = sock->sk; l2cap_sock_sendmsg() local 951 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_sendmsg() 954 BT_DBG("sock %p, sk %p", sock, sk); l2cap_sock_sendmsg() 956 err = sock_error(sk); l2cap_sock_sendmsg() 963 if (sk->sk_state != BT_CONNECTED) l2cap_sock_sendmsg() 966 lock_sock(sk); l2cap_sock_sendmsg() 967 err = bt_sock_wait_ready(sk, msg->msg_flags); l2cap_sock_sendmsg() 968 release_sock(sk); l2cap_sock_sendmsg() 982 struct sock *sk = sock->sk; l2cap_sock_recvmsg() local 983 struct l2cap_pinfo *pi = l2cap_pi(sk); l2cap_sock_recvmsg() 986 lock_sock(sk); l2cap_sock_recvmsg() 988 if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, l2cap_sock_recvmsg() 989 &bt_sk(sk)->flags)) { l2cap_sock_recvmsg() 991 sk->sk_state = BT_CONNECTED; l2cap_sock_recvmsg() 995 sk->sk_state = BT_CONFIG; l2cap_sock_recvmsg() 1004 release_sock(sk); l2cap_sock_recvmsg() 1016 lock_sock(sk); l2cap_sock_recvmsg() 1022 if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb)) l2cap_sock_recvmsg() 1032 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1) l2cap_sock_recvmsg() 1036 release_sock(sk); l2cap_sock_recvmsg() 1043 static void l2cap_sock_kill(struct sock *sk) l2cap_sock_kill() argument 1045 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) l2cap_sock_kill() 1048 BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state)); l2cap_sock_kill() 1052 l2cap_chan_put(l2cap_pi(sk)->chan); l2cap_sock_kill() 1053 sock_set_flag(sk, SOCK_DEAD); l2cap_sock_kill() 1054 sock_put(sk); l2cap_sock_kill() 1057 static int __l2cap_wait_ack(struct sock *sk) __l2cap_wait_ack() argument 1059 struct l2cap_chan *chan = l2cap_pi(sk)->chan; __l2cap_wait_ack() 1064 add_wait_queue(sk_sleep(sk), &wait); __l2cap_wait_ack() 1075 release_sock(sk); __l2cap_wait_ack() 1077 lock_sock(sk); __l2cap_wait_ack() 1080 err = sock_error(sk); __l2cap_wait_ack() 1085 remove_wait_queue(sk_sleep(sk), &wait); __l2cap_wait_ack() 1091 struct sock *sk = sock->sk; l2cap_sock_shutdown() local 1096 
BT_DBG("sock %p, sk %p", sock, sk); l2cap_sock_shutdown() 1098 if (!sk) l2cap_sock_shutdown() 1101 chan = l2cap_pi(sk)->chan; l2cap_sock_shutdown() 1110 lock_sock(sk); l2cap_sock_shutdown() 1112 if (!sk->sk_shutdown) { l2cap_sock_shutdown() 1114 err = __l2cap_wait_ack(sk); l2cap_sock_shutdown() 1116 sk->sk_shutdown = SHUTDOWN_MASK; l2cap_sock_shutdown() 1118 release_sock(sk); l2cap_sock_shutdown() 1120 lock_sock(sk); l2cap_sock_shutdown() 1122 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && l2cap_sock_shutdown() 1124 err = bt_sock_wait_state(sk, BT_CLOSED, l2cap_sock_shutdown() 1125 sk->sk_lingertime); l2cap_sock_shutdown() 1128 if (!err && sk->sk_err) l2cap_sock_shutdown() 1129 err = -sk->sk_err; l2cap_sock_shutdown() 1131 release_sock(sk); l2cap_sock_shutdown() 1142 struct sock *sk = sock->sk; l2cap_sock_release() local 1145 BT_DBG("sock %p, sk %p", sock, sk); l2cap_sock_release() 1147 if (!sk) l2cap_sock_release() 1150 bt_sock_unlink(&l2cap_sk_list, sk); l2cap_sock_release() 1154 sock_orphan(sk); l2cap_sock_release() 1155 l2cap_sock_kill(sk); l2cap_sock_release() 1161 struct sock *sk; l2cap_sock_cleanup_listen() local 1167 while ((sk = bt_accept_dequeue(parent, NULL))) { l2cap_sock_cleanup_listen() 1168 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_cleanup_listen() 1178 l2cap_sock_kill(sk); l2cap_sock_cleanup_listen() 1184 struct sock *sk, *parent = chan->data; l2cap_sock_new_connection_cb() local 1195 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, l2cap_sock_new_connection_cb() 1197 if (!sk) { l2cap_sock_new_connection_cb() 1202 bt_sock_reclassify_lock(sk, BTPROTO_L2CAP); l2cap_sock_new_connection_cb() 1204 l2cap_sock_init(sk, parent); l2cap_sock_new_connection_cb() 1206 bt_accept_enqueue(parent, sk); l2cap_sock_new_connection_cb() 1210 return l2cap_pi(sk)->chan; l2cap_sock_new_connection_cb() 1215 struct sock *sk = chan->data; l2cap_sock_recv_cb() local 1218 lock_sock(sk); l2cap_sock_recv_cb() 1220 if (l2cap_pi(sk)->rx_busy_skb) { l2cap_sock_recv_cb() 1225 err = sock_queue_rcv_skb(sk, skb); l2cap_sock_recv_cb() 1237 l2cap_pi(sk)->rx_busy_skb = skb; l2cap_sock_recv_cb() 1243 release_sock(sk); l2cap_sock_recv_cb() 1250 struct sock *sk = chan->data; l2cap_sock_close_cb() local 1252 l2cap_sock_kill(sk); l2cap_sock_close_cb() 1257 struct sock *sk = chan->data; l2cap_sock_teardown_cb() local 1269 lock_sock_nested(sk, atomic_read(&chan->nesting)); l2cap_sock_teardown_cb() 1271 parent = bt_sk(sk)->parent; l2cap_sock_teardown_cb() 1273 sock_set_flag(sk, SOCK_ZAPPED); l2cap_sock_teardown_cb() 1281 l2cap_sock_cleanup_listen(sk); l2cap_sock_teardown_cb() 1282 sk->sk_state = BT_CLOSED; l2cap_sock_teardown_cb() 1287 sk->sk_state = BT_CLOSED; l2cap_sock_teardown_cb() 1290 sk->sk_err = err; l2cap_sock_teardown_cb() 1293 bt_accept_unlink(sk); l2cap_sock_teardown_cb() 1296 sk->sk_state_change(sk); l2cap_sock_teardown_cb() 1302 release_sock(sk); l2cap_sock_teardown_cb() 1308 struct sock *sk = chan->data; l2cap_sock_state_change_cb() local 1310 sk->sk_state = state; l2cap_sock_state_change_cb() 1313 sk->sk_err = err; l2cap_sock_state_change_cb() 1320 struct sock *sk = chan->data; l2cap_sock_alloc_skb_cb() local 1325 skb = bt_skb_send_alloc(sk, hdr_len + len, nb, &err); l2cap_sock_alloc_skb_cb() 1331 skb->priority = sk->sk_priority; l2cap_sock_alloc_skb_cb() 1340 struct sock *sk = chan->data; l2cap_sock_ready_cb() local 1343 lock_sock(sk); l2cap_sock_ready_cb() 1345 parent = bt_sk(sk)->parent; l2cap_sock_ready_cb() 1347 BT_DBG("sk %p, parent %p", sk, parent); 
l2cap_sock_ready_cb() 1349 sk->sk_state = BT_CONNECTED; l2cap_sock_ready_cb() 1350 sk->sk_state_change(sk); l2cap_sock_ready_cb() 1355 release_sock(sk); l2cap_sock_ready_cb() 1360 struct sock *parent, *sk = chan->data; l2cap_sock_defer_cb() local 1362 lock_sock(sk); l2cap_sock_defer_cb() 1364 parent = bt_sk(sk)->parent; l2cap_sock_defer_cb() 1368 release_sock(sk); l2cap_sock_defer_cb() 1373 struct sock *sk = chan->data; l2cap_sock_resume_cb() local 1376 sk->sk_state = BT_CONNECTED; l2cap_sock_resume_cb() 1380 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); l2cap_sock_resume_cb() 1381 sk->sk_state_change(sk); l2cap_sock_resume_cb() 1386 struct sock *sk = chan->data; l2cap_sock_set_shutdown_cb() local 1388 lock_sock(sk); l2cap_sock_set_shutdown_cb() 1389 sk->sk_shutdown = SHUTDOWN_MASK; l2cap_sock_set_shutdown_cb() 1390 release_sock(sk); l2cap_sock_set_shutdown_cb() 1395 struct sock *sk = chan->data; l2cap_sock_get_sndtimeo_cb() local 1397 return sk->sk_sndtimeo; l2cap_sock_get_sndtimeo_cb() 1402 struct sock *sk = chan->data; l2cap_sock_suspend_cb() local 1404 set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); l2cap_sock_suspend_cb() 1405 sk->sk_state_change(sk); l2cap_sock_suspend_cb() 1424 static void l2cap_sock_destruct(struct sock *sk) l2cap_sock_destruct() argument 1426 BT_DBG("sk %p", sk); l2cap_sock_destruct() 1428 if (l2cap_pi(sk)->chan) l2cap_sock_destruct() 1429 l2cap_chan_put(l2cap_pi(sk)->chan); l2cap_sock_destruct() 1431 if (l2cap_pi(sk)->rx_busy_skb) { l2cap_sock_destruct() 1432 kfree_skb(l2cap_pi(sk)->rx_busy_skb); l2cap_sock_destruct() 1433 l2cap_pi(sk)->rx_busy_skb = NULL; l2cap_sock_destruct() 1436 skb_queue_purge(&sk->sk_receive_queue); l2cap_sock_destruct() 1437 skb_queue_purge(&sk->sk_write_queue); l2cap_sock_destruct() 1453 static void l2cap_sock_init(struct sock *sk, struct sock *parent) l2cap_sock_init() argument 1455 struct l2cap_chan *chan = l2cap_pi(sk)->chan; l2cap_sock_init() 1457 BT_DBG("sk %p", sk); l2cap_sock_init() 1462 sk->sk_type = parent->sk_type; l2cap_sock_init() 1463 bt_sk(sk)->flags = bt_sk(parent)->flags; l2cap_sock_init() 1484 security_sk_clone(parent, sk); l2cap_sock_init() 1486 switch (sk->sk_type) { l2cap_sock_init() 1492 bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name; l2cap_sock_init() 1502 if (!disable_ertm && sk->sk_type == SOCK_STREAM) { l2cap_sock_init() 1515 chan->data = sk; l2cap_sock_init() 1528 struct sock *sk; l2cap_sock_alloc() local 1531 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto); l2cap_sock_alloc() 1532 if (!sk) l2cap_sock_alloc() 1535 sock_init_data(sock, sk); l2cap_sock_alloc() 1536 INIT_LIST_HEAD(&bt_sk(sk)->accept_q); l2cap_sock_alloc() 1538 sk->sk_destruct = l2cap_sock_destruct; l2cap_sock_alloc() 1539 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; l2cap_sock_alloc() 1541 sock_reset_flag(sk, SOCK_ZAPPED); l2cap_sock_alloc() 1543 sk->sk_protocol = proto; l2cap_sock_alloc() 1544 sk->sk_state = BT_OPEN; l2cap_sock_alloc() 1548 sk_free(sk); l2cap_sock_alloc() 1554 l2cap_pi(sk)->chan = chan; l2cap_sock_alloc() 1556 return sk; l2cap_sock_alloc() 1562 struct sock *sk; l2cap_sock_create() local 1577 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC); l2cap_sock_create() 1578 if (!sk) l2cap_sock_create() 1581 l2cap_sock_init(sk, NULL); l2cap_sock_create() 1582 bt_sock_link(&l2cap_sk_list, sk); l2cap_sock_create()
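Note: l2cap_sock_bind() and l2cap_sock_connect() above validate socket state and then delegate to the l2cap_chan, with connect() parking in bt_sock_wait_state() until the channel callbacks flip sk_state to BT_CONNECTED. A userspace sketch of the same path over BlueZ's <bluetooth/l2cap.h>; the PSM and peer address are placeholders:

        /* Minimal L2CAP client sketch over a SOCK_SEQPACKET socket. */
        #include <stdint.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <bluetooth/bluetooth.h>
        #include <bluetooth/l2cap.h>

        int l2cap_connect_demo(const bdaddr_t *peer, uint16_t psm)
        {
                struct sockaddr_l2 addr;
                int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);

                if (sk < 0)
                        return -1;

                memset(&addr, 0, sizeof(addr));
                addr.l2_family = AF_BLUETOOTH;
                bacpy(&addr.l2_bdaddr, BDADDR_ANY);
                if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                        goto fail;

                memset(&addr, 0, sizeof(addr));
                addr.l2_family = AF_BLUETOOTH;
                addr.l2_psm = htobs(psm);   /* PSMs are little-endian on the wire */
                bacpy(&addr.l2_bdaddr, peer);
                if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                        goto fail;

                return sk;
        fail:
                close(sk);
                return -1;
        }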
|
H A D | hci_sock.c | 45 #define hci_pi(sk) ((struct hci_pinfo *) sk) 56 void hci_sock_set_flag(struct sock *sk, int nr) hci_sock_set_flag() argument 58 set_bit(nr, &hci_pi(sk)->flags); hci_sock_set_flag() 61 void hci_sock_clear_flag(struct sock *sk, int nr) hci_sock_clear_flag() argument 63 clear_bit(nr, &hci_pi(sk)->flags); hci_sock_clear_flag() 66 int hci_sock_test_flag(struct sock *sk, int nr) hci_sock_test_flag() argument 68 return test_bit(nr, &hci_pi(sk)->flags); hci_sock_test_flag() 71 unsigned short hci_sock_get_channel(struct sock *sk) hci_sock_get_channel() argument 73 return hci_pi(sk)->channel; hci_sock_get_channel() 115 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb) is_filtered_packet() argument 121 flt = &hci_pi(sk)->filter; is_filtered_packet() 158 struct sock *sk; hci_send_to_sock() local 165 sk_for_each(sk, &hci_sk_list.head) { hci_send_to_sock() 168 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) hci_send_to_sock() 172 if (skb->sk == sk) hci_send_to_sock() 175 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) { hci_send_to_sock() 176 if (is_filtered_packet(sk, skb)) hci_send_to_sock() 178 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { hci_send_to_sock() 204 if (sock_queue_rcv_skb(sk, nskb)) hci_send_to_sock() 217 struct sock *sk; hci_send_to_channel() local 223 sk_for_each(sk, &hci_sk_list.head) { hci_send_to_channel() 227 if (!hci_sock_test_flag(sk, flag)) hci_send_to_channel() 231 if (sk == skip_sk) hci_send_to_channel() 234 if (sk->sk_state != BT_BOUND) hci_send_to_channel() 237 if (hci_pi(sk)->channel != channel) hci_send_to_channel() 244 if (sock_queue_rcv_skb(sk, nskb)) hci_send_to_channel() 346 static void send_monitor_replay(struct sock *sk) send_monitor_replay() argument 359 if (sock_queue_rcv_skb(sk, skb)) send_monitor_replay() 417 struct sock *sk; hci_sock_dev_event() local 421 sk_for_each(sk, &hci_sk_list.head) { hci_sock_dev_event() 422 bh_lock_sock_nested(sk); hci_sock_dev_event() 423 if (hci_pi(sk)->hdev == hdev) { hci_sock_dev_event() 424 hci_pi(sk)->hdev = NULL; hci_sock_dev_event() 425 sk->sk_err = EPIPE; hci_sock_dev_event() 426 sk->sk_state = BT_OPEN; hci_sock_dev_event() 427 sk->sk_state_change(sk); hci_sock_dev_event() 431 bh_unlock_sock(sk); hci_sock_dev_event() 489 struct sock *sk = sock->sk; hci_sock_release() local 492 BT_DBG("sock %p sk %p", sock, sk); hci_sock_release() 494 if (!sk) hci_sock_release() 497 hdev = hci_pi(sk)->hdev; hci_sock_release() 499 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR) hci_sock_release() 502 bt_sock_unlink(&hci_sk_list, sk); hci_sock_release() 505 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { hci_sock_release() 515 sock_orphan(sk); hci_sock_release() 517 skb_queue_purge(&sk->sk_receive_queue); hci_sock_release() 518 skb_queue_purge(&sk->sk_write_queue); hci_sock_release() 520 sock_put(sk); hci_sock_release() 559 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, hci_sock_bound_ioctl() argument 562 struct hci_dev *hdev = hci_pi(sk)->hdev; hci_sock_bound_ioctl() 606 struct sock *sk = sock->sk; hci_sock_ioctl() local 611 lock_sock(sk); hci_sock_ioctl() 613 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { hci_sock_ioctl() 618 release_sock(sk); hci_sock_ioctl() 666 lock_sock(sk); hci_sock_ioctl() 668 err = hci_sock_bound_ioctl(sk, cmd, arg); hci_sock_ioctl() 671 release_sock(sk); hci_sock_ioctl() 679 struct sock *sk = sock->sk; hci_sock_bind() local 683 BT_DBG("sock %p sk %p", sock, sk); hci_sock_bind() 695 lock_sock(sk); hci_sock_bind() 697 if (sk->sk_state == BT_BOUND) { 
hci_sock_bind() 704 if (hci_pi(sk)->hdev) { hci_sock_bind() 719 hci_pi(sk)->hdev = hdev; hci_sock_bind() 723 if (hci_pi(sk)->hdev) { hci_sock_bind() 783 hci_pi(sk)->hdev = hdev; hci_sock_bind() 800 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); hci_sock_bind() 802 send_monitor_replay(sk); hci_sock_bind() 824 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); hci_sock_bind() 837 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS); hci_sock_bind() 838 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); hci_sock_bind() 839 hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS); hci_sock_bind() 845 hci_pi(sk)->channel = haddr.hci_channel; hci_sock_bind() 846 sk->sk_state = BT_BOUND; hci_sock_bind() 849 release_sock(sk); hci_sock_bind() 857 struct sock *sk = sock->sk; hci_sock_getname() local 861 BT_DBG("sock %p sk %p", sock, sk); hci_sock_getname() 866 lock_sock(sk); hci_sock_getname() 868 hdev = hci_pi(sk)->hdev; hci_sock_getname() 877 haddr->hci_channel= hci_pi(sk)->channel; hci_sock_getname() 880 release_sock(sk); hci_sock_getname() 884 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, hci_sock_cmsg() argument 887 __u32 mask = hci_pi(sk)->cmsg_mask; hci_sock_cmsg() 925 struct sock *sk = sock->sk; hci_sock_recvmsg() local 929 BT_DBG("sock %p, sk %p", sock, sk); hci_sock_recvmsg() 934 if (sk->sk_state == BT_CLOSED) hci_sock_recvmsg() 937 skb = skb_recv_datagram(sk, flags, noblock, &err); hci_sock_recvmsg() 950 switch (hci_pi(sk)->channel) { hci_sock_recvmsg() 952 hci_sock_cmsg(sk, msg, skb); hci_sock_recvmsg() 956 sock_recv_timestamp(msg, sk, skb); hci_sock_recvmsg() 959 if (hci_mgmt_chan_find(hci_pi(sk)->channel)) hci_sock_recvmsg() 960 sock_recv_timestamp(msg, sk, skb); hci_sock_recvmsg() 964 skb_free_datagram(sk, skb); hci_sock_recvmsg() 969 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, hci_mgmt_cmd() argument 1008 err = mgmt_cmd_status(sk, index, opcode, hci_mgmt_cmd() 1015 if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) && hci_mgmt_cmd() 1017 err = mgmt_cmd_status(sk, index, opcode, hci_mgmt_cmd() 1025 err = mgmt_cmd_status(sk, index, opcode, hci_mgmt_cmd() 1033 err = mgmt_cmd_status(sk, index, opcode, hci_mgmt_cmd() 1040 err = mgmt_cmd_status(sk, index, opcode, hci_mgmt_cmd() 1048 err = mgmt_cmd_status(sk, index, opcode, hci_mgmt_cmd() 1056 err = mgmt_cmd_status(sk, index, opcode, hci_mgmt_cmd() 1062 chan->hdev_init(sk, hdev); hci_mgmt_cmd() 1066 err = handler->func(sk, hdev, cp, len); hci_mgmt_cmd() 1083 struct sock *sk = sock->sk; hci_sock_sendmsg() local 1089 BT_DBG("sock %p sk %p", sock, sk); hci_sock_sendmsg() 1100 lock_sock(sk); hci_sock_sendmsg() 1102 switch (hci_pi(sk)->channel) { hci_sock_sendmsg() 1111 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); hci_sock_sendmsg() 1113 err = hci_mgmt_cmd(chan, sk, msg, len); hci_sock_sendmsg() 1121 hdev = hci_pi(sk)->hdev; hci_sock_sendmsg() 1132 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); hci_sock_sendmsg() 1144 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { hci_sock_sendmsg() 1197 release_sock(sk); hci_sock_sendmsg() 1209 struct sock *sk = sock->sk; hci_sock_setsockopt() local 1212 BT_DBG("sk %p, opt %d", sk, optname); hci_sock_setsockopt() 1214 lock_sock(sk); hci_sock_setsockopt() 1216 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { hci_sock_setsockopt() 1229 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR; hci_sock_setsockopt() 1231 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR; hci_sock_setsockopt() 1241 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP; hci_sock_setsockopt() 1243 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP; 
hci_sock_setsockopt() 1248 struct hci_filter *f = &hci_pi(sk)->filter; hci_sock_setsockopt() 1269 struct hci_filter *f = &hci_pi(sk)->filter; hci_sock_setsockopt() 1284 release_sock(sk); hci_sock_setsockopt() 1292 struct sock *sk = sock->sk; hci_sock_getsockopt() local 1295 BT_DBG("sk %p, opt %d", sk, optname); hci_sock_getsockopt() 1300 lock_sock(sk); hci_sock_getsockopt() 1302 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { hci_sock_getsockopt() 1309 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) hci_sock_getsockopt() 1319 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) hci_sock_getsockopt() 1330 struct hci_filter *f = &hci_pi(sk)->filter; hci_sock_getsockopt() 1350 release_sock(sk); hci_sock_getsockopt() 1383 struct sock *sk; hci_sock_create() local 1392 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto); hci_sock_create() 1393 if (!sk) hci_sock_create() 1396 sock_init_data(sock, sk); hci_sock_create() 1398 sock_reset_flag(sk, SOCK_ZAPPED); hci_sock_create() 1400 sk->sk_protocol = protocol; hci_sock_create() 1403 sk->sk_state = BT_OPEN; hci_sock_create() 1405 bt_sock_link(&hci_sk_list, sk); hci_sock_create()
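Note: hci_sock_bind() above routes the socket onto a channel (raw, user, monitor, or an mgmt channel), and hci_sock_setsockopt() only honours HCI_FILTER on HCI_CHANNEL_RAW, where is_filtered_packet() consults hci_pi(sk)->filter. A userspace sketch that exercises both, assuming BlueZ's hci.h/hci_lib.h helpers; device index 0 (hci0) is a placeholder:

        /* Sketch: bind a raw HCI socket to hci0 and install an event filter. */
        #include <string.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <bluetooth/bluetooth.h>
        #include <bluetooth/hci.h>
        #include <bluetooth/hci_lib.h>

        int hci_raw_open_demo(void)
        {
                struct sockaddr_hci addr;
                struct hci_filter flt;
                int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

                if (sk < 0)
                        return -1;

                memset(&addr, 0, sizeof(addr));
                addr.hci_family = AF_BLUETOOTH;
                addr.hci_dev = 0;                    /* hci0 */
                addr.hci_channel = HCI_CHANNEL_RAW;
                if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                        goto fail;

                /* is_filtered_packet() drops anything this filter excludes */
                hci_filter_clear(&flt);
                hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
                hci_filter_all_events(&flt);
                if (setsockopt(sk, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
                        goto fail;

                return sk;
        fail:
                close(sk);
                return -1;
        }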
|
H A D | mgmt.c | 271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, read_version() argument 276 BT_DBG("sock %p", sk); read_version() 281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, read_version() 285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, read_commands() argument 293 BT_DBG("sock %p", sk); read_commands() 295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) { read_commands() 312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) { read_commands() 330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, read_commands() 337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, read_index_list() argument 346 BT_DBG("sock %p", sk); read_index_list() 389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, read_index_list() 397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, read_unconf_index_list() argument 406 BT_DBG("sock %p", sk); read_unconf_index_list() 449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, read_unconf_index_list() 457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev, read_ext_index_list() argument 466 BT_DBG("sock %p", sk); read_ext_index_list() 521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS); read_ext_index_list() 522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS); read_ext_index_list() 523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); read_ext_index_list() 525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, read_ext_index_list() 569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) send_options_rsp() argument 573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options, send_options_rsp() 577 static int read_config_info(struct sock *sk, struct hci_dev *hdev, read_config_info() argument 583 BT_DBG("sock %p %s", sk, hdev->name); read_config_info() 601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, read_config_info() 1335 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) mgmt_init_hdev() argument 1351 static int read_controller_info(struct sock *sk, struct hci_dev *hdev, read_controller_info() argument 1356 BT_DBG("sock %p %s", sk, hdev->name); read_controller_info() 1377 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp, read_controller_info() 1381 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) send_settings_rsp() argument 1385 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings, send_settings_rsp() 1442 static void advertising_added(struct sock *sk, struct hci_dev *hdev, advertising_added() argument 1449 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk); advertising_added() 1452 static void advertising_removed(struct sock *sk, struct hci_dev *hdev, advertising_removed() argument 1459 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk); advertising_removed() 1547 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, set_powered() argument 1557 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, set_powered() 1563 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, set_powered() 1572 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, set_powered() 1580 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); set_powered() 1584 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); set_powered() 1627 struct sock *sk; member in struct:cmd_lookup 1636 send_settings_rsp(cmd->sk, cmd->opcode, 
match->hdev); settings_rsp() 1640 if (match->sk == NULL) { settings_rsp() 1641 match->sk = cmd->sk; settings_rsp() 1642 sock_hold(match->sk); settings_rsp() 1652 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); cmd_status_rsp() 1672 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, generic_cmd_complete() 1678 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, addr_cmd_complete() 1720 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); set_discoverable_complete() 1738 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); set_discoverable_complete() 1741 new_settings(hdev, cmd->sk); set_discoverable_complete() 1760 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, set_discoverable() argument 1774 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, set_discoverable() 1778 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, set_discoverable() 1788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, set_discoverable() 1794 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, set_discoverable() 1801 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, set_discoverable() 1807 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, set_discoverable() 1824 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); set_discoverable() 1829 err = new_settings(hdev, sk); set_discoverable() 1850 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); set_discoverable() 1854 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); set_discoverable() 1977 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); set_connectable_complete() 1993 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); set_connectable_complete() 1996 new_settings(hdev, cmd->sk); set_connectable_complete() 2011 struct sock *sk, u8 val) set_connectable_update_settings() 2026 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); set_connectable_update_settings() 2033 return new_settings(hdev, sk); set_connectable_update_settings() 2039 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, set_connectable() argument 2052 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, set_connectable() 2056 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, set_connectable() 2062 err = set_connectable_update_settings(hdev, sk, cp->val); set_connectable() 2068 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, set_connectable() 2073 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); set_connectable() 2126 err = set_connectable_update_settings(hdev, sk, set_connectable() 2136 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data, set_bondable() argument 2146 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE, set_bondable() 2156 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev); set_bondable() 2161 err = new_settings(hdev, sk); set_bondable() 2168 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, set_link_security() argument 2180 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, set_link_security() 2184 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, set_link_security() 2197 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev); set_link_security() 2202 err = new_settings(hdev, sk); set_link_security() 2208 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, set_link_security() 2216 err = 
send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev); set_link_security() 2220 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len); set_link_security() 2237 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) set_ssp() argument 2248 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status); set_ssp() 2251 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, set_ssp() 2255 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, set_ssp() 2276 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); set_ssp() 2281 err = new_settings(hdev, sk); set_ssp() 2287 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, set_ssp() 2293 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); set_ssp() 2297 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len); set_ssp() 2318 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) set_hs() argument 2329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); set_hs() 2332 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, set_hs() 2336 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, set_hs() 2340 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, set_hs() 2346 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, set_hs() 2355 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, set_hs() 2363 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); set_hs() 2368 err = new_settings(hdev, sk); set_hs() 2391 new_settings(hdev, match.sk); le_enable_complete() 2393 if (match.sk) le_enable_complete() 2394 sock_put(match.sk); le_enable_complete() 2415 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) set_le() argument 2427 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, set_le() 2431 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, set_le() 2445 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev); set_le() 2447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, set_le() 2469 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev); set_le() 2474 err = new_settings(hdev, sk); set_le() 2481 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, set_le() 2486 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len); set_le() 2568 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_class_complete() 2584 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) add_uuid() argument 2597 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, add_uuid() 2624 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, add_uuid() 2629 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); add_uuid() 2663 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, remove_uuid() argument 2678 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, remove_uuid() 2687 err = mgmt_cmd_complete(sk, hdev->id, remove_uuid() 2708 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, remove_uuid() 2724 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, remove_uuid() 2729 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); remove_uuid() 2749 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, set_dev_class() argument 2760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, set_dev_class() 2766 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, set_dev_class() 2772 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, set_dev_class() 2781 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, set_dev_class() 2802 err = mgmt_cmd_complete(sk, hdev->id, 
MGMT_OP_SET_DEV_CLASS, 0, set_dev_class() 2807 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); set_dev_class() 2820 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, load_link_keys() argument 2833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, load_link_keys() 2840 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, load_link_keys() 2849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, load_link_keys() 2854 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, load_link_keys() 2864 return mgmt_cmd_status(sk, hdev->id, load_link_keys() 2895 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); load_link_keys() 2914 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, unpair_device() argument 2929 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, unpair_device() 2934 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, unpair_device() 2941 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, unpair_device() 2995 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, unpair_device() 3005 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, unpair_device() 3007 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk); unpair_device() 3011 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, unpair_device() 3031 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, disconnect() argument 3047 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, disconnect() 3054 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, disconnect() 3061 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, disconnect() 3073 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, disconnect() 3079 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); disconnect() 3115 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data, get_connections() argument 3129 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, get_connections() 3163 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp, get_connections() 3173 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, send_pin_code_neg_reply() argument 3179 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, send_pin_code_neg_reply() 3192 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, pin_code_reply() argument 3206 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, pin_code_reply() 3213 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, pin_code_reply() 3225 err = send_pin_code_neg_reply(sk, hdev, &ncp); pin_code_reply() 3227 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, pin_code_reply() 3233 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len); pin_code_reply() 3254 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, set_io_capability() argument 3262 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, set_io_capability() 3274 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, set_io_capability() 3305 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, pairing_complete() 3372 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, pair_device() argument 3389 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, pair_device() 3394 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, pair_device() 
3401 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, pair_device() 3408 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, pair_device() 3458 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, pair_device() 3465 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, pair_device() 3470 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len); pair_device() 3506 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, cancel_pair_device() argument 3519 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, cancel_pair_device() 3526 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, cancel_pair_device() 3534 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, cancel_pair_device() 3542 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, cancel_pair_device() 3549 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, user_pairing_resp() argument 3560 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, user_pairing_resp() 3572 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, user_pairing_resp() 3581 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, user_pairing_resp() 3585 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, user_pairing_resp() 3592 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr)); user_pairing_resp() 3619 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, pin_code_neg_reply() argument 3626 return user_pairing_resp(sk, hdev, &cp->addr, pin_code_neg_reply() 3631 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, user_confirm_reply() argument 3639 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, user_confirm_reply() 3642 return user_pairing_resp(sk, hdev, &cp->addr, user_confirm_reply() 3647 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev, user_confirm_neg_reply() argument 3654 return user_pairing_resp(sk, hdev, &cp->addr, user_confirm_neg_reply() 3659 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data, user_passkey_reply() argument 3666 return user_pairing_resp(sk, hdev, &cp->addr, user_passkey_reply() 3671 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev, user_passkey_neg_reply() argument 3678 return user_pairing_resp(sk, hdev, &cp->addr, user_passkey_neg_reply() 3709 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, set_name_complete() 3712 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, set_name_complete() 3721 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, set_local_name() argument 3739 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, set_local_name() 3749 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, set_local_name() 3755 data, len, sk); set_local_name() 3760 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); set_local_name() 3804 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, read_local_oob_data_complete() 3815 mgmt_cmd_status(cmd->sk, hdev->id, read_local_oob_data_complete() 3829 mgmt_cmd_status(cmd->sk, hdev->id, read_local_oob_data_complete() 3842 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, read_local_oob_data_complete() 3849 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, read_local_oob_data() argument 3861 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, read_local_oob_data() 3867 err = mgmt_cmd_status(sk, hdev->id, 
MGMT_OP_READ_LOCAL_OOB_DATA, read_local_oob_data() 3873 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, read_local_oob_data() 3878 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); read_local_oob_data() 3900 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, add_remote_oob_data() argument 3909 return mgmt_cmd_complete(sk, hdev->id, add_remote_oob_data() 3921 err = mgmt_cmd_complete(sk, hdev->id, add_remote_oob_data() 3936 err = mgmt_cmd_complete(sk, hdev->id, add_remote_oob_data() 3950 err = mgmt_cmd_complete(sk, hdev->id, add_remote_oob_data() 3993 err = mgmt_cmd_complete(sk, hdev->id, add_remote_oob_data() 3998 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, add_remote_oob_data() 4007 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, remove_remote_oob_data() argument 4017 return mgmt_cmd_complete(sk, hdev->id, remove_remote_oob_data() 4037 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, remove_remote_oob_data() 4254 static int start_discovery(struct sock *sk, struct hci_dev *hdev, start_discovery() argument 4268 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, start_discovery() 4276 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, start_discovery() 4282 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len); start_discovery() 4301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, start_discovery() 4323 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, service_discovery_cmd_complete() 4327 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, start_service_discovery() argument 4343 err = mgmt_cmd_complete(sk, hdev->id, start_service_discovery() 4352 err = mgmt_cmd_complete(sk, hdev->id, start_service_discovery() 4363 err = mgmt_cmd_complete(sk, hdev->id, start_service_discovery() 4374 err = mgmt_cmd_complete(sk, hdev->id, start_service_discovery() 4381 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, start_service_discovery() 4404 err = mgmt_cmd_complete(sk, hdev->id, start_service_discovery() 4416 err = mgmt_cmd_complete(sk, hdev->id, start_service_discovery() 4456 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, stop_discovery() argument 4469 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, stop_discovery() 4476 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, stop_discovery() 4482 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); stop_discovery() 4504 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0, stop_discovery() 4514 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data, confirm_name() argument 4526 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, confirm_name() 4534 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, confirm_name() 4548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, confirm_name() 4556 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data, block_device() argument 4566 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, block_device() 4580 sk); block_device() 4584 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, block_device() 4592 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data, unblock_device() argument 4602 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, unblock_device() 4616 sk); unblock_device() 4620 err = 
mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, unblock_device() 4628 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, set_device_id() argument 4641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, set_device_id() 4651 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, set_device_id() 4693 new_settings(hdev, match.sk); set_advertising_complete() 4695 if (match.sk) set_advertising_complete() 4696 sock_put(match.sk); set_advertising_complete() 4717 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, set_advertising() argument 4730 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, set_advertising() 4734 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, set_advertising() 4765 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev); set_advertising() 4770 err = new_settings(hdev, sk); set_advertising() 4777 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, set_advertising() 4782 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len); set_advertising() 4813 static int set_static_address(struct sock *sk, struct hci_dev *hdev, set_static_address() argument 4822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, set_static_address() 4826 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, set_static_address() 4831 return mgmt_cmd_status(sk, hdev->id, set_static_address() 4837 return mgmt_cmd_status(sk, hdev->id, set_static_address() 4846 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev); set_static_address() 4850 err = new_settings(hdev, sk); set_static_address() 4857 static int set_scan_params(struct sock *sk, struct hci_dev *hdev, set_scan_params() argument 4867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, set_scan_params() 4873 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, set_scan_params() 4879 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, set_scan_params() 4883 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, set_scan_params() 4891 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, set_scan_params() 4928 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, fast_connectable_complete() 4938 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); fast_connectable_complete() 4939 new_settings(hdev, cmd->sk); fast_connectable_complete() 4948 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, set_fast_connectable() argument 4960 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, set_fast_connectable() 4964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, set_fast_connectable() 4970 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, set_fast_connectable() 4976 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, set_fast_connectable() 4983 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, set_fast_connectable() 4985 new_settings(hdev, sk); set_fast_connectable() 4989 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, set_fast_connectable() 5002 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, set_fast_connectable() 5033 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); set_bredr_complete() 5035 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); set_bredr_complete() 5036 new_settings(hdev, cmd->sk); set_bredr_complete() 5045 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) 
set_bredr() argument 5055 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, set_bredr() 5059 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, set_bredr() 5063 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, set_bredr() 5069 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); set_bredr() 5084 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); set_bredr() 5088 err = new_settings(hdev, sk); set_bredr() 5094 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, set_bredr() 5115 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, set_bredr() 5122 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, set_bredr() 5127 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len); set_bredr() 5171 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, sc_enable_complete() 5193 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev); sc_enable_complete() 5194 new_settings(hdev, cmd->sk); sc_enable_complete() 5202 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, set_secure_conn() argument 5215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, set_secure_conn() 5221 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, set_secure_conn() 5225 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, set_secure_conn() 5247 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); set_secure_conn() 5252 err = new_settings(hdev, sk); set_secure_conn() 5258 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, set_secure_conn() 5267 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); set_secure_conn() 5271 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); set_secure_conn() 5290 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev, set_debug_keys() argument 5300 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, set_debug_keys() 5325 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev); set_debug_keys() 5330 err = new_settings(hdev, sk); set_debug_keys() 5337 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, set_privacy() argument 5347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, set_privacy() 5351 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, set_privacy() 5355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, set_privacy() 5375 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev); set_privacy() 5380 err = new_settings(hdev, sk); set_privacy() 5403 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, load_irks() argument 5415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, load_irks() 5421 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, load_irks() 5429 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, load_irks() 5439 return mgmt_cmd_status(sk, hdev->id, load_irks() 5463 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0); load_irks() 5489 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, load_long_term_keys() argument 5501 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, load_long_term_keys() 5507 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, load_long_term_keys() 5516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, load_long_term_keys() 5526 return mgmt_cmd_status(sk, hdev->id, load_long_term_keys() 5573 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, load_long_term_keys() 5599 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, 
conn_info_cmd_complete() 5661 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, get_conn_info() argument 5677 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, get_conn_info() 5684 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, get_conn_info() 5697 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, get_conn_info() 5704 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, get_conn_info() 5755 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev, get_conn_info() 5773 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, get_conn_info() 5807 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, clock_info_cmd_complete() 5850 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, get_clock_info() argument 5868 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, get_clock_info() 5875 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, get_clock_info() 5885 err = mgmt_cmd_complete(sk, hdev->id, get_clock_info() 5895 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len); get_clock_info() 5985 static void device_added(struct sock *sk, struct hci_dev *hdev, device_added() argument 5994 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk); device_added() 6016 static int add_device(struct sock *sk, struct hci_dev *hdev, add_device() argument 6029 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, add_device() 6034 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, add_device() 6042 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len); add_device() 6092 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); add_device() 6109 static void device_removed(struct sock *sk, struct hci_dev *hdev, device_removed() argument 6117 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk); device_removed() 6139 static int remove_device(struct sock *sk, struct hci_dev *hdev, remove_device() argument 6153 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len); remove_device() 6185 device_removed(sk, hdev, &cp->addr.bdaddr, remove_device() 6216 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); remove_device() 6229 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type); remove_device() 6239 device_removed(sk, hdev, &p->addr, p->addr_type); remove_device() 6266 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, load_conn_param() argument 6276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, load_conn_param() 6283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, load_conn_param() 6292 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, load_conn_param() 6348 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, load_conn_param() 6352 static int set_external_config(struct sock *sk, struct hci_dev *hdev, set_external_config() argument 6362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, set_external_config() 6366 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, set_external_config() 6370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, set_external_config() 6380 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev); set_external_config() 6387 err = new_options(hdev, sk); set_external_config() 6408 static int set_public_address(struct sock *sk, struct hci_dev *hdev, set_public_address() argument 6418 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, 
set_public_address() 6422 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, set_public_address() 6426 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, set_public_address() 6434 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev); set_public_address() 6442 err = new_options(hdev, sk); set_public_address() 6566 err = mgmt_cmd_complete(cmd->sk, hdev->id, read_local_oob_ext_data_complete() 6572 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS); read_local_oob_ext_data_complete() 6576 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk); read_local_oob_ext_data_complete() 6582 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, read_local_ssp_oob_req() argument 6589 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev, read_local_ssp_oob_req() 6610 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, read_local_oob_ext_data() argument 6662 err = read_local_ssp_oob_req(hdev, sk, cp); BIT() 6743 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); 6751 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6758 HCI_MGMT_OOB_DATA_EVENTS, sk); 6781 static int read_adv_features(struct sock *sk, struct hci_dev *hdev, read_adv_features() argument 6793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, read_adv_features() 6832 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, read_adv_features() 6898 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1); add_advertising_complete() 6907 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, add_advertising_complete() 6910 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, add_advertising_complete() 6931 static int add_advertising(struct sock *sk, struct hci_dev *hdev, add_advertising() argument 6948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, add_advertising() 6959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, add_advertising() 6965 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, add_advertising() 6973 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, add_advertising() 6981 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, add_advertising() 7010 advertising_added(sk, hdev, 1); add_advertising() 7018 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, add_advertising() 7026 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data, add_advertising() 7069 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS, remove_advertising_complete() 7077 static int remove_advertising(struct sock *sk, struct hci_dev *hdev, remove_advertising() argument 7092 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, remove_advertising() 7100 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, remove_advertising() 7106 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, remove_advertising() 7116 advertising_removed(sk, hdev, 1); remove_advertising() 7126 err = mgmt_cmd_complete(sk, hdev->id, remove_advertising() 7132 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data, remove_advertising() 7355 new_settings(hdev, match.sk); powered_complete() 7359 if (match.sk) powered_complete() 7360 sock_put(match.sk); powered_complete() 7475 err = new_settings(hdev, match.sk); mgmt_powered() 7477 if (match.sk) mgmt_powered() 7478 sock_put(match.sk); mgmt_powered() 7497 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); mgmt_set_powered_failed() 7733 struct sock **sk = data; disconnect_rsp() local 7737 *sk = 
cmd->sk; disconnect_rsp() 7738 sock_hold(*sk); disconnect_rsp() 7748 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); unpair_device_rsp() 7775 struct sock *sk = NULL; mgmt_device_disconnected() local 7791 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); mgmt_device_disconnected() 7797 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk); mgmt_device_disconnected() 7799 if (sk) mgmt_device_disconnected() 7800 sock_put(sk); mgmt_device_disconnected() 7995 cmd ? cmd->sk : NULL); mgmt_auth_failed() 8024 new_settings(hdev, match.sk); mgmt_auth_enable_complete() 8026 if (match.sk) mgmt_auth_enable_complete() 8027 sock_put(match.sk); mgmt_auth_enable_complete() 8079 new_settings(hdev, match.sk); mgmt_ssp_enable_complete() 8081 if (match.sk) mgmt_ssp_enable_complete() 8082 sock_put(match.sk); mgmt_ssp_enable_complete() 8102 if (match->sk == NULL) { sk_lookup() 8103 match->sk = cmd->sk; sk_lookup() 8104 sock_hold(match->sk); sk_lookup() 8121 if (match.sk) mgmt_set_class_of_dev_complete() 8122 sock_put(match.sk); mgmt_set_class_of_dev_complete() 8149 cmd ? cmd->sk : NULL); mgmt_set_local_name_complete() 2010 set_connectable_update_settings(struct hci_dev *hdev, struct sock *sk, u8 val) set_connectable_update_settings() argument
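The mgmt.c hits above all follow one command-handler shape: validate the request and fail fast with mgmt_cmd_status(), reply synchronously via send_settings_rsp()/mgmt_cmd_complete() when no controller round-trip is needed, and otherwise park the command with mgmt_pending_add() so a completion callback can answer later. A minimal sketch of that shape, assuming a hypothetical MGMT_OP_SET_EXAMPLE opcode and payload struct; everything else is taken from the declarations visible in the listing:

static int set_example(struct sock *sk, struct hci_dev *hdev,
		       void *data, u16 len)
{
	struct mgmt_cp_set_example *cp = data;	/* hypothetical payload */
	struct mgmt_pending_cmd *cmd;
	int err;

	/* Fail fast, before taking any locks. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXAMPLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		/* No HCI traffic needed: reply synchronously. */
		err = send_settings_rsp(sk, MGMT_OP_SET_EXAMPLE, hdev);
		goto unlock;
	}

	/* Asynchronous path: mgmt_pending_add() takes a reference on sk
	 * (sock_hold) that is dropped again in mgmt_pending_free(), so the
	 * completion callback can still reach the requesting socket. */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_EXAMPLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* ... build and send the HCI request here; the request-complete
	 * callback replies via mgmt_cmd_complete()/mgmt_cmd_status(). */
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}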
|
H A D | mgmt_util.c | 60 int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) mgmt_cmd_status() argument 67 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); mgmt_cmd_status() 83 err = sock_queue_rcv_skb(sk, skb); mgmt_cmd_status() 90 int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, mgmt_cmd_complete() argument 98 BT_DBG("sock %p", sk); mgmt_cmd_complete() 117 err = sock_queue_rcv_skb(sk, skb); mgmt_cmd_complete() 130 if (hci_sock_get_channel(cmd->sk) != channel) mgmt_pending_find() 170 struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, mgmt_pending_add() argument 191 cmd->sk = sk; mgmt_pending_add() 192 sock_hold(sk); mgmt_pending_add() 201 sock_put(cmd->sk); mgmt_pending_free()
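mgmt_util.c supplies the two reply shapes used throughout the handlers above: mgmt_cmd_status() carries only an error code, while mgmt_cmd_complete() appends an optional response body; both build an skb and queue it on the requesting socket with sock_queue_rcv_skb(). A sketch with a hypothetical opcode and response struct:

struct mgmt_rp_example {		/* hypothetical response body */
	__le16 value;
} __packed;

static int reply_example(struct sock *sk, u16 index, bool ok)
{
	struct mgmt_rp_example rp = {
		.value = cpu_to_le16(0x1234),
	};

	if (!ok)	/* bare status, no payload */
		return mgmt_cmd_status(sk, index, MGMT_OP_EXAMPLE,
				       MGMT_STATUS_FAILED);

	/* status plus payload; rp is copied into the reply skb */
	return mgmt_cmd_complete(sk, index, MGMT_OP_EXAMPLE,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}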
|
/linux-4.1.27/include/net/ |
H A D | timewait_sock.h | 22 int (*twsk_unique)(struct sock *sk, 24 void (*twsk_destructor)(struct sock *sk); 27 static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) twsk_unique() argument 29 if (sk->sk_prot->twsk_prot->twsk_unique != NULL) twsk_unique() 30 return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp); twsk_unique() 34 static inline void twsk_destructor(struct sock *sk) twsk_destructor() argument 36 BUG_ON(sk == NULL); twsk_destructor() 37 BUG_ON(sk->sk_prot == NULL); twsk_destructor() 38 BUG_ON(sk->sk_prot->twsk_prot == NULL); twsk_destructor() 39 if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) twsk_destructor() 40 sk->sk_prot->twsk_prot->twsk_destructor(sk); twsk_destructor()
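twsk_unique() and twsk_destructor() above are NULL-tolerant wrappers around per-protocol hooks hanging off sk->sk_prot->twsk_prot, so a protocol only fills in what it needs. A sketch of wiring them up, assuming hypothetical myproto_* names and the usual struct inet_timewait_sock object size:

static int myproto_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	/* Return nonzero if sk may take over the binding still held by
	 * the timewait socket sktw (cf. tcp_twsk_unique). */
	return 0;
}

static struct timewait_sock_ops myproto_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
	.twsk_unique	= myproto_twsk_unique,
	/* .twsk_destructor left NULL: twsk_destructor() just skips it;
	 * the BUG_ON()s only insist that the twsk_prot pointer exists. */
};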
|
H A D | llc_c_ev.h | 128 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb); 129 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb); 131 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb); 132 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb); 133 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb); 134 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb); 135 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb); 136 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb); 137 int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb); 138 int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb); 139 int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); 140 int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); 141 int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, 143 int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); 144 int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, 146 int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, 148 int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); 149 int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb); 150 int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); 151 int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb); 152 int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb); 153 int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, 155 int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, 157 int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb); 158 int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb); 159 int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb); 160 int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb); 161 int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb); 163 int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); 164 int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); 165 int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, 167 int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, 169 int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb); 170 int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); 171 int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, 173 int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, 175 int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb); 176 int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); 177 int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb); 178 int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); 179 int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb); 180 int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); 181 int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb); 182 int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); 183 int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb); 184 int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); 185 int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff 
*skb); 186 int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); 187 int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb); 188 int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); 189 int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb); 190 int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb); 191 int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb); 194 int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb); 195 int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb); 196 int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb); 197 int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb); 198 int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb); 199 int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb); 200 int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb); 201 int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb); 202 int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb); 203 int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb); 204 int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb); 205 int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb); 206 int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb); 207 int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb); 208 int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb); 209 int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb); 210 int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb); 211 int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb); 212 int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb); 213 int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk, 215 int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb); 216 int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb); 217 int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb); 219 static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb) llc_conn_space() argument 221 return atomic_read(&sk->sk_rmem_alloc) + skb->truesize < llc_conn_space() 222 (unsigned int)sk->sk_rcvbuf; llc_conn_space()
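All of the predicates above share two shapes: llc_conn_ev_t recognizes an event and llc_conn_ev_qfyr_t qualifies a candidate state transition, both returning 0 on a match/pass. A sketch of each with hypothetical my_* names; llc_conn_space() is the inline at the end of the listing:

static int my_ev_always(struct sock *sk, struct sk_buff *skb)
{
	return 0;	/* 0 = event matches, nonzero = no match */
}

static int my_qfyr_has_rx_space(struct sock *sk, struct sk_buff *skb)
{
	/* llc_conn_space(): sk_rmem_alloc + skb->truesize < sk_rcvbuf */
	return llc_conn_space(sk, skb) ? 0 : 1;
}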
|
H A D | llc_c_ac.h | 90 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb); 92 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb); 93 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb); 94 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb); 95 int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb); 96 int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb); 97 int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb); 98 int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb); 99 int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk, 101 int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, 103 int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb); 104 int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb); 105 int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); 106 int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb); 107 int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb); 108 int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb); 109 int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb); 110 int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); 111 int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); 112 int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, 114 int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); 115 int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb); 116 int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); 117 int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); 118 int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb); 119 int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); 120 int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); 121 int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb); 122 int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); 123 int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb); 124 int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); 125 int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb); 126 int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); 127 int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb); 128 int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb); 129 int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb); 130 int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb); 131 int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb); 132 int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb); 133 int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb); 134 int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb); 135 int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, 137 int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb); 138 int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb); 139 int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb); 140 int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb); 141 int llc_conn_ac_stop_other_timers(struct 
sock *sk, struct sk_buff *skb); 142 int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb); 143 int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb); 144 int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb); 145 int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb); 146 int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb); 147 int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb); 148 int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb); 149 int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk, 151 int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb); 152 int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb); 153 int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb); 154 int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb); 155 int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb); 156 int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb); 157 int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb); 158 int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb); 159 int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb); 160 int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb); 161 int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb); 162 int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb); 163 int llc_conn_disc(struct sock *sk, struct sk_buff *skb); 164 int llc_conn_reset(struct sock *sk, struct sk_buff *skb); 165 int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb); 167 int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb); 168 int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb); 169 int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb); 170 int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb); 171 int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb); 172 int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb); 179 void llc_conn_set_p_flag(struct sock *sk, u8 value);
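Every action above has the llc_conn_action_t shape, which lets the connection state tables attach an ordered list of actions to each transition and run them in sequence. A hypothetical list using only actions declared in this header; the NULL-terminator convention is assumed from the llc_c_st.c transition tables:

static const llc_conn_action_t my_rx_i_actions[] = {
	llc_conn_ac_upd_nr_received,	/* absorb the peer's N(R) */
	llc_conn_ac_upd_p_flag,
	llc_conn_ac_send_ack_if_needed,	/* acknowledge when due */
	NULL,				/* end of action list */
};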
|
H A D | inet_connection_sock.h | 39 int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl); 40 void (*send_check)(struct sock *sk, struct sk_buff *skb); 41 int (*rebuild_header)(struct sock *sk); 42 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); 43 int (*conn_request)(struct sock *sk, struct sk_buff *skb); 44 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, 50 int (*setsockopt)(struct sock *sk, int level, int optname, 52 int (*getsockopt)(struct sock *sk, int level, int optname, 55 int (*compat_setsockopt)(struct sock *sk, 58 int (*compat_getsockopt)(struct sock *sk, 62 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); 63 int (*bind_conflict)(const struct sock *sk, 65 void (*mtu_reduced)(struct sock *sk); 100 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); 145 static inline struct inet_connection_sock *inet_csk(const struct sock *sk) inet_csk() argument 147 return (struct inet_connection_sock *)sk; inet_csk() 150 static inline void *inet_csk_ca(const struct sock *sk) inet_csk_ca() argument 152 return (void *)inet_csk(sk)->icsk_ca_priv; inet_csk_ca() 155 struct sock *inet_csk_clone_lock(const struct sock *sk, 166 void inet_csk_init_xmit_timers(struct sock *sk, 170 void inet_csk_clear_xmit_timers(struct sock *sk); 172 static inline void inet_csk_schedule_ack(struct sock *sk) inet_csk_schedule_ack() argument 174 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; inet_csk_schedule_ack() 177 static inline int inet_csk_ack_scheduled(const struct sock *sk) inet_csk_ack_scheduled() argument 179 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; inet_csk_ack_scheduled() 182 static inline void inet_csk_delack_init(struct sock *sk) inet_csk_delack_init() argument 184 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); inet_csk_delack_init() 187 void inet_csk_delete_keepalive_timer(struct sock *sk); 188 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout); 194 static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) inet_csk_clear_xmit_timer() argument 196 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_clear_xmit_timer() 201 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); inet_csk_clear_xmit_timer() 206 sk_stop_timer(sk, &icsk->icsk_delack_timer); inet_csk_clear_xmit_timer() 219 static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, inet_csk_reset_xmit_timer() argument 223 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_reset_xmit_timer() 227 pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", inet_csk_reset_xmit_timer() 228 sk, what, when, current_text_addr()); inet_csk_reset_xmit_timer() 237 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); inet_csk_reset_xmit_timer() 241 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); inet_csk_reset_xmit_timer() 259 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); 261 struct request_sock *inet_csk_search_req(struct sock *sk, 265 int inet_csk_bind_conflict(const struct sock *sk, 267 int inet_csk_get_port(struct sock *sk, unsigned short snum); 269 struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, 271 struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk, 274 static inline void inet_csk_reqsk_queue_add(struct sock *sk, inet_csk_reqsk_queue_add() argument 278 reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child); inet_csk_reqsk_queue_add() 281 void 
inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, 284 static inline void inet_csk_reqsk_queue_added(struct sock *sk, inet_csk_reqsk_queue_added() argument 287 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); inet_csk_reqsk_queue_added() 290 static inline int inet_csk_reqsk_queue_len(const struct sock *sk) inet_csk_reqsk_queue_len() argument 292 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); inet_csk_reqsk_queue_len() 295 static inline int inet_csk_reqsk_queue_young(const struct sock *sk) inet_csk_reqsk_queue_young() argument 297 return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue); inet_csk_reqsk_queue_young() 300 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) inet_csk_reqsk_queue_is_full() argument 302 return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); inet_csk_reqsk_queue_is_full() 305 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); 307 void inet_csk_destroy_sock(struct sock *sk); 308 void inet_csk_prepare_forced_close(struct sock *sk); 313 static inline unsigned int inet_csk_listen_poll(const struct sock *sk) inet_csk_listen_poll() argument 315 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? inet_csk_listen_poll() 319 int inet_csk_listen_start(struct sock *sk, const int nr_table_entries); 320 void inet_csk_listen_stop(struct sock *sk); 322 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); 324 int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, 326 int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, 329 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
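The icsk timer helpers above follow one protocol: inet_csk_reset_xmit_timer() stamps icsk_timeout/icsk_ack.timeout, clamps the timeout against max_when (the pr_debug path in the listing), and re-arms the right timer through sk_reset_timer(), which holds a socket reference while the timer is pending; inet_csk_clear_xmit_timer() undoes it. A sketch for the retransmit timer, assuming a hypothetical per-protocol my_rto value; the 120 * HZ cap stands in for a TCP_RTO_MAX-style bound:

static void my_arm_retransmit(struct sock *sk, unsigned long my_rto)
{
	/* ICSK_TIME_RETRANS selects icsk_retransmit_timer; my_rto is
	 * clamped against the last argument inside the helper. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, my_rto, 120 * HZ);
}

static void my_on_all_acked(struct sock *sk)
{
	/* Nothing left in flight: stop the pending retransmit timer
	 * (sk_stop_timer drops the timer's socket reference). */
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
}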
|
H A D | sock.h | 97 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ 102 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) SOCK_DEBUG() argument 239 * @sk_napi_id: id of the last napi context to receive data for sk 435 void (*sk_state_change)(struct sock *sk); 436 void (*sk_data_ready)(struct sock *sk); 437 void (*sk_write_space)(struct sock *sk); 438 void (*sk_error_report)(struct sock *sk); 439 int (*sk_backlog_rcv)(struct sock *sk, 441 void (*sk_destruct)(struct sock *sk); 444 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) 446 #define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) 447 #define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) 460 static inline int sk_peek_offset(struct sock *sk, int flags) sk_peek_offset() argument 462 if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) sk_peek_offset() 463 return sk->sk_peek_off; sk_peek_offset() 468 static inline void sk_peek_offset_bwd(struct sock *sk, int val) sk_peek_offset_bwd() argument 470 if (sk->sk_peek_off >= 0) { sk_peek_offset_bwd() 471 if (sk->sk_peek_off >= val) sk_peek_offset_bwd() 472 sk->sk_peek_off -= val; sk_peek_offset_bwd() 474 sk->sk_peek_off = 0; sk_peek_offset_bwd() 478 static inline void sk_peek_offset_fwd(struct sock *sk, int val) sk_peek_offset_fwd() argument 480 if (sk->sk_peek_off >= 0) sk_peek_offset_fwd() 481 sk->sk_peek_off += val; sk_peek_offset_fwd() 512 static inline struct sock *sk_next(const struct sock *sk) sk_next() argument 514 return sk->sk_node.next ? sk_next() 515 hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; sk_next() 518 static inline struct sock *sk_nulls_next(const struct sock *sk) sk_nulls_next() argument 520 return (!is_a_nulls(sk->sk_nulls_node.next)) ? sk_nulls_next() 521 hlist_nulls_entry(sk->sk_nulls_node.next, sk_nulls_next() 526 static inline bool sk_unhashed(const struct sock *sk) sk_unhashed() argument 528 return hlist_unhashed(&sk->sk_node); sk_unhashed() 531 static inline bool sk_hashed(const struct sock *sk) sk_hashed() argument 533 return !sk_unhashed(sk); sk_hashed() 546 static inline void __sk_del_node(struct sock *sk) __sk_del_node() argument 548 __hlist_del(&sk->sk_node); __sk_del_node() 552 static inline bool __sk_del_node_init(struct sock *sk) __sk_del_node_init() argument 554 if (sk_hashed(sk)) { __sk_del_node_init() 555 __sk_del_node(sk); __sk_del_node_init() 556 sk_node_init(&sk->sk_node); __sk_del_node_init() 563 when sk is ALREADY grabbed f.e. 
it is found in hash table 568 static inline void sock_hold(struct sock *sk) sock_hold() argument 570 atomic_inc(&sk->sk_refcnt); sock_hold() 576 static inline void __sock_put(struct sock *sk) __sock_put() argument 578 atomic_dec(&sk->sk_refcnt); __sock_put() 581 static inline bool sk_del_node_init(struct sock *sk) sk_del_node_init() argument 583 bool rc = __sk_del_node_init(sk); sk_del_node_init() 587 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); sk_del_node_init() 588 __sock_put(sk); sk_del_node_init() 592 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) 594 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) __sk_nulls_del_node_init_rcu() argument 596 if (sk_hashed(sk)) { __sk_nulls_del_node_init_rcu() 597 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); __sk_nulls_del_node_init_rcu() 603 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) sk_nulls_del_node_init_rcu() argument 605 bool rc = __sk_nulls_del_node_init_rcu(sk); sk_nulls_del_node_init_rcu() 609 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); sk_nulls_del_node_init_rcu() 610 __sock_put(sk); sk_nulls_del_node_init_rcu() 615 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) __sk_add_node() argument 617 hlist_add_head(&sk->sk_node, list); __sk_add_node() 620 static inline void sk_add_node(struct sock *sk, struct hlist_head *list) sk_add_node() argument 622 sock_hold(sk); sk_add_node() 623 __sk_add_node(sk, list); sk_add_node() 626 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) sk_add_node_rcu() argument 628 sock_hold(sk); sk_add_node_rcu() 629 hlist_add_head_rcu(&sk->sk_node, list); sk_add_node_rcu() 632 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) __sk_nulls_add_node_rcu() argument 634 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); __sk_nulls_add_node_rcu() 637 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) sk_nulls_add_node_rcu() argument 639 sock_hold(sk); sk_nulls_add_node_rcu() 640 __sk_nulls_add_node_rcu(sk, list); sk_nulls_add_node_rcu() 643 static inline void __sk_del_bind_node(struct sock *sk) __sk_del_bind_node() argument 645 __hlist_del(&sk->sk_bind_node); __sk_del_bind_node() 648 static inline void sk_add_bind_node(struct sock *sk, sk_add_bind_node() argument 651 hlist_add_head(&sk->sk_bind_node, list); sk_add_bind_node() 686 static inline struct user_namespace *sk_user_ns(struct sock *sk) sk_user_ns() argument 692 return sk->sk_socket->file->f_cred->user_ns; sk_user_ns() 706 SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */ 733 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) sock_set_flag() argument 735 __set_bit(flag, &sk->sk_flags); sock_set_flag() 738 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) sock_reset_flag() argument 740 __clear_bit(flag, &sk->sk_flags); sock_reset_flag() 743 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) sock_flag() argument 745 return test_bit(flag, &sk->sk_flags); sock_flag() 763 static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask) sk_gfp_atomic() argument 765 return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); sk_gfp_atomic() 768 static inline void sk_acceptq_removed(struct sock *sk) sk_acceptq_removed() argument 770 sk->sk_ack_backlog--; sk_acceptq_removed() 773 static inline void sk_acceptq_added(struct sock *sk) sk_acceptq_added() argument 775 sk->sk_ack_backlog++; sk_acceptq_added() 778 static 
inline bool sk_acceptq_is_full(const struct sock *sk) sk_acceptq_is_full() argument 780 return sk->sk_ack_backlog > sk->sk_max_ack_backlog; sk_acceptq_is_full() 786 static inline int sk_stream_min_wspace(const struct sock *sk) sk_stream_min_wspace() argument 788 return sk->sk_wmem_queued >> 1; sk_stream_min_wspace() 791 static inline int sk_stream_wspace(const struct sock *sk) sk_stream_wspace() argument 793 return sk->sk_sndbuf - sk->sk_wmem_queued; sk_stream_wspace() 796 void sk_stream_write_space(struct sock *sk); 799 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) __sk_add_backlog() argument 804 if (!sk->sk_backlog.tail) __sk_add_backlog() 805 sk->sk_backlog.head = skb; __sk_add_backlog() 807 sk->sk_backlog.tail->next = skb; __sk_add_backlog() 809 sk->sk_backlog.tail = skb; __sk_add_backlog() 818 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) sk_rcvqueues_full() argument 820 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); sk_rcvqueues_full() 826 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, sk_add_backlog() argument 829 if (sk_rcvqueues_full(sk, limit)) sk_add_backlog() 837 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) sk_add_backlog() 840 __sk_add_backlog(sk, skb); sk_add_backlog() 841 sk->sk_backlog.len += skb->truesize; sk_add_backlog() 845 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); 847 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) sk_backlog_rcv() argument 850 return __sk_backlog_rcv(sk, skb); sk_backlog_rcv() 852 return sk->sk_backlog_rcv(sk, skb); sk_backlog_rcv() 855 static inline void sk_incoming_cpu_update(struct sock *sk) sk_incoming_cpu_update() argument 857 sk->sk_incoming_cpu = raw_smp_processor_id(); sk_incoming_cpu_update() 872 static inline void sock_rps_record_flow(const struct sock *sk) sock_rps_record_flow() argument 875 sock_rps_record_flow_hash(sk->sk_rxhash); sock_rps_record_flow() 879 static inline void sock_rps_save_rxhash(struct sock *sk, sock_rps_save_rxhash() argument 883 if (unlikely(sk->sk_rxhash != skb->hash)) sock_rps_save_rxhash() 884 sk->sk_rxhash = skb->hash; sock_rps_save_rxhash() 888 static inline void sock_rps_reset_rxhash(struct sock *sk) sock_rps_reset_rxhash() argument 891 sk->sk_rxhash = 0; sock_rps_reset_rxhash() 908 int sk_stream_wait_connect(struct sock *sk, long *timeo_p); 909 int sk_stream_wait_memory(struct sock *sk, long *timeo_p); 910 void sk_stream_wait_close(struct sock *sk, long timeo_p); 911 int sk_stream_error(struct sock *sk, int flags, int err); 912 void sk_stream_kill_queues(struct sock *sk); 913 void sk_set_memalloc(struct sock *sk); 914 void sk_clear_memalloc(struct sock *sk); 916 int sk_wait_data(struct sock *sk, long *timeo); 928 static inline void sk_prot_clear_nulls(struct sock *sk, int size) sk_prot_clear_nulls() argument 931 memset(sk, 0, offsetof(struct sock, sk_node.next)); sk_prot_clear_nulls() 932 memset(&sk->sk_node.pprev, 0, sk_prot_clear_nulls() 941 void (*close)(struct sock *sk, 943 int (*connect)(struct sock *sk, 946 int (*disconnect)(struct sock *sk, int flags); 948 struct sock * (*accept)(struct sock *sk, int flags, int *err); 950 int (*ioctl)(struct sock *sk, int cmd, 952 int (*init)(struct sock *sk); 953 void (*destroy)(struct sock *sk); 954 void (*shutdown)(struct sock *sk, int how); 955 int (*setsockopt)(struct sock *sk, int level, 958 int (*getsockopt)(struct sock *sk, int level, 962 int (*compat_setsockopt)(struct sock *sk, 966 
int (*compat_getsockopt)(struct sock *sk, 970 int (*compat_ioctl)(struct sock *sk, 973 int (*sendmsg)(struct sock *sk, struct msghdr *msg, 975 int (*recvmsg)(struct sock *sk, struct msghdr *msg, 978 int (*sendpage)(struct sock *sk, struct page *page, 980 int (*bind)(struct sock *sk, 983 int (*backlog_rcv) (struct sock *sk, 986 void (*release_cb)(struct sock *sk); 988 /* Keeping track of sk's, looking them up, and port selection methods. */ 989 void (*hash)(struct sock *sk); 990 void (*unhash)(struct sock *sk); 991 void (*rehash)(struct sock *sk); 992 int (*get_port)(struct sock *sk, unsigned short snum); 993 void (*clear_sk)(struct sock *sk, int size); 1000 bool (*stream_memory_free)(const struct sock *sk); 1002 void (*enter_memory_pressure)(struct sock *sk); 1092 static inline void sk_refcnt_debug_inc(struct sock *sk) sk_refcnt_debug_inc() argument 1094 atomic_inc(&sk->sk_prot->socks); sk_refcnt_debug_inc() 1097 static inline void sk_refcnt_debug_dec(struct sock *sk) sk_refcnt_debug_dec() argument 1099 atomic_dec(&sk->sk_prot->socks); sk_refcnt_debug_dec() 1101 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); sk_refcnt_debug_dec() 1104 static inline void sk_refcnt_debug_release(const struct sock *sk) sk_refcnt_debug_release() argument 1106 if (atomic_read(&sk->sk_refcnt) != 1) sk_refcnt_debug_release() 1108 sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt)); sk_refcnt_debug_release() 1111 #define sk_refcnt_debug_inc(sk) do { } while (0) 1112 #define sk_refcnt_debug_dec(sk) do { } while (0) 1113 #define sk_refcnt_debug_release(sk) do { } while (0) 1133 static inline bool sk_stream_memory_free(const struct sock *sk) sk_stream_memory_free() argument 1135 if (sk->sk_wmem_queued >= sk->sk_sndbuf) sk_stream_memory_free() 1138 return sk->sk_prot->stream_memory_free ? 
sk_stream_memory_free() 1139 sk->sk_prot->stream_memory_free(sk) : true; sk_stream_memory_free() 1142 static inline bool sk_stream_is_writeable(const struct sock *sk) sk_stream_is_writeable() argument 1144 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sk_stream_is_writeable() 1145 sk_stream_memory_free(sk); sk_stream_is_writeable() 1149 static inline bool sk_has_memory_pressure(const struct sock *sk) sk_has_memory_pressure() argument 1151 return sk->sk_prot->memory_pressure != NULL; sk_has_memory_pressure() 1154 static inline bool sk_under_memory_pressure(const struct sock *sk) sk_under_memory_pressure() argument 1156 if (!sk->sk_prot->memory_pressure) sk_under_memory_pressure() 1159 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) sk_under_memory_pressure() 1160 return !!sk->sk_cgrp->memory_pressure; sk_under_memory_pressure() 1162 return !!*sk->sk_prot->memory_pressure; sk_under_memory_pressure() 1165 static inline void sk_leave_memory_pressure(struct sock *sk) sk_leave_memory_pressure() argument 1167 int *memory_pressure = sk->sk_prot->memory_pressure; sk_leave_memory_pressure() 1175 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { sk_leave_memory_pressure() 1176 struct cg_proto *cg_proto = sk->sk_cgrp; sk_leave_memory_pressure() 1177 struct proto *prot = sk->sk_prot; sk_leave_memory_pressure() 1185 static inline void sk_enter_memory_pressure(struct sock *sk) sk_enter_memory_pressure() argument 1187 if (!sk->sk_prot->enter_memory_pressure) sk_enter_memory_pressure() 1190 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { sk_enter_memory_pressure() 1191 struct cg_proto *cg_proto = sk->sk_cgrp; sk_enter_memory_pressure() 1192 struct proto *prot = sk->sk_prot; sk_enter_memory_pressure() 1198 sk->sk_prot->enter_memory_pressure(sk); sk_enter_memory_pressure() 1201 static inline long sk_prot_mem_limits(const struct sock *sk, int index) sk_prot_mem_limits() argument 1203 long *prot = sk->sk_prot->sysctl_mem; sk_prot_mem_limits() 1204 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) sk_prot_mem_limits() 1205 prot = sk->sk_cgrp->sysctl_mem; sk_prot_mem_limits() 1227 sk_memory_allocated(const struct sock *sk) sk_memory_allocated() argument 1229 struct proto *prot = sk->sk_prot; sk_memory_allocated() 1231 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) sk_memory_allocated() 1232 return page_counter_read(&sk->sk_cgrp->memory_allocated); sk_memory_allocated() 1238 sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) sk_memory_allocated_add() argument 1240 struct proto *prot = sk->sk_prot; sk_memory_allocated_add() 1242 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { sk_memory_allocated_add() 1243 memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); sk_memory_allocated_add() 1246 return page_counter_read(&sk->sk_cgrp->memory_allocated); sk_memory_allocated_add() 1253 sk_memory_allocated_sub(struct sock *sk, int amt) sk_memory_allocated_sub() argument 1255 struct proto *prot = sk->sk_prot; sk_memory_allocated_sub() 1257 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) sk_memory_allocated_sub() 1258 memcg_memory_allocated_sub(sk->sk_cgrp, amt); sk_memory_allocated_sub() 1263 static inline void sk_sockets_allocated_dec(struct sock *sk) sk_sockets_allocated_dec() argument 1265 struct proto *prot = sk->sk_prot; sk_sockets_allocated_dec() 1267 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { sk_sockets_allocated_dec() 1268 struct cg_proto *cg_proto = sk->sk_cgrp; sk_sockets_allocated_dec() 1277 static inline void sk_sockets_allocated_inc(struct sock *sk) 
sk_sockets_allocated_inc() argument 1279 struct proto *prot = sk->sk_prot; sk_sockets_allocated_inc() 1281 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { sk_sockets_allocated_inc() 1282 struct cg_proto *cg_proto = sk->sk_cgrp; sk_sockets_allocated_inc() 1292 sk_sockets_allocated_read_positive(struct sock *sk) sk_sockets_allocated_read_positive() argument 1294 struct proto *prot = sk->sk_prot; sk_sockets_allocated_read_positive() 1296 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) sk_sockets_allocated_read_positive() 1297 return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated); sk_sockets_allocated_read_positive() 1338 static inline void __sk_prot_rehash(struct sock *sk) __sk_prot_rehash() argument 1340 sk->sk_prot->unhash(sk); __sk_prot_rehash() 1341 sk->sk_prot->hash(sk); __sk_prot_rehash() 1344 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size); 1379 int __sk_mem_schedule(struct sock *sk, int size, int kind); 1380 void __sk_mem_reclaim(struct sock *sk); 1392 static inline bool sk_has_account(struct sock *sk) sk_has_account() argument 1395 return !!sk->sk_prot->memory_allocated; sk_has_account() 1398 static inline bool sk_wmem_schedule(struct sock *sk, int size) sk_wmem_schedule() argument 1400 if (!sk_has_account(sk)) sk_wmem_schedule() 1402 return size <= sk->sk_forward_alloc || sk_wmem_schedule() 1403 __sk_mem_schedule(sk, size, SK_MEM_SEND); sk_wmem_schedule() 1407 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) sk_rmem_schedule() argument 1409 if (!sk_has_account(sk)) sk_rmem_schedule() 1411 return size<= sk->sk_forward_alloc || sk_rmem_schedule() 1412 __sk_mem_schedule(sk, size, SK_MEM_RECV) || sk_rmem_schedule() 1416 static inline void sk_mem_reclaim(struct sock *sk) sk_mem_reclaim() argument 1418 if (!sk_has_account(sk)) sk_mem_reclaim() 1420 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) sk_mem_reclaim() 1421 __sk_mem_reclaim(sk); sk_mem_reclaim() 1424 static inline void sk_mem_reclaim_partial(struct sock *sk) sk_mem_reclaim_partial() argument 1426 if (!sk_has_account(sk)) sk_mem_reclaim_partial() 1428 if (sk->sk_forward_alloc > SK_MEM_QUANTUM) sk_mem_reclaim_partial() 1429 __sk_mem_reclaim(sk); sk_mem_reclaim_partial() 1432 static inline void sk_mem_charge(struct sock *sk, int size) sk_mem_charge() argument 1434 if (!sk_has_account(sk)) sk_mem_charge() 1436 sk->sk_forward_alloc -= size; sk_mem_charge() 1439 static inline void sk_mem_uncharge(struct sock *sk, int size) sk_mem_uncharge() argument 1441 if (!sk_has_account(sk)) sk_mem_uncharge() 1443 sk->sk_forward_alloc += size; sk_mem_uncharge() 1446 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) sk_wmem_free_skb() argument 1448 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); sk_wmem_free_skb() 1449 sk->sk_wmem_queued -= skb->truesize; sk_wmem_free_skb() 1450 sk_mem_uncharge(sk, skb->truesize); sk_wmem_free_skb() 1467 #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) 1469 static inline void sock_release_ownership(struct sock *sk) sock_release_ownership() argument 1471 sk->sk_lock.owned = 0; sock_release_ownership() 1481 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ 1483 sk->sk_lock.owned = 0; \ 1484 init_waitqueue_head(&sk->sk_lock.wq); \ 1485 spin_lock_init(&(sk)->sk_lock.slock); \ 1486 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ 1487 sizeof((sk)->sk_lock)); \ 1488 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ 1490 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ 1493 void lock_sock_nested(struct sock *sk, int subclass); 
1495 static inline void lock_sock(struct sock *sk) lock_sock() argument 1497 lock_sock_nested(sk, 0); lock_sock() 1500 void release_sock(struct sock *sk); 1509 bool lock_sock_fast(struct sock *sk); 1512 * @sk: socket 1518 static inline void unlock_sock_fast(struct sock *sk, bool slow) unlock_sock_fast() argument 1521 release_sock(sk); unlock_sock_fast() 1523 spin_unlock_bh(&sk->sk_lock.slock); unlock_sock_fast() 1529 void sk_free(struct sock *sk); 1530 void sk_release_kernel(struct sock *sk); 1531 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); 1533 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 1550 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, 1552 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 1555 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); 1556 void sock_kfree_s(struct sock *sk, void *mem, int size); 1557 void sock_kzfree_s(struct sock *sk, void *mem, int size); 1558 void sk_send_sigurg(struct sock *sk); 1598 void sk_common_release(struct sock *sk); 1605 void sock_init_data(struct socket *sock, struct sock *sk); 1633 static inline void sock_put(struct sock *sk) sock_put() argument 1635 if (atomic_dec_and_test(&sk->sk_refcnt)) sock_put() 1636 sk_free(sk); sock_put() 1641 void sock_gen_put(struct sock *sk); 1643 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested); 1645 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) sk_tx_queue_set() argument 1647 sk->sk_tx_queue_mapping = tx_queue; sk_tx_queue_set() 1650 static inline void sk_tx_queue_clear(struct sock *sk) sk_tx_queue_clear() argument 1652 sk->sk_tx_queue_mapping = -1; sk_tx_queue_clear() 1655 static inline int sk_tx_queue_get(const struct sock *sk) sk_tx_queue_get() argument 1657 return sk ? 
sk->sk_tx_queue_mapping : -1; sk_tx_queue_get() 1660 static inline void sk_set_socket(struct sock *sk, struct socket *sock) sk_set_socket() argument 1662 sk_tx_queue_clear(sk); sk_set_socket() 1663 sk->sk_socket = sock; sk_set_socket() 1666 static inline wait_queue_head_t *sk_sleep(struct sock *sk) sk_sleep() argument 1669 return &rcu_dereference_raw(sk->sk_wq)->wait; sk_sleep() 1678 static inline void sock_orphan(struct sock *sk) sock_orphan() argument 1680 write_lock_bh(&sk->sk_callback_lock); sock_orphan() 1681 sock_set_flag(sk, SOCK_DEAD); sock_orphan() 1682 sk_set_socket(sk, NULL); sock_orphan() 1683 sk->sk_wq = NULL; sock_orphan() 1684 write_unlock_bh(&sk->sk_callback_lock); sock_orphan() 1687 static inline void sock_graft(struct sock *sk, struct socket *parent) sock_graft() argument 1689 write_lock_bh(&sk->sk_callback_lock); sock_graft() 1690 sk->sk_wq = parent->wq; sock_graft() 1691 parent->sk = sk; sock_graft() 1692 sk_set_socket(sk, parent); sock_graft() 1693 security_sock_graft(sk, parent); sock_graft() 1694 write_unlock_bh(&sk->sk_callback_lock); sock_graft() 1697 kuid_t sock_i_uid(struct sock *sk); 1698 unsigned long sock_i_ino(struct sock *sk); 1701 __sk_dst_get(struct sock *sk) __sk_dst_get() argument 1703 return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) || __sk_dst_get() 1704 lockdep_is_held(&sk->sk_lock.slock)); __sk_dst_get() 1708 sk_dst_get(struct sock *sk) sk_dst_get() argument 1713 dst = rcu_dereference(sk->sk_dst_cache); sk_dst_get() 1720 static inline void dst_negative_advice(struct sock *sk) dst_negative_advice() argument 1722 struct dst_entry *ndst, *dst = __sk_dst_get(sk); dst_negative_advice() 1728 rcu_assign_pointer(sk->sk_dst_cache, ndst); dst_negative_advice() 1729 sk_tx_queue_clear(sk); dst_negative_advice() 1735 __sk_dst_set(struct sock *sk, struct dst_entry *dst) __sk_dst_set() argument 1739 sk_tx_queue_clear(sk); __sk_dst_set() 1741 * This can be called while sk is owned by the caller only, __sk_dst_set() 1744 old_dst = rcu_dereference_raw(sk->sk_dst_cache); __sk_dst_set() 1745 rcu_assign_pointer(sk->sk_dst_cache, dst); __sk_dst_set() 1750 sk_dst_set(struct sock *sk, struct dst_entry *dst) sk_dst_set() argument 1754 sk_tx_queue_clear(sk); sk_dst_set() 1755 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); sk_dst_set() 1760 __sk_dst_reset(struct sock *sk) __sk_dst_reset() argument 1762 __sk_dst_set(sk, NULL); __sk_dst_reset() 1766 sk_dst_reset(struct sock *sk) sk_dst_reset() argument 1768 sk_dst_set(sk, NULL); sk_dst_reset() 1771 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); 1773 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); 1775 bool sk_mc_loop(struct sock *sk); 1777 static inline bool sk_can_gso(const struct sock *sk) sk_can_gso() argument 1779 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); sk_can_gso() 1782 void sk_setup_caps(struct sock *sk, struct dst_entry *dst); 1784 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) sk_nocaps_add() argument 1786 sk->sk_route_nocaps |= flags; sk_nocaps_add() 1787 sk->sk_route_caps &= ~flags; sk_nocaps_add() 1790 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, skb_do_copy_data_nocache() argument 1799 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { skb_do_copy_data_nocache() 1808 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, skb_add_data_nocache() argument 1813 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), 
skb_add_data_nocache() 1821 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, skb_copy_to_page_nocache() argument 1828 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, skb_copy_to_page_nocache() 1836 sk->sk_wmem_queued += copy; skb_copy_to_page_nocache() 1837 sk_mem_charge(sk, copy); skb_copy_to_page_nocache() 1843 * @sk: socket 1847 static inline int sk_wmem_alloc_get(const struct sock *sk) sk_wmem_alloc_get() argument 1849 return atomic_read(&sk->sk_wmem_alloc) - 1; sk_wmem_alloc_get() 1854 * @sk: socket 1858 static inline int sk_rmem_alloc_get(const struct sock *sk) sk_rmem_alloc_get() argument 1860 return atomic_read(&sk->sk_rmem_alloc); sk_rmem_alloc_get() 1865 * @sk: socket 1869 static inline bool sk_has_allocations(const struct sock *sk) sk_has_allocations() argument 1871 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); sk_has_allocations() 1894 * wq = rcu_dereference(sk->sk_wq); 1939 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) skb_set_hash_from_sk() argument 1941 if (sk->sk_txhash) { skb_set_hash_from_sk() 1943 skb->hash = sk->sk_txhash; skb_set_hash_from_sk() 1956 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) skb_set_owner_w() argument 1959 skb->sk = sk; skb_set_owner_w() 1961 skb_set_hash_from_sk(skb, sk); skb_set_owner_w() 1963 * We used to take a refcount on sk, but following operation skb_set_owner_w() 1967 atomic_add(skb->truesize, &sk->sk_wmem_alloc); skb_set_owner_w() 1970 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) skb_set_owner_r() argument 1973 skb->sk = sk; skb_set_owner_r() 1975 atomic_add(skb->truesize, &sk->sk_rmem_alloc); skb_set_owner_r() 1976 sk_mem_charge(sk, skb->truesize); skb_set_owner_r() 1979 void sk_reset_timer(struct sock *sk, struct timer_list *timer, 1982 void sk_stop_timer(struct sock *sk, struct timer_list *timer); 1984 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 1986 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); 1987 struct sk_buff *sock_dequeue_err_skb(struct sock *sk); 1993 static inline int sock_error(struct sock *sk) sock_error() argument 1996 if (likely(!sk->sk_err)) sock_error() 1998 err = xchg(&sk->sk_err, 0); sock_error() 2002 static inline unsigned long sock_wspace(struct sock *sk) sock_wspace() argument 2006 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { sock_wspace() 2007 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); sock_wspace() 2014 static inline void sk_wake_async(struct sock *sk, int how, int band) sk_wake_async() argument 2016 if (sock_flag(sk, SOCK_FASYNC)) sk_wake_async() 2017 sock_wake_async(sk->sk_socket, how, band); sk_wake_async() 2030 static inline void sk_stream_moderate_sndbuf(struct sock *sk) sk_stream_moderate_sndbuf() argument 2032 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { sk_stream_moderate_sndbuf() 2033 sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); sk_stream_moderate_sndbuf() 2034 sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF); sk_stream_moderate_sndbuf() 2038 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); 2042 * @sk: socket 2047 static inline struct page_frag *sk_page_frag(struct sock *sk) sk_page_frag() argument 2049 if (sk->sk_allocation & __GFP_WAIT) sk_page_frag() 2052 return &sk->sk_frag; sk_page_frag() 2055 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); 2060 static inline bool sock_writeable(const struct sock *sk) sock_writeable() argument 2062 
return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); sock_writeable() 2070 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) sock_rcvtimeo() argument 2072 return noblock ? 0 : sk->sk_rcvtimeo; sock_rcvtimeo() 2075 static inline long sock_sndtimeo(const struct sock *sk, bool noblock) sock_sndtimeo() argument 2077 return noblock ? 0 : sk->sk_sndtimeo; sock_sndtimeo() 2080 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) sock_rcvlowat() argument 2082 return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1; sock_rcvlowat() 2111 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) sock_skb_set_dropcount() argument 2113 SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops); sock_skb_set_dropcount() 2116 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, 2118 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, 2122 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) sock_recv_timestamp() argument 2133 if (sock_flag(sk, SOCK_RCVTSTAMP) || sock_recv_timestamp() 2134 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || sock_recv_timestamp() 2135 (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || sock_recv_timestamp() 2137 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) sock_recv_timestamp() 2138 __sock_recv_timestamp(msg, sk, skb); sock_recv_timestamp() 2140 sk->sk_stamp = kt; sock_recv_timestamp() 2142 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) sock_recv_timestamp() 2143 __sock_recv_wifi_status(msg, sk, skb); sock_recv_timestamp() 2146 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, 2149 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, sock_recv_ts_and_drops() argument 2157 if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY) sock_recv_ts_and_drops() 2158 __sock_recv_ts_and_drops(msg, sk, skb); sock_recv_ts_and_drops() 2160 sk->sk_stamp = skb->tstamp; sock_recv_ts_and_drops() 2163 void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); 2167 * @sk: socket sending this packet 2172 static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) sock_tx_timestamp() argument 2174 if (unlikely(sk->sk_tsflags)) sock_tx_timestamp() 2175 __sock_tx_timestamp(sk, tx_flags); sock_tx_timestamp() 2176 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) sock_tx_timestamp() 2182 * @sk: socket to eat this skb from 2188 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) sk_eat_skb() argument 2190 __skb_unlink(skb, &sk->sk_receive_queue); sk_eat_skb() 2195 struct net *sock_net(const struct sock *sk) sock_net() argument 2197 return read_pnet(&sk->sk_net); sock_net() 2201 void sock_net_set(struct sock *sk, struct net *net) sock_net_set() argument 2203 write_pnet(&sk->sk_net, net); sock_net_set() 2212 static inline void sk_change_net(struct sock *sk, struct net *net) sk_change_net() argument 2214 struct net *current_net = sock_net(sk); sk_change_net() 2218 sock_net_set(sk, net); sk_change_net() 2224 if (skb->sk) { skb_steal_sock() 2225 struct sock *sk = skb->sk; skb_steal_sock() local 2228 skb->sk = NULL; skb_steal_sock() 2229 return sk; skb_steal_sock() 2237 static inline bool sk_fullsock(const struct sock *sk) sk_fullsock() argument 2239 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); sk_fullsock() 2242 void sock_enable_timestamp(struct sock *sk, int flag); 2245 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, 
2248 bool sk_ns_capable(const struct sock *sk, 2250 bool sk_capable(const struct sock *sk, int cap); 2251 bool sk_net_capable(const struct sock *sk, int cap);
|
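A note on sock_error() above: it clears the pending error as it reports it (xchg(&sk->sk_err, 0)), and the SO_ERROR getsockopt path is a thin wrapper around it, so the error is one-shot as seen from userspace as well. A minimal sketch of that behaviour, assuming nothing listens on loopback port 1 so the non-blocking connect is refused:

    /* Userspace view of sock_error()'s read-and-clear semantics via
     * getsockopt(SOL_SOCKET, SO_ERROR). Assumes loopback port 1 is
     * closed, so the connect attempt ends in ECONNREFUSED. */
    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(1) };
        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

        fcntl(fd, F_SETFL, O_NONBLOCK);
        /* usually returns -1/EINPROGRESS; the refusal arrives async */
        connect(fd, (struct sockaddr *)&dst, sizeof(dst));

        struct pollfd pfd = { .fd = fd, .events = POLLOUT };
        poll(&pfd, 1, 3000);        /* wait for the attempt to resolve */

        int err = 0;
        socklen_t len = sizeof(err);
        getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
        printf("first  SO_ERROR: %d (%s)\n", err, strerror(err));

        getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
        printf("second SO_ERROR: %d (pending error was cleared)\n", err);

        close(fd);
        return 0;
    }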
H A D | tcp.h | 53 void tcp_time_wait(struct sock *sk, int state, int timeo); 306 static inline bool tcp_out_of_memory(struct sock *sk) tcp_out_of_memory() argument 308 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && tcp_out_of_memory() 309 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) tcp_out_of_memory() 314 static inline bool tcp_too_many_orphans(struct sock *sk, int shift) tcp_too_many_orphans() argument 316 struct percpu_counter *ocp = sk->sk_prot->orphan_count; tcp_too_many_orphans() 327 bool tcp_check_oom(struct sock *sk, int shift); 330 static inline void tcp_synq_overflow(struct sock *sk) tcp_synq_overflow() argument 332 tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies; tcp_synq_overflow() 336 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) tcp_synq_no_recent_overflow() argument 338 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; tcp_synq_no_recent_overflow() 354 void tcp_shutdown(struct sock *sk, int how); 360 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); 361 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 363 void tcp_release_cb(struct sock *sk); 365 void tcp_write_timer_handler(struct sock *sk); 366 void tcp_delack_timer_handler(struct sock *sk); 367 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); 368 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 370 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 372 void tcp_rcv_space_adjust(struct sock *sk); 373 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); 374 void tcp_twsk_destructor(struct sock *sk); 375 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, 379 static inline void tcp_dec_quickack_mode(struct sock *sk, tcp_dec_quickack_mode() argument 382 struct inet_connection_sock *icsk = inet_csk(sk); tcp_dec_quickack_mode() 410 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, 414 void tcp_enter_loss(struct sock *sk); 416 void tcp_update_metrics(struct sock *sk); 417 void tcp_init_metrics(struct sock *sk); 421 bool tcp_remember_stamp(struct sock *sk); 423 void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst); 425 void tcp_close(struct sock *sk, long timeout); 426 void tcp_init_sock(struct sock *sk); 429 int tcp_getsockopt(struct sock *sk, int level, int optname, 431 int tcp_setsockopt(struct sock *sk, int level, int optname, 433 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, 435 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 437 void tcp_set_keepalive(struct sock *sk, int val); 439 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, 450 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); 451 void tcp_v4_mtu_reduced(struct sock *sk); 452 void tcp_req_err(struct sock *sk, u32 seq); 453 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); 454 struct sock *tcp_create_openreq_child(struct sock *sk, 457 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst); 458 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, 461 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); 462 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 463 int tcp_connect(struct sock *sk); 464 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 467 int tcp_disconnect(struct sock *sk, int flags); 469 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); 470 int tcp_send_rcvq(struct sock *sk, struct msghdr 
*msg, size_t size); 471 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); 476 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); 498 __u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb, 508 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); 512 __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, 517 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 519 bool tcp_may_send_now(struct sock *sk); 522 void tcp_retransmit_timer(struct sock *sk); 531 void tcp_send_fin(struct sock *sk); 532 void tcp_send_active_reset(struct sock *sk, gfp_t priority); 535 void tcp_send_ack(struct sock *sk); 536 void tcp_send_delayed_ack(struct sock *sk); 537 void tcp_send_loss_probe(struct sock *sk); 538 bool tcp_schedule_loss_probe(struct sock *sk); 541 void tcp_resume_early_retransmit(struct sock *sk); 542 void tcp_rearm_rto(struct sock *sk); 543 void tcp_reset(struct sock *sk); 547 static inline void tcp_clear_xmit_timers(struct sock *sk) tcp_clear_xmit_timers() argument 549 inet_csk_clear_xmit_timers(sk); tcp_clear_xmit_timers() 552 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); 553 unsigned int tcp_current_mss(struct sock *sk); 584 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 587 void tcp_initialize_rcv_mss(struct sock *sk); 589 int tcp_mtu_to_mss(struct sock *sk, int pmtu); 590 int tcp_mss_to_mtu(struct sock *sk, int mss); 591 void tcp_mtup_init(struct sock *sk); 592 void tcp_init_buffer_space(struct sock *sk); 594 static inline void tcp_bound_rto(const struct sock *sk) tcp_bound_rto() argument 596 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) tcp_bound_rto() 597 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; tcp_bound_rto() 617 static inline void tcp_fast_path_check(struct sock *sk) tcp_fast_path_check() argument 619 struct tcp_sock *tp = tcp_sk(sk); tcp_fast_path_check() 623 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && tcp_fast_path_check() 629 static inline u32 tcp_rto_min(struct sock *sk) tcp_rto_min() argument 631 const struct dst_entry *dst = __sk_dst_get(sk); tcp_rto_min() 639 static inline u32 tcp_rto_min_us(struct sock *sk) tcp_rto_min_us() argument 641 return jiffies_to_usecs(tcp_rto_min(sk)); tcp_rto_min_us() 666 u32 __tcp_select_window(struct sock *sk); 668 void tcp_send_window_probe(struct sock *sk); 815 void (*init)(struct sock *sk); 817 void (*release)(struct sock *sk); 820 u32 (*ssthresh)(struct sock *sk); 822 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked); 824 void (*set_state)(struct sock *sk, u8 new_state); 826 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); 828 void (*in_ack_event)(struct sock *sk, u32 flags); 830 u32 (*undo_cwnd)(struct sock *sk); 832 void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us); 834 size_t (*get_info)(struct sock *sk, u32 ext, int *attr, 844 void tcp_assign_congestion_control(struct sock *sk); 845 void tcp_init_congestion_control(struct sock *sk); 846 void tcp_cleanup_congestion_control(struct sock *sk); 852 int tcp_set_congestion_control(struct sock *sk, const char *name); 856 u32 tcp_reno_ssthresh(struct sock *sk); 857 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 871 static inline bool tcp_ca_needs_ecn(const struct sock *sk) tcp_ca_needs_ecn() argument 873 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_ca_needs_ecn() 878 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) tcp_set_ca_state() argument 880 struct inet_connection_sock *icsk = inet_csk(sk); 
tcp_set_ca_state() 883 icsk->icsk_ca_ops->set_state(sk, ca_state); tcp_set_ca_state() 887 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) tcp_ca_event() argument 889 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_ca_event() 892 icsk->icsk_ca_ops->cwnd_event(sk, event); tcp_ca_event() 969 static inline bool tcp_in_cwnd_reduction(const struct sock *sk) tcp_in_cwnd_reduction() argument 972 (1 << inet_csk(sk)->icsk_ca_state); tcp_in_cwnd_reduction() 979 static inline __u32 tcp_current_ssthresh(const struct sock *sk) tcp_current_ssthresh() argument 981 const struct tcp_sock *tp = tcp_sk(sk); tcp_current_ssthresh() 983 if (tcp_in_cwnd_reduction(sk)) tcp_current_ssthresh() 994 void tcp_enter_cwr(struct sock *sk); 1035 static inline bool tcp_is_cwnd_limited(const struct sock *sk) tcp_is_cwnd_limited() argument 1037 const struct tcp_sock *tp = tcp_sk(sk); tcp_is_cwnd_limited() 1046 static inline void tcp_check_probe_timer(struct sock *sk) tcp_check_probe_timer() argument 1048 const struct tcp_sock *tp = tcp_sk(sk); tcp_check_probe_timer() 1049 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_check_probe_timer() 1052 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, tcp_check_probe_timer() 1096 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); 1107 void tcp_set_state(struct sock *sk, int state); 1109 void tcp_done(struct sock *sk); 1132 static inline int tcp_space(const struct sock *sk) tcp_space() argument 1134 return tcp_win_from_space(sk->sk_rcvbuf - tcp_space() 1135 atomic_read(&sk->sk_rmem_alloc)); tcp_space() 1138 static inline int tcp_full_space(const struct sock *sk) tcp_full_space() argument 1140 return tcp_win_from_space(sk->sk_rcvbuf); tcp_full_space() 1144 struct sock *sk, struct dst_entry *dst); 1146 void tcp_enter_memory_pressure(struct sock *sk); 1171 static inline int tcp_fin_time(const struct sock *sk) tcp_fin_time() argument 1173 int fin_timeout = tcp_sk(sk)->linger2 ? 
: sysctl_tcp_fin_timeout; tcp_fin_time() 1174 const int rto = inet_csk(sk)->icsk_rto; tcp_fin_time() 1303 const struct sock *sk, const struct sk_buff *skb); 1304 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, 1306 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, 1308 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, 1312 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, 1317 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, tcp_md5_do_lookup() argument 1341 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, 1344 void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 1358 bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, 1373 static inline void tcp_write_queue_purge(struct sock *sk) tcp_write_queue_purge() argument 1377 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) tcp_write_queue_purge() 1378 sk_wmem_free_skb(sk, skb); tcp_write_queue_purge() 1379 sk_mem_reclaim(sk); tcp_write_queue_purge() 1380 tcp_clear_all_retrans_hints(tcp_sk(sk)); tcp_write_queue_purge() 1383 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) tcp_write_queue_head() argument 1385 return skb_peek(&sk->sk_write_queue); tcp_write_queue_head() 1388 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) tcp_write_queue_tail() argument 1390 return skb_peek_tail(&sk->sk_write_queue); tcp_write_queue_tail() 1393 static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk, tcp_write_queue_next() argument 1396 return skb_queue_next(&sk->sk_write_queue, skb); tcp_write_queue_next() 1399 static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk, tcp_write_queue_prev() argument 1402 return skb_queue_prev(&sk->sk_write_queue, skb); tcp_write_queue_prev() 1405 #define tcp_for_write_queue(skb, sk) \ 1406 skb_queue_walk(&(sk)->sk_write_queue, skb) 1408 #define tcp_for_write_queue_from(skb, sk) \ 1409 skb_queue_walk_from(&(sk)->sk_write_queue, skb) 1411 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ 1412 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) 1414 static inline struct sk_buff *tcp_send_head(const struct sock *sk) tcp_send_head() argument 1416 return sk->sk_send_head; tcp_send_head() 1419 static inline bool tcp_skb_is_last(const struct sock *sk, tcp_skb_is_last() argument 1422 return skb_queue_is_last(&sk->sk_write_queue, skb); tcp_skb_is_last() 1425 static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb) tcp_advance_send_head() argument 1427 if (tcp_skb_is_last(sk, skb)) tcp_advance_send_head() 1428 sk->sk_send_head = NULL; tcp_advance_send_head() 1430 sk->sk_send_head = tcp_write_queue_next(sk, skb); tcp_advance_send_head() 1433 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) tcp_check_send_head() argument 1435 if (sk->sk_send_head == skb_unlinked) tcp_check_send_head() 1436 sk->sk_send_head = NULL; tcp_check_send_head() 1439 static inline void tcp_init_send_head(struct sock *sk) tcp_init_send_head() argument 1441 sk->sk_send_head = NULL; tcp_init_send_head() 1444 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) __tcp_add_write_queue_tail() argument 1446 __skb_queue_tail(&sk->sk_write_queue, skb); __tcp_add_write_queue_tail() 1449 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) tcp_add_write_queue_tail() argument 1451 __tcp_add_write_queue_tail(sk, skb); tcp_add_write_queue_tail() 1454 if (sk->sk_send_head 
== NULL) { tcp_add_write_queue_tail() 1455 sk->sk_send_head = skb; tcp_add_write_queue_tail() 1457 if (tcp_sk(sk)->highest_sack == NULL) tcp_add_write_queue_tail() 1458 tcp_sk(sk)->highest_sack = skb; tcp_add_write_queue_tail() 1462 static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb) __tcp_add_write_queue_head() argument 1464 __skb_queue_head(&sk->sk_write_queue, skb); __tcp_add_write_queue_head() 1467 /* Insert buff after skb on the write queue of sk. */ tcp_insert_write_queue_after() 1470 struct sock *sk) tcp_insert_write_queue_after() 1472 __skb_queue_after(&sk->sk_write_queue, skb, buff); tcp_insert_write_queue_after() 1475 /* Insert new before skb on the write queue of sk. */ tcp_insert_write_queue_before() 1478 struct sock *sk) tcp_insert_write_queue_before() 1480 __skb_queue_before(&sk->sk_write_queue, skb, new); tcp_insert_write_queue_before() 1482 if (sk->sk_send_head == skb) tcp_insert_write_queue_before() 1483 sk->sk_send_head = new; tcp_insert_write_queue_before() 1486 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) tcp_unlink_write_queue() argument 1488 __skb_unlink(skb, &sk->sk_write_queue); tcp_unlink_write_queue() 1491 static inline bool tcp_write_queue_empty(struct sock *sk) tcp_write_queue_empty() argument 1493 return skb_queue_empty(&sk->sk_write_queue); tcp_write_queue_empty() 1496 static inline void tcp_push_pending_frames(struct sock *sk) tcp_push_pending_frames() argument 1498 if (tcp_send_head(sk)) { tcp_push_pending_frames() 1499 struct tcp_sock *tp = tcp_sk(sk); tcp_push_pending_frames() 1501 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); tcp_push_pending_frames() 1520 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) tcp_advance_highest_sack() argument 1522 tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? 
NULL : tcp_advance_highest_sack() 1523 tcp_write_queue_next(sk, skb); tcp_advance_highest_sack() 1526 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) tcp_highest_sack() argument 1528 return tcp_sk(sk)->highest_sack; tcp_highest_sack() 1531 static inline void tcp_highest_sack_reset(struct sock *sk) tcp_highest_sack_reset() argument 1533 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk); tcp_highest_sack_reset() 1537 static inline void tcp_highest_sack_combine(struct sock *sk, tcp_highest_sack_combine() argument 1541 if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack)) tcp_highest_sack_combine() 1542 tcp_sk(sk)->highest_sack = new; tcp_highest_sack_combine() 1585 void tcp_v4_destroy_sock(struct sock *sk); 1599 static inline bool tcp_stream_memory_free(const struct sock *sk) tcp_stream_memory_free() argument 1601 const struct tcp_sock *tp = tcp_sk(sk); tcp_stream_memory_free() 1612 int tcp_rtx_synack(struct sock *sk, struct request_sock *req); 1615 struct sock *sk, struct sk_buff *skb); 1620 struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, 1624 const struct sock *sk, 1626 int (*md5_parse)(struct sock *sk, 1635 struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk, 1639 const struct sock *sk, 1642 void (*init_req)(struct request_sock *req, struct sock *sk, 1645 __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb, 1648 struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl, 1652 int (*send_synack)(struct sock *sk, struct dst_entry *dst, 1655 void (*queue_hash_add)(struct sock *sk, struct request_sock *req, 1661 struct sock *sk, struct sk_buff *skb, cookie_init_sequence() 1664 return ops->cookie_init_seq(sk, skb, mss); cookie_init_sequence() 1668 struct sock *sk, struct sk_buff *skb, cookie_init_sequence() 1468 tcp_insert_write_queue_after(struct sk_buff *skb, struct sk_buff *buff, struct sock *sk) tcp_insert_write_queue_after() argument 1476 tcp_insert_write_queue_before(struct sk_buff *new, struct sk_buff *skb, struct sock *sk) tcp_insert_write_queue_before() argument 1660 cookie_init_sequence(const struct tcp_request_sock_ops *ops, struct sock *sk, struct sk_buff *skb, __u16 *mss) cookie_init_sequence() argument 1667 cookie_init_sequence(const struct tcp_request_sock_ops *ops, struct sock *sk, struct sk_buff *skb, __u16 *mss) cookie_init_sequence() argument
|
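tcp_add_write_queue_tail() and tcp_advance_send_head() above jointly maintain one invariant: sk_send_head points at the first skb not yet sent, and becomes NULL once everything queued has gone out. A userspace toy model of that invariant (the toy_* names are invented for illustration; this is plain C, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    struct toy_queue {
        int buf[8];       /* queued "skbs" (toy capacity, unchecked) */
        int n;            /* number queued */
        int send_head;    /* index of first unsent entry, -1 == none */
    };

    static void toy_add_tail(struct toy_queue *q, int skb)
    {
        q->buf[q->n] = skb;
        /* mirrors tcp_add_write_queue_tail(): if nothing was pending,
         * the new tail becomes the send head */
        if (q->send_head < 0)
            q->send_head = q->n;
        q->n++;
    }

    static void toy_advance_send_head(struct toy_queue *q)
    {
        /* mirrors tcp_advance_send_head(): NULL (-1) after the last skb */
        if (q->send_head == q->n - 1)
            q->send_head = -1;
        else
            q->send_head++;
    }

    int main(void)
    {
        struct toy_queue q = { .n = 0, .send_head = -1 };

        toy_add_tail(&q, 100);
        toy_add_tail(&q, 101);
        assert(q.send_head == 0);            /* first unsent is the head */

        toy_advance_send_head(&q);           /* "transmit" skb 100 */
        assert(q.buf[q.send_head] == 101);

        toy_advance_send_head(&q);           /* "transmit" skb 101 */
        assert(q.send_head == -1);           /* like sk_send_head == NULL */

        toy_add_tail(&q, 102);               /* new data restores the head */
        assert(q.buf[q.send_head] == 102);

        puts("send-head invariant holds");
        return 0;
    }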
H A D | ip6_route.h | 67 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, 71 const struct sock *sk, ip6_route_output() 74 return ip6_route_output_flags(net, sk, fl6, 0); ip6_route_output() 120 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu); 124 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); 144 static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, __ip6_dst_store() argument 148 struct ipv6_pinfo *np = inet6_sk(sk); __ip6_dst_store() 151 sk_setup_caps(sk, dst); __ip6_dst_store() 159 static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, ip6_dst_store() argument 162 spin_lock(&sk->sk_dst_lock); ip6_dst_store() 163 __ip6_dst_store(sk, dst, daddr, saddr); ip6_dst_store() 164 spin_unlock(&sk->sk_dst_lock); ip6_dst_store() 181 int ip6_fragment(struct sock *sk, struct sk_buff *skb, 186 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? ip6_skb_dst_mtu() 187 inet6_sk(skb->sk) : NULL; ip6_skb_dst_mtu() 193 static inline bool ip6_sk_accept_pmtu(const struct sock *sk) ip6_sk_accept_pmtu() argument 195 return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE && ip6_sk_accept_pmtu() 196 inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT; ip6_sk_accept_pmtu() 199 static inline bool ip6_sk_ignore_df(const struct sock *sk) ip6_sk_ignore_df() argument 201 return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO || ip6_sk_ignore_df() 202 inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; ip6_sk_ignore_df() 70 ip6_route_output(struct net *net, const struct sock *sk, struct flowi6 *fl6) ip6_route_output() argument
|
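ip6_sk_accept_pmtu() and ip6_sk_ignore_df() above key off inet6_sk(sk)->pmtudisc, which userspace selects with the IPV6_MTU_DISCOVER socket option. A sketch, with error handling trimmed (assumes IPv6 is enabled on the host):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);

        /* IPV6_PMTUDISC_DO: never fragment locally; for this mode
         * ip6_sk_ignore_df() above evaluates to false */
        int mode = IPV6_PMTUDISC_DO;
        setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER, &mode, sizeof(mode));

        socklen_t len = sizeof(mode);
        getsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER, &mode, &len);
        printf("pmtudisc mode: %d\n", mode);

        close(fd);
        return 0;
    }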
H A D | dn_nsp.h | 18 void dn_nsp_send_data_ack(struct sock *sk); 19 void dn_nsp_send_oth_ack(struct sock *sk); 20 void dn_nsp_delayed_ack(struct sock *sk); 21 void dn_send_conn_ack(struct sock *sk); 22 void dn_send_conn_conf(struct sock *sk, gfp_t gfp); 23 void dn_nsp_send_disc(struct sock *sk, unsigned char type, 27 void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); 28 void dn_nsp_send_conninit(struct sock *sk, unsigned char flags); 30 void dn_nsp_output(struct sock *sk); 31 int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, 33 void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, 35 unsigned long dn_nsp_persist(struct sock *sk); 36 int dn_nsp_xmit_timeout(struct sock *sk); 39 int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 41 struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); 42 struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, 197 static __inline__ int dn_congested(struct sock *sk) dn_congested() argument 199 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); dn_congested()
|
H A D | ping.h | 34 int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len, 36 void (*ip6_datagram_recv_common_ctl)(struct sock *sk, 39 void (*ip6_datagram_recv_specific_ctl)(struct sock *sk, 43 void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err, 67 int ping_get_port(struct sock *sk, unsigned short ident); 68 void ping_hash(struct sock *sk); 69 void ping_unhash(struct sock *sk); 71 int ping_init_sock(struct sock *sk); 72 void ping_close(struct sock *sk, long timeout); 73 int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len); 78 int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 82 int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 83 int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
|
H A D | inet6_connection_sock.h | 25 int inet6_csk_bind_conflict(const struct sock *sk, 28 struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6, 31 struct request_sock *inet6_csk_search_req(struct sock *sk, 37 void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, 40 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); 42 int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); 44 struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
|
H A D | dn_neigh.h | 21 int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb); 22 int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb); 25 int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb);
|
H A D | inet_sock.h | 103 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) inet_rsk() argument 105 return (struct inet_request_sock *)sk; inet_rsk() 108 static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) inet_request_mark() argument 110 if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) inet_request_mark() 113 return sk->sk_mark; inet_request_mark() 140 * @sk - ancestor class 159 /* sk and pinet6 has to be the first two members of inet_sock */ 160 struct sock sk; member in struct:inet_sock 165 #define inet_daddr sk.__sk_common.skc_daddr 166 #define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr 167 #define inet_dport sk.__sk_common.skc_dport 168 #define inet_num sk.__sk_common.skc_num 212 static inline struct inet_sock *inet_sk(const struct sock *sk) inet_sk() argument 214 return (struct inet_sock *)sk; inet_sk() 232 int inet_sk_rebuild_header(struct sock *sk); 249 static inline __u8 inet_sk_flowi_flags(const struct sock *sk) inet_sk_flowi_flags() argument 253 if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl) inet_sk_flowi_flags() 258 static inline void inet_inc_convert_csum(struct sock *sk) inet_inc_convert_csum() argument 260 inet_sk(sk)->convert_csum++; inet_inc_convert_csum() 263 static inline void inet_dec_convert_csum(struct sock *sk) inet_dec_convert_csum() argument 265 if (inet_sk(sk)->convert_csum > 0) inet_dec_convert_csum() 266 inet_sk(sk)->convert_csum--; inet_dec_convert_csum() 269 static inline bool inet_get_convert_csum(struct sock *sk) inet_get_convert_csum() argument 271 return !!inet_sk(sk)->convert_csum; inet_get_convert_csum()
|
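inet_sk() above is a bare cast, which is only sound because struct inet_sock places its struct sock as the first member (the comment at line 159 insists on exactly that; af_unix.h and llc_conn.h below rely on the same rule). A standalone toy model of the pattern, with invented base_sock/derived_sock names:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct base_sock {
        int state;
    };

    struct derived_sock {
        struct base_sock sk;    /* must stay the first member */
        int extra;
    };

    static struct derived_sock *derived_sk(struct base_sock *sk)
    {
        return (struct derived_sock *)sk;   /* same pattern as inet_sk() */
    }

    int main(void)
    {
        struct derived_sock d = { .sk = { .state = 1 }, .extra = 42 };
        struct base_sock *sk = &d.sk;

        /* the cast only works because the base sits at offset 0 */
        assert(offsetof(struct derived_sock, sk) == 0);
        assert(derived_sk(sk)->extra == 42);

        printf("state=%d extra=%d\n", sk->state, derived_sk(sk)->extra);
        return 0;
    }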
H A D | busy_poll.h | 50 static inline unsigned long sk_busy_loop_end_time(struct sock *sk) sk_busy_loop_end_time() argument 52 return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec); sk_busy_loop_end_time() 61 static inline bool sk_can_busy_loop(struct sock *sk) sk_can_busy_loop() argument 63 return sk->sk_ll_usec && sk->sk_napi_id && sk_can_busy_loop() 78 static inline bool sk_busy_loop(struct sock *sk, int nonblock) sk_busy_loop() argument 80 unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0; sk_busy_loop() 91 napi = napi_by_id(sk->sk_napi_id); sk_busy_loop() 107 NET_ADD_STATS_BH(sock_net(sk), sk_busy_loop() 111 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && sk_busy_loop() 114 rc = !skb_queue_empty(&sk->sk_receive_queue); sk_busy_loop() 128 static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb) sk_mark_napi_id() argument 130 sk->sk_napi_id = skb->napi_id; sk_mark_napi_id() 144 static inline bool sk_can_busy_loop(struct sock *sk) sk_can_busy_loop() argument 154 static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb) sk_mark_napi_id() argument 163 static inline bool sk_busy_loop(struct sock *sk, int nonblock) sk_busy_loop() argument
|
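sk_busy_loop() above spins on the socket's NAPI context for up to sk_ll_usec microseconds before falling back to sleeping. Userspace opts a socket in with SO_BUSY_POLL; a sketch (raising the value above the sysctl default may require CAP_NET_ADMIN, per the setsockopt path in sock.c):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>

    #ifndef SO_BUSY_POLL
    #define SO_BUSY_POLL 46     /* from asm-generic/socket.h */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int usec = 50;  /* busy-poll budget per receive, in microseconds */

        if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec)))
            perror("SO_BUSY_POLL");

        socklen_t len = sizeof(usec);
        getsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, &len);
        printf("sk_ll_usec is now %d\n", usec);

        close(fd);
        return 0;
    }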
H A D | cls_cgroup.h | 44 static inline void sock_update_classid(struct sock *sk) sock_update_classid() argument 49 if (classid != sk->sk_classid) sock_update_classid() 50 sk->sk_classid = classid; sock_update_classid() 53 static inline void sock_update_classid(struct sock *sk) sock_update_classid() argument
|
H A D | ip.h | 72 struct sock *sk; member in struct:ip_ra_chain 102 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, 109 int ip_output(struct sock *sk, struct sk_buff *skb); 110 int ip_mc_output(struct sock *sk, struct sk_buff *skb); 111 int ip_fragment(struct sock *sk, struct sk_buff *skb, 116 int ip_local_out_sk(struct sock *sk, struct sk_buff *skb); ip_local_out() 119 return ip_local_out_sk(skb->sk, skb); ip_local_out() 122 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); 124 int ip_append_data(struct sock *sk, struct flowi4 *fl4, 133 ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, 135 struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4, 139 int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4); 140 void ip_flush_pending_frames(struct sock *sk); 141 struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4, 148 static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) ip_finish_skb() argument 150 return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base); ip_finish_skb() 158 static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) get_rtconn_flags() argument 160 return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk); get_rtconn_flags() 164 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 165 int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 167 void ip4_datagram_release_cb(struct sock *sk); 186 void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, 275 int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) ip_dont_fragment() argument 277 return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || ip_dont_fragment() 278 (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && ip_dont_fragment() 282 static inline bool ip_sk_accept_pmtu(const struct sock *sk) ip_sk_accept_pmtu() argument 284 return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE && ip_sk_accept_pmtu() 285 inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT; ip_sk_accept_pmtu() 288 static inline bool ip_sk_use_pmtu(const struct sock *sk) ip_sk_use_pmtu() argument 290 return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE; ip_sk_use_pmtu() 293 static inline bool ip_sk_ignore_df(const struct sock *sk) ip_sk_ignore_df() argument 295 return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO || ip_sk_ignore_df() 296 inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT; ip_sk_ignore_df() 314 if (!skb->sk || ip_sk_use_pmtu(skb->sk)) { ip_skb_dst_mtu() 326 struct sock *sk, int segs) ip_select_ident_segs() 336 if (sk && inet_sk(sk)->inet_daddr) { ip_select_ident_segs() 337 iph->id = htons(inet_sk(sk)->inet_id); ip_select_ident_segs() 338 inet_sk(sk)->inet_id += segs; ip_select_ident_segs() 348 struct sock *sk) ip_select_ident() 350 ip_select_ident_segs(net, skb, sk, 1); ip_select_ident() 359 static inline void inet_set_txhash(struct sock *sk) inet_set_txhash() argument 361 struct inet_sock *inet = inet_sk(sk); inet_set_txhash() 369 sk->sk_txhash = flow_hash_from_keys(&keys); inet_set_txhash() 445 static __inline__ void inet_reset_saddr(struct sock *sk) inet_reset_saddr() argument 447 inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0; inet_reset_saddr() 449 if (sk->sk_family == PF_INET6) { inet_reset_saddr() 450 struct ipv6_pinfo *np = inet6_sk(sk); inet_reset_saddr() 453 memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); inet_reset_saddr() 528 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff 
*skb); 532 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 534 int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 536 int compat_ip_setsockopt(struct sock *sk, int level, int optname, 538 int compat_ip_getsockopt(struct sock *sk, int level, int optname, 540 int ip_ra_control(struct sock *sk, unsigned char on, 543 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); 544 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, 546 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, 325 ip_select_ident_segs(struct net *net, struct sk_buff *skb, struct sock *sk, int segs) ip_select_ident_segs() argument 347 ip_select_ident(struct net *net, struct sk_buff *skb, struct sock *sk) ip_select_ident() argument
|
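ip_dont_fragment() above returns true for IP_PMTUDISC_DO, and for IP_PMTUDISC_WANT when the route does not override it; that is what makes the stack set DF on egress. The mode is chosen per socket with IP_MTU_DISCOVER; a sketch with error handling trimmed:

    #include <stdio.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        int val = IP_PMTUDISC_DO;   /* always set DF, never fragment */
        setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));

        socklen_t len = sizeof(val);
        getsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, &len);
        printf("pmtudisc mode: %d (IP_PMTUDISC_DO=%d)\n", val, IP_PMTUDISC_DO);

        close(fd);
        return 0;
    }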
H A D | af_unix.h | 52 /* WARNING: sk has to be the first member */ 53 struct sock sk; member in struct:unix_sock 69 static inline struct unix_sock *unix_sk(struct sock *sk) unix_sk() argument 71 return (struct unix_sock *)sk; unix_sk() 76 long unix_inq_len(struct sock *sk); 77 long unix_outq_len(struct sock *sk);
|
H A D | inet_common.h | 31 void inet_sock_destruct(struct sock *sk); 36 int inet_ctl_sock_create(struct sock **sk, unsigned short family, 39 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, 42 static inline void inet_ctl_sock_destroy(struct sock *sk) inet_ctl_sock_destroy() argument 44 if (sk) inet_ctl_sock_destroy() 45 sk_release_kernel(sk); inet_ctl_sock_destroy()
|
H A D | transp_v6.h | 33 int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 36 void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, 38 void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg, 40 void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, 43 int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, 55 void inet6_destroy_sock(struct sock *sk);
|
H A D | route.h | 44 #define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE)) 45 #define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE)) 115 struct sock *sk); 137 struct sock *sk, ip_route_output_ports() 142 flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos, ip_route_output_ports() 144 sk ? inet_sk_flowi_flags(sk) : 0, ip_route_output_ports() 146 if (sk) ip_route_output_ports() 147 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); ip_route_output_ports() 148 return ip_route_output_flow(net, fl4, sk); ip_route_output_ports() 184 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu); 187 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk); 246 struct sock *sk) ip_route_connect_init() 250 if (inet_sk(sk)->transparent) ip_route_connect_init() 253 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, ip_route_connect_init() 261 struct sock *sk) ip_route_connect() 263 struct net *net = sock_net(sk); ip_route_connect() 267 sport, dport, sk); ip_route_connect() 276 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); ip_route_connect() 277 return ip_route_output_flow(net, fl4, sk); ip_route_connect() 283 struct sock *sk) ip_route_newports() 289 flowi4_update_output(fl4, sk->sk_bound_dev_if, ip_route_newports() 290 RT_CONN_FLAGS(sk), fl4->daddr, ip_route_newports() 292 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); ip_route_newports() 293 return ip_route_output_flow(sock_net(sk), fl4, sk); ip_route_newports() 136 ip_route_output_ports(struct net *net, struct flowi4 *fl4, struct sock *sk, __be32 daddr, __be32 saddr, __be16 dport, __be16 sport, __u8 proto, __u8 tos, int oif) ip_route_output_ports() argument 243 ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src, u32 tos, int oif, u8 protocol, __be16 sport, __be16 dport, struct sock *sk) ip_route_connect_init() argument 257 ip_route_connect(struct flowi4 *fl4, __be32 dst, __be32 src, u32 tos, int oif, u8 protocol, __be16 sport, __be16 dport, struct sock *sk) ip_route_connect() argument 280 ip_route_newports(struct flowi4 *fl4, struct rtable *rt, __be16 orig_sport, __be16 orig_dport, __be16 sport, __be16 dport, struct sock *sk) ip_route_newports() argument
|
H A D | llc_conn.h | 34 struct sock sk; member in struct:llc_sock 83 static inline struct llc_sock *llc_sk(const struct sock *sk) llc_sk() argument 85 return (struct llc_sock *)sk; llc_sk() 100 void llc_sk_free(struct sock *sk); 102 void llc_sk_reset(struct sock *sk); 105 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); 106 void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); 107 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); 108 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); 109 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); 113 void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk); 114 void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
|
H A D | udplite.h | 26 /* Designate sk as UDP-Lite socket */ udplite_sk_init() 27 static inline int udplite_sk_init(struct sock *sk) udplite_sk_init() argument 29 udp_sk(sk)->pcflag = UDPLITE_BIT; udplite_sk_init() 71 static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) udplite_csum_outgoing() argument 73 const struct udp_sock *up = udp_sk(skb->sk); udplite_csum_outgoing() 101 skb_queue_walk(&sk->sk_write_queue, skb) { udplite_csum_outgoing() 116 const struct udp_sock *up = udp_sk(skb->sk); udplite_csum() 131 int udplite_get_port(struct sock *sk, unsigned short snum,
|
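udplite_csum_outgoing() above honours the partial coverage lengths stored in the udp_sock, which userspace configures with the UDPLITE_*_CSCOV options. A sketch: the constants are defined by hand in case libc headers do not ship them (values as given in the kernel's UDP-Lite documentation), and the socket call assumes a kernel built with UDP-Lite support:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPPROTO_UDPLITE
    #define IPPROTO_UDPLITE 136
    #endif
    #ifndef UDPLITE_SEND_CSCOV
    #define UDPLITE_SEND_CSCOV 10
    #endif
    #ifndef UDPLITE_RECV_CSCOV
    #define UDPLITE_RECV_CSCOV 11
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
        if (fd < 0) {
            perror("udplite socket");
            return 1;
        }

        /* cover the 8-byte UDP-Lite header plus 12 payload bytes;
         * the rest of each datagram is left unprotected on purpose */
        int cov = 20;
        setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
        setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));

        printf("UDP-Lite socket with send/recv coverage of %d bytes\n", cov);
        close(fd);
        return 0;
    }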
H A D | udp.h | 127 * @sk: socket we are writing to 131 static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb) udp_csum_outgoing() argument 135 skb_queue_walk(&sk->sk_write_queue, skb) { udp_csum_outgoing() 180 static inline void udp_lib_hash(struct sock *sk) udp_lib_hash() argument 185 void udp_lib_unhash(struct sock *sk); 186 void udp_lib_rehash(struct sock *sk, u16 new_hash); 188 static inline void udp_lib_close(struct sock *sk, long timeout) udp_lib_close() argument 190 sk_common_release(sk); udp_lib_close() 193 int udp_lib_get_port(struct sock *sk, unsigned short snum, 237 int udp_get_port(struct sock *sk, unsigned short snum, 241 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 242 int udp_push_pending_frames(struct sock *sk); 243 void udp_flush_pending_frames(struct sock *sk); 246 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); 247 int udp_disconnect(struct sock *sk, int flags); 252 int udp_lib_getsockopt(struct sock *sk, int level, int optname, 254 int udp_lib_setsockopt(struct sock *sk, int level, int optname, 291 #define UDPX_INC_STATS_BH(sk, field) \ 293 if ((sk)->sk_family == AF_INET) \ 294 UDP_INC_STATS_BH(sock_net(sk), field, 0); \ 296 UDP6_INC_STATS_BH(sock_net(sk), field, 0); \ 299 #define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
|
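udp_csum_outgoing() above folds a ones' complement sum over every skb on the write queue; this works because the RFC 1071 checksum can be accumulated per fragment (of even length) and folded once at the end. A plain-C toy of that property, not the kernel's csum_partial():

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    static uint32_t csum_accumulate(uint32_t sum, const uint8_t *p, size_t len)
    {
        while (len > 1) {
            sum += (uint32_t)p[0] << 8 | p[1];  /* 16-bit big-endian words */
            p += 2;
            len -= 2;
        }
        if (len)                                /* odd trailing byte, zero-padded */
            sum += (uint32_t)p[0] << 8;
        return sum;
    }

    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)                       /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        const uint8_t part1[] = { 0x45, 0x00, 0x00, 0x1c };
        const uint8_t part2[] = { 0xc0, 0xa8, 0x00, 0x01 };

        /* accumulating per fragment ... */
        uint32_t sum = 0;
        sum = csum_accumulate(sum, part1, sizeof(part1));
        sum = csum_accumulate(sum, part2, sizeof(part2));

        /* ... equals checksumming the concatenated buffer in one pass */
        uint8_t whole[8];
        for (size_t i = 0; i < 4; i++) {
            whole[i] = part1[i];
            whole[i + 4] = part2[i];
        }
        assert(csum_fold(sum) ==
               csum_fold(csum_accumulate(0, whole, sizeof(whole))));

        printf("checksum: 0x%04x\n", csum_fold(sum));
        return 0;
    }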
H A D | inet_hashtables.h | 52 * 2) If all sockets have sk->sk_reuse set, and none of them are in 55 * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local 68 * sk->sk_reuse set, we don't even have to walk the owners list at all, 121 * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE 230 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, 239 static inline int inet_sk_listen_hashfn(const struct sock *sk) inet_sk_listen_hashfn() argument 241 return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num); inet_sk_listen_hashfn() 245 int __inet_inherit_port(struct sock *sk, struct sock *child); 247 void inet_put_port(struct sock *sk); 251 int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw); 252 int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw); 253 void inet_hash(struct sock *sk); 254 void inet_unhash(struct sock *sk); 349 struct sock *sk = __inet_lookup_established(net, hashinfo, __inet_lookup() local 352 return sk ? : __inet_lookup_listener(net, hashinfo, saddr, sport, __inet_lookup() 362 struct sock *sk; inet_lookup() local 365 sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif); inet_lookup() 368 return sk; inet_lookup() 376 struct sock *sk = skb_steal_sock(skb); __inet_lookup_skb() local 379 if (sk) __inet_lookup_skb() 380 return sk; __inet_lookup_skb() 387 u32 sk_ehashfn(const struct sock *sk); 392 static inline void sk_daddr_set(struct sock *sk, __be32 addr) sk_daddr_set() argument 394 sk->sk_daddr = addr; /* alias of inet_daddr */ sk_daddr_set() 396 ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr); sk_daddr_set() 400 static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr) sk_rcv_saddr_set() argument 402 sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */ sk_rcv_saddr_set() 404 ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr); sk_rcv_saddr_set() 409 struct sock *sk, u32 port_offset, 415 struct sock *sk);
|
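The bind-conflict rules in the comment above can be observed from userspace. A close cousin of rule 2 (where every owner of the port must have opted in) is SO_REUSEPORT, which is the easiest to provoke; a sketch using UDP sockets on loopback, letting the kernel pick a free port first:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>

    #ifndef SO_REUSEPORT
    #define SO_REUSEPORT 15     /* from asm-generic/socket.h */
    #endif

    static int bound_udp_socket(struct sockaddr_in *addr, int reuseport)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuseport, sizeof(reuseport));
        if (bind(fd, (struct sockaddr *)addr, sizeof(*addr)) < 0) {
            printf("bind: %s\n", strerror(errno));
            close(fd);
            return -1;
        }
        return fd;
    }

    int main(void)
    {
        struct sockaddr_in addr = { .sin_family = AF_INET,
                                    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };

        int a = bound_udp_socket(&addr, 1);     /* port 0: kernel picks one */
        socklen_t len = sizeof(addr);
        getsockname(a, (struct sockaddr *)&addr, &len);

        int b = bound_udp_socket(&addr, 1);     /* same port, both opted in */
        printf("second bind with SO_REUSEPORT: %s\n", b >= 0 ? "ok" : "refused");

        int c = bound_udp_socket(&addr, 0);     /* not opted in -> conflict */
        printf("second bind without it:       %s\n", c >= 0 ? "ok" : "refused");

        close(a);
        if (b >= 0) close(b);
        if (c >= 0) close(c);
        return 0;
    }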
H A D | raw.h | 60 void raw_hash_sk(struct sock *sk); 61 void raw_unhash_sk(struct sock *sk); 70 static inline struct raw_sock *raw_sk(const struct sock *sk) raw_sk() argument 72 return (struct raw_sock *)sk; raw_sk()
|
H A D | rawv6.h | 10 int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
|
/linux-4.1.27/net/core/ |
H A D | stream.c | 24 * @sk: socket 28 void sk_stream_write_space(struct sock *sk) sk_stream_write_space() argument 30 struct socket *sock = sk->sk_socket; sk_stream_write_space() 33 if (sk_stream_is_writeable(sk) && sock) { sk_stream_write_space() 37 wq = rcu_dereference(sk->sk_wq); sk_stream_write_space() 41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sk_stream_write_space() 50 * @sk: sock to wait on 55 int sk_stream_wait_connect(struct sock *sk, long *timeo_p) sk_stream_wait_connect() argument 62 int err = sock_error(sk); sk_stream_wait_connect() 65 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) sk_stream_wait_connect() 72 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sk_stream_wait_connect() 73 sk->sk_write_pending++; sk_stream_wait_connect() 74 done = sk_wait_event(sk, timeo_p, sk_stream_wait_connect() 75 !sk->sk_err && sk_stream_wait_connect() 76 !((1 << sk->sk_state) & sk_stream_wait_connect() 78 finish_wait(sk_sleep(sk), &wait); sk_stream_wait_connect() 79 sk->sk_write_pending--; sk_stream_wait_connect() 87 * @sk: socket to verify 89 static inline int sk_stream_closing(struct sock *sk) sk_stream_closing() argument 91 return (1 << sk->sk_state) & sk_stream_closing() 95 void sk_stream_wait_close(struct sock *sk, long timeout) sk_stream_wait_close() argument 101 prepare_to_wait(sk_sleep(sk), &wait, sk_stream_wait_close() 103 if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) sk_stream_wait_close() 107 finish_wait(sk_sleep(sk), &wait); sk_stream_wait_close() 114 * @sk: socket to wait for memory 117 int sk_stream_wait_memory(struct sock *sk, long *timeo_p) sk_stream_wait_memory() argument 124 if (sk_stream_memory_free(sk)) sk_stream_wait_memory() 128 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); sk_stream_wait_memory() 130 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sk_stream_wait_memory() 132 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) sk_stream_wait_memory() 138 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); sk_stream_wait_memory() 139 if (sk_stream_memory_free(sk) && !vm_wait) sk_stream_wait_memory() 142 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk_stream_wait_memory() 143 sk->sk_write_pending++; sk_stream_wait_memory() 144 sk_wait_event(sk, ¤t_timeo, sk->sk_err || sk_stream_wait_memory() 145 (sk->sk_shutdown & SEND_SHUTDOWN) || sk_stream_wait_memory() 146 (sk_stream_memory_free(sk) && sk_stream_wait_memory() 148 sk->sk_write_pending--; sk_stream_wait_memory() 161 finish_wait(sk_sleep(sk), &wait); sk_stream_wait_memory() 176 int sk_stream_error(struct sock *sk, int flags, int err) sk_stream_error() argument 179 err = sock_error(sk) ? : -EPIPE; sk_stream_error() 186 void sk_stream_kill_queues(struct sock *sk) sk_stream_kill_queues() argument 189 __skb_queue_purge(&sk->sk_receive_queue); sk_stream_kill_queues() 192 __skb_queue_purge(&sk->sk_error_queue); sk_stream_kill_queues() 195 WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); sk_stream_kill_queues() 198 sk_mem_reclaim(sk); sk_stream_kill_queues() 200 WARN_ON(sk->sk_wmem_queued); sk_stream_kill_queues() 201 WARN_ON(sk->sk_forward_alloc); sk_stream_kill_queues()
|
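The *timeo_p that sk_stream_wait_memory() and sk_stream_wait_connect() count down comes from sock_sndtimeo(), i.e. the SO_SNDTIMEO socket option; with the default MAX_SCHEDULE_TIMEOUT the wait is unbounded. A sketch of bounding it from userspace:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/time.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        /* a send() blocked on a full buffer will now give up with
         * EAGAIN after ~1.5 s instead of sleeping indefinitely in
         * sk_stream_wait_memory() */
        struct timeval tv = { .tv_sec = 1, .tv_usec = 500000 };
        setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));

        socklen_t len = sizeof(tv);
        getsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, &len);
        printf("send timeout: %ld.%06ld s\n", (long)tv.tv_sec, (long)tv.tv_usec);

        close(fd);
        return 0;
    }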
H A D | sock.c | 51 * Anonymous : inet_create tidied up (sk->reuse setting) 52 * Alan Cox : inet sockets don't set sk->type! 150 * @sk: Socket to use a capability on or through 158 bool sk_ns_capable(const struct sock *sk, sk_ns_capable() argument 161 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && sk_ns_capable() 168 * @sk: Socket to use a capability on or through 175 bool sk_capable(const struct sock *sk, int cap) sk_capable() argument 177 return sk_ns_capable(sk, &init_user_ns, cap); sk_capable() 183 * @sk: Socket to use a capability on or through 190 bool sk_net_capable(const struct sock *sk, int cap) sk_net_capable() argument 192 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); sk_net_capable() 335 * @sk: socket to set it on 341 void sk_set_memalloc(struct sock *sk) sk_set_memalloc() argument 343 sock_set_flag(sk, SOCK_MEMALLOC); sk_set_memalloc() 344 sk->sk_allocation |= __GFP_MEMALLOC; sk_set_memalloc() 349 void sk_clear_memalloc(struct sock *sk) sk_clear_memalloc() argument 351 sock_reset_flag(sk, SOCK_MEMALLOC); sk_clear_memalloc() 352 sk->sk_allocation &= ~__GFP_MEMALLOC; sk_clear_memalloc() 362 sk_mem_reclaim(sk); sk_clear_memalloc() 366 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) __sk_backlog_rcv() argument 372 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); __sk_backlog_rcv() 375 ret = sk->sk_backlog_rcv(sk, skb); __sk_backlog_rcv() 424 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) sock_disable_timestamp() argument 426 if (sk->sk_flags & flags) { sock_disable_timestamp() 427 sk->sk_flags &= ~flags; sock_disable_timestamp() 428 if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP)) sock_disable_timestamp() 434 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) sock_queue_rcv_skb() argument 438 struct sk_buff_head *list = &sk->sk_receive_queue; sock_queue_rcv_skb() 440 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { sock_queue_rcv_skb() 441 atomic_inc(&sk->sk_drops); sock_queue_rcv_skb() 442 trace_sock_rcvqueue_full(sk, skb); sock_queue_rcv_skb() 446 err = sk_filter(sk, skb); sock_queue_rcv_skb() 450 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { sock_queue_rcv_skb() 451 atomic_inc(&sk->sk_drops); sock_queue_rcv_skb() 456 skb_set_owner_r(skb, sk); sock_queue_rcv_skb() 464 sock_skb_set_dropcount(sk, skb); sock_queue_rcv_skb() 468 if (!sock_flag(sk, SOCK_DEAD)) sock_queue_rcv_skb() 469 sk->sk_data_ready(sk); sock_queue_rcv_skb() 474 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) sk_receive_skb() argument 478 if (sk_filter(sk, skb)) sk_receive_skb() 483 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { sk_receive_skb() 484 atomic_inc(&sk->sk_drops); sk_receive_skb() 488 bh_lock_sock_nested(sk); sk_receive_skb() 490 bh_lock_sock(sk); sk_receive_skb() 491 if (!sock_owned_by_user(sk)) { sk_receive_skb() 495 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); sk_receive_skb() 497 rc = sk_backlog_rcv(sk, skb); sk_receive_skb() 499 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); sk_receive_skb() 500 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { sk_receive_skb() 501 bh_unlock_sock(sk); sk_receive_skb() 502 atomic_inc(&sk->sk_drops); sk_receive_skb() 506 bh_unlock_sock(sk); sk_receive_skb() 508 sock_put(sk); sk_receive_skb() 516 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) __sk_dst_check() argument 518 struct dst_entry *dst = __sk_dst_get(sk); __sk_dst_check() 521 sk_tx_queue_clear(sk); __sk_dst_check() 522 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); __sk_dst_check() 531 struct 
dst_entry *sk_dst_check(struct sock *sk, u32 cookie) sk_dst_check() argument 533 struct dst_entry *dst = sk_dst_get(sk); sk_dst_check() 536 sk_dst_reset(sk); sk_dst_check() 545 static int sock_setbindtodevice(struct sock *sk, char __user *optval, sock_setbindtodevice() argument 550 struct net *net = sock_net(sk); sock_setbindtodevice() 590 lock_sock(sk); sock_setbindtodevice() 591 sk->sk_bound_dev_if = index; sock_setbindtodevice() 592 sk_dst_reset(sk); sock_setbindtodevice() 593 release_sock(sk); sock_setbindtodevice() 603 static int sock_getbindtodevice(struct sock *sk, char __user *optval, sock_getbindtodevice() argument 608 struct net *net = sock_net(sk); sock_getbindtodevice() 611 if (sk->sk_bound_dev_if == 0) { sock_getbindtodevice() 620 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if); sock_getbindtodevice() 643 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) sock_valbool_flag() argument 646 sock_set_flag(sk, bit); sock_valbool_flag() 648 sock_reset_flag(sk, bit); sock_valbool_flag() 651 bool sk_mc_loop(struct sock *sk) sk_mc_loop() argument 655 if (!sk) sk_mc_loop() 657 switch (sk->sk_family) { sk_mc_loop() 659 return inet_sk(sk)->mc_loop; sk_mc_loop() 662 return inet6_sk(sk)->mc_loop; sk_mc_loop() 678 struct sock *sk = sock->sk; sock_setsockopt() local 689 return sock_setbindtodevice(sk, optval, optlen); sock_setsockopt() 699 lock_sock(sk); sock_setsockopt() 706 sock_valbool_flag(sk, SOCK_DBG, valbool); sock_setsockopt() 709 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); sock_setsockopt() 712 sk->sk_reuseport = valbool; sock_setsockopt() 721 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); sock_setsockopt() 724 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); sock_setsockopt() 734 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; sock_setsockopt() 735 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); sock_setsockopt() 737 sk->sk_write_space(sk); sock_setsockopt() 755 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; sock_setsockopt() 771 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); sock_setsockopt() 783 if (sk->sk_protocol == IPPROTO_TCP && sock_setsockopt() 784 sk->sk_type == SOCK_STREAM) sock_setsockopt() 785 tcp_set_keepalive(sk, valbool); sock_setsockopt() 787 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); sock_setsockopt() 791 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); sock_setsockopt() 795 sk->sk_no_check_tx = valbool; sock_setsockopt() 800 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) sock_setsockopt() 801 sk->sk_priority = val; sock_setsockopt() 816 sock_reset_flag(sk, SOCK_LINGER); sock_setsockopt() 820 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; sock_setsockopt() 823 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; sock_setsockopt() 824 sock_set_flag(sk, SOCK_LINGER); sock_setsockopt() 843 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); sock_setsockopt() 845 sock_set_flag(sk, SOCK_RCVTSTAMPNS); sock_setsockopt() 846 sock_set_flag(sk, SOCK_RCVTSTAMP); sock_setsockopt() 847 sock_enable_timestamp(sk, SOCK_TIMESTAMP); sock_setsockopt() 849 sock_reset_flag(sk, SOCK_RCVTSTAMP); sock_setsockopt() 850 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); sock_setsockopt() 861 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { sock_setsockopt() 862 if (sk->sk_protocol == IPPROTO_TCP && sock_setsockopt() 863 sk->sk_type == SOCK_STREAM) { sock_setsockopt() 864 if (sk->sk_state != TCP_ESTABLISHED) { sock_setsockopt() 868 sk->sk_tskey = tcp_sk(sk)->snd_una; sock_setsockopt() 870 sk->sk_tskey = 0; sock_setsockopt() 873 sk->sk_tsflags = val; sock_setsockopt() 875 
sock_enable_timestamp(sk, sock_setsockopt() 878 sock_disable_timestamp(sk, sock_setsockopt() 885 sk->sk_rcvlowat = val ? : 1; sock_setsockopt() 889 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); sock_setsockopt() 893 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); sock_setsockopt() 905 ret = sk_attach_filter(&fprog, sk); sock_setsockopt() 918 ret = sk_attach_bpf(ufd, sk); sock_setsockopt() 923 ret = sk_detach_filter(sk); sock_setsockopt() 927 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) sock_setsockopt() 930 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); sock_setsockopt() 940 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) sock_setsockopt() 943 sk->sk_mark = val; sock_setsockopt() 947 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); sock_setsockopt() 951 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); sock_setsockopt() 956 ret = sock->ops->set_peek_off(sk, val); sock_setsockopt() 962 sock_valbool_flag(sk, SOCK_NOFCS, valbool); sock_setsockopt() 966 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); sock_setsockopt() 972 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) sock_setsockopt() 978 sk->sk_ll_usec = val; sock_setsockopt() 984 sk->sk_max_pacing_rate = val; sock_setsockopt() 985 sk->sk_pacing_rate = min(sk->sk_pacing_rate, sock_setsockopt() 986 sk->sk_max_pacing_rate); sock_setsockopt() 993 release_sock(sk); sock_setsockopt() 1015 struct sock *sk = sock->sk; sock_getsockopt() local 1035 v.val = sock_flag(sk, SOCK_DBG); sock_getsockopt() 1039 v.val = sock_flag(sk, SOCK_LOCALROUTE); sock_getsockopt() 1043 v.val = sock_flag(sk, SOCK_BROADCAST); sock_getsockopt() 1047 v.val = sk->sk_sndbuf; sock_getsockopt() 1051 v.val = sk->sk_rcvbuf; sock_getsockopt() 1055 v.val = sk->sk_reuse; sock_getsockopt() 1059 v.val = sk->sk_reuseport; sock_getsockopt() 1063 v.val = sock_flag(sk, SOCK_KEEPOPEN); sock_getsockopt() 1067 v.val = sk->sk_type; sock_getsockopt() 1071 v.val = sk->sk_protocol; sock_getsockopt() 1075 v.val = sk->sk_family; sock_getsockopt() 1079 v.val = -sock_error(sk); sock_getsockopt() 1081 v.val = xchg(&sk->sk_err_soft, 0); sock_getsockopt() 1085 v.val = sock_flag(sk, SOCK_URGINLINE); sock_getsockopt() 1089 v.val = sk->sk_no_check_tx; sock_getsockopt() 1093 v.val = sk->sk_priority; sock_getsockopt() 1098 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); sock_getsockopt() 1099 v.ling.l_linger = sk->sk_lingertime / HZ; sock_getsockopt() 1107 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_getsockopt() 1108 !sock_flag(sk, SOCK_RCVTSTAMPNS); sock_getsockopt() 1112 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); sock_getsockopt() 1116 v.val = sk->sk_tsflags; sock_getsockopt() 1121 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { sock_getsockopt() 1125 v.tm.tv_sec = sk->sk_rcvtimeo / HZ; sock_getsockopt() 1126 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; sock_getsockopt() 1132 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { sock_getsockopt() 1136 v.tm.tv_sec = sk->sk_sndtimeo / HZ; sock_getsockopt() 1137 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; sock_getsockopt() 1142 v.val = sk->sk_rcvlowat; sock_getsockopt() 1158 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); sock_getsockopt() 1181 v.val = sk->sk_state == TCP_LISTEN; sock_getsockopt() 1192 v.val = sk->sk_mark; sock_getsockopt() 1196 v.val = sock_flag(sk, SOCK_RXQ_OVFL); sock_getsockopt() 1200 v.val = sock_flag(sk, SOCK_WIFI_STATUS); sock_getsockopt() 1207 v.val = sk->sk_peek_off; sock_getsockopt() 1210 v.val = sock_flag(sk, SOCK_NOFCS); sock_getsockopt() 1214 return 
sock_getbindtodevice(sk, optval, optlen, len); sock_getsockopt() 1217 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len); sock_getsockopt() 1224 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); sock_getsockopt() 1232 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); sock_getsockopt() 1237 v.val = sk->sk_ll_usec; sock_getsockopt() 1242 v.val = sk->sk_max_pacing_rate; sock_getsockopt() 1246 v.val = sk->sk_incoming_cpu; sock_getsockopt() 1271 static inline void sock_lock_init(struct sock *sk) sock_lock_init() argument 1273 sock_lock_init_class_and_name(sk, sock_lock_init() 1274 af_family_slock_key_strings[sk->sk_family], sock_lock_init() 1275 af_family_slock_keys + sk->sk_family, sock_lock_init() 1276 af_family_key_strings[sk->sk_family], sock_lock_init() 1277 af_family_keys + sk->sk_family); sock_lock_init() 1301 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) sk_prot_clear_portaddr_nulls() argument 1311 memset((char *)sk, 0, nulls1); sk_prot_clear_portaddr_nulls() 1312 memset((char *)sk + nulls1 + sizeof(void *), 0, sk_prot_clear_portaddr_nulls() 1314 memset((char *)sk + nulls2 + sizeof(void *), 0, sk_prot_clear_portaddr_nulls() 1322 struct sock *sk; sk_prot_alloc() local 1327 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); sk_prot_alloc() 1328 if (!sk) sk_prot_alloc() 1329 return sk; sk_prot_alloc() 1332 prot->clear_sk(sk, prot->obj_size); sk_prot_alloc() 1334 sk_prot_clear_nulls(sk, prot->obj_size); sk_prot_alloc() 1337 sk = kmalloc(prot->obj_size, priority); sk_prot_alloc() 1339 if (sk != NULL) { sk_prot_alloc() 1340 kmemcheck_annotate_bitfield(sk, flags); sk_prot_alloc() 1342 if (security_sk_alloc(sk, family, priority)) sk_prot_alloc() 1347 sk_tx_queue_clear(sk); sk_prot_alloc() 1350 return sk; sk_prot_alloc() 1353 security_sk_free(sk); sk_prot_alloc() 1356 kmem_cache_free(slab, sk); sk_prot_alloc() 1358 kfree(sk); sk_prot_alloc() 1362 static void sk_prot_free(struct proto *prot, struct sock *sk) sk_prot_free() argument 1370 security_sk_free(sk); sk_prot_free() 1372 kmem_cache_free(slab, sk); sk_prot_free() 1374 kfree(sk); sk_prot_free() 1379 void sock_update_netprioidx(struct sock *sk) sock_update_netprioidx() argument 1384 sk->sk_cgrp_prioidx = task_netprioidx(current); sock_update_netprioidx() 1399 struct sock *sk; sk_alloc() local 1401 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); sk_alloc() 1402 if (sk) { sk_alloc() 1403 sk->sk_family = family; sk_alloc() 1408 sk->sk_prot = sk->sk_prot_creator = prot; sk_alloc() 1409 sock_lock_init(sk); sk_alloc() 1410 sock_net_set(sk, get_net(net)); sk_alloc() 1411 atomic_set(&sk->sk_wmem_alloc, 1); sk_alloc() 1413 sock_update_classid(sk); sk_alloc() 1414 sock_update_netprioidx(sk); sk_alloc() 1417 return sk; sk_alloc() 1421 static void __sk_free(struct sock *sk) __sk_free() argument 1425 if (sk->sk_destruct) __sk_free() 1426 sk->sk_destruct(sk); __sk_free() 1428 filter = rcu_dereference_check(sk->sk_filter, __sk_free() 1429 atomic_read(&sk->sk_wmem_alloc) == 0); __sk_free() 1431 sk_filter_uncharge(sk, filter); __sk_free() 1432 RCU_INIT_POINTER(sk->sk_filter, NULL); __sk_free() 1435 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); __sk_free() 1437 if (atomic_read(&sk->sk_omem_alloc)) __sk_free() 1439 __func__, atomic_read(&sk->sk_omem_alloc)); __sk_free() 1441 if (sk->sk_peer_cred) __sk_free() 1442 put_cred(sk->sk_peer_cred); __sk_free() 1443 put_pid(sk->sk_peer_pid); __sk_free() 1444 put_net(sock_net(sk)); __sk_free() 1445 sk_prot_free(sk->sk_prot_creator, sk); __sk_free() 1448 void sk_free(struct sock *sk) 
sk_free() argument 1453 * If not null, sock_wfree() will call __sk_free(sk) later sk_free() 1455 if (atomic_dec_and_test(&sk->sk_wmem_alloc)) sk_free() 1456 __sk_free(sk); sk_free() 1461 * Last sock_put should drop reference to sk->sk_net. It has already 1467 void sk_release_kernel(struct sock *sk) sk_release_kernel() argument 1469 if (sk == NULL || sk->sk_socket == NULL) sk_release_kernel() 1472 sock_hold(sk); sk_release_kernel() 1473 sock_release(sk->sk_socket); sk_release_kernel() 1474 sock_net_set(sk, get_net(&init_net)); sk_release_kernel() 1475 sock_put(sk); sk_release_kernel() 1479 static void sk_update_clone(const struct sock *sk, struct sock *newsk) sk_update_clone() argument 1481 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) sk_update_clone() 1487 * @sk: the socket to clone 1492 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) sk_clone_lock() argument 1497 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); sk_clone_lock() 1501 sock_copy(newsk, sk); sk_clone_lock() 1530 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; sk_clone_lock() 1567 * is the same as sk->sk_prot->socks, as this field was copied sk_clone_lock() 1579 sk_update_clone(sk, newsk); sk_clone_lock() 1592 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) sk_setup_caps() argument 1594 __sk_dst_set(sk, dst); sk_setup_caps() 1595 sk->sk_route_caps = dst->dev->features; sk_setup_caps() 1596 if (sk->sk_route_caps & NETIF_F_GSO) sk_setup_caps() 1597 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; sk_setup_caps() 1598 sk->sk_route_caps &= ~sk->sk_route_nocaps; sk_setup_caps() 1599 if (sk_can_gso(sk)) { sk_setup_caps() 1601 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; sk_setup_caps() 1603 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk_setup_caps() 1604 sk->sk_gso_max_size = dst->dev->gso_max_size; sk_setup_caps() 1605 sk->sk_gso_max_segs = dst->dev->gso_max_segs; sk_setup_caps() 1621 struct sock *sk = skb->sk; sock_wfree() local 1624 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { sock_wfree() 1629 atomic_sub(len - 1, &sk->sk_wmem_alloc); sock_wfree() 1630 sk->sk_write_space(sk); sock_wfree() 1637 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc)) sock_wfree() 1638 __sk_free(sk); sock_wfree() 1653 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); skb_orphan_partial() 1666 struct sock *sk = skb->sk; sock_rfree() local 1669 atomic_sub(len, &sk->sk_rmem_alloc); sock_rfree() 1670 sk_mem_uncharge(sk, len); sock_rfree() 1680 sock_put(skb->sk); sock_efree() 1684 kuid_t sock_i_uid(struct sock *sk) sock_i_uid() argument 1688 read_lock_bh(&sk->sk_callback_lock); sock_i_uid() 1689 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; sock_i_uid() 1690 read_unlock_bh(&sk->sk_callback_lock); sock_i_uid() 1695 unsigned long sock_i_ino(struct sock *sk) sock_i_ino() argument 1699 read_lock_bh(&sk->sk_callback_lock); sock_i_ino() 1700 ino = sk->sk_socket ? 
SOCK_INODE(sk->sk_socket)->i_ino : 0; sock_i_ino() 1701 read_unlock_bh(&sk->sk_callback_lock); sock_i_ino() 1709 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, sock_wmalloc() argument 1712 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { sock_wmalloc() 1715 skb_set_owner_w(skb, sk); sock_wmalloc() 1726 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) sock_kmalloc() argument 1729 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { sock_kmalloc() 1734 atomic_add(size, &sk->sk_omem_alloc); sock_kmalloc() 1738 atomic_sub(size, &sk->sk_omem_alloc); sock_kmalloc() 1748 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, __sock_kfree_s() argument 1757 atomic_sub(size, &sk->sk_omem_alloc); __sock_kfree_s() 1760 void sock_kfree_s(struct sock *sk, void *mem, int size) sock_kfree_s() argument 1762 __sock_kfree_s(sk, mem, size, false); sock_kfree_s() 1766 void sock_kzfree_s(struct sock *sk, void *mem, int size) sock_kzfree_s() argument 1768 __sock_kfree_s(sk, mem, size, true); sock_kzfree_s() 1775 static long sock_wait_for_wmem(struct sock *sk, long timeo) sock_wait_for_wmem() argument 1779 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); sock_wait_for_wmem() 1785 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sock_wait_for_wmem() 1786 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sock_wait_for_wmem() 1787 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) sock_wait_for_wmem() 1789 if (sk->sk_shutdown & SEND_SHUTDOWN) sock_wait_for_wmem() 1791 if (sk->sk_err) sock_wait_for_wmem() 1795 finish_wait(sk_sleep(sk), &wait); sock_wait_for_wmem() 1804 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, sock_alloc_send_pskb() argument 1812 timeo = sock_sndtimeo(sk, noblock); sock_alloc_send_pskb() 1814 err = sock_error(sk); sock_alloc_send_pskb() 1819 if (sk->sk_shutdown & SEND_SHUTDOWN) sock_alloc_send_pskb() 1822 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) sock_alloc_send_pskb() 1825 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); sock_alloc_send_pskb() 1826 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sock_alloc_send_pskb() 1832 timeo = sock_wait_for_wmem(sk, timeo); sock_alloc_send_pskb() 1835 errcode, sk->sk_allocation); sock_alloc_send_pskb() 1837 skb_set_owner_w(skb, sk); sock_alloc_send_pskb() 1848 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, sock_alloc_send_skb() argument 1851 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); sock_alloc_send_skb() 1899 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) sk_page_frag_refill() argument 1901 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) sk_page_frag_refill() 1904 sk_enter_memory_pressure(sk); sk_page_frag_refill() 1905 sk_stream_moderate_sndbuf(sk); sk_page_frag_refill() 1910 static void __lock_sock(struct sock *sk) 1911 __releases(&sk->sk_lock.slock) 1912 __acquires(&sk->sk_lock.slock) 1917 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, 1919 spin_unlock_bh(&sk->sk_lock.slock); 1921 spin_lock_bh(&sk->sk_lock.slock); 1922 if (!sock_owned_by_user(sk)) 1925 finish_wait(&sk->sk_lock.wq, &wait); 1928 static void __release_sock(struct sock *sk) 1929 __releases(&sk->sk_lock.slock) 1930 __acquires(&sk->sk_lock.slock) 1932 struct sk_buff *skb = sk->sk_backlog.head; 1935 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; 1936 bh_unlock_sock(sk); variable 1944 sk_backlog_rcv(sk, skb); 1957 bh_lock_sock(sk); variable 1958 } while ((skb = 
sk->sk_backlog.head) != NULL); 1964 sk->sk_backlog.len = 0; 1969 * @sk: sock to wait on 1972 * Now socket state including sk->sk_err is changed only under lock, 1977 int sk_wait_data(struct sock *sk, long *timeo) sk_wait_data() argument 1982 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sk_wait_data() 1983 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); sk_wait_data() 1984 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); sk_wait_data() 1985 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); sk_wait_data() 1986 finish_wait(sk_sleep(sk), &wait); sk_wait_data() 1993 * @sk: socket 2001 int __sk_mem_schedule(struct sock *sk, int size, int kind) __sk_mem_schedule() argument 2003 struct proto *prot = sk->sk_prot; __sk_mem_schedule() 2008 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; __sk_mem_schedule() 2010 allocated = sk_memory_allocated_add(sk, amt, &parent_status); __sk_mem_schedule() 2014 allocated <= sk_prot_mem_limits(sk, 0)) { __sk_mem_schedule() 2015 sk_leave_memory_pressure(sk); __sk_mem_schedule() 2021 allocated > sk_prot_mem_limits(sk, 1)) __sk_mem_schedule() 2022 sk_enter_memory_pressure(sk); __sk_mem_schedule() 2026 (allocated > sk_prot_mem_limits(sk, 2))) __sk_mem_schedule() 2031 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) __sk_mem_schedule() 2035 if (sk->sk_type == SOCK_STREAM) { __sk_mem_schedule() 2036 if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) __sk_mem_schedule() 2038 } else if (atomic_read(&sk->sk_wmem_alloc) < __sk_mem_schedule() 2043 if (sk_has_memory_pressure(sk)) { __sk_mem_schedule() 2046 if (!sk_under_memory_pressure(sk)) __sk_mem_schedule() 2048 alloc = sk_sockets_allocated_read_positive(sk); __sk_mem_schedule() 2049 if (sk_prot_mem_limits(sk, 2) > alloc * __sk_mem_schedule() 2050 sk_mem_pages(sk->sk_wmem_queued + __sk_mem_schedule() 2051 atomic_read(&sk->sk_rmem_alloc) + __sk_mem_schedule() 2052 sk->sk_forward_alloc)) __sk_mem_schedule() 2058 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { __sk_mem_schedule() 2059 sk_stream_moderate_sndbuf(sk); __sk_mem_schedule() 2064 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) __sk_mem_schedule() 2068 trace_sock_exceed_buf_limit(sk, prot, allocated); __sk_mem_schedule() 2071 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; __sk_mem_schedule() 2073 sk_memory_allocated_sub(sk, amt); __sk_mem_schedule() 2081 * @sk: socket 2083 void __sk_mem_reclaim(struct sock *sk) __sk_mem_reclaim() argument 2085 sk_memory_allocated_sub(sk, __sk_mem_reclaim() 2086 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT); __sk_mem_reclaim() 2087 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; __sk_mem_reclaim() 2089 if (sk_under_memory_pressure(sk) && __sk_mem_reclaim() 2090 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) __sk_mem_reclaim() 2091 sk_leave_memory_pressure(sk); __sk_mem_reclaim() 2211 static void sock_def_wakeup(struct sock *sk) sock_def_wakeup() argument 2216 wq = rcu_dereference(sk->sk_wq); sock_def_wakeup() 2222 static void sock_def_error_report(struct sock *sk) sock_def_error_report() argument 2227 wq = rcu_dereference(sk->sk_wq); sock_def_error_report() 2230 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); sock_def_error_report() 2234 static void sock_def_readable(struct sock *sk) sock_def_readable() argument 2239 wq = rcu_dereference(sk->sk_wq); sock_def_readable() 2243 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); sock_def_readable() 2247 static void sock_def_write_space(struct sock *sk) sock_def_write_space() argument 2256 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= 
sk->sk_sndbuf) { sock_def_write_space() 2257 wq = rcu_dereference(sk->sk_wq); sock_def_write_space() 2263 if (sock_writeable(sk)) sock_def_write_space() 2264 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); sock_def_write_space() 2270 static void sock_def_destruct(struct sock *sk) sock_def_destruct() argument 2272 kfree(sk->sk_protinfo); sock_def_destruct() 2275 void sk_send_sigurg(struct sock *sk) sk_send_sigurg() argument 2277 if (sk->sk_socket && sk->sk_socket->file) sk_send_sigurg() 2278 if (send_sigurg(&sk->sk_socket->file->f_owner)) sk_send_sigurg() 2279 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); sk_send_sigurg() 2283 void sk_reset_timer(struct sock *sk, struct timer_list* timer, sk_reset_timer() argument 2287 sock_hold(sk); sk_reset_timer() 2291 void sk_stop_timer(struct sock *sk, struct timer_list* timer) sk_stop_timer() argument 2294 __sock_put(sk); sk_stop_timer() 2298 void sock_init_data(struct socket *sock, struct sock *sk) sock_init_data() argument 2300 skb_queue_head_init(&sk->sk_receive_queue); sock_init_data() 2301 skb_queue_head_init(&sk->sk_write_queue); sock_init_data() 2302 skb_queue_head_init(&sk->sk_error_queue); sock_init_data() 2304 sk->sk_send_head = NULL; sock_init_data() 2306 init_timer(&sk->sk_timer); sock_init_data() 2308 sk->sk_allocation = GFP_KERNEL; sock_init_data() 2309 sk->sk_rcvbuf = sysctl_rmem_default; sock_init_data() 2310 sk->sk_sndbuf = sysctl_wmem_default; sock_init_data() 2311 sk->sk_state = TCP_CLOSE; sock_init_data() 2312 sk_set_socket(sk, sock); sock_init_data() 2314 sock_set_flag(sk, SOCK_ZAPPED); sock_init_data() 2317 sk->sk_type = sock->type; sock_init_data() 2318 sk->sk_wq = sock->wq; sock_init_data() 2319 sock->sk = sk; sock_init_data() 2321 sk->sk_wq = NULL; sock_init_data() 2323 spin_lock_init(&sk->sk_dst_lock); sock_init_data() 2324 rwlock_init(&sk->sk_callback_lock); sock_init_data() 2325 lockdep_set_class_and_name(&sk->sk_callback_lock, sock_init_data() 2326 af_callback_keys + sk->sk_family, sock_init_data() 2327 af_family_clock_key_strings[sk->sk_family]); sock_init_data() 2329 sk->sk_state_change = sock_def_wakeup; sock_init_data() 2330 sk->sk_data_ready = sock_def_readable; sock_init_data() 2331 sk->sk_write_space = sock_def_write_space; sock_init_data() 2332 sk->sk_error_report = sock_def_error_report; sock_init_data() 2333 sk->sk_destruct = sock_def_destruct; sock_init_data() 2335 sk->sk_frag.page = NULL; sock_init_data() 2336 sk->sk_frag.offset = 0; sock_init_data() 2337 sk->sk_peek_off = -1; sock_init_data() 2339 sk->sk_peer_pid = NULL; sock_init_data() 2340 sk->sk_peer_cred = NULL; sock_init_data() 2341 sk->sk_write_pending = 0; sock_init_data() 2342 sk->sk_rcvlowat = 1; sock_init_data() 2343 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; sock_init_data() 2344 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; sock_init_data() 2346 sk->sk_stamp = ktime_set(-1L, 0); sock_init_data() 2349 sk->sk_napi_id = 0; sock_init_data() 2350 sk->sk_ll_usec = sysctl_net_busy_read; sock_init_data() 2353 sk->sk_max_pacing_rate = ~0U; sock_init_data() 2354 sk->sk_pacing_rate = ~0U; sock_init_data() 2360 atomic_set(&sk->sk_refcnt, 1); sock_init_data() 2361 atomic_set(&sk->sk_drops, 0); sock_init_data() 2365 void lock_sock_nested(struct sock *sk, int subclass) lock_sock_nested() argument 2368 spin_lock_bh(&sk->sk_lock.slock); lock_sock_nested() 2369 if (sk->sk_lock.owned) lock_sock_nested() 2370 __lock_sock(sk); lock_sock_nested() 2371 sk->sk_lock.owned = 1; lock_sock_nested() 2372 spin_unlock(&sk->sk_lock.slock); lock_sock_nested() 2376 
mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); lock_sock_nested() 2381 void release_sock(struct sock *sk) release_sock() argument 2386 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); release_sock() 2388 spin_lock_bh(&sk->sk_lock.slock); release_sock() 2389 if (sk->sk_backlog.tail) release_sock() 2390 __release_sock(sk); release_sock() 2392 /* Warning : release_cb() might need to release sk ownership, release_sock() 2393 * ie call sock_release_ownership(sk) before us. release_sock() 2395 if (sk->sk_prot->release_cb) release_sock() 2396 sk->sk_prot->release_cb(sk); release_sock() 2398 sock_release_ownership(sk); release_sock() 2399 if (waitqueue_active(&sk->sk_lock.wq)) release_sock() 2400 wake_up(&sk->sk_lock.wq); release_sock() 2401 spin_unlock_bh(&sk->sk_lock.slock); release_sock() 2407 * @sk: socket 2415 bool lock_sock_fast(struct sock *sk) lock_sock_fast() argument 2418 spin_lock_bh(&sk->sk_lock.slock); lock_sock_fast() 2420 if (!sk->sk_lock.owned) lock_sock_fast() 2426 __lock_sock(sk); lock_sock_fast() 2427 sk->sk_lock.owned = 1; lock_sock_fast() 2428 spin_unlock(&sk->sk_lock.slock); lock_sock_fast() 2432 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); lock_sock_fast() 2438 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) sock_get_timestamp() argument 2441 if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_get_timestamp() 2442 sock_enable_timestamp(sk, SOCK_TIMESTAMP); sock_get_timestamp() 2443 tv = ktime_to_timeval(sk->sk_stamp); sock_get_timestamp() 2447 sk->sk_stamp = ktime_get_real(); sock_get_timestamp() 2448 tv = ktime_to_timeval(sk->sk_stamp); sock_get_timestamp() 2454 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) sock_get_timestampns() argument 2457 if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_get_timestampns() 2458 sock_enable_timestamp(sk, SOCK_TIMESTAMP); sock_get_timestampns() 2459 ts = ktime_to_timespec(sk->sk_stamp); sock_get_timestampns() 2463 sk->sk_stamp = ktime_get_real(); sock_get_timestampns() 2464 ts = ktime_to_timespec(sk->sk_stamp); sock_get_timestampns() 2470 void sock_enable_timestamp(struct sock *sk, int flag) sock_enable_timestamp() argument 2472 if (!sock_flag(sk, flag)) { sock_enable_timestamp() 2473 unsigned long previous_flags = sk->sk_flags; sock_enable_timestamp() 2475 sock_set_flag(sk, flag); sock_enable_timestamp() 2486 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, sock_recv_errqueue() argument 2494 skb = sock_dequeue_err_skb(sk); sock_recv_errqueue() 2507 sock_recv_timestamp(msg, sk, skb); sock_recv_errqueue() 2532 struct sock *sk = sock->sk; sock_common_getsockopt() local 2534 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); sock_common_getsockopt() 2542 struct sock *sk = sock->sk; compat_sock_common_getsockopt() local 2544 if (sk->sk_prot->compat_getsockopt != NULL) compat_sock_common_getsockopt() 2545 return sk->sk_prot->compat_getsockopt(sk, level, optname, compat_sock_common_getsockopt() 2547 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); compat_sock_common_getsockopt() 2555 struct sock *sk = sock->sk; sock_common_recvmsg() local 2559 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, sock_common_recvmsg() 2573 struct sock *sk = sock->sk; sock_common_setsockopt() local 2575 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); sock_common_setsockopt() 2583 struct sock *sk = sock->sk; compat_sock_common_setsockopt() local 2585 if (sk->sk_prot->compat_setsockopt != NULL) 
compat_sock_common_setsockopt() 2586 return sk->sk_prot->compat_setsockopt(sk, level, optname, compat_sock_common_setsockopt() 2588 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); compat_sock_common_setsockopt() 2593 void sk_common_release(struct sock *sk) sk_common_release() argument 2595 if (sk->sk_prot->destroy) sk_common_release() 2596 sk->sk_prot->destroy(sk); sk_common_release() 2606 sk->sk_prot->unhash(sk); sk_common_release() 2620 sock_orphan(sk); sk_common_release() 2622 xfrm_sk_free_policy(sk); sk_common_release() 2624 sk_refcnt_debug_release(sk); sk_common_release() 2626 if (sk->sk_frag.page) { sk_common_release() 2627 put_page(sk->sk_frag.page); sk_common_release() 2628 sk->sk_frag.page = NULL; sk_common_release() 2631 sock_put(sk); sk_common_release()
|
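The sock.c listing above centres on the two-level socket lock: a bottom-half spinlock (sk_lock.slock) plus an owned flag, where __lock_sock() sleeps until the owner releases and __release_sock() replays packets that softirq context parked on sk->sk_backlog in the meantime. A minimal sketch of how protocol code typically brackets sleepable work with that lock (illustrative only, not taken from the file):

	static int example_set_rcvlowat(struct sock *sk, int val)
	{
		lock_sock(sk);			/* may sleep; sets sk_lock.owned */
		sk->sk_rcvlowat = val ? : 1;	/* same idiom as sock_setsockopt() */
		release_sock(sk);		/* replays sk_backlog, wakes waiters */
		return 0;
	}

For short, non-sleeping critical sections, lock_sock_fast()/unlock_sock_fast() above skip the ownership handoff entirely whenever no process currently holds the lock.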
H A D | sock_diag.c | 16 static u64 sock_gen_cookie(struct sock *sk) sock_gen_cookie() argument 19 u64 res = atomic64_read(&sk->sk_cookie); sock_gen_cookie() 23 res = atomic64_inc_return(&sock_net(sk)->cookie_gen); sock_gen_cookie() 24 atomic64_cmpxchg(&sk->sk_cookie, 0, res); sock_gen_cookie() 28 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie) sock_diag_check_cookie() argument 35 res = sock_gen_cookie(sk); sock_diag_check_cookie() 43 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie) sock_diag_save_cookie() argument 45 u64 res = sock_gen_cookie(sk); sock_diag_save_cookie() 52 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) sock_diag_put_meminfo() argument 56 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); sock_diag_put_meminfo() 57 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; sock_diag_put_meminfo() 58 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); sock_diag_put_meminfo() 59 mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf; sock_diag_put_meminfo() 60 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; sock_diag_put_meminfo() 61 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; sock_diag_put_meminfo() 62 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); sock_diag_put_meminfo() 63 mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; sock_diag_put_meminfo() 69 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, sock_diag_put_filterinfo() argument 84 filter = rcu_dereference(sk->sk_filter); sock_diag_put_filterinfo()
|
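sock_gen_cookie() above hands every socket a stable, non-zero 64-bit identifier lazily: the first caller draws a candidate from the per-namespace cookie_gen counter and publishes it with a compare-and-swap, so concurrent callers all observe the same value. A hedged sketch of the same publish-once idiom (names here are illustrative):

	static u64 get_or_assign_id(atomic64_t *slot, atomic64_t *generator)
	{
		u64 id = atomic64_read(slot);

		if (id == 0) {
			u64 cand = atomic64_inc_return(generator);	/* never 0 */

			/* Install only if still unset; a loser simply
			 * re-reads whatever the winning CPU stored. */
			atomic64_cmpxchg(slot, 0, cand);
			id = atomic64_read(slot);
		}
		return id;
	}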
H A D | datagram.c | 66 static inline int connection_based(struct sock *sk) connection_based() argument 68 return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; connection_based() 86 static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, wait_for_more_packets() argument 92 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); wait_for_more_packets() 95 error = sock_error(sk); wait_for_more_packets() 99 if (sk->sk_receive_queue.prev != skb) wait_for_more_packets() 103 if (sk->sk_shutdown & RCV_SHUTDOWN) wait_for_more_packets() 110 if (connection_based(sk) && wait_for_more_packets() 111 !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN)) wait_for_more_packets() 121 finish_wait(sk_sleep(sk), &wait); wait_for_more_packets() 165 * @sk: socket 194 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, __skb_recv_datagram() argument 197 struct sk_buff_head *queue = &sk->sk_receive_queue; __skb_recv_datagram() 202 * Caller is allowed not to check sk->sk_err before skb_recv_datagram() __skb_recv_datagram() 204 int error = sock_error(sk); __skb_recv_datagram() 209 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); __skb_recv_datagram() 247 if (sk_can_busy_loop(sk) && 248 sk_busy_loop(sk, flags & MSG_DONTWAIT)) 256 } while (!wait_for_more_packets(sk, err, &timeo, last)); 268 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, skb_recv_datagram() argument 273 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), skb_recv_datagram() 278 void skb_free_datagram(struct sock *sk, struct sk_buff *skb) skb_free_datagram() argument 281 sk_mem_reclaim_partial(sk); skb_free_datagram() 285 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) skb_free_datagram_locked() argument 294 slow = lock_sock_fast(sk); skb_free_datagram_locked() 296 sk_mem_reclaim_partial(sk); skb_free_datagram_locked() 297 unlock_sock_fast(sk, slow); skb_free_datagram_locked() 306 * @sk: socket 325 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) skb_kill_datagram() argument 331 spin_lock_bh(&sk->sk_receive_queue.lock); skb_kill_datagram() 332 if (skb == skb_peek(&sk->sk_receive_queue)) { skb_kill_datagram() 333 __skb_unlink(skb, &sk->sk_receive_queue); skb_kill_datagram() 337 spin_unlock_bh(&sk->sk_receive_queue.lock); skb_kill_datagram() 341 atomic_inc(&sk->sk_drops); skb_kill_datagram() 342 sk_mem_reclaim_partial(sk); skb_kill_datagram() 556 atomic_add(truesize, &skb->sk->sk_wmem_alloc); zerocopy_sg_from_iter() 755 struct sock *sk = sock->sk; datagram_poll() local 758 sock_poll_wait(file, sk_sleep(sk), wait); datagram_poll() 762 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) datagram_poll() 764 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); datagram_poll() 766 if (sk->sk_shutdown & RCV_SHUTDOWN) datagram_poll() 768 if (sk->sk_shutdown == SHUTDOWN_MASK) datagram_poll() 772 if (!skb_queue_empty(&sk->sk_receive_queue)) datagram_poll() 776 if (connection_based(sk)) { datagram_poll() 777 if (sk->sk_state == TCP_CLOSE) datagram_poll() 780 if (sk->sk_state == TCP_SYN_SENT) datagram_poll() 785 if (sock_writeable(sk)) datagram_poll() 788 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); datagram_poll()
|
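datagram.c supplies the generic blocking receive shared by UDP-style protocols: dequeue from sk_receive_queue, and if it is empty, wait_for_more_packets() sleeps until data, an error, shutdown, or the timeout derived from sock_rcvtimeo(). A sketch of the usual recvmsg shape built on these helpers (simplified; real callers also handle MSG_PEEK, truncation flags, and source-address copying):

	static int example_recvmsg(struct sock *sk, struct msghdr *msg,
				   size_t len, int noblock)
	{
		struct sk_buff *skb;
		int err, copied;

		skb = skb_recv_datagram(sk, 0, noblock, &err);
		if (!skb)
			return err;	/* -EAGAIN, -EINTR, pending sk_err... */

		copied = min_t(unsigned int, len, skb->len);
		err = skb_copy_datagram_msg(skb, 0, msg, copied);

		skb_free_datagram(sk, skb);	/* also reclaims rmem, see above */
		return err ? err : copied;
	}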
/linux-4.1.27/net/dccp/ |
H A D | qpolicy.c | 18 static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb) qpolicy_simple_push() argument 20 skb_queue_tail(&sk->sk_write_queue, skb); qpolicy_simple_push() 23 static bool qpolicy_simple_full(struct sock *sk) qpolicy_simple_full() argument 25 return dccp_sk(sk)->dccps_tx_qlen && qpolicy_simple_full() 26 sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen; qpolicy_simple_full() 29 static struct sk_buff *qpolicy_simple_top(struct sock *sk) qpolicy_simple_top() argument 31 return skb_peek(&sk->sk_write_queue); qpolicy_simple_top() 39 static struct sk_buff *qpolicy_prio_best_skb(struct sock *sk) qpolicy_prio_best_skb() argument 43 skb_queue_walk(&sk->sk_write_queue, skb) qpolicy_prio_best_skb() 49 static struct sk_buff *qpolicy_prio_worst_skb(struct sock *sk) qpolicy_prio_worst_skb() argument 53 skb_queue_walk(&sk->sk_write_queue, skb) qpolicy_prio_worst_skb() 59 static bool qpolicy_prio_full(struct sock *sk) qpolicy_prio_full() argument 61 if (qpolicy_simple_full(sk)) qpolicy_prio_full() 62 dccp_qpolicy_drop(sk, qpolicy_prio_worst_skb(sk)); qpolicy_prio_full() 73 void (*push) (struct sock *sk, struct sk_buff *skb); 74 bool (*full) (struct sock *sk); 75 struct sk_buff* (*top) (struct sock *sk); 96 void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb) dccp_qpolicy_push() argument 98 qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb); dccp_qpolicy_push() 101 bool dccp_qpolicy_full(struct sock *sk) dccp_qpolicy_full() argument 103 return qpol_table[dccp_sk(sk)->dccps_qpolicy].full(sk); dccp_qpolicy_full() 106 void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb) dccp_qpolicy_drop() argument 109 skb_unlink(skb, &sk->sk_write_queue); dccp_qpolicy_drop() 114 struct sk_buff *dccp_qpolicy_top(struct sock *sk) dccp_qpolicy_top() argument 116 return qpol_table[dccp_sk(sk)->dccps_qpolicy].top(sk); dccp_qpolicy_top() 119 struct sk_buff *dccp_qpolicy_pop(struct sock *sk) dccp_qpolicy_pop() argument 121 struct sk_buff *skb = dccp_qpolicy_top(sk); dccp_qpolicy_pop() 126 skb_unlink(skb, &sk->sk_write_queue); dccp_qpolicy_pop() 131 bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param) dccp_qpolicy_param_ok() argument 136 return (qpol_table[dccp_sk(sk)->dccps_qpolicy].params & param) == param; dccp_qpolicy_param_ok()
|
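qpolicy.c implements DCCP's pluggable TX-queue policies as a small vtable (push/full/top) indexed by dccps_qpolicy, with the dccp_qpolicy_*() wrappers dispatching through qpol_table[]. A hypothetical extra policy is just three more functions in that shape (sketch only; it would still need a qpol_table[] entry and a selectable policy id):

	static void qpolicy_unbounded_push(struct sock *sk, struct sk_buff *skb)
	{
		skb_queue_tail(&sk->sk_write_queue, skb);
	}

	static bool qpolicy_unbounded_full(struct sock *sk)
	{
		return false;			/* never apply backpressure */
	}

	static struct sk_buff *qpolicy_unbounded_top(struct sock *sk)
	{
		return skb_peek(&sk->sk_write_queue);
	}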
H A D | timer.c | 24 static void dccp_write_err(struct sock *sk) dccp_write_err() argument 26 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; dccp_write_err() 27 sk->sk_error_report(sk); dccp_write_err() 29 dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_write_err() 30 dccp_done(sk); dccp_write_err() 35 static int dccp_write_timeout(struct sock *sk) dccp_write_timeout() argument 37 const struct inet_connection_sock *icsk = inet_csk(sk); dccp_write_timeout() 40 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { dccp_write_timeout() 42 dst_negative_advice(sk); dccp_write_timeout() 67 dst_negative_advice(sk); dccp_write_timeout() 78 dccp_write_err(sk); dccp_write_timeout() 87 static void dccp_retransmit_timer(struct sock *sk) dccp_retransmit_timer() argument 89 struct inet_connection_sock *icsk = inet_csk(sk); dccp_retransmit_timer() 95 if (dccp_write_timeout(sk)) dccp_retransmit_timer() 105 if (dccp_retransmit_skb(sk) != 0) { dccp_retransmit_timer() 112 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, dccp_retransmit_timer() 122 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, dccp_retransmit_timer() 125 __sk_dst_reset(sk); dccp_retransmit_timer() 130 struct sock *sk = (struct sock *)data; dccp_write_timer() local 131 struct inet_connection_sock *icsk = inet_csk(sk); dccp_write_timer() 134 bh_lock_sock(sk); dccp_write_timer() 135 if (sock_owned_by_user(sk)) { dccp_write_timer() 137 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, dccp_write_timer() 142 if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending) dccp_write_timer() 146 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, dccp_write_timer() 156 dccp_retransmit_timer(sk); dccp_write_timer() 160 bh_unlock_sock(sk); dccp_write_timer() 161 sock_put(sk); dccp_write_timer() 166 struct sock *sk = (struct sock *)data; dccp_keepalive_timer() local 169 sock_put(sk); dccp_keepalive_timer() 175 struct sock *sk = (struct sock *)data; dccp_delack_timer() local 176 struct inet_connection_sock *icsk = inet_csk(sk); dccp_delack_timer() 178 bh_lock_sock(sk); dccp_delack_timer() 179 if (sock_owned_by_user(sk)) { dccp_delack_timer() 182 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); dccp_delack_timer() 183 sk_reset_timer(sk, &icsk->icsk_delack_timer, dccp_delack_timer() 188 if (sk->sk_state == DCCP_CLOSED || dccp_delack_timer() 192 sk_reset_timer(sk, &icsk->icsk_delack_timer, dccp_delack_timer() 199 if (inet_csk_ack_scheduled(sk)) { dccp_delack_timer() 211 dccp_send_ack(sk); dccp_delack_timer() 212 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); dccp_delack_timer() 215 bh_unlock_sock(sk); dccp_delack_timer() 216 sock_put(sk); dccp_delack_timer() 225 struct sock *sk = (struct sock *)data; dccp_write_xmitlet() local 227 bh_lock_sock(sk); dccp_write_xmitlet() 228 if (sock_owned_by_user(sk)) dccp_write_xmitlet() 229 sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1); dccp_write_xmitlet() 231 dccp_write_xmit(sk); dccp_write_xmitlet() 232 bh_unlock_sock(sk); dccp_write_xmitlet() 241 void dccp_init_xmit_timers(struct sock *sk) dccp_init_xmit_timers() argument 243 struct dccp_sock *dp = dccp_sk(sk); dccp_init_xmit_timers() 245 tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk); dccp_init_xmit_timers() 247 (unsigned long)sk); dccp_init_xmit_timers() 248 inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, dccp_init_xmit_timers()
|
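Every handler in timer.c follows the same deferral discipline: take bh_lock_sock(), and if a process currently owns the socket (sock_owned_by_user()), re-arm the timer a little later rather than touch protocol state from softirq context; on exit, drop the reference that sk_reset_timer() took when the timer was armed. The skeleton, condensed (illustrative; the retry interval here is arbitrary):

	static void example_sock_timer(unsigned long data)
	{
		struct sock *sk = (struct sock *)data;

		bh_lock_sock(sk);
		if (sock_owned_by_user(sk)) {
			/* Socket locked by a process: try again shortly. */
			sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
			goto out;
		}

		/* ...expiry work runs here, under the bh lock... */
	out:
		bh_unlock_sock(sk);
		sock_put(sk);	/* pairs with sock_hold() in sk_reset_timer() */
	}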
H A D | proto.c | 77 void dccp_set_state(struct sock *sk, const int state) dccp_set_state() argument 79 const int oldstate = sk->sk_state; dccp_set_state() 81 dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk, dccp_set_state() 91 dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg); dccp_set_state() 99 sk->sk_prot->unhash(sk); dccp_set_state() 100 if (inet_csk(sk)->icsk_bind_hash != NULL && dccp_set_state() 101 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) dccp_set_state() 102 inet_put_port(sk); dccp_set_state() 112 sk->sk_state = state; dccp_set_state() 117 static void dccp_finish_passive_close(struct sock *sk) dccp_finish_passive_close() argument 119 switch (sk->sk_state) { dccp_finish_passive_close() 122 dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED); dccp_finish_passive_close() 123 dccp_set_state(sk, DCCP_CLOSED); dccp_finish_passive_close() 130 dccp_send_close(sk, 1); dccp_finish_passive_close() 131 dccp_set_state(sk, DCCP_CLOSING); dccp_finish_passive_close() 135 void dccp_done(struct sock *sk) dccp_done() argument 137 dccp_set_state(sk, DCCP_CLOSED); dccp_done() 138 dccp_clear_xmit_timers(sk); dccp_done() 140 sk->sk_shutdown = SHUTDOWN_MASK; dccp_done() 142 if (!sock_flag(sk, SOCK_DEAD)) dccp_done() 143 sk->sk_state_change(sk); dccp_done() 145 inet_csk_destroy_sock(sk); dccp_done() 173 int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) dccp_init_sock() argument 175 struct dccp_sock *dp = dccp_sk(sk); dccp_init_sock() 176 struct inet_connection_sock *icsk = inet_csk(sk); dccp_init_sock() 180 sk->sk_state = DCCP_CLOSED; dccp_init_sock() 181 sk->sk_write_space = dccp_write_space; dccp_init_sock() 189 dccp_init_xmit_timers(sk); dccp_init_sock() 194 return dccp_feat_init(sk); dccp_init_sock() 200 void dccp_destroy_sock(struct sock *sk) dccp_destroy_sock() argument 202 struct dccp_sock *dp = dccp_sk(sk); dccp_destroy_sock() 208 if (sk->sk_send_head != NULL) { dccp_destroy_sock() 209 kfree_skb(sk->sk_send_head); dccp_destroy_sock() 210 sk->sk_send_head = NULL; dccp_destroy_sock() 214 if (inet_csk(sk)->icsk_bind_hash != NULL) dccp_destroy_sock() 215 inet_put_port(sk); dccp_destroy_sock() 224 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); dccp_destroy_sock() 225 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); dccp_destroy_sock() 234 static inline int dccp_listen_start(struct sock *sk, int backlog) dccp_listen_start() argument 236 struct dccp_sock *dp = dccp_sk(sk); dccp_listen_start() 242 return inet_csk_listen_start(sk, backlog); dccp_listen_start() 251 int dccp_disconnect(struct sock *sk, int flags) dccp_disconnect() argument 253 struct inet_connection_sock *icsk = inet_csk(sk); dccp_disconnect() 254 struct inet_sock *inet = inet_sk(sk); dccp_disconnect() 256 const int old_state = sk->sk_state; dccp_disconnect() 259 dccp_set_state(sk, DCCP_CLOSED); dccp_disconnect() 266 inet_csk_listen_stop(sk); dccp_disconnect() 268 dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_disconnect() 269 sk->sk_err = ECONNRESET; dccp_disconnect() 271 sk->sk_err = ECONNRESET; dccp_disconnect() 273 dccp_clear_xmit_timers(sk); dccp_disconnect() 275 __skb_queue_purge(&sk->sk_receive_queue); dccp_disconnect() 276 __skb_queue_purge(&sk->sk_write_queue); dccp_disconnect() 277 if (sk->sk_send_head != NULL) { dccp_disconnect() 278 __kfree_skb(sk->sk_send_head); dccp_disconnect() 279 sk->sk_send_head = NULL; dccp_disconnect() 284 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) dccp_disconnect() 285 inet_reset_saddr(sk); dccp_disconnect() 287 sk->sk_shutdown = 0; dccp_disconnect() 288 sock_reset_flag(sk, SOCK_DONE); 
dccp_disconnect() 291 inet_csk_delack_init(sk); dccp_disconnect() 292 __sk_dst_reset(sk); dccp_disconnect() 296 sk->sk_error_report(sk); dccp_disconnect() 313 struct sock *sk = sock->sk; dccp_poll() local 315 sock_poll_wait(file, sk_sleep(sk), wait); dccp_poll() 316 if (sk->sk_state == DCCP_LISTEN) dccp_poll() 317 return inet_csk_listen_poll(sk); dccp_poll() 325 if (sk->sk_err) dccp_poll() 328 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) dccp_poll() 330 if (sk->sk_shutdown & RCV_SHUTDOWN) dccp_poll() 334 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { dccp_poll() 335 if (atomic_read(&sk->sk_rmem_alloc) > 0) dccp_poll() 338 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { dccp_poll() 339 if (sk_stream_is_writeable(sk)) { dccp_poll() 343 &sk->sk_socket->flags); dccp_poll() 344 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); dccp_poll() 350 if (sk_stream_is_writeable(sk)) dccp_poll() 360 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) dccp_ioctl() argument 364 lock_sock(sk); dccp_ioctl() 366 if (sk->sk_state == DCCP_LISTEN) dccp_ioctl() 374 skb = skb_peek(&sk->sk_receive_queue); dccp_ioctl() 390 release_sock(sk); dccp_ioctl() 396 static int dccp_setsockopt_service(struct sock *sk, const __be32 service, dccp_setsockopt_service() argument 399 struct dccp_sock *dp = dccp_sk(sk); dccp_setsockopt_service() 421 lock_sock(sk); dccp_setsockopt_service() 427 release_sock(sk); dccp_setsockopt_service() 431 static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx) dccp_setsockopt_cscov() argument 455 rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len); dccp_setsockopt_cscov() 459 dccp_sk(sk)->dccps_pcrlen = cscov; dccp_setsockopt_cscov() 461 dccp_sk(sk)->dccps_pcslen = cscov; dccp_setsockopt_cscov() 467 static int dccp_setsockopt_ccid(struct sock *sk, int type, dccp_setsockopt_ccid() argument 480 lock_sock(sk); dccp_setsockopt_ccid() 482 rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen); dccp_setsockopt_ccid() 485 rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen); dccp_setsockopt_ccid() 486 release_sock(sk); dccp_setsockopt_ccid() 492 static int do_dccp_setsockopt(struct sock *sk, int level, int optname, do_dccp_setsockopt() argument 495 struct dccp_sock *dp = dccp_sk(sk); do_dccp_setsockopt() 509 return dccp_setsockopt_ccid(sk, optname, optval, optlen); do_dccp_setsockopt() 519 return dccp_setsockopt_service(sk, val, optval, optlen); do_dccp_setsockopt() 521 lock_sock(sk); do_dccp_setsockopt() 530 err = dccp_setsockopt_cscov(sk, val, false); do_dccp_setsockopt() 533 err = dccp_setsockopt_cscov(sk, val, true); do_dccp_setsockopt() 536 if (sk->sk_state != DCCP_CLOSED) do_dccp_setsockopt() 553 release_sock(sk); do_dccp_setsockopt() 558 int dccp_setsockopt(struct sock *sk, int level, int optname, dccp_setsockopt() argument 562 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, dccp_setsockopt() 565 return do_dccp_setsockopt(sk, level, optname, optval, optlen); dccp_setsockopt() 571 int compat_dccp_setsockopt(struct sock *sk, int level, int optname, compat_dccp_setsockopt() argument 575 return inet_csk_compat_setsockopt(sk, level, optname, compat_dccp_setsockopt() 577 return do_dccp_setsockopt(sk, level, optname, optval, optlen); compat_dccp_setsockopt() 583 static int dccp_getsockopt_service(struct sock *sk, int len, dccp_getsockopt_service() argument 587 const struct dccp_sock *dp = dccp_sk(sk); dccp_getsockopt_service() 591 lock_sock(sk); dccp_getsockopt_service() 607 release_sock(sk); dccp_getsockopt_service() 
611 static int do_dccp_getsockopt(struct sock *sk, int level, int optname, do_dccp_getsockopt() argument 623 dp = dccp_sk(sk); do_dccp_getsockopt() 630 return dccp_getsockopt_service(sk, len, do_dccp_getsockopt() 636 return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen); do_dccp_getsockopt() 663 return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, do_dccp_getsockopt() 666 return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname, do_dccp_getsockopt() 679 int dccp_getsockopt(struct sock *sk, int level, int optname, dccp_getsockopt() argument 683 return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level, dccp_getsockopt() 686 return do_dccp_getsockopt(sk, level, optname, optval, optlen); dccp_getsockopt() 692 int compat_dccp_getsockopt(struct sock *sk, int level, int optname, compat_dccp_getsockopt() argument 696 return inet_csk_compat_getsockopt(sk, level, optname, compat_dccp_getsockopt() 698 return do_dccp_getsockopt(sk, level, optname, optval, optlen); compat_dccp_getsockopt() 728 !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type)) for_each_cmsghdr() 744 int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) dccp_sendmsg() argument 746 const struct dccp_sock *dp = dccp_sk(sk); dccp_sendmsg() 756 lock_sock(sk); dccp_sendmsg() 758 if (dccp_qpolicy_full(sk)) { dccp_sendmsg() 763 timeo = sock_sndtimeo(sk, noblock); dccp_sendmsg() 770 if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN)) dccp_sendmsg() 771 if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0) dccp_sendmsg() 774 size = sk->sk_prot->max_header + len; dccp_sendmsg() 775 release_sock(sk); dccp_sendmsg() 776 skb = sock_alloc_send_skb(sk, size, noblock, &rc); dccp_sendmsg() 777 lock_sock(sk); dccp_sendmsg() 781 skb_reserve(skb, sk->sk_prot->max_header); dccp_sendmsg() 790 dccp_qpolicy_push(sk, skb); dccp_sendmsg() 797 dccp_write_xmit(sk); dccp_sendmsg() 799 release_sock(sk); dccp_sendmsg() 808 int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, dccp_recvmsg() argument 814 lock_sock(sk); dccp_recvmsg() 816 if (sk->sk_state == DCCP_LISTEN) { dccp_recvmsg() 821 timeo = sock_rcvtimeo(sk, nonblock); dccp_recvmsg() 824 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); dccp_recvmsg() 839 dccp_finish_passive_close(sk); dccp_recvmsg() 849 sk_eat_skb(sk, skb); dccp_recvmsg() 852 if (sock_flag(sk, SOCK_DONE)) { dccp_recvmsg() 857 if (sk->sk_err) { dccp_recvmsg() 858 len = sock_error(sk); dccp_recvmsg() 862 if (sk->sk_shutdown & RCV_SHUTDOWN) { dccp_recvmsg() 867 if (sk->sk_state == DCCP_CLOSED) { dccp_recvmsg() 868 if (!sock_flag(sk, SOCK_DONE)) { dccp_recvmsg() 889 sk_wait_data(sk, &timeo); dccp_recvmsg() 906 sk_eat_skb(sk, skb); dccp_recvmsg() 910 release_sock(sk); dccp_recvmsg() 918 struct sock *sk = sock->sk; inet_dccp_listen() local 922 lock_sock(sk); inet_dccp_listen() 928 old_state = sk->sk_state; inet_dccp_listen() 937 * FIXME: here it probably should be sk->sk_prot->listen_start inet_dccp_listen() 940 err = dccp_listen_start(sk, backlog); inet_dccp_listen() 944 sk->sk_max_ack_backlog = backlog; inet_dccp_listen() 948 release_sock(sk); inet_dccp_listen() 954 static void dccp_terminate_connection(struct sock *sk) dccp_terminate_connection() argument 958 switch (sk->sk_state) { dccp_terminate_connection() 961 dccp_finish_passive_close(sk); dccp_terminate_connection() 964 dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk); dccp_terminate_connection() 965 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); dccp_terminate_connection() 968 dccp_send_close(sk, 1); 
dccp_terminate_connection() 970 if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER && dccp_terminate_connection() 971 !dccp_sk(sk)->dccps_server_timewait) dccp_terminate_connection() 977 dccp_set_state(sk, next_state); dccp_terminate_connection() 981 void dccp_close(struct sock *sk, long timeout) dccp_close() argument 983 struct dccp_sock *dp = dccp_sk(sk); dccp_close() 988 lock_sock(sk); dccp_close() 990 sk->sk_shutdown = SHUTDOWN_MASK; dccp_close() 992 if (sk->sk_state == DCCP_LISTEN) { dccp_close() 993 dccp_set_state(sk, DCCP_CLOSED); dccp_close() 996 inet_csk_listen_stop(sk); dccp_close() 1001 sk_stop_timer(sk, &dp->dccps_xmit_timer); dccp_close() 1008 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { dccp_close() 1016 dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_close() 1017 dccp_set_state(sk, DCCP_CLOSED); dccp_close() 1018 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { dccp_close() 1020 sk->sk_prot->disconnect(sk, 0); dccp_close() 1021 } else if (sk->sk_state != DCCP_CLOSED) { dccp_close() 1026 dccp_flush_write_queue(sk, &timeout); dccp_close() 1027 dccp_terminate_connection(sk); dccp_close() 1036 __skb_queue_purge(&sk->sk_write_queue); dccp_close() 1038 sk_stream_wait_close(sk, timeout); dccp_close() 1041 state = sk->sk_state; dccp_close() 1042 sock_hold(sk); dccp_close() 1043 sock_orphan(sk); dccp_close() 1048 release_sock(sk); dccp_close() 1054 bh_lock_sock(sk); dccp_close() 1055 WARN_ON(sock_owned_by_user(sk)); dccp_close() 1057 percpu_counter_inc(sk->sk_prot->orphan_count); dccp_close() 1060 if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED) dccp_close() 1063 if (sk->sk_state == DCCP_CLOSED) dccp_close() 1064 inet_csk_destroy_sock(sk); dccp_close() 1069 bh_unlock_sock(sk); dccp_close() 1071 sock_put(sk); dccp_close() 1076 void dccp_shutdown(struct sock *sk, int how) dccp_shutdown() argument
|
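proto.c is the socket-level surface of DCCP: dccp_setsockopt()/dccp_getsockopt(), dccp_sendmsg()/dccp_recvmsg(), inet_dccp_listen() and dccp_close() map one-to-one onto ordinary socket calls. A hedged userspace sketch of exercising the service-code path that dccp_setsockopt_service() validates above (constants come from the DCCP UAPI headers; assumes a kernel built with DCCP support):

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <linux/dccp.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
		uint32_t service = htonl(42);	/* arbitrary example service code */

		if (fd < 0) {
			perror("socket");	/* e.g. kernel lacks DCCP */
			return 1;
		}
		if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
			       &service, sizeof(service)) < 0)
			perror("DCCP_SOCKOPT_SERVICE");
		return 0;
	}

The service code is consulted at connection setup, so it is normally set before connect() or listen().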
H A D | output.c | 25 static inline void dccp_event_ack_sent(struct sock *sk) dccp_event_ack_sent() argument 27 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); dccp_event_ack_sent() 31 static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb) dccp_skb_entail() argument 33 skb_set_owner_w(skb, sk); dccp_skb_entail() 34 WARN_ON(sk->sk_send_head); dccp_skb_entail() 35 sk->sk_send_head = skb; dccp_skb_entail() 36 return skb_clone(sk->sk_send_head, gfp_any()); dccp_skb_entail() 45 static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) dccp_transmit_skb() argument 48 struct inet_sock *inet = inet_sk(sk); dccp_transmit_skb() 49 const struct inet_connection_sock *icsk = inet_csk(sk); dccp_transmit_skb() 50 struct dccp_sock *dp = dccp_sk(sk); dccp_transmit_skb() 89 * through here with skb->sk set. dccp_transmit_skb() 91 WARN_ON(skb->sk); dccp_transmit_skb() 92 skb_set_owner_w(skb, sk); dccp_transmit_skb() 96 if (dccp_insert_options(sk, skb)) { dccp_transmit_skb() 113 dccp_update_gss(sk, dcb->dccpd_seq); dccp_transmit_skb() 134 icsk->icsk_af_ops->send_check(sk, skb); dccp_transmit_skb() 137 dccp_event_ack_sent(sk); dccp_transmit_skb() 141 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); dccp_transmit_skb() 162 unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu) dccp_sync_mss() argument 164 struct inet_connection_sock *icsk = inet_csk(sk); dccp_sync_mss() 165 struct dccp_sock *dp = dccp_sk(sk); dccp_sync_mss() 198 void dccp_write_space(struct sock *sk) dccp_write_space() argument 203 wq = rcu_dereference(sk->sk_wq); dccp_write_space() 207 if (sock_writeable(sk)) dccp_write_space() 208 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); dccp_write_space() 215 * @sk: socket to wait for 220 static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay) dccp_wait_for_ccid() argument 225 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dccp_wait_for_ccid() 226 sk->sk_write_pending++; dccp_wait_for_ccid() 227 release_sock(sk); dccp_wait_for_ccid() 231 lock_sock(sk); dccp_wait_for_ccid() 232 sk->sk_write_pending--; dccp_wait_for_ccid() 233 finish_wait(sk_sleep(sk), &wait); dccp_wait_for_ccid() 235 if (signal_pending(current) || sk->sk_err) dccp_wait_for_ccid() 244 static void dccp_xmit_packet(struct sock *sk) dccp_xmit_packet() argument 247 struct dccp_sock *dp = dccp_sk(sk); dccp_xmit_packet() 248 struct sk_buff *skb = dccp_qpolicy_pop(sk); dccp_xmit_packet() 254 if (sk->sk_state == DCCP_PARTOPEN) { dccp_xmit_packet() 265 dccp_send_ack(sk); dccp_xmit_packet() 269 inet_csk_schedule_ack(sk); dccp_xmit_packet() 270 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, dccp_xmit_packet() 271 inet_csk(sk)->icsk_rto, dccp_xmit_packet() 274 } else if (dccp_ack_pending(sk)) { dccp_xmit_packet() 280 err = dccp_transmit_skb(sk, skb); dccp_xmit_packet() 288 ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); dccp_xmit_packet() 297 dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC); dccp_xmit_packet() 307 void dccp_flush_write_queue(struct sock *sk, long *time_budget) dccp_flush_write_queue() argument 309 struct dccp_sock *dp = dccp_sk(sk); dccp_flush_write_queue() 313 while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) { dccp_flush_write_queue() 314 rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); dccp_flush_write_queue() 329 rc = dccp_wait_for_ccid(sk, delay); dccp_flush_write_queue() 336 dccp_xmit_packet(sk); dccp_flush_write_queue() 339 skb_dequeue(&sk->sk_write_queue); dccp_flush_write_queue() 346 void dccp_write_xmit(struct sock *sk) 
dccp_write_xmit() argument 348 struct dccp_sock *dp = dccp_sk(sk); dccp_write_xmit() 351 while ((skb = dccp_qpolicy_top(sk))) { dccp_write_xmit() 352 int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); dccp_write_xmit() 358 sk_reset_timer(sk, &dp->dccps_xmit_timer, dccp_write_xmit() 362 dccp_xmit_packet(sk); dccp_write_xmit() 365 dccp_qpolicy_drop(sk, skb); dccp_write_xmit() 378 * This function expects sk->sk_send_head to contain the original skb. 380 int dccp_retransmit_skb(struct sock *sk) dccp_retransmit_skb() argument 382 WARN_ON(sk->sk_send_head == NULL); dccp_retransmit_skb() 384 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0) dccp_retransmit_skb() 388 inet_csk(sk)->icsk_retransmits++; dccp_retransmit_skb() 390 return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC)); dccp_retransmit_skb() 393 struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, dccp_make_response() argument 401 struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, dccp_make_response() 407 skb_reserve(skb, sk->sk_prot->max_header); dccp_make_response() 451 struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb) dccp_ctl_make_reset() argument 461 skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC); dccp_ctl_make_reset() 465 skb_reserve(skb, sk->sk_prot->max_header); dccp_ctl_make_reset() 503 int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code) dccp_send_reset() argument 510 int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk); dccp_send_reset() 515 skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC); dccp_send_reset() 520 skb_reserve(skb, sk->sk_prot->max_header); dccp_send_reset() 524 return dccp_transmit_skb(sk, skb); dccp_send_reset() 530 int dccp_connect(struct sock *sk) dccp_connect() argument 533 struct dccp_sock *dp = dccp_sk(sk); dccp_connect() 534 struct dst_entry *dst = __sk_dst_get(sk); dccp_connect() 535 struct inet_connection_sock *icsk = inet_csk(sk); dccp_connect() 537 sk->sk_err = 0; dccp_connect() 538 sock_reset_flag(sk, SOCK_DONE); dccp_connect() 540 dccp_sync_mss(sk, dst_mtu(dst)); dccp_connect() 543 if (dccp_feat_finalise_settings(dccp_sk(sk))) dccp_connect() 549 skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation); dccp_connect() 554 skb_reserve(skb, sk->sk_prot->max_header); dccp_connect() 558 dccp_transmit_skb(sk, dccp_skb_entail(sk, skb)); dccp_connect() 563 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, dccp_connect() 570 void dccp_send_ack(struct sock *sk) dccp_send_ack() argument 573 if (sk->sk_state != DCCP_CLOSED) { dccp_send_ack() 574 struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, dccp_send_ack() 578 inet_csk_schedule_ack(sk); dccp_send_ack() 579 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; dccp_send_ack() 580 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, dccp_send_ack() 587 skb_reserve(skb, sk->sk_prot->max_header); dccp_send_ack() 589 dccp_transmit_skb(sk, skb); dccp_send_ack() 597 void dccp_send_delayed_ack(struct sock *sk) 599 struct inet_connection_sock *icsk = inet_csk(sk); 615 dccp_send_ack(sk); 624 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 628 void dccp_send_sync(struct sock *sk, const u64 ackno, dccp_send_sync() argument 636 struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC); dccp_send_sync() 645 skb_reserve(skb, sk->sk_prot->max_header); dccp_send_sync() 653 dccp_sk(sk)->dccps_sync_scheduled = 0; dccp_send_sync() 655 dccp_transmit_skb(sk, skb); dccp_send_sync() 665 void dccp_send_close(struct sock *sk, const int 
active) dccp_send_close() argument 667 struct dccp_sock *dp = dccp_sk(sk); dccp_send_close() 671 skb = alloc_skb(sk->sk_prot->max_header, prio); dccp_send_close() 676 skb_reserve(skb, sk->sk_prot->max_header); dccp_send_close() 683 skb = dccp_skb_entail(sk, skb); dccp_send_close() 694 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, dccp_send_close() 697 dccp_transmit_skb(sk, skb); dccp_send_close()
|
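In output.c, dccp_write_xmit() asks the active TX CCID for a verdict on each queued packet: ccid_hc_tx_send_packet() returning 0 means send now, a positive value is a delay in milliseconds (parked on dccps_xmit_timer, whose tasklet re-enters this loop), and other results end in dccp_qpolicy_drop(). A condensed sketch of that decision loop (not a drop-in replacement):

	static void example_write_xmit(struct sock *sk)
	{
		struct dccp_sock *dp = dccp_sk(sk);
		struct sk_buff *skb;

		while ((skb = dccp_qpolicy_top(sk)) != NULL) {
			int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid,
							sk, skb);

			if (rc == 0) {		/* CCID: send immediately */
				dccp_xmit_packet(sk);
			} else if (rc > 0) {	/* CCID: wait rc msecs */
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
					       jiffies + msecs_to_jiffies(rc));
				break;
			} else {		/* packet unusable: drop it */
				dccp_qpolicy_drop(sk, skb);
			}
		}
	}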
H A D | input.c | 26 static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb) dccp_enqueue_skb() argument 29 __skb_queue_tail(&sk->sk_receive_queue, skb); dccp_enqueue_skb() 30 skb_set_owner_r(skb, sk); dccp_enqueue_skb() 31 sk->sk_data_ready(sk); dccp_enqueue_skb() 34 static void dccp_fin(struct sock *sk, struct sk_buff *skb) dccp_fin() argument 42 sk->sk_shutdown = SHUTDOWN_MASK; dccp_fin() 43 sock_set_flag(sk, SOCK_DONE); dccp_fin() 44 dccp_enqueue_skb(sk, skb); dccp_fin() 47 static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb) dccp_rcv_close() argument 51 switch (sk->sk_state) { dccp_rcv_close() 69 if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) dccp_rcv_close() 74 dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED); dccp_rcv_close() 75 dccp_done(sk); dccp_rcv_close() 81 dccp_fin(sk, skb); dccp_rcv_close() 82 dccp_set_state(sk, DCCP_PASSIVE_CLOSE); dccp_rcv_close() 88 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); dccp_rcv_close() 93 static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) dccp_rcv_closereq() argument 103 if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) { dccp_rcv_closereq() 104 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); dccp_rcv_closereq() 109 switch (sk->sk_state) { dccp_rcv_closereq() 111 dccp_send_close(sk, 0); dccp_rcv_closereq() 112 dccp_set_state(sk, DCCP_CLOSING); dccp_rcv_closereq() 118 dccp_fin(sk, skb); dccp_rcv_closereq() 119 dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ); dccp_rcv_closereq() 122 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); dccp_rcv_closereq() 149 static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) dccp_rcv_reset() argument 153 sk->sk_err = err; dccp_rcv_reset() 156 dccp_fin(sk, skb); dccp_rcv_reset() 158 if (err && !sock_flag(sk, SOCK_DEAD)) dccp_rcv_reset() 159 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); dccp_rcv_reset() 160 dccp_time_wait(sk, DCCP_TIME_WAIT, 0); dccp_rcv_reset() 163 static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb) dccp_handle_ackvec_processing() argument 165 struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec; dccp_handle_ackvec_processing() 174 static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb) dccp_deliver_input_to_ccids() argument 176 const struct dccp_sock *dp = dccp_sk(sk); dccp_deliver_input_to_ccids() 179 if (!(sk->sk_shutdown & RCV_SHUTDOWN)) dccp_deliver_input_to_ccids() 180 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); dccp_deliver_input_to_ccids() 185 if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN)) dccp_deliver_input_to_ccids() 186 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); dccp_deliver_input_to_ccids() 189 static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) dccp_check_seqno() argument 192 struct dccp_sock *dp = dccp_sk(sk); dccp_check_seqno() 212 dccp_update_gsr(sk, seqno); dccp_check_seqno() 241 dccp_update_gsr(sk, seqno); dccp_check_seqno() 280 dccp_send_sync(sk, seqno, DCCP_PKT_SYNC); dccp_check_seqno() 287 static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, __dccp_rcv_established() argument 290 struct dccp_sock *dp = dccp_sk(sk); __dccp_rcv_established() 300 dccp_enqueue_skb(sk, skb); __dccp_rcv_established() 313 dccp_rcv_reset(sk, skb); __dccp_rcv_established() 316 if (dccp_rcv_closereq(sk, skb)) __dccp_rcv_established() 320 if (dccp_rcv_close(sk, skb)) __dccp_rcv_established() 345 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, __dccp_rcv_established() 350 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, 
__dccp_rcv_established() 368 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, dccp_rcv_established() argument 371 if (dccp_check_seqno(sk, skb)) dccp_rcv_established() 374 if (dccp_parse_options(sk, NULL, skb)) dccp_rcv_established() 377 dccp_handle_ackvec_processing(sk, skb); dccp_rcv_established() 378 dccp_deliver_input_to_ccids(sk, skb); dccp_rcv_established() 380 return __dccp_rcv_established(sk, skb, dh, len); dccp_rcv_established() 388 static int dccp_rcv_request_sent_state_process(struct sock *sk, dccp_rcv_request_sent_state_process() argument 405 const struct inet_connection_sock *icsk = inet_csk(sk); dccp_rcv_request_sent_state_process() 406 struct dccp_sock *dp = dccp_sk(sk); dccp_rcv_request_sent_state_process() 424 if (dccp_parse_options(sk, NULL, skb)) dccp_rcv_request_sent_state_process() 429 dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp - dccp_rcv_request_sent_state_process() 433 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); dccp_rcv_request_sent_state_process() 434 WARN_ON(sk->sk_send_head == NULL); dccp_rcv_request_sent_state_process() 435 kfree_skb(sk->sk_send_head); dccp_rcv_request_sent_state_process() 436 sk->sk_send_head = NULL; dccp_rcv_request_sent_state_process() 447 dccp_sync_mss(sk, icsk->icsk_pmtu_cookie); dccp_rcv_request_sent_state_process() 464 dccp_set_state(sk, DCCP_PARTOPEN); dccp_rcv_request_sent_state_process() 472 if (dccp_feat_activate_values(sk, &dp->dccps_featneg)) dccp_rcv_request_sent_state_process() 476 icsk->icsk_af_ops->rebuild_header(sk); dccp_rcv_request_sent_state_process() 478 if (!sock_flag(sk, SOCK_DEAD)) { dccp_rcv_request_sent_state_process() 479 sk->sk_state_change(sk); dccp_rcv_request_sent_state_process() 480 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); dccp_rcv_request_sent_state_process() 483 if (sk->sk_write_pending || icsk->icsk_ack.pingpong || dccp_rcv_request_sent_state_process() 501 dccp_send_ack(sk); dccp_rcv_request_sent_state_process() 516 dccp_set_state(sk, DCCP_CLOSED); dccp_rcv_request_sent_state_process() 517 sk->sk_err = ECOMM; dccp_rcv_request_sent_state_process() 521 static int dccp_rcv_respond_partopen_state_process(struct sock *sk, dccp_rcv_respond_partopen_state_process() argument 526 struct dccp_sock *dp = dccp_sk(sk); dccp_rcv_respond_partopen_state_process() 532 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); dccp_rcv_respond_partopen_state_process() 535 if (sk->sk_state == DCCP_RESPOND) dccp_rcv_respond_partopen_state_process() 549 if (sk->sk_state == DCCP_PARTOPEN) dccp_rcv_respond_partopen_state_process() 550 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); dccp_rcv_respond_partopen_state_process() 556 dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta); dccp_rcv_respond_partopen_state_process() 560 dccp_set_state(sk, DCCP_OPEN); dccp_rcv_respond_partopen_state_process() 564 __dccp_rcv_established(sk, skb, dh, len); dccp_rcv_respond_partopen_state_process() 574 int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, dccp_rcv_state_process() argument 577 struct dccp_sock *dp = dccp_sk(sk); dccp_rcv_state_process() 579 const int old_state = sk->sk_state; dccp_rcv_state_process() 604 if (sk->sk_state == DCCP_LISTEN) { dccp_rcv_state_process() 606 if (inet_csk(sk)->icsk_af_ops->conn_request(sk, dccp_rcv_state_process() 617 } else if (sk->sk_state == DCCP_CLOSED) { dccp_rcv_state_process() 623 if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb)) dccp_rcv_state_process() 638 (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) { dccp_rcv_state_process() 639 
dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC); dccp_rcv_state_process() 644 if (dccp_parse_options(sk, NULL, skb)) dccp_rcv_state_process() 656 dccp_rcv_reset(sk, skb); dccp_rcv_state_process() 659 if (dccp_rcv_closereq(sk, skb)) dccp_rcv_state_process() 663 if (dccp_rcv_close(sk, skb)) dccp_rcv_state_process() 668 switch (sk->sk_state) { dccp_rcv_state_process() 670 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); dccp_rcv_state_process() 679 dccp_handle_ackvec_processing(sk, skb); dccp_rcv_state_process() 680 dccp_deliver_input_to_ccids(sk, skb); dccp_rcv_state_process() 683 queued = dccp_rcv_respond_partopen_state_process(sk, skb, dccp_rcv_state_process() 692 sk->sk_state_change(sk); dccp_rcv_state_process() 693 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); dccp_rcv_state_process() 697 dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK); dccp_rcv_state_process() 717 u32 dccp_sample_rtt(struct sock *sk, long delta) dccp_sample_rtt() argument 720 delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10; dccp_sample_rtt()
|
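The handlers in input.c share one ownership convention: a zero return from dccp_rcv_established()/dccp_rcv_state_process() means the packet was consumed (possibly queued onto sk_receive_queue via dccp_enqueue_skb()), while a non-zero return tells the caller to answer with a RESET and discard. The per-socket receive dispatch in ipv6.c below follows exactly this shape; sketched and simplified:

	static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
	{
		if (sk->sk_state == DCCP_OPEN) {	/* fast path */
			if (dccp_rcv_established(sk, skb, dccp_hdr(skb),
						 skb->len))
				goto reset;
			return 0;
		}

		if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		return 0;

	reset:
		/* The real input paths send a RESET at this point
		 * (cf. dccp_v6_do_rcv()), then discard the skb. */
		kfree_skb(skb);
		return 0;
	}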
H A D | ccid.h | 59 int (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk); 60 int (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk); 61 void (*ccid_hc_rx_exit)(struct sock *sk); 62 void (*ccid_hc_tx_exit)(struct sock *sk); 63 void (*ccid_hc_rx_packet_recv)(struct sock *sk, 65 int (*ccid_hc_rx_parse_options)(struct sock *sk, u8 pkt, 67 int (*ccid_hc_rx_insert_options)(struct sock *sk, 69 void (*ccid_hc_tx_packet_recv)(struct sock *sk, 71 int (*ccid_hc_tx_parse_options)(struct sock *sk, u8 pkt, 73 int (*ccid_hc_tx_send_packet)(struct sock *sk, 75 void (*ccid_hc_tx_packet_sent)(struct sock *sk, 77 void (*ccid_hc_rx_get_info)(struct sock *sk, 79 void (*ccid_hc_tx_get_info)(struct sock *sk, 81 int (*ccid_hc_rx_getsockopt)(struct sock *sk, 85 int (*ccid_hc_tx_getsockopt)(struct sock *sk, 111 int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, 114 struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx); 134 void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk); 135 void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk); 166 static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, ccid_hc_tx_send_packet() argument 170 return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb); ccid_hc_tx_send_packet() 174 static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, ccid_hc_tx_packet_sent() argument 178 ccid->ccid_ops->ccid_hc_tx_packet_sent(sk, len); ccid_hc_tx_packet_sent() 181 static inline void ccid_hc_rx_packet_recv(struct ccid *ccid, struct sock *sk, ccid_hc_rx_packet_recv() argument 185 ccid->ccid_ops->ccid_hc_rx_packet_recv(sk, skb); ccid_hc_rx_packet_recv() 188 static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk, ccid_hc_tx_packet_recv() argument 192 ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb); ccid_hc_tx_packet_recv() 202 static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, ccid_hc_tx_parse_options() argument 207 return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); ccid_hc_tx_parse_options() 214 static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, ccid_hc_rx_parse_options() argument 219 return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); ccid_hc_rx_parse_options() 222 static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk, ccid_hc_rx_insert_options() argument 226 return ccid->ccid_ops->ccid_hc_rx_insert_options(sk, skb); ccid_hc_rx_insert_options() 230 static inline void ccid_hc_rx_get_info(struct ccid *ccid, struct sock *sk, ccid_hc_rx_get_info() argument 234 ccid->ccid_ops->ccid_hc_rx_get_info(sk, info); ccid_hc_rx_get_info() 237 static inline void ccid_hc_tx_get_info(struct ccid *ccid, struct sock *sk, ccid_hc_tx_get_info() argument 241 ccid->ccid_ops->ccid_hc_tx_get_info(sk, info); ccid_hc_tx_get_info() 244 static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk, ccid_hc_rx_getsockopt() argument 250 rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len, ccid_hc_rx_getsockopt() 255 static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk, ccid_hc_tx_getsockopt() argument 261 rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len, ccid_hc_tx_getsockopt()
|
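ccid.h plugs congestion-control modules in through a table of function pointers, with inline wrappers guarding each call so a CCID may leave hooks unset. A compact, compilable sketch of that guarded-call pattern (names and types are illustrative, not the kernel's):

#include <stddef.h>

struct conn;                       /* stand-in for struct sock */

struct cc_ops {
    int  (*tx_send_packet)(struct conn *c, const void *pkt, size_t len);
    void (*tx_packet_sent)(struct conn *c, unsigned int len);
};

struct cc {
    const struct cc_ops *ops;
};

/* Like ccid_hc_tx_send_packet(): a missing hook means "may send now". */
static int cc_tx_send_packet(struct cc *cc, struct conn *c,
                             const void *pkt, size_t len)
{
    if (cc->ops->tx_send_packet)
        return cc->ops->tx_send_packet(c, pkt, len);
    return 0;
}

/* Like ccid_hc_tx_packet_sent(): a purely optional notification. */
static void cc_tx_packet_sent(struct cc *cc, struct conn *c, unsigned int len)
{
    if (cc->ops->tx_packet_sent)
        cc->ops->tx_packet_sent(c, len);
}

static int noop_send(struct conn *c, const void *pkt, size_t len)
{
    (void)c; (void)pkt; (void)len;
    return 0;                      /* always allowed to send */
}

int main(void)
{
    static const struct cc_ops ops = { .tx_send_packet = noop_send };
    struct cc cc = { .ops = &ops };
    cc_tx_packet_sent(&cc, NULL, 1);   /* hook unset: silently skipped */
    return cc_tx_send_packet(&cc, NULL, "x", 1);
}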
H A D | ipv6.c | 51 static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb) dccp_v6_send_check() argument 53 struct ipv6_pinfo *np = inet6_sk(sk); dccp_v6_send_check() 57 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr); dccp_v6_send_check() 76 struct sock *sk; dccp_v6_err() local 88 sk = __inet6_lookup_established(net, &dccp_hashinfo, dccp_v6_err() 93 if (!sk) { dccp_v6_err() 99 if (sk->sk_state == DCCP_TIME_WAIT) { dccp_v6_err() 100 inet_twsk_put(inet_twsk(sk)); dccp_v6_err() 104 if (sk->sk_state == DCCP_NEW_SYN_RECV) dccp_v6_err() 105 return dccp_req_err(sk, seq); dccp_v6_err() 107 bh_lock_sock(sk); dccp_v6_err() 108 if (sock_owned_by_user(sk)) dccp_v6_err() 111 if (sk->sk_state == DCCP_CLOSED) dccp_v6_err() 114 dp = dccp_sk(sk); dccp_v6_err() 115 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && dccp_v6_err() 121 np = inet6_sk(sk); dccp_v6_err() 124 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); dccp_v6_err() 127 dst->ops->redirect(dst, sk, skb); dccp_v6_err() 134 if (!ip6_sk_accept_pmtu(sk)) dccp_v6_err() 137 if (sock_owned_by_user(sk)) dccp_v6_err() 139 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED)) dccp_v6_err() 142 dst = inet6_csk_update_pmtu(sk, ntohl(info)); dccp_v6_err() 146 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) dccp_v6_err() 147 dccp_sync_mss(sk, dst_mtu(dst)); dccp_v6_err() 154 switch (sk->sk_state) { dccp_v6_err() 158 if (!sock_owned_by_user(sk)) { dccp_v6_err() 160 sk->sk_err = err; dccp_v6_err() 165 sk->sk_error_report(sk); dccp_v6_err() 166 dccp_done(sk); dccp_v6_err() 168 sk->sk_err_soft = err; dccp_v6_err() 172 if (!sock_owned_by_user(sk) && np->recverr) { dccp_v6_err() 173 sk->sk_err = err; dccp_v6_err() 174 sk->sk_error_report(sk); dccp_v6_err() 176 sk->sk_err_soft = err; dccp_v6_err() 179 bh_unlock_sock(sk); dccp_v6_err() 180 sock_put(sk); dccp_v6_err() 184 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) dccp_v6_send_response() argument 187 struct ipv6_pinfo *np = inet6_sk(sk); dccp_v6_send_response() 209 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); dccp_v6_send_response() 216 skb = dccp_make_response(sk, dst, req); dccp_v6_send_response() 225 err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), dccp_v6_send_response() 242 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) dccp_v6_ctl_send_reset() argument 275 /* sk = NULL, but it is safe for now. RST socket required. 
*/ dccp_v6_ctl_send_reset() 298 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) dccp_v6_hnd_req() argument 305 req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr, dccp_v6_hnd_req() 308 nsk = dccp_check_req(sk, skb, req); dccp_v6_hnd_req() 313 nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, dccp_v6_hnd_req() 326 return sk; dccp_v6_hnd_req() 329 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) dccp_v6_conn_request() argument 334 struct ipv6_pinfo *np = inet6_sk(sk); dccp_v6_conn_request() 339 return dccp_v4_conn_request(sk, skb); dccp_v6_conn_request() 344 if (dccp_bad_service_code(sk, service)) { dccp_v6_conn_request() 352 if (inet_csk_reqsk_queue_is_full(sk)) dccp_v6_conn_request() 355 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) dccp_v6_conn_request() 358 req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk); dccp_v6_conn_request() 362 if (dccp_reqsk_init(req, dccp_sk(sk), skb)) dccp_v6_conn_request() 366 if (dccp_parse_options(sk, dreq, skb)) dccp_v6_conn_request() 369 if (security_inet_conn_request(sk, skb, req)) dccp_v6_conn_request() 377 if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) || dccp_v6_conn_request() 383 ireq->ir_iif = sk->sk_bound_dev_if; dccp_v6_conn_request() 386 if (!sk->sk_bound_dev_if && dccp_v6_conn_request() 403 if (dccp_v6_send_response(sk, req)) dccp_v6_conn_request() 406 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); dccp_v6_conn_request() 416 static struct sock *dccp_v6_request_recv_sock(struct sock *sk, dccp_v6_request_recv_sock() argument 422 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); dccp_v6_request_recv_sock() 432 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst); dccp_v6_request_recv_sock() 468 if (sk_acceptq_is_full(sk)) dccp_v6_request_recv_sock() 480 fl6.flowi6_oif = sk->sk_bound_dev_if; dccp_v6_request_recv_sock() 483 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dccp_v6_request_recv_sock() 485 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); dccp_v6_request_recv_sock() 490 newsk = dccp_create_openreq_child(sk, req, skb); dccp_v6_request_recv_sock() 558 if (__inet_inherit_port(sk, newsk) < 0) { dccp_v6_request_recv_sock() 568 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); dccp_v6_request_recv_sock() 572 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); dccp_v6_request_recv_sock() 584 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) dccp_v6_do_rcv() argument 586 struct ipv6_pinfo *np = inet6_sk(sk); dccp_v6_do_rcv() 598 return dccp_v4_do_rcv(sk, skb); dccp_v6_do_rcv() 600 if (sk_filter(sk, skb)) dccp_v6_do_rcv() 628 if (sk->sk_state == DCCP_OPEN) { /* Fast path */ dccp_v6_do_rcv() 629 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) dccp_v6_do_rcv() 662 if (sk->sk_state == DCCP_LISTEN) { dccp_v6_do_rcv() 663 struct sock *nsk = dccp_v6_hnd_req(sk, skb); dccp_v6_do_rcv() 672 if (nsk != sk) { dccp_v6_do_rcv() 673 if (dccp_child_process(sk, nsk, skb)) dccp_v6_do_rcv() 681 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len)) dccp_v6_do_rcv() 690 dccp_v6_ctl_send_reset(sk, skb); dccp_v6_do_rcv() 701 struct sock *sk; dccp_v6_rcv() local 728 sk = __inet6_lookup_skb(&dccp_hashinfo, skb, dccp_v6_rcv() 735 if (sk == NULL) { dccp_v6_rcv() 747 if (sk->sk_state == DCCP_TIME_WAIT) { dccp_v6_rcv() 748 dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); dccp_v6_rcv() 749 inet_twsk_put(inet_twsk(sk)); dccp_v6_rcv() 758 min_cov = dccp_sk(sk)->dccps_pcrlen; dccp_v6_rcv() 766 if (!xfrm6_policy_check(sk, 
XFRM_POLICY_IN, skb)) dccp_v6_rcv() 769 return sk_receive_skb(sk, skb, 1) ? -1 : 0; dccp_v6_rcv() 783 dccp_v6_ctl_send_reset(sk, skb); dccp_v6_rcv() 791 sock_put(sk); dccp_v6_rcv() 795 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, dccp_v6_connect() argument 799 struct inet_connection_sock *icsk = inet_csk(sk); dccp_v6_connect() 800 struct inet_sock *inet = inet_sk(sk); dccp_v6_connect() 801 struct ipv6_pinfo *np = inet6_sk(sk); dccp_v6_connect() 802 struct dccp_sock *dp = dccp_sk(sk); dccp_v6_connect() 825 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); dccp_v6_connect() 848 if (sk->sk_bound_dev_if && dccp_v6_connect() 849 sk->sk_bound_dev_if != usin->sin6_scope_id) dccp_v6_connect() 852 sk->sk_bound_dev_if = usin->sin6_scope_id; dccp_v6_connect() 856 if (!sk->sk_bound_dev_if) dccp_v6_connect() 860 sk->sk_v6_daddr = usin->sin6_addr; dccp_v6_connect() 870 SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); dccp_v6_connect() 872 if (__ipv6_only_sock(sk)) dccp_v6_connect() 880 sk->sk_backlog_rcv = dccp_v4_do_rcv; dccp_v6_connect() 882 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); dccp_v6_connect() 886 sk->sk_backlog_rcv = dccp_v6_do_rcv; dccp_v6_connect() 889 np->saddr = sk->sk_v6_rcv_saddr; dccp_v6_connect() 893 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) dccp_v6_connect() 894 saddr = &sk->sk_v6_rcv_saddr; dccp_v6_connect() 897 fl6.daddr = sk->sk_v6_daddr; dccp_v6_connect() 899 fl6.flowi6_oif = sk->sk_bound_dev_if; dccp_v6_connect() 902 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dccp_v6_connect() 904 opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); dccp_v6_connect() 907 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); dccp_v6_connect() 915 sk->sk_v6_rcv_saddr = *saddr; dccp_v6_connect() 922 __ip6_dst_store(sk, dst, NULL, NULL); dccp_v6_connect() 930 dccp_set_state(sk, DCCP_REQUESTING); dccp_v6_connect() 931 err = inet6_hash_connect(&dccp_death_row, sk); dccp_v6_connect() 936 sk->sk_v6_daddr.s6_addr32, dccp_v6_connect() 939 err = dccp_connect(sk); dccp_v6_connect() 946 dccp_set_state(sk, DCCP_CLOSED); dccp_v6_connect() 947 __sk_dst_reset(sk); dccp_v6_connect() 950 sk->sk_route_caps = 0; dccp_v6_connect() 995 static int dccp_v6_init_sock(struct sock *sk) dccp_v6_init_sock() argument 998 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized); dccp_v6_init_sock() 1003 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; dccp_v6_init_sock() 1009 static void dccp_v6_destroy_sock(struct sock *sk) dccp_v6_destroy_sock() argument 1011 dccp_destroy_sock(sk); dccp_v6_destroy_sock() 1012 inet6_destroy_sock(sk); dccp_v6_destroy_sock()
|
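A recurring detail in dccp_v6_connect() above is the v4-mapped fallback: an AF_INET6 destination of the form ::ffff:a.b.c.d is rerouted to the IPv4 code and the socket's backlog handler is switched to dccp_v4_do_rcv. A small stand-alone sketch of the address test, using the standard IN6_IS_ADDR_V4MAPPED macro:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Returns 1 and fills *out when in6 is a v4-mapped IPv6 address. */
static int v4_mapped_to_v4(const struct in6_addr *in6, struct in_addr *out)
{
    if (!IN6_IS_ADDR_V4MAPPED(in6))
        return 0;
    memcpy(out, &in6->s6_addr[12], sizeof(*out)); /* low 32 bits */
    return 1;
}

int main(void)
{
    struct in6_addr a;
    struct in_addr v4;
    inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
    if (v4_mapped_to_v4(&a, &v4)) {
        char buf[INET_ADDRSTRLEN];
        printf("mapped -> %s\n", inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
    }
    return 0;
}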
H A D | diag.c | 19 static void dccp_get_info(struct sock *sk, struct tcp_info *info) dccp_get_info() argument 21 struct dccp_sock *dp = dccp_sk(sk); dccp_get_info() 22 const struct inet_connection_sock *icsk = inet_csk(sk); dccp_get_info() 26 info->tcpi_state = sk->sk_state; dccp_get_info() 36 ccid_hc_rx_get_info(dp->dccps_hc_rx_ccid, sk, info); dccp_get_info() 39 ccid_hc_tx_get_info(dp->dccps_hc_tx_ccid, sk, info); dccp_get_info() 42 static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, dccp_diag_get_info() argument 48 dccp_get_info(sk, _info); dccp_diag_get_info()
|
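dccp_get_info() follows a simple convention: zero the caller's struct, fill in the generic connection fields, then let each optional half-connection CCID append its own counters. A hedged restatement of that convention with illustrative types (not the kernel's structs):

#include <stdio.h>
#include <string.h>

struct stats { int state; unsigned rto; unsigned rx_rate; unsigned tx_rate; };

struct component { void (*get_info)(struct stats *s); };

struct connection {
    int state;
    unsigned rto;
    struct component *rx, *tx;   /* may be absent, like the CCIDs */
};

static void conn_get_info(const struct connection *c, struct stats *s)
{
    memset(s, 0, sizeof(*s));    /* never leak stale fields to the caller */
    s->state = c->state;
    s->rto = c->rto;
    if (c->rx)                   /* each sub-component appends its data */
        c->rx->get_info(s);
    if (c->tx)
        c->tx->get_info(s);
}

static void tx_info(struct stats *s) { s->tx_rate = 1000; }

int main(void)
{
    struct component tx = { tx_info };
    struct connection c = { .state = 1, .rto = 200, .tx = &tx };
    struct stats s;
    conn_get_info(&c, &s);
    printf("state=%d rto=%u tx_rate=%u\n", s.state, s.rto, s.tx_rate);
    return 0;
}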
H A D | ipv4.c | 42 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) dccp_v4_connect() argument 45 struct inet_sock *inet = inet_sk(sk); dccp_v4_connect() 46 struct dccp_sock *dp = dccp_sk(sk); dccp_v4_connect() 65 sock_owned_by_user(sk)); dccp_v4_connect() 76 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, dccp_v4_connect() 78 orig_sport, orig_dport, sk); dccp_v4_connect() 92 sk_rcv_saddr_set(sk, inet->inet_saddr); dccp_v4_connect() 94 sk_daddr_set(sk, daddr); dccp_v4_connect() 96 inet_csk(sk)->icsk_ext_hdr_len = 0; dccp_v4_connect() 98 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; dccp_v4_connect() 105 dccp_set_state(sk, DCCP_REQUESTING); dccp_v4_connect() 106 err = inet_hash_connect(&dccp_death_row, sk); dccp_v4_connect() 111 inet->inet_sport, inet->inet_dport, sk); dccp_v4_connect() 118 sk_setup_caps(sk, &rt->dst); dccp_v4_connect() 126 err = dccp_connect(sk); dccp_v4_connect() 136 dccp_set_state(sk, DCCP_CLOSED); dccp_v4_connect() 138 sk->sk_route_caps = 0; dccp_v4_connect() 147 static inline void dccp_do_pmtu_discovery(struct sock *sk, dccp_do_pmtu_discovery() argument 152 const struct inet_sock *inet = inet_sk(sk); dccp_do_pmtu_discovery() 153 const struct dccp_sock *dp = dccp_sk(sk); dccp_do_pmtu_discovery() 159 if (sk->sk_state == DCCP_LISTEN) dccp_do_pmtu_discovery() 162 dst = inet_csk_update_pmtu(sk, mtu); dccp_do_pmtu_discovery() 169 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) dccp_do_pmtu_discovery() 170 sk->sk_err_soft = EMSGSIZE; dccp_do_pmtu_discovery() 175 ip_sk_accept_pmtu(sk) && dccp_do_pmtu_discovery() 176 inet_csk(sk)->icsk_pmtu_cookie > mtu) { dccp_do_pmtu_discovery() 177 dccp_sync_mss(sk, mtu); dccp_do_pmtu_discovery() 186 dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC); dccp_do_pmtu_discovery() 190 static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk) dccp_do_redirect() argument 192 struct dst_entry *dst = __sk_dst_check(sk, 0); dccp_do_redirect() 195 dst->ops->redirect(dst, sk, skb); dccp_do_redirect() 198 void dccp_req_err(struct sock *sk, u64 seq) dccp_req_err() argument 200 struct request_sock *req = inet_reqsk(sk); dccp_req_err() 201 struct net *net = sock_net(sk); dccp_req_err() 207 WARN_ON(req->sk); dccp_req_err() 245 struct sock *sk; dccp_v4_err() local 256 sk = __inet_lookup_established(net, &dccp_hashinfo, dccp_v4_err() 260 if (!sk) { dccp_v4_err() 265 if (sk->sk_state == DCCP_TIME_WAIT) { dccp_v4_err() 266 inet_twsk_put(inet_twsk(sk)); dccp_v4_err() 270 if (sk->sk_state == DCCP_NEW_SYN_RECV) dccp_v4_err() 271 return dccp_req_err(sk, seq); dccp_v4_err() 273 bh_lock_sock(sk); dccp_v4_err() 277 if (sock_owned_by_user(sk)) dccp_v4_err() 280 if (sk->sk_state == DCCP_CLOSED) dccp_v4_err() 283 dp = dccp_sk(sk); dccp_v4_err() 284 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && dccp_v4_err() 292 dccp_do_redirect(skb, sk); dccp_v4_err() 305 if (!sock_owned_by_user(sk)) dccp_v4_err() 306 dccp_do_pmtu_discovery(sk, iph, info); dccp_v4_err() 319 switch (sk->sk_state) { dccp_v4_err() 322 if (!sock_owned_by_user(sk)) { dccp_v4_err() 324 sk->sk_err = err; dccp_v4_err() 326 sk->sk_error_report(sk); dccp_v4_err() 328 dccp_done(sk); dccp_v4_err() 330 sk->sk_err_soft = err; dccp_v4_err() 350 inet = inet_sk(sk); dccp_v4_err() 351 if (!sock_owned_by_user(sk) && inet->recverr) { dccp_v4_err() 352 sk->sk_err = err; dccp_v4_err() 353 sk->sk_error_report(sk); dccp_v4_err() 355 sk->sk_err_soft = err; dccp_v4_err() 357 bh_unlock_sock(sk); dccp_v4_err() 358 sock_put(sk); dccp_v4_err() 367 void dccp_v4_send_check(struct 
sock *sk, struct sk_buff *skb) dccp_v4_send_check() argument 369 const struct inet_sock *inet = inet_sk(sk); dccp_v4_send_check() 393 struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, dccp_v4_request_recv_sock() argument 401 if (sk_acceptq_is_full(sk)) dccp_v4_request_recv_sock() 404 newsk = dccp_create_openreq_child(sk, req, skb); dccp_v4_request_recv_sock() 419 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) dccp_v4_request_recv_sock() 426 if (__inet_inherit_port(sk, newsk) < 0) dccp_v4_request_recv_sock() 433 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); dccp_v4_request_recv_sock() 437 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); dccp_v4_request_recv_sock() 446 static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) dccp_v4_hnd_req() argument 452 struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport, dccp_v4_hnd_req() 455 nsk = dccp_check_req(sk, skb, req); dccp_v4_hnd_req() 460 nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo, dccp_v4_hnd_req() 473 return sk; dccp_v4_hnd_req() 476 static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, dccp_v4_route_skb() argument 485 .flowi4_tos = RT_CONN_FLAGS(sk), dccp_v4_route_skb() 486 .flowi4_proto = sk->sk_protocol, dccp_v4_route_skb() 492 rt = ip_route_output_flow(net, &fl4, sk); dccp_v4_route_skb() 501 static int dccp_v4_send_response(struct sock *sk, struct request_sock *req) dccp_v4_send_response() argument 508 dst = inet_csk_route_req(sk, &fl4, req); dccp_v4_send_response() 512 skb = dccp_make_response(sk, dst, req); dccp_v4_send_response() 519 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, dccp_v4_send_response() 530 static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) dccp_v4_ctl_send_reset() argument 593 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) dccp_v4_conn_request() argument 605 if (dccp_bad_service_code(sk, service)) { dccp_v4_conn_request() 615 if (inet_csk_reqsk_queue_is_full(sk)) dccp_v4_conn_request() 624 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) dccp_v4_conn_request() 627 req = inet_reqsk_alloc(&dccp_request_sock_ops, sk); dccp_v4_conn_request() 631 if (dccp_reqsk_init(req, dccp_sk(sk), skb)) dccp_v4_conn_request() 635 if (dccp_parse_options(sk, dreq, skb)) dccp_v4_conn_request() 638 if (security_inet_conn_request(sk, skb, req)) dccp_v4_conn_request() 645 ireq->ir_iif = sk->sk_bound_dev_if; dccp_v4_conn_request() 660 if (dccp_v4_send_response(sk, req)) dccp_v4_conn_request() 663 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); dccp_v4_conn_request() 674 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) dccp_v4_do_rcv() argument 678 if (sk->sk_state == DCCP_OPEN) { /* Fast path */ dccp_v4_do_rcv() 679 if (dccp_rcv_established(sk, skb, dh, skb->len)) dccp_v4_do_rcv() 707 if (sk->sk_state == DCCP_LISTEN) { dccp_v4_do_rcv() 708 struct sock *nsk = dccp_v4_hnd_req(sk, skb); dccp_v4_do_rcv() 713 if (nsk != sk) { dccp_v4_do_rcv() 714 if (dccp_child_process(sk, nsk, skb)) dccp_v4_do_rcv() 720 if (dccp_rcv_state_process(sk, skb, dh, skb->len)) dccp_v4_do_rcv() 725 dccp_v4_ctl_send_reset(sk, skb); dccp_v4_do_rcv() 809 struct sock *sk; dccp_v4_rcv() local 846 sk = __inet_lookup_skb(&dccp_hashinfo, skb, dccp_v4_rcv() 852 if (sk == NULL) { dccp_v4_rcv() 864 if (sk->sk_state == DCCP_TIME_WAIT) { dccp_v4_rcv() 865 dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); dccp_v4_rcv() 866 
inet_twsk_put(inet_twsk(sk)); dccp_v4_rcv() 875 min_cov = dccp_sk(sk)->dccps_pcrlen; dccp_v4_rcv() 885 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) dccp_v4_rcv() 889 return sk_receive_skb(sk, skb, 1); dccp_v4_rcv() 903 dccp_v4_ctl_send_reset(sk, skb); dccp_v4_rcv() 911 sock_put(sk); dccp_v4_rcv() 933 static int dccp_v4_init_sock(struct sock *sk) dccp_v4_init_sock() argument 936 int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized); dccp_v4_init_sock() 941 inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops; dccp_v4_init_sock()
|
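The listen-side checks above (inet_csk_reqsk_queue_is_full(), sk_acceptq_is_full(), dccp_bad_service_code()) sit behind an ordinary bind/listen/accept sequence. A minimal sketch of the matching server, again assuming the illustrative port 5001 and service code 42 used by the client sketch earlier:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/dccp.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
    if (fd < 0) {
        perror("socket");
        return 1;
    }

    uint32_t service = htonl(42);   /* must match the client's code */
    setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service, sizeof(service));

    struct sockaddr_in sa = {
        .sin_family = AF_INET,
        .sin_port = htons(5001),
        .sin_addr.s_addr = htonl(INADDR_ANY),
    };
    bind(fd, (struct sockaddr *)&sa, sizeof(sa));
    listen(fd, 8);   /* backlog: see the sk_acceptq_is_full() check above */

    int conn = accept(fd, NULL, NULL);  /* completes the handshake */
    if (conn >= 0) {
        char buf[256];
        ssize_t n = recv(conn, buf, sizeof(buf), 0); /* one datagram */
        if (n > 0)
            printf("got %zd bytes\n", n);
        close(conn);
    }
    close(fd);
    return 0;
}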
H A D | dccp.h | 56 void dccp_time_wait(struct sock *sk, int state, int timeo); 227 void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb); 229 int dccp_retransmit_skb(struct sock *sk); 231 void dccp_send_ack(struct sock *sk); 232 void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 235 void dccp_send_sync(struct sock *sk, const u64 seq, 241 void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb); 242 bool dccp_qpolicy_full(struct sock *sk); 243 void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb); 244 struct sk_buff *dccp_qpolicy_top(struct sock *sk); 245 struct sk_buff *dccp_qpolicy_pop(struct sock *sk); 246 bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param); 251 void dccp_write_xmit(struct sock *sk); 252 void dccp_write_space(struct sock *sk); 253 void dccp_flush_write_queue(struct sock *sk, long *time_budget); 255 void dccp_init_xmit_timers(struct sock *sk); 256 static inline void dccp_clear_xmit_timers(struct sock *sk) 258 inet_csk_clear_xmit_timers(sk); 261 unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu); 265 void dccp_set_state(struct sock *sk, const int state); 266 void dccp_done(struct sock *sk); 271 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb); 273 struct sock *dccp_create_openreq_child(struct sock *sk, 277 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); 279 struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, 282 struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, 287 int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 289 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 292 int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); 293 void dccp_destroy_sock(struct sock *sk); 295 void dccp_close(struct sock *sk, long timeout); 296 struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, 299 int dccp_connect(struct sock *sk); 300 int dccp_disconnect(struct sock *sk, int flags); 301 int dccp_getsockopt(struct sock *sk, int level, int optname, 303 int dccp_setsockopt(struct sock *sk, int level, int optname, 306 int compat_dccp_getsockopt(struct sock *sk, int level, int optname, 308 int compat_dccp_setsockopt(struct sock *sk, int level, int optname, 311 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); 312 int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); 313 int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, 315 void dccp_shutdown(struct sock *sk, int how); 319 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 320 void dccp_req_err(struct sock *sk, u64 seq); 322 struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb); 323 int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); 324 void dccp_send_close(struct sock *sk, const int active); 326 u32 dccp_sample_rtt(struct sock *sk, long delta); 328 static inline int dccp_bad_service_code(const struct sock *sk, 331 const struct dccp_sock *dp = dccp_sk(sk); 418 static inline void dccp_update_gsr(struct sock *sk, u64 seq) 420 struct dccp_sock *dp = dccp_sk(sk); 446 static inline void dccp_update_gss(struct sock *sk, u64 seq) 448 struct dccp_sock *dp = dccp_sk(sk); 459 static inline int dccp_ackvec_pending(const struct sock *sk) 461 return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL && 462 !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec); 465 static inline int dccp_ack_pending(const struct sock *sk) 467 return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk); 
470 int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val); 475 int dccp_feat_activate_values(struct sock *sk, struct list_head *fn); 478 int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
|
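dccp_update_gsr()/dccp_update_gss() above maintain greatest-sequence-number state over DCCP's 48-bit sequence space, so "newer than" must be decided modulo 2^48. A sketch of the wraparound-safe comparison this requires; it mirrors the idea of the header's 48-bit helpers, not their exact definitions:

#include <stdint.h>
#include <stdio.h>

#define SEQ48_MASK ((UINT64_C(1) << 48) - 1)

/* Signed distance from s1 to s2 modulo 2^48 (positive if s2 is newer).
 * Shifting left 16 puts bit 47 at the sign bit; the arithmetic right
 * shift then sign-extends the 48-bit delta to 64 bits. */
static int64_t seq48_delta(uint64_t s1, uint64_t s2)
{
    return ((int64_t)((s2 - s1) << 16)) >> 16;
}

static int seq48_after(uint64_t s2, uint64_t s1)
{
    return seq48_delta(s1, s2) > 0;
}

int main(void)
{
    uint64_t gsr = SEQ48_MASK;      /* greatest seqno received so far */
    uint64_t seq = 5;               /* a packet that wrapped past 2^48 */
    if (seq48_after(seq, gsr))
        gsr = seq;                  /* the update dccp_update_gsr() makes */
    printf("gsr = %llu\n", (unsigned long long)gsr);
    return 0;
}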
H A D | minisocks.c | 35 void dccp_time_wait(struct sock *sk, int state, int timeo) dccp_time_wait() argument 39 tw = inet_twsk_alloc(sk, &dccp_death_row, state); dccp_time_wait() 42 const struct inet_connection_sock *icsk = inet_csk(sk); dccp_time_wait() 46 tw->tw_v6_daddr = sk->sk_v6_daddr; dccp_time_wait() 47 tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr; dccp_time_wait() 48 tw->tw_ipv6only = sk->sk_ipv6only; dccp_time_wait() 62 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); dccp_time_wait() 72 dccp_done(sk); dccp_time_wait() 75 struct sock *dccp_create_openreq_child(struct sock *sk, dccp_create_openreq_child() argument 85 struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); dccp_create_openreq_child() 141 struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, dccp_check_req() argument 158 inet_rtx_syn_ack(sk, req); dccp_check_req() 182 if (dccp_parse_options(sk, dreq, skb)) dccp_check_req() 185 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); dccp_check_req() 189 inet_csk_reqsk_queue_drop(sk, req); dccp_check_req() 190 inet_csk_reqsk_queue_add(sk, req, child); dccp_check_req() 198 req->rsk_ops->send_reset(sk, skb); dccp_check_req() 200 inet_csk_reqsk_queue_drop(sk, req); dccp_check_req() 239 void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, dccp_reqsk_send_ack() argument
|
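dccp_check_req() implements a classic handshake-completion pattern: retransmit the response when a duplicate request arrives (the client never saw it), drop packets that do not acknowledge what was sent, and only on a valid ACK promote the pending request to a child connection. A toy, runnable restatement with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct pkt  { int type; unsigned long ack; };    /* REQUEST=0, ACK=1 */
struct req  { unsigned long iss; int retrans; }; /* seqno of our response */
struct conn { int established; };

/* Duplicate request: the client never saw our response, send it again. */
static void retransmit_response(struct req *r) { r->retrans++; }

static struct conn *check_req(const struct pkt *p, struct req *r)
{
    if (p->type == 0) {               /* duplicate request */
        retransmit_response(r);
        return NULL;
    }
    if (p->ack != r->iss)             /* ACK must cover our response */
        return NULL;
    struct conn *child = calloc(1, sizeof(*child));
    child->established = 1;           /* promote request to child socket */
    return child;
}

int main(void)
{
    struct req r = { .iss = 1000 };
    struct pkt dup = { .type = 0 }, ack = { .type = 1, .ack = 1000 };
    check_req(&dup, &r);              /* triggers a retransmit */
    struct conn *c = check_req(&ack, &r);
    printf("retrans=%d established=%d\n", r.retrans, c ? c->established : 0);
    free(c);
    return 0;
}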
/linux-4.1.27/net/ipv4/ |
H A D | inet_hashtables.c | 42 u32 sk_ehashfn(const struct sock *sk) sk_ehashfn() argument 45 if (sk->sk_family == AF_INET6 && sk_ehashfn() 46 !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) sk_ehashfn() 47 return inet6_ehashfn(sock_net(sk), sk_ehashfn() 48 &sk->sk_v6_rcv_saddr, sk->sk_num, sk_ehashfn() 49 &sk->sk_v6_daddr, sk->sk_dport); sk_ehashfn() 51 return inet_ehashfn(sock_net(sk), sk_ehashfn() 52 sk->sk_rcv_saddr, sk->sk_num, sk_ehashfn() 53 sk->sk_daddr, sk->sk_dport); sk_ehashfn() 90 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, inet_bind_hash() argument 93 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; inet_bind_hash() 97 inet_sk(sk)->inet_num = snum; inet_bind_hash() 98 sk_add_bind_node(sk, &tb->owners); inet_bind_hash() 100 inet_csk(sk)->icsk_bind_hash = tb; inet_bind_hash() 106 static void __inet_put_port(struct sock *sk) __inet_put_port() argument 108 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; __inet_put_port() 109 const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num, __inet_put_port() 117 tb = inet_csk(sk)->icsk_bind_hash; __inet_put_port() 118 __sk_del_bind_node(sk); __inet_put_port() 120 inet_csk(sk)->icsk_bind_hash = NULL; __inet_put_port() 121 inet_sk(sk)->inet_num = 0; __inet_put_port() 126 void inet_put_port(struct sock *sk) inet_put_port() argument 129 __inet_put_port(sk); inet_put_port() 134 int __inet_inherit_port(struct sock *sk, struct sock *child) __inet_inherit_port() argument 136 struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; __inet_inherit_port() 138 const int bhash = inet_bhashfn(sock_net(sk), port, __inet_inherit_port() 144 tb = inet_csk(sk)->icsk_bind_hash; __inet_inherit_port() 152 if (net_eq(ib_net(tb), sock_net(sk)) && __inet_inherit_port() 158 sock_net(sk), head, port); __inet_inherit_port() 172 static inline int compute_score(struct sock *sk, struct net *net, compute_score() argument 177 struct inet_sock *inet = inet_sk(sk); compute_score() 179 if (net_eq(sock_net(sk), net) && inet->inet_num == hnum && compute_score() 180 !ipv6_only_sock(sk)) { compute_score() 182 score = sk->sk_family == PF_INET ? 
2 : 1; compute_score() 188 if (sk->sk_bound_dev_if) { compute_score() 189 if (sk->sk_bound_dev_if != dif) compute_score() 211 struct sock *sk, *result; __inet_lookup_listener() local 222 sk_nulls_for_each_rcu(sk, node, &ilb->head) { __inet_lookup_listener() 223 score = compute_score(sk, net, hnum, daddr, dif); __inet_lookup_listener() 225 result = sk; __inet_lookup_listener() 227 reuseport = sk->sk_reuseport; __inet_lookup_listener() 236 result = sk; __inet_lookup_listener() 262 void sock_gen_put(struct sock *sk) sock_gen_put() argument 264 if (!atomic_dec_and_test(&sk->sk_refcnt)) sock_gen_put() 267 if (sk->sk_state == TCP_TIME_WAIT) sock_gen_put() 268 inet_twsk_free(inet_twsk(sk)); sock_gen_put() 269 else if (sk->sk_state == TCP_NEW_SYN_RECV) sock_gen_put() 270 reqsk_free(inet_reqsk(sk)); sock_gen_put() 272 sk_free(sk); sock_gen_put() 278 sock_gen_put(skb->sk); sock_edemux() 290 struct sock *sk; __inet_lookup_established() local 301 sk_nulls_for_each_rcu(sk, node, &head->chain) { __inet_lookup_established() 302 if (sk->sk_hash != hash) __inet_lookup_established() 304 if (likely(INET_MATCH(sk, net, acookie, __inet_lookup_established() 306 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) __inet_lookup_established() 308 if (unlikely(!INET_MATCH(sk, net, acookie, __inet_lookup_established() 310 sock_gen_put(sk); __inet_lookup_established() 324 sk = NULL; __inet_lookup_established() 327 return sk; __inet_lookup_established() 333 struct sock *sk, __u16 lport, __inet_check_established() 337 struct inet_sock *inet = inet_sk(sk); __inet_check_established() 340 int dif = sk->sk_bound_dev_if; __inet_check_established() 343 struct net *net = sock_net(sk); __inet_check_established() 363 if (twsk_unique(sk, sk2, twp)) __inet_check_established() 375 sk->sk_hash = hash; __inet_check_established() 376 WARN_ON(!sk_unhashed(sk)); __inet_check_established() 377 __sk_nulls_add_node_rcu(sk, &head->chain); __inet_check_established() 385 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); __inet_check_established() 402 static inline u32 inet_sk_port_offset(const struct sock *sk) inet_sk_port_offset() argument 404 const struct inet_sock *inet = inet_sk(sk); inet_sk_port_offset() 410 int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw) __inet_hash_nolisten() argument 412 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; __inet_hash_nolisten() 418 WARN_ON(!sk_unhashed(sk)); __inet_hash_nolisten() 420 sk->sk_hash = sk_ehashfn(sk); __inet_hash_nolisten() 421 head = inet_ehash_bucket(hashinfo, sk->sk_hash); __inet_hash_nolisten() 423 lock = inet_ehash_lockp(hashinfo, sk->sk_hash); __inet_hash_nolisten() 426 __sk_nulls_add_node_rcu(sk, list); __inet_hash_nolisten() 428 WARN_ON(sk->sk_hash != tw->tw_hash); __inet_hash_nolisten() 432 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); __inet_hash_nolisten() 437 int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw) __inet_hash() argument 439 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; __inet_hash() 442 if (sk->sk_state != TCP_LISTEN) __inet_hash() 443 return __inet_hash_nolisten(sk, tw); __inet_hash() 445 WARN_ON(!sk_unhashed(sk)); __inet_hash() 446 ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; __inet_hash() 449 __sk_nulls_add_node_rcu(sk, &ilb->head); __inet_hash() 450 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); __inet_hash() 456 void inet_hash(struct sock *sk) inet_hash() argument 458 if (sk->sk_state != TCP_CLOSE) { inet_hash() 460 __inet_hash(sk, NULL); inet_hash() 466 void inet_unhash(struct 
sock *sk) inet_unhash() argument 468 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; inet_unhash() 472 if (sk_unhashed(sk)) inet_unhash() 475 if (sk->sk_state == TCP_LISTEN) inet_unhash() 476 lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock; inet_unhash() 478 lock = inet_ehash_lockp(hashinfo, sk->sk_hash); inet_unhash() 481 done = __sk_nulls_del_node_init_rcu(sk); inet_unhash() 483 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); inet_unhash() 489 struct sock *sk, u32 port_offset, __inet_hash_connect() 494 const unsigned short snum = inet_sk(sk)->inet_num; __inet_hash_connect() 498 struct net *net = sock_net(sk); __inet_hash_connect() 530 if (!check_established(death_row, sk, __inet_hash_connect() 558 inet_bind_hash(sk, tb, port); __inet_hash_connect() 559 if (sk_unhashed(sk)) { __inet_hash_connect() 560 inet_sk(sk)->inet_sport = htons(port); __inet_hash_connect() 561 twrefcnt += __inet_hash_nolisten(sk, tw); __inet_hash_connect() 580 tb = inet_csk(sk)->icsk_bind_hash; __inet_hash_connect() 582 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { __inet_hash_connect() 583 __inet_hash_nolisten(sk, NULL); __inet_hash_connect() 589 ret = check_established(death_row, sk, snum, NULL); __inet_hash_connect() 600 struct sock *sk) inet_hash_connect() 602 return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk), inet_hash_connect() 332 __inet_check_established(struct inet_timewait_death_row *death_row, struct sock *sk, __u16 lport, struct inet_timewait_sock **twp) __inet_check_established() argument 488 __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u32 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **)) __inet_hash_connect() argument 599 inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) inet_hash_connect() argument
|
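compute_score() above decides which listener wins when several sockets are bound to the same port: an exact address family beats a dual-stack wildcard, and a device-bound socket either matches the incoming interface (and scores higher) or is disqualified outright. A simplified, runnable restatement of that scoring (the real function also scores the bound receive address, elided here):

#include <stdio.h>

struct listener {
    int port;
    int family_exact;   /* bound as AF_INET rather than dual-stack */
    int bound_ifindex;  /* 0 = any device */
};

static int compute_score(const struct listener *l, int port, int ifindex)
{
    if (l->port != port)
        return -1;                       /* not a candidate at all */
    int score = l->family_exact ? 2 : 1; /* prefer the exact family */
    if (l->bound_ifindex) {
        if (l->bound_ifindex != ifindex)
            return -1;                   /* wrong device: disqualified */
        score += 2;                      /* device match outranks family */
    }
    return score;
}

int main(void)
{
    struct listener any  = { 80, 0, 0 };
    struct listener eth0 = { 80, 1, 2 };
    printf("any=%d eth0=%d\n",
           compute_score(&any, 80, 2), compute_score(&eth0, 80, 2));
    return 0;
}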
H A D | tcp_timer.c | 35 static void tcp_write_err(struct sock *sk) tcp_write_err() argument 37 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; tcp_write_err() 38 sk->sk_error_report(sk); tcp_write_err() 40 tcp_done(sk); tcp_write_err() 41 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); tcp_write_err() 55 static int tcp_out_of_resources(struct sock *sk, bool do_reset) tcp_out_of_resources() argument 57 struct tcp_sock *tp = tcp_sk(sk); tcp_out_of_resources() 66 if (sk->sk_err_soft) tcp_out_of_resources() 69 if (tcp_check_oom(sk, shift)) { tcp_out_of_resources() 77 tcp_send_active_reset(sk, GFP_ATOMIC); tcp_out_of_resources() 78 tcp_done(sk); tcp_out_of_resources() 79 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); tcp_out_of_resources() 86 static int tcp_orphan_retries(struct sock *sk, int alive) tcp_orphan_retries() argument 91 if (sk->sk_err_soft && !alive) tcp_orphan_retries() 102 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) tcp_mtu_probing() argument 104 struct net *net = sock_net(sk); tcp_mtu_probing() 111 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_mtu_probing() 113 struct net *net = sock_net(sk); tcp_mtu_probing() 114 struct tcp_sock *tp = tcp_sk(sk); tcp_mtu_probing() 117 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; tcp_mtu_probing() 120 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); tcp_mtu_probing() 121 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_mtu_probing() 131 static bool retransmits_timed_out(struct sock *sk, retransmits_timed_out() argument 139 if (!inet_csk(sk)->icsk_retransmits) retransmits_timed_out() 142 start_ts = tcp_sk(sk)->retrans_stamp; retransmits_timed_out() 144 start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk)); retransmits_timed_out() 159 static int tcp_write_timeout(struct sock *sk) tcp_write_timeout() argument 161 struct inet_connection_sock *icsk = inet_csk(sk); tcp_write_timeout() 162 struct tcp_sock *tp = tcp_sk(sk); tcp_write_timeout() 166 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { tcp_write_timeout() 168 dst_negative_advice(sk); tcp_write_timeout() 170 tcp_fastopen_cache_set(sk, 0, NULL, true, 0); tcp_write_timeout() 172 NET_INC_STATS_BH(sock_net(sk), tcp_write_timeout() 178 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { tcp_write_timeout() 186 tcp_fastopen_cache_set(sk, 0, NULL, true, 0); tcp_write_timeout() 188 NET_INC_STATS_BH(sock_net(sk), tcp_write_timeout() 192 tcp_mtu_probing(icsk, sk); tcp_write_timeout() 194 dst_negative_advice(sk); tcp_write_timeout() 198 if (sock_flag(sk, SOCK_DEAD)) { tcp_write_timeout() 201 retry_until = tcp_orphan_retries(sk, alive); tcp_write_timeout() 203 !retransmits_timed_out(sk, retry_until, 0, 0); tcp_write_timeout() 205 if (tcp_out_of_resources(sk, do_reset)) tcp_write_timeout() 210 if (retransmits_timed_out(sk, retry_until, tcp_write_timeout() 213 tcp_write_err(sk); tcp_write_timeout() 219 void tcp_delack_timer_handler(struct sock *sk) tcp_delack_timer_handler() argument 221 struct tcp_sock *tp = tcp_sk(sk); tcp_delack_timer_handler() 222 struct inet_connection_sock *icsk = inet_csk(sk); tcp_delack_timer_handler() 224 sk_mem_reclaim_partial(sk); tcp_delack_timer_handler() 226 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) tcp_delack_timer_handler() 230 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); tcp_delack_timer_handler() 238 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED); tcp_delack_timer_handler() 241 sk_backlog_rcv(sk, skb); 
tcp_delack_timer_handler() 246 if (inet_csk_ack_scheduled(sk)) { tcp_delack_timer_handler() 257 tcp_send_ack(sk); tcp_delack_timer_handler() 258 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); tcp_delack_timer_handler() 262 if (sk_under_memory_pressure(sk)) tcp_delack_timer_handler() 263 sk_mem_reclaim(sk); tcp_delack_timer_handler() 268 struct sock *sk = (struct sock *)data; tcp_delack_timer() local 270 bh_lock_sock(sk); tcp_delack_timer() 271 if (!sock_owned_by_user(sk)) { tcp_delack_timer() 272 tcp_delack_timer_handler(sk); tcp_delack_timer() 274 inet_csk(sk)->icsk_ack.blocked = 1; tcp_delack_timer() 275 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); tcp_delack_timer() 277 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) tcp_delack_timer() 278 sock_hold(sk); tcp_delack_timer() 280 bh_unlock_sock(sk); tcp_delack_timer() 281 sock_put(sk); tcp_delack_timer() 284 static void tcp_probe_timer(struct sock *sk) tcp_probe_timer() argument 286 struct inet_connection_sock *icsk = inet_csk(sk); tcp_probe_timer() 287 struct tcp_sock *tp = tcp_sk(sk); tcp_probe_timer() 291 if (tp->packets_out || !tcp_send_head(sk)) { tcp_probe_timer() 304 start_ts = tcp_skb_timestamp(tcp_send_head(sk)); tcp_probe_timer() 306 skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp); tcp_probe_timer() 312 if (sock_flag(sk, SOCK_DEAD)) { tcp_probe_timer() 315 max_probes = tcp_orphan_retries(sk, alive); tcp_probe_timer() 318 if (tcp_out_of_resources(sk, true)) tcp_probe_timer() 323 abort: tcp_write_err(sk); tcp_probe_timer() 326 tcp_send_probe0(sk); tcp_probe_timer() 332 * sk here is the child socket, not the parent (listener) socket. 334 static void tcp_fastopen_synack_timer(struct sock *sk) tcp_fastopen_synack_timer() argument 336 struct inet_connection_sock *icsk = inet_csk(sk); tcp_fastopen_synack_timer() 341 req = tcp_sk(sk)->fastopen_rsk; tcp_fastopen_synack_timer() 345 tcp_write_err(sk); tcp_fastopen_synack_timer() 353 inet_rtx_syn_ack(sk, req); tcp_fastopen_synack_timer() 355 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, tcp_fastopen_synack_timer() 363 void tcp_retransmit_timer(struct sock *sk) tcp_retransmit_timer() argument 365 struct tcp_sock *tp = tcp_sk(sk); tcp_retransmit_timer() 366 struct inet_connection_sock *icsk = inet_csk(sk); tcp_retransmit_timer() 369 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && tcp_retransmit_timer() 370 sk->sk_state != TCP_FIN_WAIT1); tcp_retransmit_timer() 371 tcp_fastopen_synack_timer(sk); tcp_retransmit_timer() 380 WARN_ON(tcp_write_queue_empty(sk)); tcp_retransmit_timer() 384 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && tcp_retransmit_timer() 385 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { tcp_retransmit_timer() 391 struct inet_sock *inet = inet_sk(sk); tcp_retransmit_timer() 392 if (sk->sk_family == AF_INET) { tcp_retransmit_timer() 400 else if (sk->sk_family == AF_INET6) { tcp_retransmit_timer() 402 &sk->sk_v6_daddr, tcp_retransmit_timer() 409 tcp_write_err(sk); tcp_retransmit_timer() 412 tcp_enter_loss(sk); tcp_retransmit_timer() 413 tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); tcp_retransmit_timer() 414 __sk_dst_reset(sk); tcp_retransmit_timer() 418 if (tcp_write_timeout(sk)) tcp_retransmit_timer() 440 NET_INC_STATS_BH(sock_net(sk), mib_idx); tcp_retransmit_timer() 443 tcp_enter_loss(sk); tcp_retransmit_timer() 445 if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) { tcp_retransmit_timer() 451 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, tcp_retransmit_timer() 485 if (sk->sk_state == TCP_ESTABLISHED 
&& tcp_retransmit_timer() 495 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); tcp_retransmit_timer() 496 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0)) tcp_retransmit_timer() 497 __sk_dst_reset(sk); tcp_retransmit_timer() 502 void tcp_write_timer_handler(struct sock *sk) tcp_write_timer_handler() argument 504 struct inet_connection_sock *icsk = inet_csk(sk); tcp_write_timer_handler() 507 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) tcp_write_timer_handler() 511 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); tcp_write_timer_handler() 519 tcp_resume_early_retransmit(sk); tcp_write_timer_handler() 522 tcp_send_loss_probe(sk); tcp_write_timer_handler() 526 tcp_retransmit_timer(sk); tcp_write_timer_handler() 530 tcp_probe_timer(sk); tcp_write_timer_handler() 535 sk_mem_reclaim(sk); tcp_write_timer_handler() 540 struct sock *sk = (struct sock *)data; tcp_write_timer() local 542 bh_lock_sock(sk); tcp_write_timer() 543 if (!sock_owned_by_user(sk)) { tcp_write_timer() 544 tcp_write_timer_handler(sk); tcp_write_timer() 547 if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) tcp_write_timer() 548 sock_hold(sk); tcp_write_timer() 550 bh_unlock_sock(sk); tcp_write_timer() 551 sock_put(sk); tcp_write_timer() 562 void tcp_set_keepalive(struct sock *sk, int val) tcp_set_keepalive() argument 564 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) tcp_set_keepalive() 567 if (val && !sock_flag(sk, SOCK_KEEPOPEN)) tcp_set_keepalive() 568 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); tcp_set_keepalive() 570 inet_csk_delete_keepalive_timer(sk); tcp_set_keepalive() 576 struct sock *sk = (struct sock *) data; tcp_keepalive_timer() local 577 struct inet_connection_sock *icsk = inet_csk(sk); tcp_keepalive_timer() 578 struct tcp_sock *tp = tcp_sk(sk); tcp_keepalive_timer() 582 bh_lock_sock(sk); tcp_keepalive_timer() 583 if (sock_owned_by_user(sk)) { tcp_keepalive_timer() 585 inet_csk_reset_keepalive_timer (sk, HZ/20); tcp_keepalive_timer() 589 if (sk->sk_state == TCP_LISTEN) { tcp_keepalive_timer() 594 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { tcp_keepalive_timer() 596 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; tcp_keepalive_timer() 599 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); tcp_keepalive_timer() 603 tcp_send_active_reset(sk, GFP_ATOMIC); tcp_keepalive_timer() 607 if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) tcp_keepalive_timer() 613 if (tp->packets_out || tcp_send_head(sk)) tcp_keepalive_timer() 627 tcp_send_active_reset(sk, GFP_ATOMIC); tcp_keepalive_timer() 628 tcp_write_err(sk); tcp_keepalive_timer() 631 if (tcp_write_wakeup(sk) <= 0) { tcp_keepalive_timer() 645 sk_mem_reclaim(sk); tcp_keepalive_timer() 648 inet_csk_reset_keepalive_timer (sk, elapsed); tcp_keepalive_timer() 652 tcp_done(sk); tcp_keepalive_timer() 655 bh_unlock_sock(sk); tcp_keepalive_timer() 656 sock_put(sk); tcp_keepalive_timer() 659 void tcp_init_xmit_timers(struct sock *sk) tcp_init_xmit_timers() argument 661 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, tcp_init_xmit_timers()
|
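tcp_retransmit_timer() doubles icsk_rto on each expiry, clamped to TCP_RTO_MAX, and tcp_write_timeout() aborts the connection once the retry budget is exhausted. The schedule is easy to see in isolation; the constants below are illustrative stand-ins for TCP_RTO_MAX (120 s) and the tcp_retries2 default:

#include <stdio.h>

#define RTO_MAX_MS   120000   /* cf. TCP_RTO_MAX (120 s) */
#define RETRIES_MAX  15       /* cf. default sysctl tcp_retries2 */

int main(void)
{
    unsigned rto = 200;       /* illustrative initial RTO, in ms */
    for (int retries = 0; retries <= RETRIES_MAX; retries++) {
        printf("retry %2d: rto=%u ms\n", retries, rto);
        rto *= 2;                       /* RTO doubled on each timeout */
        if (rto > RTO_MAX_MS)
            rto = RTO_MAX_MS;           /* clamp, as the kernel does */
    }
    puts("connection aborted (tcp_write_err)");
    return 0;
}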
H A D | datagram.c | 23 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) __ip4_datagram_connect() argument 25 struct inet_sock *inet = inet_sk(sk); __ip4_datagram_connect() 40 sk_dst_reset(sk); __ip4_datagram_connect() 42 oif = sk->sk_bound_dev_if; __ip4_datagram_connect() 52 RT_CONN_FLAGS(sk), oif, __ip4_datagram_connect() 53 sk->sk_protocol, __ip4_datagram_connect() 54 inet->inet_sport, usin->sin_port, sk); __ip4_datagram_connect() 58 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); __ip4_datagram_connect() 62 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) { __ip4_datagram_connect() 71 if (sk->sk_prot->rehash) __ip4_datagram_connect() 72 sk->sk_prot->rehash(sk); __ip4_datagram_connect() 76 sk->sk_state = TCP_ESTABLISHED; __ip4_datagram_connect() 77 inet_set_txhash(sk); __ip4_datagram_connect() 80 sk_dst_set(sk, &rt->dst); __ip4_datagram_connect() 87 int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ip4_datagram_connect() argument 91 lock_sock(sk); ip4_datagram_connect() 92 res = __ip4_datagram_connect(sk, uaddr, addr_len); ip4_datagram_connect() 93 release_sock(sk); ip4_datagram_connect() 102 void ip4_datagram_release_cb(struct sock *sk) ip4_datagram_release_cb() argument 104 const struct inet_sock *inet = inet_sk(sk); ip4_datagram_release_cb() 113 dst = __sk_dst_get(sk); ip4_datagram_release_cb() 121 rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr, ip4_datagram_release_cb() 123 inet->inet_sport, sk->sk_protocol, ip4_datagram_release_cb() 124 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); ip4_datagram_release_cb() 127 sk_dst_set(sk, dst); ip4_datagram_release_cb()
|
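__ip4_datagram_connect() is what runs when user space calls connect() on a UDP socket: the route is resolved once, the peer address is pinned, and the socket moves to TCP_ESTABLISHED so plain send()/recv() work and asynchronous errors are reported on the socket. The user-space view, with an illustrative destination:

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    struct sockaddr_in sa = { .sin_family = AF_INET, .sin_port = htons(53) };
    inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);

    /* The kernel resolves the route once and marks the socket
     * TCP_ESTABLISHED — the "connected UDP" state shown above. */
    if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
        perror("connect");
        return 1;
    }
    const char q[] = "ping";
    send(fd, q, sizeof(q), 0);   /* no destination argument needed */
    close(fd);
    return 0;
}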
H A D | udp_impl.h | 11 int udp_v4_get_port(struct sock *sk, unsigned short snum); 13 int udp_setsockopt(struct sock *sk, int level, int optname, 15 int udp_getsockopt(struct sock *sk, int level, int optname, 19 int compat_udp_setsockopt(struct sock *sk, int level, int optname, 21 int compat_udp_getsockopt(struct sock *sk, int level, int optname, 24 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 26 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 28 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 29 void udp_destroy_sock(struct sock *sk);
|
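These prototypes are the per-protocol hooks behind the ordinary socket calls; udp_setsockopt(), for instance, is reached through setsockopt() at the IPPROTO_UDP level. A small example driving it via UDP_CORK to coalesce two writes into one datagram (port and payload are illustrative):

#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/udp.h>
#include <sys/socket.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    struct sockaddr_in sa = { .sin_family = AF_INET, .sin_port = htons(9999) };
    inet_pton(AF_INET, "127.0.0.1", &sa.sin_addr);
    connect(fd, (struct sockaddr *)&sa, sizeof(sa));

    int on = 1, off = 0;
    setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
    send(fd, "part1-", 6, 0);   /* buffered while the cork is in */
    send(fd, "part2", 5, 0);
    /* Uncorking flushes both writes as a single UDP datagram. */
    setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
    close(fd);
    return 0;
}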
H A D | udp_tunnel.c | 22 sk_change_net(sock->sk, net); udp_sock_create4() 42 sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; udp_sock_create4() 50 sk_release_kernel(sock->sk); udp_sock_create4() 60 struct sock *sk = sock->sk; setup_udp_tunnel_sock() local 63 inet_sk(sk)->mc_loop = 0; setup_udp_tunnel_sock() 66 inet_inc_convert_csum(sk); setup_udp_tunnel_sock() 68 rcu_assign_sk_user_data(sk, cfg->sk_user_data); setup_udp_tunnel_sock() 70 udp_sk(sk)->encap_type = cfg->encap_type; setup_udp_tunnel_sock() 71 udp_sk(sk)->encap_rcv = cfg->encap_rcv; setup_udp_tunnel_sock() 72 udp_sk(sk)->encap_destroy = cfg->encap_destroy; setup_udp_tunnel_sock() 78 int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, udp_tunnel_xmit_skb() argument 95 return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, udp_tunnel_xmit_skb() 102 rcu_assign_sk_user_data(sock->sk, NULL); udp_tunnel_sock_release() 104 sk_release_kernel(sock->sk); udp_tunnel_sock_release()
|
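setup_udp_tunnel_sock() takes a configuration struct and copies its callbacks and flags onto the socket, so the UDP receive path can hand encapsulated packets to encap_rcv. A hedged sketch of that wiring pattern in plain C (types and names are illustrative, not the kernel's):

#include <stdio.h>
#include <stddef.h>

struct tun_sock;

struct tun_cfg {
    int   encap_type;
    void *user_data;
    int (*encap_rcv)(struct tun_sock *s, const void *pkt, size_t len);
};

struct tun_sock {
    int   encap_type;
    void *user_data;
    int (*encap_rcv)(struct tun_sock *s, const void *pkt, size_t len);
};

static void setup_tunnel_sock(struct tun_sock *s, const struct tun_cfg *cfg)
{
    s->user_data  = cfg->user_data;    /* cf. rcu_assign_sk_user_data() */
    s->encap_type = cfg->encap_type;
    s->encap_rcv  = cfg->encap_rcv;    /* demux hook for encapsulated pkts */
}

static int my_rcv(struct tun_sock *s, const void *pkt, size_t len)
{
    (void)s; (void)pkt;
    printf("tunnel payload: %zu bytes\n", len);
    return 0;                          /* consumed */
}

int main(void)
{
    struct tun_sock s = {0};
    struct tun_cfg cfg = { .encap_type = 1, .encap_rcv = my_rcv };
    setup_tunnel_sock(&s, &cfg);
    char pkt[64] = {0};
    s.encap_rcv(&s, pkt, sizeof(pkt)); /* what the UDP rx path would do */
    return 0;
}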
H A D | ping.c | 82 int ping_get_port(struct sock *sk, unsigned short ident) ping_get_port() argument 89 isk = inet_sk(sk); ping_get_port() 98 hlist = ping_hashslot(&ping_table, sock_net(sk), ping_get_port() 116 hlist = ping_hashslot(&ping_table, sock_net(sk), ident); ping_portaddr_for_each_entry() 125 (sk2 != sk) && ping_portaddr_for_each_entry() 126 (!sk2->sk_reuse || !sk->sk_reuse)) ping_portaddr_for_each_entry() 133 if (sk_unhashed(sk)) { 135 sock_hold(sk); 136 hlist_nulls_add_head(&sk->sk_nulls_node, hlist); 137 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 148 void ping_hash(struct sock *sk) ping_hash() argument 150 pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num); ping_hash() 154 void ping_unhash(struct sock *sk) ping_unhash() argument 156 struct inet_sock *isk = inet_sk(sk); ping_unhash() 158 if (sk_hashed(sk)) { ping_unhash() 160 hlist_nulls_del(&sk->sk_nulls_node); ping_unhash() 161 sk_nulls_node_init(&sk->sk_nulls_node); ping_unhash() 162 sock_put(sk); ping_unhash() 165 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); ping_unhash() 174 struct sock *sk = NULL; ping_lookup() local 191 ping_portaddr_for_each_entry(sk, hnode, hslot) { ping_portaddr_for_each_entry() 192 isk = inet_sk(sk); ping_portaddr_for_each_entry() 199 sk->sk_family == AF_INET) { ping_portaddr_for_each_entry() 200 pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk, ping_portaddr_for_each_entry() 202 sk->sk_bound_dev_if); ping_portaddr_for_each_entry() 209 sk->sk_family == AF_INET6) { ping_portaddr_for_each_entry() 211 pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk, ping_portaddr_for_each_entry() 213 &sk->sk_v6_rcv_saddr, ping_portaddr_for_each_entry() 214 sk->sk_bound_dev_if); ping_portaddr_for_each_entry() 216 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && ping_portaddr_for_each_entry() 217 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, ping_portaddr_for_each_entry() 225 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ping_portaddr_for_each_entry() 228 sock_hold(sk); ping_portaddr_for_each_entry() 232 sk = NULL; 236 return sk; 254 int ping_init_sock(struct sock *sk) ping_init_sock() argument 256 struct net *net = sock_net(sk); ping_init_sock() 263 if (sk->sk_family == AF_INET6) ping_init_sock() 264 sk->sk_ipv6only = 1; ping_init_sock() 291 void ping_close(struct sock *sk, long timeout) ping_close() argument 293 pr_debug("ping_close(sk=%p,sk->num=%u)\n", ping_close() 294 inet_sk(sk), inet_sk(sk)->inet_num); ping_close() 295 pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter); ping_close() 297 sk_common_release(sk); ping_close() 301 /* Checks the bind address and possibly modifies sk->sk_bound_dev_if. 
*/ ping_check_bind_addr() 302 static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, ping_check_bind_addr() argument 304 struct net *net = sock_net(sk); ping_check_bind_addr() 305 if (sk->sk_family == AF_INET) { ping_check_bind_addr() 317 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", ping_check_bind_addr() 318 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); ping_check_bind_addr() 333 } else if (sk->sk_family == AF_INET6) { ping_check_bind_addr() 344 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", ping_check_bind_addr() 345 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); ping_check_bind_addr() 371 sk->sk_bound_dev_if = addr->sin6_scope_id; ping_check_bind_addr() 379 static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr) ping_set_saddr() argument 382 struct inet_sock *isk = inet_sk(sk); ping_set_saddr() 388 struct ipv6_pinfo *np = inet6_sk(sk); ping_set_saddr() 389 sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr; ping_set_saddr() 394 static void ping_clear_saddr(struct sock *sk, int dif) ping_clear_saddr() argument 396 sk->sk_bound_dev_if = dif; ping_clear_saddr() 397 if (sk->sk_family == AF_INET) { ping_clear_saddr() 398 struct inet_sock *isk = inet_sk(sk); ping_clear_saddr() 401 } else if (sk->sk_family == AF_INET6) { ping_clear_saddr() 402 struct ipv6_pinfo *np = inet6_sk(sk); ping_clear_saddr() 403 memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); ping_clear_saddr() 413 int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) ping_bind() argument 415 struct inet_sock *isk = inet_sk(sk); ping_bind() 418 int dif = sk->sk_bound_dev_if; ping_bind() 420 err = ping_check_bind_addr(sk, isk, uaddr, addr_len); ping_bind() 424 lock_sock(sk); ping_bind() 431 ping_set_saddr(sk, uaddr); ping_bind() 433 if (ping_get_port(sk, snum) != 0) { ping_bind() 434 ping_clear_saddr(sk, dif); ping_bind() 440 (int)sk->sk_bound_dev_if); ping_bind() 443 if (sk->sk_family == AF_INET && isk->inet_rcv_saddr) ping_bind() 444 sk->sk_userlocks |= SOCK_BINDADDR_LOCK; ping_bind() 446 if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr)) ping_bind() 447 sk->sk_userlocks |= SOCK_BINDADDR_LOCK; ping_bind() 451 sk->sk_userlocks |= SOCK_BINDPORT_LOCK; ping_bind() 457 if (sk->sk_family == AF_INET6) ping_bind() 458 memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); ping_bind() 461 sk_dst_reset(sk); ping_bind() 463 release_sock(sk); ping_bind() 492 struct sock *sk; ping_err() local 519 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); ping_err() 520 if (!sk) { ping_err() 524 pr_debug("err on socket %p\n", sk); ping_err() 528 inet_sock = inet_sk(sk); ping_err() 548 ipv4_sk_update_pmtu(skb, sk, info); ping_err() 564 ipv4_sk_redirect(skb, sk); ping_err() 579 (family == AF_INET6 && !inet6_sk(sk)->recverr)) { ping_err() 580 if (!harderr || sk->sk_state != TCP_ESTABLISHED) ping_err() 584 ip_icmp_error(sk, skb, err, 0 /* no remote port */, ping_err() 588 pingv6_ops.ipv6_icmp_error(sk, skb, err, 0, ping_err() 593 sk->sk_err = err; ping_err() 594 sk->sk_error_report(sk); ping_err() 596 sock_put(sk); ping_err() 642 static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, ping_v4_push_pending_frames() argument 645 struct sk_buff *skb = skb_peek(&sk->sk_write_queue); ping_v4_push_pending_frames() 652 return ip_push_pending_frames(sk, fl4); ping_v4_push_pending_frames() 696 static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ping_v4_sendmsg() argument 698 struct net *net = 
sock_net(sk); ping_v4_sendmsg() 700 struct inet_sock *inet = inet_sk(sk); ping_v4_sendmsg() 711 pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); ping_v4_sendmsg() 731 if (sk->sk_state != TCP_ESTABLISHED) ping_v4_sendmsg() 739 ipc.oif = sk->sk_bound_dev_if; ping_v4_sendmsg() 744 sock_tx_timestamp(sk, &ipc.tx_flags); ping_v4_sendmsg() 747 err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); ping_v4_sendmsg() 777 if (sock_flag(sk, SOCK_LOCALROUTE) || ping_v4_sendmsg() 791 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, ping_v4_sendmsg() 792 RT_SCOPE_UNIVERSE, sk->sk_protocol, ping_v4_sendmsg() 793 inet_sk_flowi_flags(sk), faddr, saddr, 0, 0); ping_v4_sendmsg() 795 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); ping_v4_sendmsg() 796 rt = ip_route_output_flow(net, &fl4, sk); ping_v4_sendmsg() 807 !sock_flag(sk, SOCK_BROADCAST)) ping_v4_sendmsg() 817 lock_sock(sk); ping_v4_sendmsg() 828 err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len, ping_v4_sendmsg() 831 ip_flush_pending_frames(sk); ping_v4_sendmsg() 833 err = ping_v4_push_pending_frames(sk, &pfh, &fl4); ping_v4_sendmsg() 834 release_sock(sk); ping_v4_sendmsg() 841 icmp_out_count(sock_net(sk), user_icmph.type); ping_v4_sendmsg() 854 int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, ping_recvmsg() argument 857 struct inet_sock *isk = inet_sk(sk); ping_recvmsg() 858 int family = sk->sk_family; ping_recvmsg() 862 pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); ping_recvmsg() 869 return inet_recv_error(sk, msg, len, addr_len); ping_recvmsg() 871 skb = skb_recv_datagram(sk, flags, noblock, &err); ping_recvmsg() 886 sock_recv_timestamp(msg, sk, skb); ping_recvmsg() 905 struct ipv6_pinfo *np = inet6_sk(sk); ping_recvmsg() 922 if (inet6_sk(sk)->rxopt.all) ping_recvmsg() 923 pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb); ping_recvmsg() 925 inet6_sk(sk)->rxopt.all) ping_recvmsg() 926 pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb); ping_recvmsg() 937 skb_free_datagram(sk, skb); ping_recvmsg() 944 int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) ping_queue_rcv_skb() argument 946 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", ping_queue_rcv_skb() 947 inet_sk(sk), inet_sk(sk)->inet_num, skb); ping_queue_rcv_skb() 948 if (sock_queue_rcv_skb(sk, skb) < 0) { ping_queue_rcv_skb() 964 struct sock *sk; ping_rcv() local 976 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); ping_rcv() 977 if (sk) { ping_rcv() 980 pr_debug("rcv on socket %p\n", sk); ping_rcv() 982 ping_queue_rcv_skb(sk, skb2); ping_rcv() 983 sock_put(sk); ping_rcv() 1017 struct sock *sk; ping_get_first() local 1031 sk_nulls_for_each(sk, node, hslot) { sk_nulls_for_each() 1032 if (net_eq(sock_net(sk), net) && sk_nulls_for_each() 1033 sk->sk_family == state->family) sk_nulls_for_each() 1037 sk = NULL; 1039 return sk; 1042 static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk) ping_get_next() argument 1048 sk = sk_nulls_next(sk); ping_get_next() 1049 } while (sk && (!net_eq(sock_net(sk), net))); ping_get_next() 1051 if (!sk) ping_get_next() 1053 return sk; ping_get_next() 1058 struct sock *sk = ping_get_first(seq, 0); ping_get_idx() local 1060 if (sk) ping_get_idx() 1061 while (pos && (sk = ping_get_next(seq, sk)) != NULL) ping_get_idx() 1063 return pos ? NULL : sk; ping_get_idx() 1085 struct sock *sk; ping_seq_next() local 1088 sk = ping_get_idx(seq, 0); ping_seq_next() 1090 sk = ping_get_next(seq, v); ping_seq_next() 1093 return sk; ping_seq_next()
|
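ping.c backs unprivileged ICMP sockets: ping_get_port() allocates the echo id, ping_v4_sendmsg() builds and checksums the datagram, and ping_rcv() demuxes replies by id. The user-space counterpart, which works when net.ipv4.ping_group_range covers the caller's group; the id and checksum are filled in by the kernel:

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/ip_icmp.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
    if (fd < 0) {
        perror("socket (check net.ipv4.ping_group_range)");
        return 1;
    }

    struct icmphdr icmp = { .type = ICMP_ECHO };  /* id set by the kernel */
    icmp.un.echo.sequence = htons(1);

    struct sockaddr_in dst = { .sin_family = AF_INET };
    inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

    sendto(fd, &icmp, sizeof(icmp), 0, (struct sockaddr *)&dst, sizeof(dst));

    char buf[512];
    ssize_t n = recv(fd, buf, sizeof(buf), 0);    /* the echo reply */
    printf("received %zd bytes\n", n);
    close(fd);
    return 0;
}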
H A D | tcp.c | 24 * sk->inuse=1 and was trying to connect 50 * Alan Cox : sk->keepopen now seems to work 52 * Alan Cox : Fixed assorted sk->rqueue->next errors 112 * Alan Cox : Changed the semantics of sk->socket to 165 * Marc Tamsky : Various sk->prot->retransmits and 166 * sk->retransmits misupdating fixed. 329 void tcp_enter_memory_pressure(struct sock *sk) tcp_enter_memory_pressure() argument 332 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); tcp_enter_memory_pressure() 380 void tcp_init_sock(struct sock *sk) tcp_init_sock() argument 382 struct inet_connection_sock *icsk = inet_csk(sk); tcp_init_sock() 383 struct tcp_sock *tp = tcp_sk(sk); tcp_init_sock() 386 tcp_init_xmit_timers(sk); tcp_init_sock() 410 tcp_assign_congestion_control(sk); tcp_init_sock() 414 sk->sk_state = TCP_CLOSE; tcp_init_sock() 416 sk->sk_write_space = sk_stream_write_space; tcp_init_sock() 417 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); tcp_init_sock() 421 sk->sk_sndbuf = sysctl_tcp_wmem[1]; tcp_init_sock() 422 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; tcp_init_sock() 425 sock_update_memcg(sk); tcp_init_sock() 426 sk_sockets_allocated_inc(sk); tcp_init_sock() 431 static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb) tcp_tx_timestamp() argument 433 if (sk->sk_tsflags) { tcp_tx_timestamp() 436 sock_tx_timestamp(sk, &shinfo->tx_flags); tcp_tx_timestamp() 452 struct sock *sk = sock->sk; tcp_poll() local 453 const struct tcp_sock *tp = tcp_sk(sk); tcp_poll() 455 sock_rps_record_flow(sk); tcp_poll() 457 sock_poll_wait(file, sk_sleep(sk), wait); tcp_poll() 458 if (sk->sk_state == TCP_LISTEN) tcp_poll() 459 return inet_csk_listen_poll(sk); tcp_poll() 495 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) tcp_poll() 497 if (sk->sk_shutdown & RCV_SHUTDOWN) tcp_poll() 501 if (sk->sk_state != TCP_SYN_SENT && tcp_poll() 502 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) { tcp_poll() 503 int target = sock_rcvlowat(sk, 0, INT_MAX); tcp_poll() 506 !sock_flag(sk, SOCK_URGINLINE) && tcp_poll() 511 * escape above sk->sk_state, we can be illegally awaken tcp_poll() 516 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { tcp_poll() 517 if (sk_stream_is_writeable(sk)) { tcp_poll() 521 &sk->sk_socket->flags); tcp_poll() 522 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); tcp_poll() 530 if (sk_stream_is_writeable(sk)) tcp_poll() 541 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) tcp_poll() 548 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) tcp_ioctl() argument 550 struct tcp_sock *tp = tcp_sk(sk); tcp_ioctl() 556 if (sk->sk_state == TCP_LISTEN) tcp_ioctl() 559 slow = lock_sock_fast(sk); tcp_ioctl() 560 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) tcp_ioctl() 562 else if (sock_flag(sk, SOCK_URGINLINE) || tcp_ioctl() 570 if (answ && sock_flag(sk, SOCK_DONE)) tcp_ioctl() 574 unlock_sock_fast(sk, slow); tcp_ioctl() 580 if (sk->sk_state == TCP_LISTEN) tcp_ioctl() 583 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) tcp_ioctl() 589 if (sk->sk_state == TCP_LISTEN) tcp_ioctl() 592 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) tcp_ioctl() 616 static void skb_entail(struct sock *sk, struct sk_buff *skb) skb_entail() argument 618 struct tcp_sock *tp = tcp_sk(sk); skb_entail() 626 tcp_add_write_queue_tail(sk, skb); skb_entail() 627 sk->sk_wmem_queued += skb->truesize; skb_entail() 628 sk_mem_charge(sk, skb->truesize); skb_entail() 649 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, tcp_should_autocork() argument 654 skb != 
tcp_write_queue_head(sk) && tcp_should_autocork() 655 atomic_read(&sk->sk_wmem_alloc) > skb->truesize; tcp_should_autocork() 658 static void tcp_push(struct sock *sk, int flags, int mss_now, tcp_push() argument 661 struct tcp_sock *tp = tcp_sk(sk); tcp_push() 664 if (!tcp_send_head(sk)) tcp_push() 667 skb = tcp_write_queue_tail(sk); tcp_push() 673 if (tcp_should_autocork(sk, skb, size_goal)) { tcp_push() 677 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); tcp_push() 683 if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize) tcp_push() 690 __tcp_push_pending_frames(sk, mss_now, nonagle); tcp_push() 706 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) __tcp_splice_read() argument 714 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); __tcp_splice_read() 733 struct sock *sk = sock->sk; tcp_splice_read() local 743 sock_rps_record_flow(sk); tcp_splice_read() 752 lock_sock(sk); tcp_splice_read() 754 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); tcp_splice_read() 756 ret = __tcp_splice_read(sk, &tss); tcp_splice_read() 762 if (sock_flag(sk, SOCK_DONE)) tcp_splice_read() 764 if (sk->sk_err) { tcp_splice_read() 765 ret = sock_error(sk); tcp_splice_read() 768 if (sk->sk_shutdown & RCV_SHUTDOWN) tcp_splice_read() 770 if (sk->sk_state == TCP_CLOSE) { tcp_splice_read() 775 if (!sock_flag(sk, SOCK_DONE)) tcp_splice_read() 783 sk_wait_data(sk, &timeo); tcp_splice_read() 795 release_sock(sk); tcp_splice_read() 796 lock_sock(sk); tcp_splice_read() 798 if (sk->sk_err || sk->sk_state == TCP_CLOSE || tcp_splice_read() 799 (sk->sk_shutdown & RCV_SHUTDOWN) || tcp_splice_read() 804 release_sock(sk); tcp_splice_read() 813 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) sk_stream_alloc_skb() argument 820 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); sk_stream_alloc_skb() 822 if (sk_wmem_schedule(sk, skb->truesize)) { sk_stream_alloc_skb() 823 skb_reserve(skb, sk->sk_prot->max_header); sk_stream_alloc_skb() 833 sk->sk_prot->enter_memory_pressure(sk); sk_stream_alloc_skb() 834 sk_stream_moderate_sndbuf(sk); sk_stream_alloc_skb() 839 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, tcp_xmit_size_goal() argument 842 struct tcp_sock *tp = tcp_sk(sk); tcp_xmit_size_goal() 845 if (!large_allowed || !sk_can_gso(sk)) tcp_xmit_size_goal() 849 new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; tcp_xmit_size_goal() 857 sk->sk_gso_max_segs); tcp_xmit_size_goal() 864 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) tcp_send_mss() argument 868 mss_now = tcp_current_mss(sk); tcp_send_mss() 869 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); tcp_send_mss() 874 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, do_tcp_sendpages() argument 877 struct tcp_sock *tp = tcp_sk(sk); do_tcp_sendpages() 881 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); do_tcp_sendpages() 887 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && do_tcp_sendpages() 888 !tcp_passive_fastopen(sk)) { do_tcp_sendpages() 889 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) do_tcp_sendpages() 893 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); do_tcp_sendpages() 895 mss_now = tcp_send_mss(sk, &size_goal, flags); do_tcp_sendpages() 899 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) do_tcp_sendpages() 903 struct sk_buff *skb = tcp_write_queue_tail(sk); do_tcp_sendpages() 907 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { 
do_tcp_sendpages() 909 if (!sk_stream_memory_free(sk)) do_tcp_sendpages() 912 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); do_tcp_sendpages() 916 skb_entail(sk, skb); do_tcp_sendpages() 929 if (!sk_wmem_schedule(sk, copy)) do_tcp_sendpages() 943 sk->sk_wmem_queued += copy; do_tcp_sendpages() 944 sk_mem_charge(sk, copy); do_tcp_sendpages() 956 tcp_tx_timestamp(sk, skb); do_tcp_sendpages() 965 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); do_tcp_sendpages() 966 } else if (skb == tcp_send_head(sk)) do_tcp_sendpages() 967 tcp_push_one(sk, mss_now); do_tcp_sendpages() 971 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); do_tcp_sendpages() 973 tcp_push(sk, flags & ~MSG_MORE, mss_now, do_tcp_sendpages() 976 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) do_tcp_sendpages() 979 mss_now = tcp_send_mss(sk, &size_goal, flags); do_tcp_sendpages() 984 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); do_tcp_sendpages() 991 return sk_stream_error(sk, flags, err); do_tcp_sendpages() 994 int tcp_sendpage(struct sock *sk, struct page *page, int offset, tcp_sendpage() argument 999 if (!(sk->sk_route_caps & NETIF_F_SG) || tcp_sendpage() 1000 !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) tcp_sendpage() 1001 return sock_no_sendpage(sk->sk_socket, page, offset, size, tcp_sendpage() 1004 lock_sock(sk); tcp_sendpage() 1005 res = do_tcp_sendpages(sk, page, offset, size, flags); tcp_sendpage() 1006 release_sock(sk); tcp_sendpage() 1011 static inline int select_size(const struct sock *sk, bool sg) select_size() argument 1013 const struct tcp_sock *tp = tcp_sk(sk); select_size() 1017 if (sk_can_gso(sk)) { select_size() 1042 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, tcp_sendmsg_fastopen() argument 1045 struct tcp_sock *tp = tcp_sk(sk); tcp_sendmsg_fastopen() 1054 sk->sk_allocation); tcp_sendmsg_fastopen() 1061 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, tcp_sendmsg_fastopen() 1068 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) tcp_sendmsg() argument 1070 struct tcp_sock *tp = tcp_sk(sk); tcp_sendmsg() 1077 lock_sock(sk); tcp_sendmsg() 1081 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); tcp_sendmsg() 1088 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); tcp_sendmsg() 1094 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && tcp_sendmsg() 1095 !tcp_passive_fastopen(sk)) { tcp_sendmsg() 1096 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) tcp_sendmsg() 1102 copied = tcp_send_rcvq(sk, msg, size); tcp_sendmsg() 1114 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); tcp_sendmsg() 1116 mss_now = tcp_send_mss(sk, &size_goal, flags); tcp_sendmsg() 1122 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) tcp_sendmsg() 1125 sg = !!(sk->sk_route_caps & NETIF_F_SG); tcp_sendmsg() 1131 skb = tcp_write_queue_tail(sk); tcp_sendmsg() 1132 if (tcp_send_head(sk)) { tcp_sendmsg() 1143 if (!sk_stream_memory_free(sk)) tcp_sendmsg() 1146 skb = sk_stream_alloc_skb(sk, tcp_sendmsg() 1147 select_size(sk, sg), tcp_sendmsg() 1148 sk->sk_allocation); tcp_sendmsg() 1155 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) tcp_sendmsg() 1158 skb_entail(sk, skb); tcp_sendmsg() 1178 err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); tcp_sendmsg() 1184 struct page_frag *pfrag = sk_page_frag(sk); tcp_sendmsg() 1186 if (!sk_page_frag_refill(sk, pfrag)) tcp_sendmsg() 1200 if (!sk_wmem_schedule(sk, copy)) tcp_sendmsg() 1203 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, tcp_sendmsg() 1230 tcp_tx_timestamp(sk, skb); tcp_sendmsg() 1239 
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); tcp_sendmsg() 1240 } else if (skb == tcp_send_head(sk)) tcp_sendmsg() 1241 tcp_push_one(sk, mss_now); tcp_sendmsg() 1245 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); tcp_sendmsg() 1248 tcp_push(sk, flags & ~MSG_MORE, mss_now, tcp_sendmsg() 1251 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) tcp_sendmsg() 1254 mss_now = tcp_send_mss(sk, &size_goal, flags); tcp_sendmsg() 1259 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); tcp_sendmsg() 1261 release_sock(sk); tcp_sendmsg() 1266 tcp_unlink_write_queue(skb, sk); tcp_sendmsg() 1270 tcp_check_send_head(sk, skb); tcp_sendmsg() 1271 sk_wmem_free_skb(sk, skb); tcp_sendmsg() 1278 err = sk_stream_error(sk, flags, err); tcp_sendmsg() 1279 release_sock(sk); tcp_sendmsg() 1289 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) tcp_recv_urg() argument 1291 struct tcp_sock *tp = tcp_sk(sk); tcp_recv_urg() 1294 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || tcp_recv_urg() 1298 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) tcp_recv_urg() 1321 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) tcp_recv_urg() 1333 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) tcp_peek_sndq() argument 1340 skb_queue_walk(&sk->sk_write_queue, skb) { tcp_peek_sndq() 1357 static void tcp_cleanup_rbuf(struct sock *sk, int copied) tcp_cleanup_rbuf() argument 1359 struct tcp_sock *tp = tcp_sk(sk); tcp_cleanup_rbuf() 1362 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); tcp_cleanup_rbuf() 1368 if (inet_csk_ack_scheduled(sk)) { tcp_cleanup_rbuf() 1369 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_cleanup_rbuf() 1385 !atomic_read(&sk->sk_rmem_alloc))) tcp_cleanup_rbuf() 1395 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { tcp_cleanup_rbuf() 1400 __u32 new_window = __tcp_select_window(sk); tcp_cleanup_rbuf() 1412 tcp_send_ack(sk); tcp_cleanup_rbuf() 1415 static void tcp_prequeue_process(struct sock *sk) tcp_prequeue_process() argument 1418 struct tcp_sock *tp = tcp_sk(sk); tcp_prequeue_process() 1420 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED); tcp_prequeue_process() 1426 sk_backlog_rcv(sk, skb); tcp_prequeue_process() 1433 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) tcp_recv_skb() argument 1438 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { tcp_recv_skb() 1450 sk_eat_skb(sk, skb); tcp_recv_skb() 1466 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_read_sock() argument 1470 struct tcp_sock *tp = tcp_sk(sk); tcp_read_sock() 1475 if (sk->sk_state == TCP_LISTEN) tcp_read_sock() 1477 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { tcp_read_sock() 1506 skb = tcp_recv_skb(sk, seq - 1, &offset); tcp_read_sock() 1516 sk_eat_skb(sk, skb); tcp_read_sock() 1520 sk_eat_skb(sk, skb); tcp_read_sock() 1527 tcp_rcv_space_adjust(sk); tcp_read_sock() 1531 tcp_recv_skb(sk, seq, &offset); tcp_read_sock() 1532 tcp_cleanup_rbuf(sk, copied); tcp_read_sock() 1546 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, tcp_recvmsg() argument 1549 struct tcp_sock *tp = tcp_sk(sk); tcp_recvmsg() 1562 return inet_recv_error(sk, msg, len, addr_len); tcp_recvmsg() 1564 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && tcp_recvmsg() 1565 (sk->sk_state == TCP_ESTABLISHED)) tcp_recvmsg() 1566 sk_busy_loop(sk, nonblock); tcp_recvmsg() 1568 lock_sock(sk); tcp_recvmsg() 1571 if (sk->sk_state == 
TCP_LISTEN) tcp_recvmsg() 1574 timeo = sock_rcvtimeo(sk, nonblock); tcp_recvmsg() 1601 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); tcp_recvmsg() 1618 skb_queue_walk(&sk->sk_receive_queue, skb) { tcp_recvmsg() 1642 if (copied >= target && !sk->sk_backlog.tail) tcp_recvmsg() 1646 if (sk->sk_err || tcp_recvmsg() 1647 sk->sk_state == TCP_CLOSE || tcp_recvmsg() 1648 (sk->sk_shutdown & RCV_SHUTDOWN) || tcp_recvmsg() 1653 if (sock_flag(sk, SOCK_DONE)) tcp_recvmsg() 1656 if (sk->sk_err) { tcp_recvmsg() 1657 copied = sock_error(sk); tcp_recvmsg() 1661 if (sk->sk_shutdown & RCV_SHUTDOWN) tcp_recvmsg() 1664 if (sk->sk_state == TCP_CLOSE) { tcp_recvmsg() 1665 if (!sock_flag(sk, SOCK_DONE)) { tcp_recvmsg() 1686 tcp_cleanup_rbuf(sk, copied); tcp_recvmsg() 1735 release_sock(sk); tcp_recvmsg() 1736 lock_sock(sk); tcp_recvmsg() 1738 sk_wait_data(sk, &timeo); tcp_recvmsg() 1746 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); tcp_recvmsg() 1754 tcp_prequeue_process(sk); tcp_recvmsg() 1757 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); tcp_recvmsg() 1783 if (!sock_flag(sk, SOCK_URGINLINE)) { tcp_recvmsg() 1810 tcp_rcv_space_adjust(sk); tcp_recvmsg() 1815 tcp_fast_path_check(sk); tcp_recvmsg() 1823 sk_eat_skb(sk, skb); tcp_recvmsg() 1830 sk_eat_skb(sk, skb); tcp_recvmsg() 1840 tcp_prequeue_process(sk); tcp_recvmsg() 1843 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); tcp_recvmsg() 1858 tcp_cleanup_rbuf(sk, copied); tcp_recvmsg() 1860 release_sock(sk); tcp_recvmsg() 1864 release_sock(sk); tcp_recvmsg() 1868 err = tcp_recv_urg(sk, msg, len, flags); tcp_recvmsg() 1872 err = tcp_peek_sndq(sk, msg, len); tcp_recvmsg() 1877 void tcp_set_state(struct sock *sk, int state) tcp_set_state() argument 1879 int oldstate = sk->sk_state; tcp_set_state() 1884 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); tcp_set_state() 1889 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); tcp_set_state() 1891 sk->sk_prot->unhash(sk); tcp_set_state() 1892 if (inet_csk(sk)->icsk_bind_hash && tcp_set_state() 1893 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) tcp_set_state() 1894 inet_put_port(sk); tcp_set_state() 1898 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); tcp_set_state() 1904 sk->sk_state = state; tcp_set_state() 1907 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); tcp_set_state() 1936 static int tcp_close_state(struct sock *sk) tcp_close_state() argument 1938 int next = (int)new_state[sk->sk_state]; tcp_close_state() 1941 tcp_set_state(sk, ns); tcp_close_state() 1948 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 
1951 void tcp_shutdown(struct sock *sk, int how) tcp_shutdown() argument 1961 if ((1 << sk->sk_state) & tcp_shutdown() 1965 if (tcp_close_state(sk)) tcp_shutdown() 1966 tcp_send_fin(sk); tcp_shutdown() 1971 bool tcp_check_oom(struct sock *sk, int shift) tcp_check_oom() argument 1975 too_many_orphans = tcp_too_many_orphans(sk, shift); tcp_check_oom() 1976 out_of_socket_memory = tcp_out_of_memory(sk); tcp_check_oom() 1985 void tcp_close(struct sock *sk, long timeout) tcp_close() argument 1991 lock_sock(sk); tcp_close() 1992 sk->sk_shutdown = SHUTDOWN_MASK; tcp_close() 1994 if (sk->sk_state == TCP_LISTEN) { tcp_close() 1995 tcp_set_state(sk, TCP_CLOSE); tcp_close() 1998 inet_csk_listen_stop(sk); tcp_close() 2007 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { tcp_close() 2016 sk_mem_reclaim(sk); tcp_close() 2019 if (sk->sk_state == TCP_CLOSE) tcp_close() 2029 if (unlikely(tcp_sk(sk)->repair)) { tcp_close() 2030 sk->sk_prot->disconnect(sk, 0); tcp_close() 2033 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); tcp_close() 2034 tcp_set_state(sk, TCP_CLOSE); tcp_close() 2035 tcp_send_active_reset(sk, sk->sk_allocation); tcp_close() 2036 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { tcp_close() 2038 sk->sk_prot->disconnect(sk, 0); tcp_close() 2039 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_close() 2040 } else if (tcp_close_state(sk)) { tcp_close() 2070 tcp_send_fin(sk); tcp_close() 2073 sk_stream_wait_close(sk, timeout); tcp_close() 2076 state = sk->sk_state; tcp_close() 2077 sock_hold(sk); tcp_close() 2078 sock_orphan(sk); tcp_close() 2081 release_sock(sk); tcp_close() 2088 bh_lock_sock(sk); tcp_close() 2089 WARN_ON(sock_owned_by_user(sk)); tcp_close() 2091 percpu_counter_inc(sk->sk_prot->orphan_count); tcp_close() 2094 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) tcp_close() 2111 if (sk->sk_state == TCP_FIN_WAIT2) { tcp_close() 2112 struct tcp_sock *tp = tcp_sk(sk); tcp_close() 2114 tcp_set_state(sk, TCP_CLOSE); tcp_close() 2115 tcp_send_active_reset(sk, GFP_ATOMIC); tcp_close() 2116 NET_INC_STATS_BH(sock_net(sk), tcp_close() 2119 const int tmo = tcp_fin_time(sk); tcp_close() 2122 inet_csk_reset_keepalive_timer(sk, tcp_close() 2125 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); tcp_close() 2130 if (sk->sk_state != TCP_CLOSE) { tcp_close() 2131 sk_mem_reclaim(sk); tcp_close() 2132 if (tcp_check_oom(sk, 0)) { tcp_close() 2133 tcp_set_state(sk, TCP_CLOSE); tcp_close() 2134 tcp_send_active_reset(sk, GFP_ATOMIC); tcp_close() 2135 NET_INC_STATS_BH(sock_net(sk), tcp_close() 2140 if (sk->sk_state == TCP_CLOSE) { tcp_close() 2141 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; tcp_close() 2147 reqsk_fastopen_remove(sk, req, false); tcp_close() 2148 inet_csk_destroy_sock(sk); tcp_close() 2153 bh_unlock_sock(sk); tcp_close() 2155 sock_put(sk); tcp_close() 2168 int tcp_disconnect(struct sock *sk, int flags) tcp_disconnect() argument 2170 struct inet_sock *inet = inet_sk(sk); tcp_disconnect() 2171 struct inet_connection_sock *icsk = inet_csk(sk); tcp_disconnect() 2172 struct tcp_sock *tp = tcp_sk(sk); tcp_disconnect() 2174 int old_state = sk->sk_state; tcp_disconnect() 2177 tcp_set_state(sk, TCP_CLOSE); tcp_disconnect() 2181 inet_csk_listen_stop(sk); tcp_disconnect() 2183 sk->sk_err = ECONNABORTED; tcp_disconnect() 2190 tcp_send_active_reset(sk, gfp_any()); tcp_disconnect() 2191 sk->sk_err = ECONNRESET; tcp_disconnect() 2193 sk->sk_err = ECONNRESET; tcp_disconnect() 2195 tcp_clear_xmit_timers(sk); tcp_disconnect() 2196 
__skb_queue_purge(&sk->sk_receive_queue); tcp_disconnect() 2197 tcp_write_queue_purge(sk); tcp_disconnect() 2202 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) tcp_disconnect() 2203 inet_reset_saddr(sk); tcp_disconnect() 2205 sk->sk_shutdown = 0; tcp_disconnect() 2206 sock_reset_flag(sk, SOCK_DONE); tcp_disconnect() 2217 tcp_set_ca_state(sk, TCP_CA_Open); tcp_disconnect() 2219 inet_csk_delack_init(sk); tcp_disconnect() 2220 tcp_init_send_head(sk); tcp_disconnect() 2222 __sk_dst_reset(sk); tcp_disconnect() 2226 sk->sk_error_report(sk); tcp_disconnect() 2231 void tcp_sock_destruct(struct sock *sk) tcp_sock_destruct() argument 2233 inet_sock_destruct(sk); tcp_sock_destruct() 2235 kfree(inet_csk(sk)->icsk_accept_queue.fastopenq); tcp_sock_destruct() 2238 static inline bool tcp_can_repair_sock(const struct sock *sk) tcp_can_repair_sock() argument 2240 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && tcp_can_repair_sock() 2241 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); tcp_can_repair_sock() 2296 static int do_tcp_setsockopt(struct sock *sk, int level, do_tcp_setsockopt() argument 2299 struct tcp_sock *tp = tcp_sk(sk); do_tcp_setsockopt() 2300 struct inet_connection_sock *icsk = inet_csk(sk); do_tcp_setsockopt() 2318 lock_sock(sk); do_tcp_setsockopt() 2319 err = tcp_set_congestion_control(sk, name); do_tcp_setsockopt() 2320 release_sock(sk); do_tcp_setsockopt() 2334 lock_sock(sk); do_tcp_setsockopt() 2359 tcp_push_pending_frames(sk); do_tcp_setsockopt() 2383 if (!tcp_can_repair_sock(sk)) do_tcp_setsockopt() 2387 sk->sk_reuse = SK_FORCE_REUSE; do_tcp_setsockopt() 2391 sk->sk_reuse = SK_NO_REUSE; do_tcp_setsockopt() 2392 tcp_send_window_probe(sk); do_tcp_setsockopt() 2408 if (sk->sk_state != TCP_CLOSE) do_tcp_setsockopt() 2421 else if (sk->sk_state == TCP_ESTABLISHED) do_tcp_setsockopt() 2447 tcp_push_pending_frames(sk); do_tcp_setsockopt() 2456 if (sock_flag(sk, SOCK_KEEPOPEN) && do_tcp_setsockopt() 2457 !((1 << sk->sk_state) & do_tcp_setsockopt() 2464 inet_csk_reset_keepalive_timer(sk, elapsed); do_tcp_setsockopt() 2505 if (sk->sk_state != TCP_CLOSE) { do_tcp_setsockopt() 2520 if ((1 << sk->sk_state) & do_tcp_setsockopt() 2522 inet_csk_ack_scheduled(sk)) { do_tcp_setsockopt() 2524 tcp_cleanup_rbuf(sk, 1); do_tcp_setsockopt() 2534 err = tp->af_specific->md5_parse(sk, optval, optlen); do_tcp_setsockopt() 2548 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | do_tcp_setsockopt() 2552 err = fastopen_init_queue(sk, val); do_tcp_setsockopt() 2565 sk->sk_write_space(sk); do_tcp_setsockopt() 2572 release_sock(sk); do_tcp_setsockopt() 2576 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, tcp_setsockopt() argument 2579 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_setsockopt() 2582 return icsk->icsk_af_ops->setsockopt(sk, level, optname, tcp_setsockopt() 2584 return do_tcp_setsockopt(sk, level, optname, optval, optlen); tcp_setsockopt() 2589 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, compat_tcp_setsockopt() argument 2593 return inet_csk_compat_setsockopt(sk, level, optname, compat_tcp_setsockopt() 2595 return do_tcp_setsockopt(sk, level, optname, optval, optlen); compat_tcp_setsockopt() 2601 void tcp_get_info(struct sock *sk, struct tcp_info *info) tcp_get_info() argument 2603 const struct tcp_sock *tp = tcp_sk(sk); tcp_get_info() 2604 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_get_info() 2612 info->tcpi_state = sk->sk_state; tcp_get_info() 2640 if (sk->sk_state == TCP_LISTEN) { tcp_get_info() 
2641 info->tcpi_unacked = sk->sk_ack_backlog; tcp_get_info() 2642 info->tcpi_sacked = sk->sk_max_ack_backlog; tcp_get_info() 2669 rate = READ_ONCE(sk->sk_pacing_rate); tcp_get_info() 2673 rate = READ_ONCE(sk->sk_max_pacing_rate); tcp_get_info() 2685 static int do_tcp_getsockopt(struct sock *sk, int level, do_tcp_getsockopt() argument 2688 struct inet_connection_sock *icsk = inet_csk(sk); do_tcp_getsockopt() 2689 struct tcp_sock *tp = tcp_sk(sk); do_tcp_getsockopt() 2703 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) do_tcp_getsockopt() 2744 tcp_get_info(sk, &info); do_tcp_getsockopt() 2764 sz = ca_ops->get_info(sk, ~0U, &attr, &info); do_tcp_getsockopt() 2842 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, tcp_getsockopt() argument 2845 struct inet_connection_sock *icsk = inet_csk(sk); tcp_getsockopt() 2848 return icsk->icsk_af_ops->getsockopt(sk, level, optname, tcp_getsockopt() 2850 return do_tcp_getsockopt(sk, level, optname, optval, optlen); tcp_getsockopt() 2855 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, compat_tcp_getsockopt() argument 2859 return inet_csk_compat_getsockopt(sk, level, optname, compat_tcp_getsockopt() 2861 return do_tcp_getsockopt(sk, level, optname, optval, optlen); compat_tcp_getsockopt() 2994 void tcp_done(struct sock *sk) tcp_done() argument 2996 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; tcp_done() 2998 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) tcp_done() 2999 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); tcp_done() 3001 tcp_set_state(sk, TCP_CLOSE); tcp_done() 3002 tcp_clear_xmit_timers(sk); tcp_done() 3004 reqsk_fastopen_remove(sk, req, false); tcp_done() 3006 sk->sk_shutdown = SHUTDOWN_MASK; tcp_done() 3008 if (!sock_flag(sk, SOCK_DEAD)) tcp_done() 3009 sk->sk_state_change(sk); tcp_done() 3011 inet_csk_destroy_sock(sk); tcp_done()
|
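Taken together, the tcp.c fragments above are the kernel side of the ordinary BSD socket calls: setsockopt(IPPROTO_TCP, ...) lands in do_tcp_setsockopt() under lock_sock(), send()/write() end up in tcp_sendmsg(), poll() in tcp_poll(), and dropping the last descriptor runs tcp_close(). As a rough sketch of the userspace counterpart (not part of the source; the 192.0.2.1:7 endpoint is a placeholder), the following drives exactly those entry points:

        /* sketch: exercises do_tcp_setsockopt(), tcp_sendmsg() and
         * tcp_close() through the usual syscalls; 192.0.2.1:7 is made up */
        #include <arpa/inet.h>
        #include <netinet/in.h>
        #include <netinet/tcp.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int main(void)
        {
                struct sockaddr_in sa = { .sin_family = AF_INET,
                                          .sin_port = htons(7) };
                int fd = socket(AF_INET, SOCK_STREAM, 0);
                int one = 1;

                inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
                /* TCP_NODELAY is handled in do_tcp_setsockopt(), which may
                 * call tcp_push_pending_frames() as the fragments show */
                setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
                if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0)
                        write(fd, "ping", 4);   /* tcp_sendmsg() */
                close(fd);                      /* tcp_close(sk, timeout) */
                return 0;
        }

The lock_sock()/release_sock() bracketing visible in tcp_sendmsg() and do_tcp_setsockopt() is what keeps these process-context calls safe against the softirq receive path.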
H A D | inet_connection_sock.c | 46 int inet_csk_bind_conflict(const struct sock *sk, inet_csk_bind_conflict() argument 50 int reuse = sk->sk_reuse; inet_csk_bind_conflict() 51 int reuseport = sk->sk_reuseport; inet_csk_bind_conflict() 52 kuid_t uid = sock_i_uid((struct sock *)sk); inet_csk_bind_conflict() 55 * Unlike other sk lookup places we do not check inet_csk_bind_conflict() 62 if (sk != sk2 && inet_csk_bind_conflict() 64 (!sk->sk_bound_dev_if || inet_csk_bind_conflict() 66 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { inet_csk_bind_conflict() 73 if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr || inet_csk_bind_conflict() 74 sk2->sk_rcv_saddr == sk->sk_rcv_saddr) inet_csk_bind_conflict() 80 if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr || inet_csk_bind_conflict() 81 sk2->sk_rcv_saddr == sk->sk_rcv_saddr) inet_csk_bind_conflict() 93 int inet_csk_get_port(struct sock *sk, unsigned short snum) inet_csk_get_port() argument 95 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; inet_csk_get_port() 99 struct net *net = sock_net(sk); inet_csk_get_port() 101 kuid_t uid = sock_i_uid(sk); inet_csk_get_port() 122 sk->sk_reuse && inet_csk_get_port() 123 sk->sk_state != TCP_LISTEN) || inet_csk_get_port() 125 sk->sk_reuseport && inet_csk_get_port() 131 !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { inet_csk_get_port() 136 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { inet_csk_get_port() 181 if (sk->sk_reuse == SK_FORCE_REUSE) inet_csk_get_port() 185 sk->sk_reuse && sk->sk_state != TCP_LISTEN) || inet_csk_get_port() 187 sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && inet_csk_get_port() 192 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) { inet_csk_get_port() 193 if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) || inet_csk_get_port() 195 sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && inet_csk_get_port() 211 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) inet_csk_get_port() 215 if (sk->sk_reuseport) { inet_csk_get_port() 222 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) inet_csk_get_port() 225 (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))) inet_csk_get_port() 229 if (!inet_csk(sk)->icsk_bind_hash) inet_csk_get_port() 230 inet_bind_hash(sk, tb, snum); inet_csk_get_port() 231 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); inet_csk_get_port() 246 static int inet_csk_wait_for_connect(struct sock *sk, long timeo) inet_csk_wait_for_connect() argument 248 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_wait_for_connect() 267 prepare_to_wait_exclusive(sk_sleep(sk), &wait, inet_csk_wait_for_connect() 269 release_sock(sk); inet_csk_wait_for_connect() 273 lock_sock(sk); inet_csk_wait_for_connect() 278 if (sk->sk_state != TCP_LISTEN) inet_csk_wait_for_connect() 287 finish_wait(sk_sleep(sk), &wait); inet_csk_wait_for_connect() 294 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) inet_csk_accept() argument 296 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_accept() 302 lock_sock(sk); inet_csk_accept() 308 if (sk->sk_state != TCP_LISTEN) inet_csk_accept() 313 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); inet_csk_accept() 320 error = inet_csk_wait_for_connect(sk, timeo); inet_csk_accept() 325 newsk = req->sk; inet_csk_accept() 327 sk_acceptq_removed(sk); inet_csk_accept() 328 if (sk->sk_protocol == IPPROTO_TCP && inet_csk_accept() 334 * so can't free req now. 
Instead, we set req->sk to inet_csk_accept() 339 req->sk = NULL; inet_csk_accept() 345 release_sock(sk); inet_csk_accept() 362 void inet_csk_init_xmit_timers(struct sock *sk, inet_csk_init_xmit_timers() argument 367 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_init_xmit_timers() 370 (unsigned long)sk); inet_csk_init_xmit_timers() 372 (unsigned long)sk); inet_csk_init_xmit_timers() 373 setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk); inet_csk_init_xmit_timers() 378 void inet_csk_clear_xmit_timers(struct sock *sk) inet_csk_clear_xmit_timers() argument 380 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_clear_xmit_timers() 384 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); inet_csk_clear_xmit_timers() 385 sk_stop_timer(sk, &icsk->icsk_delack_timer); inet_csk_clear_xmit_timers() 386 sk_stop_timer(sk, &sk->sk_timer); inet_csk_clear_xmit_timers() 390 void inet_csk_delete_keepalive_timer(struct sock *sk) inet_csk_delete_keepalive_timer() argument 392 sk_stop_timer(sk, &sk->sk_timer); inet_csk_delete_keepalive_timer() 396 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) inet_csk_reset_keepalive_timer() argument 398 sk_reset_timer(sk, &sk->sk_timer, jiffies + len); inet_csk_reset_keepalive_timer() 402 struct dst_entry *inet_csk_route_req(struct sock *sk, inet_csk_route_req() argument 412 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, inet_csk_route_req() 413 sk->sk_protocol, inet_sk_flowi_flags(sk), inet_csk_route_req() 418 rt = ip_route_output_flow(net, fl4, sk); inet_csk_route_req() 433 struct dst_entry *inet_csk_route_child_sock(struct sock *sk, inet_csk_route_child_sock() argument 449 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, inet_csk_route_child_sock() 450 sk->sk_protocol, inet_sk_flowi_flags(sk), inet_csk_route_child_sock() 455 rt = ip_route_output_flow(net, fl4, sk); inet_csk_route_child_sock() 487 struct request_sock *inet_csk_search_req(struct sock *sk, inet_csk_search_req() argument 492 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_search_req() 507 WARN_ON(req->sk); inet_csk_search_req() 517 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, inet_csk_reqsk_queue_hash_add() argument 520 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_reqsk_queue_hash_add() 527 inet_csk_reqsk_queue_added(sk, timeout); inet_csk_reqsk_queue_hash_add() 593 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) inet_csk_reqsk_queue_drop() argument 595 if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) { inet_csk_reqsk_queue_drop() 596 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); inet_csk_reqsk_queue_drop() 678 req->sk = NULL; reqsk_queue_hash_req() 699 * @sk: the socket to clone 705 struct sock *inet_csk_clone_lock(const struct sock *sk, inet_csk_clone_lock() argument 709 struct sock *newsk = sk_clone_lock(sk, priority); inet_csk_clone_lock() 745 void inet_csk_destroy_sock(struct sock *sk) inet_csk_destroy_sock() argument 747 WARN_ON(sk->sk_state != TCP_CLOSE); inet_csk_destroy_sock() 748 WARN_ON(!sock_flag(sk, SOCK_DEAD)); inet_csk_destroy_sock() 751 WARN_ON(!sk_unhashed(sk)); inet_csk_destroy_sock() 753 /* If it has not 0 inet_sk(sk)->inet_num, it must be bound */ inet_csk_destroy_sock() 754 WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash); inet_csk_destroy_sock() 756 sk->sk_prot->destroy(sk); inet_csk_destroy_sock() 758 sk_stream_kill_queues(sk); inet_csk_destroy_sock() 760 xfrm_sk_free_policy(sk); inet_csk_destroy_sock() 762 
sk_refcnt_debug_release(sk); inet_csk_destroy_sock() 764 percpu_counter_dec(sk->sk_prot->orphan_count); inet_csk_destroy_sock() 765 sock_put(sk); inet_csk_destroy_sock() 772 void inet_csk_prepare_forced_close(struct sock *sk) 773 __releases(&sk->sk_lock.slock) 776 bh_unlock_sock(sk); variable 777 sock_put(sk); variable 780 sock_set_flag(sk, SOCK_DEAD); 781 percpu_counter_inc(sk->sk_prot->orphan_count); 782 inet_sk(sk)->inet_num = 0; 786 int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) inet_csk_listen_start() argument 788 struct inet_sock *inet = inet_sk(sk); inet_csk_listen_start() 789 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_listen_start() 795 sk->sk_max_ack_backlog = 0; inet_csk_listen_start() 796 sk->sk_ack_backlog = 0; inet_csk_listen_start() 797 inet_csk_delack_init(sk); inet_csk_listen_start() 804 sk->sk_state = TCP_LISTEN; inet_csk_listen_start() 805 if (!sk->sk_prot->get_port(sk, inet->inet_num)) { inet_csk_listen_start() 808 sk_dst_reset(sk); inet_csk_listen_start() 809 sk->sk_prot->hash(sk); inet_csk_listen_start() 814 sk->sk_state = TCP_CLOSE; inet_csk_listen_start() 824 void inet_csk_listen_stop(struct sock *sk) inet_csk_listen_stop() argument 826 struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_listen_stop() 845 struct sock *child = req->sk; inet_csk_listen_stop() 854 sk->sk_prot->disconnect(child, O_NONBLOCK); inet_csk_listen_stop() 858 percpu_counter_inc(sk->sk_prot->orphan_count); inet_csk_listen_stop() 860 if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { inet_csk_listen_stop() 862 BUG_ON(sk != req->rsk_listener); inet_csk_listen_stop() 878 sk_acceptq_removed(sk); inet_csk_listen_stop() 892 WARN_ON(sk->sk_ack_backlog); inet_csk_listen_stop() 896 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) inet_csk_addr2sockaddr() argument 899 const struct inet_sock *inet = inet_sk(sk); inet_csk_addr2sockaddr() 908 int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, inet_csk_compat_getsockopt() argument 911 const struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_compat_getsockopt() 914 return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, inet_csk_compat_getsockopt() 916 return icsk->icsk_af_ops->getsockopt(sk, level, optname, inet_csk_compat_getsockopt() 921 int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, inet_csk_compat_setsockopt() argument 924 const struct inet_connection_sock *icsk = inet_csk(sk); inet_csk_compat_setsockopt() 927 return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, inet_csk_compat_setsockopt() 929 return icsk->icsk_af_ops->setsockopt(sk, level, optname, inet_csk_compat_setsockopt() 935 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl) inet_csk_rebuild_route() argument 937 const struct inet_sock *inet = inet_sk(sk); inet_csk_rebuild_route() 948 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet_csk_rebuild_route() 950 inet->inet_sport, sk->sk_protocol, inet_csk_rebuild_route() 951 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); inet_csk_rebuild_route() 955 sk_setup_caps(sk, &rt->dst); inet_csk_rebuild_route() 961 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu) inet_csk_update_pmtu() argument 963 struct dst_entry *dst = __sk_dst_check(sk, 0); inet_csk_update_pmtu() 964 struct inet_sock *inet = inet_sk(sk); inet_csk_update_pmtu() 967 dst = inet_csk_rebuild_route(sk, &inet->cork.fl); inet_csk_update_pmtu() 971 dst->ops->update_pmtu(dst, sk, NULL, mtu); 
inet_csk_update_pmtu() 973 dst = __sk_dst_check(sk, 0); inet_csk_update_pmtu() 975 dst = inet_csk_rebuild_route(sk, &inet->cork.fl); inet_csk_update_pmtu()
|
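On the passive side, inet_csk_listen_start() and inet_csk_accept() above back listen(2) and accept(2): get_port() re-runs inet_csk_bind_conflict() against every socket hashed on the port, and a blocking accept() sleeps in inet_csk_wait_for_connect() until a request has been promoted to a full child socket. A minimal userspace sketch of that path (port 8080 chosen arbitrarily):

        /* sketch: listen(2) -> inet_csk_listen_start(),
         * accept(2) -> inet_csk_accept()/inet_csk_wait_for_connect() */
        #include <netinet/in.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int main(void)
        {
                struct sockaddr_in sa = { .sin_family = AF_INET,
                                          .sin_port = htons(8080) };
                int fd = socket(AF_INET, SOCK_STREAM, 0);
                int c;

                sa.sin_addr.s_addr = htonl(INADDR_ANY);
                bind(fd, (struct sockaddr *)&sa, sizeof(sa)); /* inet_csk_get_port() */
                listen(fd, 16);             /* sk->sk_state = TCP_LISTEN */
                c = accept(fd, NULL, NULL); /* may sleep in inet_csk_wait_for_connect() */
                if (c >= 0)
                        close(c);
                close(fd);                  /* listener: inet_csk_listen_stop() */
                return 0;
        }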
H A D | tcp_ipv4.c | 108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) tcp_twsk_unique() argument 111 struct tcp_sock *tp = tcp_sk(sk); tcp_twsk_unique() 141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) tcp_v4_connect() argument 144 struct inet_sock *inet = inet_sk(sk); tcp_v4_connect() 145 struct tcp_sock *tp = tcp_sk(sk); tcp_v4_connect() 161 sock_owned_by_user(sk)); tcp_v4_connect() 172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, tcp_v4_connect() 174 orig_sport, orig_dport, sk); tcp_v4_connect() 178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); tcp_v4_connect() 192 sk_rcv_saddr_set(sk, inet->inet_saddr); tcp_v4_connect() 204 tcp_fetch_timewait_stamp(sk, &rt->dst); tcp_v4_connect() 207 sk_daddr_set(sk, daddr); tcp_v4_connect() 209 inet_csk(sk)->icsk_ext_hdr_len = 0; tcp_v4_connect() 211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; tcp_v4_connect() 220 tcp_set_state(sk, TCP_SYN_SENT); tcp_v4_connect() 221 err = inet_hash_connect(&tcp_death_row, sk); tcp_v4_connect() 225 inet_set_txhash(sk); tcp_v4_connect() 228 inet->inet_sport, inet->inet_dport, sk); tcp_v4_connect() 235 sk->sk_gso_type = SKB_GSO_TCPV4; tcp_v4_connect() 236 sk_setup_caps(sk, &rt->dst); tcp_v4_connect() 246 err = tcp_connect(sk); tcp_v4_connect() 259 tcp_set_state(sk, TCP_CLOSE); tcp_v4_connect() 261 sk->sk_route_caps = 0; tcp_v4_connect() 272 void tcp_v4_mtu_reduced(struct sock *sk) tcp_v4_mtu_reduced() argument 275 struct inet_sock *inet = inet_sk(sk); tcp_v4_mtu_reduced() 276 u32 mtu = tcp_sk(sk)->mtu_info; tcp_v4_mtu_reduced() 278 dst = inet_csk_update_pmtu(sk, mtu); tcp_v4_mtu_reduced() 285 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) tcp_v4_mtu_reduced() 286 sk->sk_err_soft = EMSGSIZE; tcp_v4_mtu_reduced() 291 ip_sk_accept_pmtu(sk) && tcp_v4_mtu_reduced() 292 inet_csk(sk)->icsk_pmtu_cookie > mtu) { tcp_v4_mtu_reduced() 293 tcp_sync_mss(sk, mtu); tcp_v4_mtu_reduced() 300 tcp_simple_retransmit(sk); tcp_v4_mtu_reduced() 305 static void do_redirect(struct sk_buff *skb, struct sock *sk) do_redirect() argument 307 struct dst_entry *dst = __sk_dst_check(sk, 0); do_redirect() 310 dst->ops->redirect(dst, sk, skb); do_redirect() 315 void tcp_req_err(struct sock *sk, u32 seq) tcp_req_err() argument 317 struct request_sock *req = inet_reqsk(sk); tcp_req_err() 318 struct net *net = sock_net(sk); tcp_req_err() 323 WARN_ON(req->sk); tcp_req_err() 366 struct sock *sk; tcp_v4_err() local 374 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr, tcp_v4_err() 377 if (!sk) { tcp_v4_err() 381 if (sk->sk_state == TCP_TIME_WAIT) { tcp_v4_err() 382 inet_twsk_put(inet_twsk(sk)); tcp_v4_err() 386 if (sk->sk_state == TCP_NEW_SYN_RECV) tcp_v4_err() 387 return tcp_req_err(sk, seq); tcp_v4_err() 389 bh_lock_sock(sk); tcp_v4_err() 395 if (sock_owned_by_user(sk)) { tcp_v4_err() 399 if (sk->sk_state == TCP_CLOSE) tcp_v4_err() 402 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { tcp_v4_err() 407 icsk = inet_csk(sk); tcp_v4_err() 408 tp = tcp_sk(sk); tcp_v4_err() 412 if (sk->sk_state != TCP_LISTEN && tcp_v4_err() 420 do_redirect(icmp_skb, sk); tcp_v4_err() 437 if (sk->sk_state == TCP_LISTEN) tcp_v4_err() 441 if (!sock_owned_by_user(sk)) { tcp_v4_err() 442 tcp_v4_mtu_reduced(sk); tcp_v4_err() 445 sock_hold(sk); tcp_v4_err() 459 if (sock_owned_by_user(sk)) tcp_v4_err() 467 skb = tcp_write_queue_head(sk); tcp_v4_err() 475 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, tcp_v4_err() 480 tcp_retransmit_timer(sk); tcp_v4_err() 491 switch (sk->sk_state) { tcp_v4_err() 497 if 
(fastopen && !fastopen->sk) tcp_v4_err() 500 if (!sock_owned_by_user(sk)) { tcp_v4_err() 501 sk->sk_err = err; tcp_v4_err() 503 sk->sk_error_report(sk); tcp_v4_err() 505 tcp_done(sk); tcp_v4_err() 507 sk->sk_err_soft = err; tcp_v4_err() 528 inet = inet_sk(sk); tcp_v4_err() 529 if (!sock_owned_by_user(sk) && inet->recverr) { tcp_v4_err() 530 sk->sk_err = err; tcp_v4_err() 531 sk->sk_error_report(sk); tcp_v4_err() 533 sk->sk_err_soft = err; tcp_v4_err() 537 bh_unlock_sock(sk); tcp_v4_err() 538 sock_put(sk); tcp_v4_err() 558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) tcp_v4_send_check() argument 560 const struct inet_sock *inet = inet_sk(sk); tcp_v4_send_check() 579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) tcp_v4_send_reset() argument 602 /* If sk not NULL, it means we did a successful lookup and incoming tcp_v4_send_reset() 605 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) tcp_v4_send_reset() 627 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); tcp_v4_send_reset() 630 if (!sk && hash_location) { tcp_v4_send_reset() 655 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *) tcp_v4_send_reset() 678 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; tcp_v4_send_reset() 683 if (sk) tcp_v4_send_reset() 684 arg.bound_dev_if = sk->sk_bound_dev_if; tcp_v4_send_reset() 780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) tcp_v4_timewait_ack() argument 782 struct inet_timewait_sock *tw = inet_twsk(sk); tcp_v4_timewait_ack() 783 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); tcp_v4_timewait_ack() 785 tcp_v4_send_ack(sock_net(sk), skb, tcp_v4_timewait_ack() 799 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, tcp_v4_reqsk_send_ack() argument 802 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV tcp_v4_reqsk_send_ack() 803 * sk->sk_state == TCP_SYN_RECV -> for Fast Open. tcp_v4_reqsk_send_ack() 805 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : tcp_v4_reqsk_send_ack() 806 tcp_sk(sk)->snd_nxt; tcp_v4_reqsk_send_ack() 808 tcp_v4_send_ack(sock_net(sk), skb, seq, tcp_v4_reqsk_send_ack() 813 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, tcp_v4_reqsk_send_ack() 824 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, tcp_v4_send_synack() argument 836 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) tcp_v4_send_synack() 839 skb = tcp_make_synack(sk, dst, req, foc); tcp_v4_send_synack() 845 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, tcp_v4_send_synack() 867 * We need to maintain these in the sk structure. 
871 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, tcp_md5_do_lookup() argument 875 const struct tcp_sock *tp = tcp_sk(sk); tcp_md5_do_lookup() 882 sock_owned_by_user(sk) || tcp_md5_do_lookup() 883 lockdep_is_held(&sk->sk_lock.slock)); tcp_md5_do_lookup() 900 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, tcp_v4_md5_lookup() argument 906 return tcp_md5_do_lookup(sk, addr, AF_INET); tcp_v4_md5_lookup() 911 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, tcp_md5_do_add() argument 916 struct tcp_sock *tp = tcp_sk(sk); tcp_md5_do_add() 919 key = tcp_md5_do_lookup(sk, addr, family); tcp_md5_do_add() 928 sock_owned_by_user(sk) || tcp_md5_do_add() 929 lockdep_is_held(&sk->sk_lock.slock)); tcp_md5_do_add() 935 sk_nocaps_add(sk, NETIF_F_GSO_MASK); tcp_md5_do_add() 940 key = sock_kmalloc(sk, sizeof(*key), gfp); tcp_md5_do_add() 944 sock_kfree_s(sk, key, sizeof(*key)); tcp_md5_do_add() 959 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) tcp_md5_do_del() argument 963 key = tcp_md5_do_lookup(sk, addr, family); tcp_md5_do_del() 967 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); tcp_md5_do_del() 973 static void tcp_clear_md5_list(struct sock *sk) tcp_clear_md5_list() argument 975 struct tcp_sock *tp = tcp_sk(sk); tcp_clear_md5_list() 984 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); tcp_clear_md5_list() 989 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, tcp_v4_parse_md5_keys() argument 1005 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, tcp_v4_parse_md5_keys() 1011 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, tcp_v4_parse_md5_keys() 1072 const struct sock *sk, tcp_v4_md5_hash_skb() 1080 if (sk) { /* valid for establish/request sockets */ tcp_v4_md5_hash_skb() 1081 saddr = sk->sk_rcv_saddr; tcp_v4_md5_hash_skb() 1082 daddr = sk->sk_daddr; tcp_v4_md5_hash_skb() 1120 static bool tcp_v4_inbound_md5_hash(struct sock *sk, tcp_v4_inbound_md5_hash() argument 1138 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr, tcp_v4_inbound_md5_hash() 1147 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); tcp_v4_inbound_md5_hash() 1152 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); tcp_v4_inbound_md5_hash() 1186 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl, tcp_v4_route_req() argument 1190 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req); tcp_v4_route_req() 1228 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_v4_conn_request() argument 1235 &tcp_request_sock_ipv4_ops, sk, skb); tcp_v4_conn_request() 1238 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); tcp_v4_conn_request() 1248 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, tcp_v4_syn_recv_sock() argument 1261 if (sk_acceptq_is_full(sk)) tcp_v4_syn_recv_sock() 1264 newsk = tcp_create_openreq_child(sk, req, skb); tcp_v4_syn_recv_sock() 1290 dst = inet_csk_route_child_sock(sk, newsk, req); tcp_v4_syn_recv_sock() 1302 if (tcp_sk(sk)->rx_opt.user_mss && tcp_v4_syn_recv_sock() 1303 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) tcp_v4_syn_recv_sock() 1304 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_v4_syn_recv_sock() 1310 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, tcp_v4_syn_recv_sock() 1325 if (__inet_inherit_port(sk, newsk) < 0) tcp_v4_syn_recv_sock() 1332 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); tcp_v4_syn_recv_sock() 1336 
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); tcp_v4_syn_recv_sock() 1345 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) tcp_v4_hnd_req() argument 1352 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); tcp_v4_hnd_req() 1354 nsk = tcp_check_req(sk, skb, req, false); tcp_v4_hnd_req() 1355 if (!nsk || nsk == sk) tcp_v4_hnd_req() 1360 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr, tcp_v4_hnd_req() 1374 sk = cookie_v4_check(sk, skb); tcp_v4_hnd_req() 1376 return sk; tcp_v4_hnd_req() 1387 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) tcp_v4_do_rcv() argument 1391 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ tcp_v4_do_rcv() 1392 struct dst_entry *dst = sk->sk_rx_dst; tcp_v4_do_rcv() 1394 sock_rps_save_rxhash(sk, skb); tcp_v4_do_rcv() 1395 sk_mark_napi_id(sk, skb); tcp_v4_do_rcv() 1397 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || tcp_v4_do_rcv() 1400 sk->sk_rx_dst = NULL; tcp_v4_do_rcv() 1403 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); tcp_v4_do_rcv() 1410 if (sk->sk_state == TCP_LISTEN) { tcp_v4_do_rcv() 1411 struct sock *nsk = tcp_v4_hnd_req(sk, skb); tcp_v4_do_rcv() 1415 if (nsk != sk) { tcp_v4_do_rcv() 1417 sk_mark_napi_id(sk, skb); tcp_v4_do_rcv() 1418 if (tcp_child_process(sk, nsk, skb)) { tcp_v4_do_rcv() 1425 sock_rps_save_rxhash(sk, skb); tcp_v4_do_rcv() 1427 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { tcp_v4_do_rcv() 1428 rsk = sk; tcp_v4_do_rcv() 1438 * gcc suffers from register pressure on the x86, sk (in %ebx) tcp_v4_do_rcv() 1445 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); tcp_v4_do_rcv() 1446 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); tcp_v4_do_rcv() 1455 struct sock *sk; tcp_v4_early_demux() local 1469 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, tcp_v4_early_demux() 1473 if (sk) { tcp_v4_early_demux() 1474 skb->sk = sk; tcp_v4_early_demux() 1476 if (sk_fullsock(sk)) { tcp_v4_early_demux() 1477 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); tcp_v4_early_demux() 1482 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) tcp_v4_early_demux() 1495 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) tcp_prequeue() argument 1497 struct tcp_sock *tp = tcp_sk(sk); tcp_prequeue() 1508 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst tcp_prequeue() 1512 if (likely(sk->sk_rx_dst)) tcp_prequeue() 1519 if (tp->ucopy.memory > sk->sk_rcvbuf) { tcp_prequeue() 1522 BUG_ON(sock_owned_by_user(sk)); tcp_prequeue() 1525 sk_backlog_rcv(sk, skb1); tcp_prequeue() 1526 NET_INC_STATS_BH(sock_net(sk), tcp_prequeue() 1532 wake_up_interruptible_sync_poll(sk_sleep(sk), tcp_prequeue() 1534 if (!inet_csk_ack_scheduled(sk)) tcp_prequeue() 1535 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, tcp_prequeue() 1536 (3 * tcp_rto_min(sk)) / 4, tcp_prequeue() 1551 struct sock *sk; tcp_v4_rcv() local 1597 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); tcp_v4_rcv() 1598 if (!sk) tcp_v4_rcv() 1602 if (sk->sk_state == TCP_TIME_WAIT) tcp_v4_rcv() 1605 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { tcp_v4_rcv() 1610 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) tcp_v4_rcv() 1620 if (tcp_v4_inbound_md5_hash(sk, skb)) tcp_v4_rcv() 1626 if (sk_filter(sk, skb)) tcp_v4_rcv() 1629 sk_incoming_cpu_update(sk); tcp_v4_rcv() 1632 bh_lock_sock_nested(sk); tcp_v4_rcv() 1634 if (!sock_owned_by_user(sk)) { tcp_v4_rcv() 1635 if (!tcp_prequeue(sk, skb)) tcp_v4_rcv() 1636 ret = tcp_v4_do_rcv(sk, skb); tcp_v4_rcv() 1637 } else if 
(unlikely(sk_add_backlog(sk, skb, tcp_v4_rcv() 1638 sk->sk_rcvbuf + sk->sk_sndbuf))) { tcp_v4_rcv() 1639 bh_unlock_sock(sk); tcp_v4_rcv() 1643 bh_unlock_sock(sk); tcp_v4_rcv() 1645 sock_put(sk); tcp_v4_rcv() 1668 sock_put(sk); tcp_v4_rcv() 1673 inet_twsk_put(inet_twsk(sk)); tcp_v4_rcv() 1678 inet_twsk_put(inet_twsk(sk)); tcp_v4_rcv() 1682 inet_twsk_put(inet_twsk(sk)); tcp_v4_rcv() 1685 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { tcp_v4_rcv() 1693 inet_twsk_deschedule(inet_twsk(sk)); tcp_v4_rcv() 1694 inet_twsk_put(inet_twsk(sk)); tcp_v4_rcv() 1695 sk = sk2; tcp_v4_rcv() 1701 tcp_v4_timewait_ack(sk, skb); tcp_v4_rcv() 1716 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) inet_sk_rx_dst_set() argument 1721 sk->sk_rx_dst = dst; inet_sk_rx_dst_set() 1722 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; inet_sk_rx_dst_set() 1759 static int tcp_v4_init_sock(struct sock *sk) tcp_v4_init_sock() argument 1761 struct inet_connection_sock *icsk = inet_csk(sk); tcp_v4_init_sock() 1763 tcp_init_sock(sk); tcp_v4_init_sock() 1768 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific; tcp_v4_init_sock() 1774 void tcp_v4_destroy_sock(struct sock *sk) tcp_v4_destroy_sock() argument 1776 struct tcp_sock *tp = tcp_sk(sk); tcp_v4_destroy_sock() 1778 tcp_clear_xmit_timers(sk); tcp_v4_destroy_sock() 1780 tcp_cleanup_congestion_control(sk); tcp_v4_destroy_sock() 1783 tcp_write_queue_purge(sk); tcp_v4_destroy_sock() 1791 tcp_clear_md5_list(sk); tcp_v4_destroy_sock() 1801 if (inet_csk(sk)->icsk_bind_hash) tcp_v4_destroy_sock() 1802 inet_put_port(sk); tcp_v4_destroy_sock() 1809 sk_sockets_allocated_dec(sk); tcp_v4_destroy_sock() 1810 sock_release_memcg(sk); tcp_v4_destroy_sock() 1826 struct sock *sk = cur; listening_get_next() local 1831 if (!sk) { listening_get_next() 1834 sk = sk_nulls_head(&ilb->head); listening_get_next() 1860 sk = sk_nulls_next(st->syn_wait_sk); listening_get_next() 1864 icsk = inet_csk(sk); listening_get_next() 1869 sk = sk_nulls_next(sk); listening_get_next() 1872 sk_nulls_for_each_from(sk, node) { sk_nulls_for_each_from() 1873 if (!net_eq(sock_net(sk), net)) sk_nulls_for_each_from() 1875 if (sk->sk_family == st->family) { sk_nulls_for_each_from() 1876 cur = sk; sk_nulls_for_each_from() 1879 icsk = inet_csk(sk); sk_nulls_for_each_from() 1883 st->uid = sock_i_uid(sk); sk_nulls_for_each_from() 1884 st->syn_wait_sk = sk; sk_nulls_for_each_from() 1896 sk = sk_nulls_head(&ilb->head); 1937 struct sock *sk; established_get_first() local 1946 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { established_get_first() 1947 if (sk->sk_family != st->family || established_get_first() 1948 !net_eq(sock_net(sk), net)) { established_get_first() 1951 rc = sk; established_get_first() 1962 struct sock *sk = cur; established_get_next() local 1970 sk = sk_nulls_next(sk); established_get_next() 1972 sk_nulls_for_each_from(sk, node) { sk_nulls_for_each_from() 1973 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) sk_nulls_for_each_from() 1974 return sk; sk_nulls_for_each_from() 2187 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) get_tcp4_sock() argument 2191 const struct tcp_sock *tp = tcp_sk(sk); get_tcp4_sock() 2192 const struct inet_connection_sock *icsk = inet_csk(sk); get_tcp4_sock() 2193 const struct inet_sock *inet = inet_sk(sk); get_tcp4_sock() 2209 } else if (timer_pending(&sk->sk_timer)) { get_tcp4_sock() 2211 timer_expires = sk->sk_timer.expires; get_tcp4_sock() 2217 if (sk->sk_state == TCP_LISTEN) get_tcp4_sock() 2218 
rx_queue = sk->sk_ack_backlog; get_tcp4_sock() 2227 i, src, srcp, dest, destp, sk->sk_state, get_tcp4_sock() 2233 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), get_tcp4_sock() 2235 sock_i_ino(sk), get_tcp4_sock() 2236 atomic_read(&sk->sk_refcnt), sk, get_tcp4_sock() 2241 sk->sk_state == TCP_LISTEN ? get_tcp4_sock() 2270 struct sock *sk = v; tcp4_seq_show() local 2284 if (sk->sk_state == TCP_TIME_WAIT) tcp4_seq_show() 2408 struct sock *sk; for_each_possible_cpu() local 2410 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, for_each_possible_cpu() 2414 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; for_each_possible_cpu() 1071 tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, const struct sock *sk, const struct sk_buff *skb) tcp_v4_md5_hash_skb() argument
|
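The MD5 helpers in tcp_ipv4.c above (tcp_v4_parse_md5_keys(), tcp_md5_do_add(), tcp_md5_do_del()) sit behind the TCP_MD5SIG socket option used for RFC 2385 signing of BGP sessions. A hedged sketch of the userspace side, assuming a hypothetical peer address and key (struct tcp_md5sig comes from <linux/tcp.h>; a zero tcpm_keylen deletes the key):

        /* sketch: installs an RFC 2385 TCP-MD5 key for one peer; ends up
         * in tcp_md5_do_add() via tcp_v4_parse_md5_keys() */
        #include <arpa/inet.h>
        #include <linux/tcp.h>
        #include <netinet/in.h>
        #include <string.h>
        #include <sys/socket.h>

        static int install_md5_key(int fd, const char *peer, const char *key)
        {
                struct tcp_md5sig md5;
                struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

                memset(&md5, 0, sizeof(md5));
                sin->sin_family = AF_INET;
                inet_pton(AF_INET, peer, &sin->sin_addr);
                md5.tcpm_keylen = strlen(key);
                memcpy(md5.tcpm_key, key, md5.tcpm_keylen);
                /* tcpm_keylen == 0 would delete the key (tcp_md5_do_del()) */
                return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
                                  &md5, sizeof(md5));
        }

Once a key is installed, every inbound segment on the socket is checked in tcp_v4_inbound_md5_hash(), which is why the fragments above bump the TCPMD5NOTFOUND and TCPMD5UNEXPECTED MIB counters on mismatch.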
H A D | tcp_input.c | 130 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) tcp_measure_rcv_mss() argument 132 struct inet_connection_sock *icsk = inet_csk(sk); tcp_measure_rcv_mss() 163 len -= tcp_sk(sk)->tcp_header_len; tcp_measure_rcv_mss() 176 static void tcp_incr_quickack(struct sock *sk) tcp_incr_quickack() argument 178 struct inet_connection_sock *icsk = inet_csk(sk); tcp_incr_quickack() 179 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); tcp_incr_quickack() 187 static void tcp_enter_quickack_mode(struct sock *sk) tcp_enter_quickack_mode() argument 189 struct inet_connection_sock *icsk = inet_csk(sk); tcp_enter_quickack_mode() 190 tcp_incr_quickack(sk); tcp_enter_quickack_mode() 199 static inline bool tcp_in_quickack_mode(const struct sock *sk) tcp_in_quickack_mode() argument 201 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_in_quickack_mode() 280 * 1. Tuning sk->sk_sndbuf, when connection enters established state. 283 static void tcp_sndbuf_expand(struct sock *sk) tcp_sndbuf_expand() argument 285 const struct tcp_sock *tp = tcp_sk(sk); tcp_sndbuf_expand() 308 if (sk->sk_sndbuf < sndmem) tcp_sndbuf_expand() 309 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); tcp_sndbuf_expand() 338 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) __tcp_grow_window() argument 340 struct tcp_sock *tp = tcp_sk(sk); __tcp_grow_window() 347 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; __tcp_grow_window() 355 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) tcp_grow_window() argument 357 struct tcp_sock *tp = tcp_sk(sk); tcp_grow_window() 361 (int)tp->rcv_ssthresh < tcp_space(sk) && tcp_grow_window() 362 !sk_under_memory_pressure(sk)) { tcp_grow_window() 371 incr = __tcp_grow_window(sk, skb); tcp_grow_window() 377 inet_csk(sk)->icsk_ack.quick |= 1; tcp_grow_window() 383 static void tcp_fixup_rcvbuf(struct sock *sk) tcp_fixup_rcvbuf() argument 385 u32 mss = tcp_sk(sk)->advmss; tcp_fixup_rcvbuf() 397 if (sk->sk_rcvbuf < rcvmem) tcp_fixup_rcvbuf() 398 sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); tcp_fixup_rcvbuf() 404 void tcp_init_buffer_space(struct sock *sk) tcp_init_buffer_space() argument 406 struct tcp_sock *tp = tcp_sk(sk); tcp_init_buffer_space() 409 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) tcp_init_buffer_space() 410 tcp_fixup_rcvbuf(sk); tcp_init_buffer_space() 411 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) tcp_init_buffer_space() 412 tcp_sndbuf_expand(sk); tcp_init_buffer_space() 418 maxwin = tcp_full_space(sk); tcp_init_buffer_space() 440 static void tcp_clamp_window(struct sock *sk) tcp_clamp_window() argument 442 struct tcp_sock *tp = tcp_sk(sk); tcp_clamp_window() 443 struct inet_connection_sock *icsk = inet_csk(sk); tcp_clamp_window() 447 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && tcp_clamp_window() 448 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && tcp_clamp_window() 449 !sk_under_memory_pressure(sk) && tcp_clamp_window() 450 sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { tcp_clamp_window() 451 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), tcp_clamp_window() 454 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) tcp_clamp_window() 465 void tcp_initialize_rcv_mss(struct sock *sk) tcp_initialize_rcv_mss() argument 467 const struct tcp_sock *tp = tcp_sk(sk); tcp_initialize_rcv_mss() 474 inet_csk(sk)->icsk_ack.rcv_mss = hint; tcp_initialize_rcv_mss() 538 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, tcp_rcv_rtt_measure_ts() argument 541 struct 
tcp_sock *tp = tcp_sk(sk); tcp_rcv_rtt_measure_ts() 544 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) tcp_rcv_rtt_measure_ts() 552 void tcp_rcv_space_adjust(struct sock *sk) tcp_rcv_space_adjust() argument 554 struct tcp_sock *tp = tcp_sk(sk); tcp_rcv_space_adjust() 577 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { tcp_rcv_space_adjust() 604 if (rcvbuf > sk->sk_rcvbuf) { tcp_rcv_space_adjust() 605 sk->sk_rcvbuf = rcvbuf; tcp_rcv_space_adjust() 628 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) tcp_event_data_recv() argument 630 struct tcp_sock *tp = tcp_sk(sk); tcp_event_data_recv() 631 struct inet_connection_sock *icsk = inet_csk(sk); tcp_event_data_recv() 634 inet_csk_schedule_ack(sk); tcp_event_data_recv() 636 tcp_measure_rcv_mss(sk, skb); tcp_event_data_recv() 646 tcp_incr_quickack(sk); tcp_event_data_recv() 662 tcp_incr_quickack(sk); tcp_event_data_recv() 663 sk_mem_reclaim(sk); tcp_event_data_recv() 671 tcp_grow_window(sk, skb); tcp_event_data_recv() 683 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) tcp_rtt_estimator() argument 685 struct tcp_sock *tp = tcp_sk(sk); tcp_rtt_estimator() 734 tp->mdev_max_us = tcp_rto_min_us(sk); tcp_rtt_estimator() 740 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); tcp_rtt_estimator() 753 static void tcp_update_pacing_rate(struct sock *sk) tcp_update_pacing_rate() argument 755 const struct tcp_sock *tp = tcp_sk(sk); tcp_update_pacing_rate() 770 ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, tcp_update_pacing_rate() 771 sk->sk_max_pacing_rate); tcp_update_pacing_rate() 777 static void tcp_set_rto(struct sock *sk) tcp_set_rto() argument 779 const struct tcp_sock *tp = tcp_sk(sk); tcp_set_rto() 790 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); tcp_set_rto() 801 tcp_bound_rto(sk); tcp_set_rto() 831 static void tcp_update_reordering(struct sock *sk, const int metric, tcp_update_reordering() argument 834 struct tcp_sock *tp = tcp_sk(sk); tcp_update_reordering() 850 NET_INC_STATS_BH(sock_net(sk), mib_idx); tcp_update_reordering() 853 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, tcp_update_reordering() 1040 static void tcp_mark_lost_retrans(struct sock *sk) tcp_mark_lost_retrans() argument 1042 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_mark_lost_retrans() 1043 struct tcp_sock *tp = tcp_sk(sk); tcp_mark_lost_retrans() 1054 tcp_for_write_queue(skb, sk) { tcp_for_write_queue() 1057 if (skb == tcp_send_head(sk)) tcp_for_write_queue() 1083 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); tcp_for_write_queue() 1095 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, tcp_check_dsack() argument 1099 struct tcp_sock *tp = tcp_sk(sk); tcp_check_dsack() 1107 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); tcp_check_dsack() 1116 NET_INC_STATS_BH(sock_net(sk), tcp_check_dsack() 1145 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, tcp_match_skb_to_sack() argument 1183 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); tcp_match_skb_to_sack() 1192 static u8 tcp_sacktag_one(struct sock *sk, tcp_sacktag_one() argument 1198 struct tcp_sock *tp = tcp_sk(sk); tcp_sacktag_one() 1282 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, tcp_shifted_skb() argument 1287 struct tcp_sock *tp = tcp_sk(sk); tcp_shifted_skb() 1288 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); tcp_shifted_skb() 1300 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, tcp_shifted_skb() 1321 skb_shinfo(prev)->gso_type = sk->sk_gso_type; 
tcp_shifted_skb() 1335 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); tcp_shifted_skb() 1352 if (skb == tcp_highest_sack(sk)) tcp_shifted_skb() 1353 tcp_advance_highest_sack(sk, skb); tcp_shifted_skb() 1355 tcp_unlink_write_queue(skb, sk); tcp_shifted_skb() 1356 sk_wmem_free_skb(sk, skb); tcp_shifted_skb() 1358 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); tcp_shifted_skb() 1380 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, tcp_shift_skb_data() argument 1385 struct tcp_sock *tp = tcp_sk(sk); tcp_shift_skb_data() 1392 if (!sk_can_gso(sk)) tcp_shift_skb_data() 1406 if (unlikely(skb == tcp_write_queue_head(sk))) tcp_shift_skb_data() 1408 prev = tcp_write_queue_prev(sk, skb); tcp_shift_skb_data() 1484 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) tcp_shift_skb_data() 1490 if (prev == tcp_write_queue_tail(sk)) tcp_shift_skb_data() 1492 skb = tcp_write_queue_next(sk, prev); tcp_shift_skb_data() 1495 (skb == tcp_send_head(sk)) || tcp_shift_skb_data() 1503 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); tcp_shift_skb_data() 1514 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); tcp_shift_skb_data() 1518 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, tcp_sacktag_walk() argument 1524 struct tcp_sock *tp = tcp_sk(sk); tcp_sacktag_walk() 1527 tcp_for_write_queue_from(skb, sk) { tcp_for_write_queue_from() 1531 if (skb == tcp_send_head(sk)) tcp_for_write_queue_from() 1540 in_sack = tcp_match_skb_to_sack(sk, skb, tcp_for_write_queue_from() 1552 tmp = tcp_shift_skb_data(sk, skb, state, tcp_for_write_queue_from() 1562 in_sack = tcp_match_skb_to_sack(sk, skb, tcp_for_write_queue_from() 1573 tcp_sacktag_one(sk, tcp_for_write_queue_from() 1584 tcp_advance_highest_sack(sk, skb); tcp_for_write_queue_from() 1595 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, tcp_sacktag_skip() argument 1599 tcp_for_write_queue_from(skb, sk) { tcp_for_write_queue_from() 1600 if (skb == tcp_send_head(sk)) tcp_for_write_queue_from() 1612 struct sock *sk, tcp_maybe_skipping_dsack() 1621 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); tcp_maybe_skipping_dsack() 1622 skb = tcp_sacktag_walk(skb, sk, NULL, state, tcp_maybe_skipping_dsack() 1636 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, tcp_sacktag_write_queue() argument 1639 struct tcp_sock *tp = tcp_sk(sk); tcp_sacktag_write_queue() 1660 tcp_highest_sack_reset(sk); tcp_sacktag_write_queue() 1663 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, tcp_sacktag_write_queue() 1704 NET_INC_STATS_BH(sock_net(sk), mib_idx); tcp_sacktag_write_queue() 1730 skb = tcp_write_queue_head(sk); tcp_sacktag_write_queue() 1765 skb = tcp_sacktag_skip(skb, sk, &state, tcp_sacktag_write_queue() 1767 skb = tcp_sacktag_walk(skb, sk, next_dup, tcp_sacktag_write_queue() 1778 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, tcp_sacktag_write_queue() 1785 skb = tcp_highest_sack(sk); tcp_sacktag_write_queue() 1793 skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq); tcp_sacktag_write_queue() 1800 skb = tcp_highest_sack(sk); tcp_sacktag_write_queue() 1805 skb = tcp_sacktag_skip(skb, sk, &state, start_seq); tcp_sacktag_write_queue() 1808 skb = tcp_sacktag_walk(skb, sk, next_dup, &state, tcp_sacktag_write_queue() 1824 ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) tcp_sacktag_write_queue() 1825 tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); tcp_sacktag_write_queue() 1827 
tcp_mark_lost_retrans(sk); tcp_sacktag_write_queue() 1862 static void tcp_check_reno_reordering(struct sock *sk, const int addend) tcp_check_reno_reordering() argument 1864 struct tcp_sock *tp = tcp_sk(sk); tcp_check_reno_reordering() 1866 tcp_update_reordering(sk, tp->packets_out + addend, 0); tcp_check_reno_reordering() 1871 static void tcp_add_reno_sack(struct sock *sk) tcp_add_reno_sack() argument 1873 struct tcp_sock *tp = tcp_sk(sk); tcp_add_reno_sack() 1875 tcp_check_reno_reordering(sk, 0); tcp_add_reno_sack() 1881 static void tcp_remove_reno_sacks(struct sock *sk, int acked) tcp_remove_reno_sacks() argument 1883 struct tcp_sock *tp = tcp_sk(sk); tcp_remove_reno_sacks() 1892 tcp_check_reno_reordering(sk, acked); tcp_remove_reno_sacks() 1922 void tcp_enter_loss(struct sock *sk) tcp_enter_loss() argument 1924 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_enter_loss() 1925 struct tcp_sock *tp = tcp_sk(sk); tcp_enter_loss() 1935 tp->prior_ssthresh = tcp_current_ssthresh(sk); tcp_enter_loss() 1936 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_enter_loss() 1937 tcp_ca_event(sk, CA_EVENT_LOSS); tcp_enter_loss() 1950 skb = tcp_write_queue_head(sk); tcp_enter_loss() 1953 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); tcp_enter_loss() 1959 tcp_for_write_queue(skb, sk) { tcp_for_write_queue() 1960 if (skb == tcp_send_head(sk)) tcp_for_write_queue() 1980 tcp_set_ca_state(sk, TCP_CA_Loss); 1990 !inet_csk(sk)->icsk_mtup.probe_size; 2003 static bool tcp_check_sack_reneging(struct sock *sk, int flag) tcp_check_sack_reneging() argument 2006 struct tcp_sock *tp = tcp_sk(sk); tcp_check_sack_reneging() 2010 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, tcp_check_sack_reneging() 2042 static bool tcp_pause_early_retransmit(struct sock *sk, int flag) tcp_pause_early_retransmit() argument 2044 struct tcp_sock *tp = tcp_sk(sk); tcp_pause_early_retransmit() 2058 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) tcp_pause_early_retransmit() 2061 inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, tcp_pause_early_retransmit() 2159 static bool tcp_time_to_recover(struct sock *sk, int flag) tcp_time_to_recover() argument 2161 struct tcp_sock *tp = tcp_sk(sk); tcp_time_to_recover() 2178 !tcp_may_send_now(sk)) { tcp_time_to_recover() 2192 tcp_is_sack(tp) && !tcp_send_head(sk)) tcp_time_to_recover() 2202 !tcp_may_send_now(sk)) tcp_time_to_recover() 2203 return !tcp_pause_early_retransmit(sk, flag); tcp_time_to_recover() 2214 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) tcp_mark_head_lost() argument 2216 struct tcp_sock *tp = tcp_sk(sk); tcp_mark_head_lost() 2229 if (mark_head && skb != tcp_write_queue_head(sk)) tcp_mark_head_lost() 2232 skb = tcp_write_queue_head(sk); tcp_mark_head_lost() 2236 tcp_for_write_queue_from(skb, sk) { tcp_for_write_queue_from() 2237 if (skb == tcp_send_head(sk)) tcp_for_write_queue_from() 2259 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, tcp_for_write_queue_from() 2276 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) tcp_update_scoreboard() argument 2278 struct tcp_sock *tp = tcp_sk(sk); tcp_update_scoreboard() 2281 tcp_mark_head_lost(sk, 1, 1); tcp_update_scoreboard() 2286 tcp_mark_head_lost(sk, lost, 0); tcp_update_scoreboard() 2290 tcp_mark_head_lost(sk, sacked_upto, 0); tcp_update_scoreboard() 2292 tcp_mark_head_lost(sk, 1, 1); tcp_update_scoreboard() 2332 static bool tcp_any_retrans_done(const struct sock *sk) tcp_any_retrans_done() argument 2334 const struct 
tcp_sock *tp = tcp_sk(sk); tcp_any_retrans_done() 2340 skb = tcp_write_queue_head(sk); tcp_any_retrans_done() 2348 static void DBGUNDO(struct sock *sk, const char *msg) DBGUNDO() argument 2350 struct tcp_sock *tp = tcp_sk(sk); DBGUNDO() 2351 struct inet_sock *inet = inet_sk(sk); DBGUNDO() 2353 if (sk->sk_family == AF_INET) { DBGUNDO() 2362 else if (sk->sk_family == AF_INET6) { DBGUNDO() 2363 struct ipv6_pinfo *np = inet6_sk(sk); DBGUNDO() 2377 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) tcp_undo_cwnd_reduction() argument 2379 struct tcp_sock *tp = tcp_sk(sk); tcp_undo_cwnd_reduction() 2384 tcp_for_write_queue(skb, sk) { tcp_for_write_queue() 2385 if (skb == tcp_send_head(sk)) tcp_for_write_queue() 2394 const struct inet_connection_sock *icsk = inet_csk(sk); 2397 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); 2418 static bool tcp_try_undo_recovery(struct sock *sk) tcp_try_undo_recovery() argument 2420 struct tcp_sock *tp = tcp_sk(sk); tcp_try_undo_recovery() 2428 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_try_undo_recovery() 2429 tcp_undo_cwnd_reduction(sk, false); tcp_try_undo_recovery() 2430 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) tcp_try_undo_recovery() 2435 NET_INC_STATS_BH(sock_net(sk), mib_idx); tcp_try_undo_recovery() 2442 if (!tcp_any_retrans_done(sk)) tcp_try_undo_recovery() 2446 tcp_set_ca_state(sk, TCP_CA_Open); tcp_try_undo_recovery() 2451 static bool tcp_try_undo_dsack(struct sock *sk) tcp_try_undo_dsack() argument 2453 struct tcp_sock *tp = tcp_sk(sk); tcp_try_undo_dsack() 2456 DBGUNDO(sk, "D-SACK"); tcp_try_undo_dsack() 2457 tcp_undo_cwnd_reduction(sk, false); tcp_try_undo_dsack() 2458 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); tcp_try_undo_dsack() 2465 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) tcp_try_undo_loss() argument 2467 struct tcp_sock *tp = tcp_sk(sk); tcp_try_undo_loss() 2470 tcp_undo_cwnd_reduction(sk, true); tcp_try_undo_loss() 2472 DBGUNDO(sk, "partial loss"); tcp_try_undo_loss() 2473 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); tcp_try_undo_loss() 2475 NET_INC_STATS_BH(sock_net(sk), tcp_try_undo_loss() 2477 inet_csk(sk)->icsk_retransmits = 0; tcp_try_undo_loss() 2479 tcp_set_ca_state(sk, TCP_CA_Open); tcp_try_undo_loss() 2495 static void tcp_init_cwnd_reduction(struct sock *sk) tcp_init_cwnd_reduction() argument 2497 struct tcp_sock *tp = tcp_sk(sk); tcp_init_cwnd_reduction() 2505 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); tcp_init_cwnd_reduction() 2509 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, tcp_cwnd_reduction() argument 2512 struct tcp_sock *tp = tcp_sk(sk); tcp_cwnd_reduction() 2533 static inline void tcp_end_cwnd_reduction(struct sock *sk) tcp_end_cwnd_reduction() argument 2535 struct tcp_sock *tp = tcp_sk(sk); tcp_end_cwnd_reduction() 2538 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tcp_end_cwnd_reduction() 2543 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); tcp_end_cwnd_reduction() 2547 void tcp_enter_cwr(struct sock *sk) tcp_enter_cwr() argument 2549 struct tcp_sock *tp = tcp_sk(sk); tcp_enter_cwr() 2552 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { tcp_enter_cwr() 2554 tcp_init_cwnd_reduction(sk); tcp_enter_cwr() 2555 tcp_set_ca_state(sk, TCP_CA_CWR); tcp_enter_cwr() 2559 static void tcp_try_keep_open(struct sock *sk) tcp_try_keep_open() argument 2561 struct tcp_sock *tp = tcp_sk(sk); tcp_try_keep_open() 2564 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) tcp_try_keep_open() 2567 if 
(inet_csk(sk)->icsk_ca_state != state) { tcp_try_keep_open() 2568 tcp_set_ca_state(sk, state); tcp_try_keep_open() 2573 static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) tcp_try_to_open() argument 2575 struct tcp_sock *tp = tcp_sk(sk); tcp_try_to_open() 2579 if (!tcp_any_retrans_done(sk)) tcp_try_to_open() 2583 tcp_enter_cwr(sk); tcp_try_to_open() 2585 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_to_open() 2586 tcp_try_keep_open(sk); tcp_try_to_open() 2588 tcp_cwnd_reduction(sk, prior_unsacked, 0); tcp_try_to_open() 2592 static void tcp_mtup_probe_failed(struct sock *sk) tcp_mtup_probe_failed() argument 2594 struct inet_connection_sock *icsk = inet_csk(sk); tcp_mtup_probe_failed() 2600 static void tcp_mtup_probe_success(struct sock *sk) tcp_mtup_probe_success() argument 2602 struct tcp_sock *tp = tcp_sk(sk); tcp_mtup_probe_success() 2603 struct inet_connection_sock *icsk = inet_csk(sk); tcp_mtup_probe_success() 2606 tp->prior_ssthresh = tcp_current_ssthresh(sk); tcp_mtup_probe_success() 2608 tcp_mss_to_mtu(sk, tp->mss_cache) / tcp_mtup_probe_success() 2612 tp->snd_ssthresh = tcp_current_ssthresh(sk); tcp_mtup_probe_success() 2616 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_mtup_probe_success() 2623 void tcp_simple_retransmit(struct sock *sk) tcp_simple_retransmit() argument 2625 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_simple_retransmit() 2626 struct tcp_sock *tp = tcp_sk(sk); tcp_simple_retransmit() 2628 unsigned int mss = tcp_current_mss(sk); tcp_simple_retransmit() 2631 tcp_for_write_queue(skb, sk) { tcp_for_write_queue() 2632 if (skb == tcp_send_head(sk)) tcp_for_write_queue() 2661 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2664 tcp_set_ca_state(sk, TCP_CA_Loss); 2666 tcp_xmit_retransmit_queue(sk); 2670 static void tcp_enter_recovery(struct sock *sk, bool ece_ack) tcp_enter_recovery() argument 2672 struct tcp_sock *tp = tcp_sk(sk); tcp_enter_recovery() 2680 NET_INC_STATS_BH(sock_net(sk), mib_idx); tcp_enter_recovery() 2685 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { tcp_enter_recovery() 2687 tp->prior_ssthresh = tcp_current_ssthresh(sk); tcp_enter_recovery() 2688 tcp_init_cwnd_reduction(sk); tcp_enter_recovery() 2690 tcp_set_ca_state(sk, TCP_CA_Recovery); tcp_enter_recovery() 2696 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) tcp_process_loss() argument 2698 struct tcp_sock *tp = tcp_sk(sk); tcp_process_loss() 2702 tcp_try_undo_loss(sk, false)) tcp_process_loss() 2710 tcp_try_undo_loss(sk, true)) tcp_process_loss() 2718 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tcp_process_loss() 2728 tcp_try_undo_recovery(sk); tcp_process_loss() 2736 tcp_add_reno_sack(sk); tcp_process_loss() 2740 tcp_xmit_retransmit_queue(sk); tcp_process_loss() 2744 static bool tcp_try_undo_partial(struct sock *sk, const int acked, tcp_try_undo_partial() argument 2747 struct tcp_sock *tp = tcp_sk(sk); tcp_try_undo_partial() 2753 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); tcp_try_undo_partial() 2761 tcp_cwnd_reduction(sk, prior_unsacked, 0); tcp_try_undo_partial() 2765 if (!tcp_any_retrans_done(sk)) tcp_try_undo_partial() 2768 DBGUNDO(sk, "partial recovery"); tcp_try_undo_partial() 2769 tcp_undo_cwnd_reduction(sk, true); tcp_try_undo_partial() 2770 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); tcp_try_undo_partial() 2771 tcp_try_keep_open(sk); tcp_try_undo_partial() 2788 static void tcp_fastretrans_alert(struct sock *sk, const int acked, tcp_fastretrans_alert() argument 2792 struct 
inet_connection_sock *icsk = inet_csk(sk); tcp_fastretrans_alert() 2793 struct tcp_sock *tp = tcp_sk(sk); tcp_fastretrans_alert() 2809 if (tcp_check_sack_reneging(sk, flag)) tcp_fastretrans_alert() 2826 tcp_end_cwnd_reduction(sk); tcp_fastretrans_alert() 2827 tcp_set_ca_state(sk, TCP_CA_Open); tcp_fastretrans_alert() 2834 if (tcp_try_undo_recovery(sk)) tcp_fastretrans_alert() 2836 tcp_end_cwnd_reduction(sk); tcp_fastretrans_alert() 2846 tcp_add_reno_sack(sk); tcp_fastretrans_alert() 2848 if (tcp_try_undo_partial(sk, acked, prior_unsacked)) tcp_fastretrans_alert() 2854 if (tcp_try_undo_dsack(sk)) { tcp_fastretrans_alert() 2855 tcp_try_keep_open(sk); tcp_fastretrans_alert() 2860 tcp_process_loss(sk, flag, is_dupack); tcp_fastretrans_alert() 2869 tcp_add_reno_sack(sk); tcp_fastretrans_alert() 2873 tcp_try_undo_dsack(sk); tcp_fastretrans_alert() 2875 if (!tcp_time_to_recover(sk, flag)) { tcp_fastretrans_alert() 2876 tcp_try_to_open(sk, flag, prior_unsacked); tcp_fastretrans_alert() 2884 tcp_mtup_probe_failed(sk); tcp_fastretrans_alert() 2887 tcp_simple_retransmit(sk); tcp_fastretrans_alert() 2892 tcp_enter_recovery(sk, (flag & FLAG_ECE)); tcp_fastretrans_alert() 2897 tcp_update_scoreboard(sk, fast_rexmit); tcp_fastretrans_alert() 2898 tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit); tcp_fastretrans_alert() 2899 tcp_xmit_retransmit_queue(sk); tcp_fastretrans_alert() 2902 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, tcp_ack_update_rtt() argument 2905 const struct tcp_sock *tp = tcp_sk(sk); tcp_ack_update_rtt() 2931 tcp_rtt_estimator(sk, seq_rtt_us); tcp_ack_update_rtt() 2932 tcp_set_rto(sk); tcp_ack_update_rtt() 2935 inet_csk(sk)->icsk_backoff = 0; tcp_ack_update_rtt() 2940 static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp) tcp_synack_rtt_meas() argument 2942 struct tcp_sock *tp = tcp_sk(sk); tcp_synack_rtt_meas() 2952 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L); tcp_synack_rtt_meas() 2955 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) tcp_cong_avoid() argument 2957 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_cong_avoid() 2959 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); tcp_cong_avoid() 2960 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; tcp_cong_avoid() 2966 void tcp_rearm_rto(struct sock *sk) tcp_rearm_rto() argument 2968 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_rearm_rto() 2969 struct tcp_sock *tp = tcp_sk(sk); tcp_rearm_rto() 2978 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); tcp_rearm_rto() 2980 u32 rto = inet_csk(sk)->icsk_rto; tcp_rearm_rto() 2984 struct sk_buff *skb = tcp_write_queue_head(sk); tcp_rearm_rto() 2994 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, tcp_rearm_rto() 3002 void tcp_resume_early_retransmit(struct sock *sk) tcp_resume_early_retransmit() argument 3004 struct tcp_sock *tp = tcp_sk(sk); tcp_resume_early_retransmit() 3006 tcp_rearm_rto(sk); tcp_resume_early_retransmit() 3012 tcp_enter_recovery(sk, false); tcp_resume_early_retransmit() 3013 tcp_update_scoreboard(sk, 1); tcp_resume_early_retransmit() 3014 tcp_xmit_retransmit_queue(sk); tcp_resume_early_retransmit() 3018 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) tcp_tso_acked() argument 3020 struct tcp_sock *tp = tcp_sk(sk); tcp_tso_acked() 3026 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) tcp_tso_acked() 3038 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, tcp_ack_tstamp() argument 3044 if (likely(!(sk->sk_tsflags & 
SOF_TIMESTAMPING_TX_ACK))) tcp_ack_tstamp() 3049 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) tcp_ack_tstamp() 3050 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); tcp_ack_tstamp() 3057 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, tcp_clean_rtx_queue() argument 3060 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_clean_rtx_queue() 3062 struct tcp_sock *tp = tcp_sk(sk); tcp_clean_rtx_queue() 3075 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { tcp_clean_rtx_queue() 3080 tcp_ack_tstamp(sk, skb, prior_snd_una); tcp_clean_rtx_queue() 3088 acked_pcount = tcp_tso_acked(sk, skb); tcp_clean_rtx_queue() 3139 tcp_unlink_write_queue(skb, sk); tcp_clean_rtx_queue() 3140 sk_wmem_free_skb(sk, skb); tcp_clean_rtx_queue() 3159 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us); tcp_clean_rtx_queue() 3163 = inet_csk(sk)->icsk_ca_ops; tcp_clean_rtx_queue() 3165 tcp_rearm_rto(sk); tcp_clean_rtx_queue() 3168 tcp_mtup_probe_success(sk); tcp_clean_rtx_queue() 3172 tcp_remove_reno_sacks(sk, pkts_acked); tcp_clean_rtx_queue() 3178 tcp_update_reordering(sk, tp->fackets_out - reord, 0); tcp_clean_rtx_queue() 3189 ca_ops->pkts_acked(sk, pkts_acked, rtt_us); tcp_clean_rtx_queue() 3198 tcp_rearm_rto(sk); tcp_clean_rtx_queue() 3206 icsk = inet_csk(sk); tcp_clean_rtx_queue() 3227 static void tcp_ack_probe(struct sock *sk) tcp_ack_probe() argument 3229 const struct tcp_sock *tp = tcp_sk(sk); tcp_ack_probe() 3230 struct inet_connection_sock *icsk = inet_csk(sk); tcp_ack_probe() 3234 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { tcp_ack_probe() 3236 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); tcp_ack_probe() 3243 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, tcp_ack_probe() 3248 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) tcp_ack_is_dubious() argument 3251 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; tcp_ack_is_dubious() 3255 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) tcp_may_raise_cwnd() argument 3257 if (tcp_in_cwnd_reduction(sk)) tcp_may_raise_cwnd() 3266 if (tcp_sk(sk)->reordering > sysctl_tcp_reordering) tcp_may_raise_cwnd() 3311 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, tcp_ack_update_window() argument 3314 struct tcp_sock *tp = tcp_sk(sk); tcp_ack_update_window() 3332 tcp_fast_path_check(sk); tcp_ack_update_window() 3336 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); tcp_ack_update_window() 3377 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) tcp_send_challenge_ack() argument 3382 struct tcp_sock *tp = tcp_sk(sk); tcp_send_challenge_ack() 3386 if (tcp_oow_rate_limited(sock_net(sk), skb, tcp_send_challenge_ack() 3398 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_challenge_ack() 3399 tcp_send_ack(sk); tcp_send_challenge_ack() 3429 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) tcp_process_tlp_ack() argument 3431 struct tcp_sock *tp = tcp_sk(sk); tcp_process_tlp_ack() 3443 tcp_init_cwnd_reduction(sk); tcp_process_tlp_ack() 3444 tcp_set_ca_state(sk, TCP_CA_CWR); tcp_process_tlp_ack() 3445 tcp_end_cwnd_reduction(sk); tcp_process_tlp_ack() 3446 tcp_try_keep_open(sk); tcp_process_tlp_ack() 3447 NET_INC_STATS_BH(sock_net(sk), tcp_process_tlp_ack() 3456 static inline void tcp_in_ack_event(struct sock *sk, u32 flags) tcp_in_ack_event() argument 3458 const struct inet_connection_sock *icsk = inet_csk(sk); 
tcp_in_ack_event() 3461 icsk->icsk_ca_ops->in_ack_event(sk, flags); tcp_in_ack_event() 3465 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_ack() argument 3467 struct inet_connection_sock *icsk = inet_csk(sk); tcp_ack() 3468 struct tcp_sock *tp = tcp_sk(sk); tcp_ack() 3480 prefetchw(sk->sk_write_queue.next); tcp_ack() 3488 tcp_send_challenge_ack(sk, skb); tcp_ack() 3502 tcp_rearm_rto(sk); tcp_ack() 3526 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); tcp_ack() 3528 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); tcp_ack() 3535 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); tcp_ack() 3537 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); tcp_ack() 3540 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, tcp_ack() 3551 tcp_in_ack_event(sk, ack_ev_flags); tcp_ack() 3557 sk->sk_err_soft = 0; tcp_ack() 3565 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, tcp_ack() 3570 if (tcp_may_raise_cwnd(sk, flag)) tcp_ack() 3571 tcp_cong_avoid(sk, ack, acked); tcp_ack() 3573 if (tcp_ack_is_dubious(sk, flag)) { tcp_ack() 3575 tcp_fastretrans_alert(sk, acked, prior_unsacked, tcp_ack() 3579 tcp_process_tlp_ack(sk, ack, flag); tcp_ack() 3582 struct dst_entry *dst = __sk_dst_get(sk); tcp_ack() 3588 tcp_schedule_loss_probe(sk); tcp_ack() 3589 tcp_update_pacing_rate(sk); tcp_ack() 3595 tcp_fastretrans_alert(sk, acked, prior_unsacked, tcp_ack() 3601 if (tcp_send_head(sk)) tcp_ack() 3602 tcp_ack_probe(sk); tcp_ack() 3605 tcp_process_tlp_ack(sk, ack, flag); tcp_ack() 3609 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); tcp_ack() 3617 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, tcp_ack() 3619 tcp_fastretrans_alert(sk, acked, prior_unsacked, tcp_ack() 3623 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); tcp_ack() 3864 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) tcp_disordered_ack() argument 3866 const struct tcp_sock *tp = tcp_sk(sk); tcp_disordered_ack() 3881 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); tcp_disordered_ack() 3884 static inline bool tcp_paws_discard(const struct sock *sk, tcp_paws_discard() argument 3887 const struct tcp_sock *tp = tcp_sk(sk); tcp_paws_discard() 3890 !tcp_disordered_ack(sk, skb); tcp_paws_discard() 3913 void tcp_reset(struct sock *sk) tcp_reset() argument 3916 switch (sk->sk_state) { tcp_reset() 3918 sk->sk_err = ECONNREFUSED; tcp_reset() 3921 sk->sk_err = EPIPE; tcp_reset() 3926 sk->sk_err = ECONNRESET; tcp_reset() 3931 if (!sock_flag(sk, SOCK_DEAD)) tcp_reset() 3932 sk->sk_error_report(sk); tcp_reset() 3934 tcp_done(sk); tcp_reset() 3951 static void tcp_fin(struct sock *sk) tcp_fin() argument 3953 struct tcp_sock *tp = tcp_sk(sk); tcp_fin() 3956 inet_csk_schedule_ack(sk); tcp_fin() 3958 sk->sk_shutdown |= RCV_SHUTDOWN; tcp_fin() 3959 sock_set_flag(sk, SOCK_DONE); tcp_fin() 3961 switch (sk->sk_state) { tcp_fin() 3965 tcp_set_state(sk, TCP_CLOSE_WAIT); tcp_fin() 3966 dst = __sk_dst_get(sk); tcp_fin() 3968 inet_csk(sk)->icsk_ack.pingpong = 1; tcp_fin() 3986 tcp_send_ack(sk); tcp_fin() 3987 tcp_set_state(sk, TCP_CLOSING); tcp_fin() 3991 tcp_send_ack(sk); tcp_fin() 3992 tcp_time_wait(sk, TCP_TIME_WAIT, 0); tcp_fin() 3998 pr_err("%s: Impossible, sk->sk_state=%d\n", tcp_fin() 3999 __func__, sk->sk_state); tcp_fin() 4009 sk_mem_reclaim(sk); tcp_fin() 4011 if (!sock_flag(sk, SOCK_DEAD)) { tcp_fin() 4012 sk->sk_state_change(sk); tcp_fin() 4015 if (sk->sk_shutdown == SHUTDOWN_MASK || tcp_fin() 4016 
sk->sk_state == TCP_CLOSE) tcp_fin() 4017 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); tcp_fin() 4019 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); tcp_fin() 4036 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) tcp_dsack_set() argument 4038 struct tcp_sock *tp = tcp_sk(sk); tcp_dsack_set() 4048 NET_INC_STATS_BH(sock_net(sk), mib_idx); tcp_dsack_set() 4056 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) tcp_dsack_extend() argument 4058 struct tcp_sock *tp = tcp_sk(sk); tcp_dsack_extend() 4061 tcp_dsack_set(sk, seq, end_seq); tcp_dsack_extend() 4066 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) tcp_send_dupack() argument 4068 struct tcp_sock *tp = tcp_sk(sk); tcp_send_dupack() 4072 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_send_dupack() 4073 tcp_enter_quickack_mode(sk); tcp_send_dupack() 4080 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); tcp_send_dupack() 4084 tcp_send_ack(sk); tcp_send_dupack() 4115 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) tcp_sack_new_ofo_skb() argument 4117 struct tcp_sock *tp = tcp_sk(sk); tcp_sack_new_ofo_skb() 4193 * @sk: socket 4204 static bool tcp_try_coalesce(struct sock *sk, tcp_try_coalesce() argument 4220 atomic_add(delta, &sk->sk_rmem_alloc); tcp_try_coalesce() 4221 sk_mem_charge(sk, delta); tcp_try_coalesce() 4222 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); tcp_try_coalesce() 4232 static void tcp_ofo_queue(struct sock *sk) tcp_ofo_queue() argument 4234 struct tcp_sock *tp = tcp_sk(sk); tcp_ofo_queue() 4247 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); tcp_ofo_queue() 4252 SOCK_DEBUG(sk, "ofo packet was already received\n"); tcp_ofo_queue() 4256 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", tcp_ofo_queue() 4260 tail = skb_peek_tail(&sk->sk_receive_queue); tcp_ofo_queue() 4261 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); tcp_ofo_queue() 4264 __skb_queue_tail(&sk->sk_receive_queue, skb); tcp_ofo_queue() 4266 tcp_fin(sk); tcp_ofo_queue() 4272 static bool tcp_prune_ofo_queue(struct sock *sk); 4273 static int tcp_prune_queue(struct sock *sk); 4275 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, tcp_try_rmem_schedule() argument 4278 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || tcp_try_rmem_schedule() 4279 !sk_rmem_schedule(sk, skb, size)) { tcp_try_rmem_schedule() 4281 if (tcp_prune_queue(sk) < 0) tcp_try_rmem_schedule() 4284 if (!sk_rmem_schedule(sk, skb, size)) { tcp_try_rmem_schedule() 4285 if (!tcp_prune_ofo_queue(sk)) tcp_try_rmem_schedule() 4288 if (!sk_rmem_schedule(sk, skb, size)) tcp_try_rmem_schedule() 4295 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tcp_data_queue_ofo() argument 4297 struct tcp_sock *tp = tcp_sk(sk); tcp_data_queue_ofo() 4303 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { tcp_data_queue_ofo() 4304 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); tcp_data_queue_ofo() 4311 inet_csk_schedule_ack(sk); tcp_data_queue_ofo() 4313 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); tcp_data_queue_ofo() 4314 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", tcp_data_queue_ofo() 4336 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { tcp_data_queue_ofo() 4339 tcp_grow_window(sk, skb); tcp_data_queue_ofo() 4368 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); tcp_data_queue_ofo() 4371 tcp_dsack_set(sk, seq, end_seq); tcp_data_queue_ofo() 4376 tcp_dsack_set(sk, seq, 
tcp_data_queue_ofo() 4400 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, tcp_data_queue_ofo() 4405 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, tcp_data_queue_ofo() 4407 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); tcp_data_queue_ofo() 4413 tcp_sack_new_ofo_skb(sk, seq, end_seq); tcp_data_queue_ofo() 4416 tcp_grow_window(sk, skb); tcp_data_queue_ofo() 4417 skb_set_owner_r(skb, sk); tcp_data_queue_ofo() 4421 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, tcp_queue_rcv() argument 4425 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); tcp_queue_rcv() 4429 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; tcp_queue_rcv() 4430 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); tcp_queue_rcv() 4432 __skb_queue_tail(&sk->sk_receive_queue, skb); tcp_queue_rcv() 4433 skb_set_owner_r(skb, sk); tcp_queue_rcv() 4438 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) tcp_send_rcvq() argument 4456 &err, sk->sk_allocation); tcp_send_rcvq() 4464 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) tcp_send_rcvq() 4471 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; tcp_send_rcvq() 4473 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; tcp_send_rcvq() 4475 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { tcp_send_rcvq() 4488 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) tcp_data_queue() argument 4490 struct tcp_sock *tp = tcp_sk(sk); tcp_data_queue() 4515 sock_owned_by_user(sk) && !tp->urg_data) { tcp_data_queue() 4526 tcp_rcv_space_adjust(sk); tcp_data_queue() 4534 tcp_try_rmem_schedule(sk, skb, skb->truesize)) tcp_data_queue() 4537 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); tcp_data_queue() 4541 tcp_event_data_recv(sk, skb); tcp_data_queue() 4543 tcp_fin(sk); tcp_data_queue() 4546 tcp_ofo_queue(sk); tcp_data_queue() 4552 inet_csk(sk)->icsk_ack.pingpong = 0; tcp_data_queue() 4558 tcp_fast_path_check(sk); tcp_data_queue() 4562 if (!sock_flag(sk, SOCK_DEAD)) tcp_data_queue() 4563 sk->sk_data_ready(sk); tcp_data_queue() 4569 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_data_queue() 4570 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tcp_data_queue() 4573 tcp_enter_quickack_mode(sk); tcp_data_queue() 4574 inet_csk_schedule_ack(sk); tcp_data_queue() 4584 tcp_enter_quickack_mode(sk); tcp_data_queue() 4588 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", tcp_data_queue() 4592 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); tcp_data_queue() 4602 tcp_data_queue_ofo(sk, skb); tcp_data_queue() 4605 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, tcp_collapse_one() argument 4615 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); tcp_collapse_one() 4629 tcp_collapse(struct sock *sk, struct sk_buff_head *list, tcp_collapse() argument 4646 skb = tcp_collapse_one(sk, skb, list); skb_queue_walk_from_safe() 4691 skb_set_owner_r(nskb, sk); 4708 skb = tcp_collapse_one(sk, skb, list); 4721 static void tcp_collapse_ofo_queue(struct sock *sk) tcp_collapse_ofo_queue() argument 4723 struct tcp_sock *tp = tcp_sk(sk); tcp_collapse_ofo_queue() 4747 tcp_collapse(sk, &tp->out_of_order_queue, tcp_collapse_ofo_queue() 4768 static bool tcp_prune_ofo_queue(struct sock *sk) tcp_prune_ofo_queue() argument 4770 struct tcp_sock *tp = tcp_sk(sk); tcp_prune_ofo_queue() 4774 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); tcp_prune_ofo_queue() 4784 sk_mem_reclaim(sk); tcp_prune_ofo_queue() 4797 static int tcp_prune_queue(struct sock *sk) 
tcp_prune_queue() argument 4799 struct tcp_sock *tp = tcp_sk(sk); tcp_prune_queue() 4801 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); tcp_prune_queue() 4803 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); tcp_prune_queue() 4805 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_prune_queue() 4806 tcp_clamp_window(sk); tcp_prune_queue() 4807 else if (sk_under_memory_pressure(sk)) tcp_prune_queue() 4810 tcp_collapse_ofo_queue(sk); tcp_prune_queue() 4811 if (!skb_queue_empty(&sk->sk_receive_queue)) tcp_prune_queue() 4812 tcp_collapse(sk, &sk->sk_receive_queue, tcp_prune_queue() 4813 skb_peek(&sk->sk_receive_queue), tcp_prune_queue() 4816 sk_mem_reclaim(sk); tcp_prune_queue() 4818 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) tcp_prune_queue() 4824 tcp_prune_ofo_queue(sk); tcp_prune_queue() 4826 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) tcp_prune_queue() 4833 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); tcp_prune_queue() 4840 static bool tcp_should_expand_sndbuf(const struct sock *sk) tcp_should_expand_sndbuf() argument 4842 const struct tcp_sock *tp = tcp_sk(sk); tcp_should_expand_sndbuf() 4847 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) tcp_should_expand_sndbuf() 4851 if (sk_under_memory_pressure(sk)) tcp_should_expand_sndbuf() 4855 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) tcp_should_expand_sndbuf() 4871 static void tcp_new_space(struct sock *sk) tcp_new_space() argument 4873 struct tcp_sock *tp = tcp_sk(sk); tcp_new_space() 4875 if (tcp_should_expand_sndbuf(sk)) { tcp_new_space() 4876 tcp_sndbuf_expand(sk); tcp_new_space() 4880 sk->sk_write_space(sk); tcp_new_space() 4883 static void tcp_check_space(struct sock *sk) tcp_check_space() argument 4885 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { tcp_check_space() 4886 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); tcp_check_space() 4889 if (sk->sk_socket && tcp_check_space() 4890 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) tcp_check_space() 4891 tcp_new_space(sk); tcp_check_space() 4895 static inline void tcp_data_snd_check(struct sock *sk) tcp_data_snd_check() argument 4897 tcp_push_pending_frames(sk); tcp_data_snd_check() 4898 tcp_check_space(sk); tcp_data_snd_check() 4904 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) __tcp_ack_snd_check() argument 4906 struct tcp_sock *tp = tcp_sk(sk); __tcp_ack_snd_check() 4909 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && __tcp_ack_snd_check() 4913 __tcp_select_window(sk) >= tp->rcv_wnd) || __tcp_ack_snd_check() 4915 tcp_in_quickack_mode(sk) || __tcp_ack_snd_check() 4919 tcp_send_ack(sk); __tcp_ack_snd_check() 4922 tcp_send_delayed_ack(sk); __tcp_ack_snd_check() 4926 static inline void tcp_ack_snd_check(struct sock *sk) tcp_ack_snd_check() argument 4928 if (!inet_csk_ack_scheduled(sk)) { tcp_ack_snd_check() 4932 __tcp_ack_snd_check(sk, 1); tcp_ack_snd_check() 4945 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) tcp_check_urg() argument 4947 struct tcp_sock *tp = tcp_sk(sk); tcp_check_urg() 4976 sk_send_sigurg(sk); tcp_check_urg() 4994 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { tcp_check_urg() 4995 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); tcp_check_urg() 4998 __skb_unlink(skb, &sk->sk_receive_queue); tcp_check_urg() 5011 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) tcp_urg() argument 5013 struct tcp_sock *tp = tcp_sk(sk); tcp_urg() 5017 tcp_check_urg(sk, th); tcp_urg() 5030 if (!sock_flag(sk, SOCK_DEAD)) tcp_urg() 
5031 sk->sk_data_ready(sk); tcp_urg() 5036 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) tcp_copy_to_iovec() argument 5038 struct tcp_sock *tp = tcp_sk(sk); tcp_copy_to_iovec() 5051 tcp_rcv_space_adjust(sk); tcp_copy_to_iovec() 5058 static __sum16 __tcp_checksum_complete_user(struct sock *sk, __tcp_checksum_complete_user() argument 5063 if (sock_owned_by_user(sk)) { __tcp_checksum_complete_user() 5073 static inline bool tcp_checksum_complete_user(struct sock *sk, tcp_checksum_complete_user() argument 5077 __tcp_checksum_complete_user(sk, skb); tcp_checksum_complete_user() 5083 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, tcp_validate_incoming() argument 5086 struct tcp_sock *tp = tcp_sk(sk); tcp_validate_incoming() 5090 tcp_paws_discard(sk, skb)) { tcp_validate_incoming() 5092 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); tcp_validate_incoming() 5093 if (!tcp_oow_rate_limited(sock_net(sk), skb, tcp_validate_incoming() 5096 tcp_send_dupack(sk, skb); tcp_validate_incoming() 5113 if (!tcp_oow_rate_limited(sock_net(sk), skb, tcp_validate_incoming() 5116 tcp_send_dupack(sk, skb); tcp_validate_incoming() 5130 tcp_reset(sk); tcp_validate_incoming() 5132 tcp_send_challenge_ack(sk, skb); tcp_validate_incoming() 5144 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); tcp_validate_incoming() 5145 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); tcp_validate_incoming() 5146 tcp_send_challenge_ack(sk, skb); tcp_validate_incoming() 5180 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_rcv_established() argument 5183 struct tcp_sock *tp = tcp_sk(sk); tcp_rcv_established() 5185 if (unlikely(!sk->sk_rx_dst)) tcp_rcv_established() 5186 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); tcp_rcv_established() 5255 tcp_ack(sk, skb, 0); tcp_rcv_established() 5257 tcp_data_snd_check(sk); tcp_rcv_established() 5260 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); tcp_rcv_established() 5270 sock_owned_by_user(sk)) { tcp_rcv_established() 5273 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { tcp_rcv_established() 5284 tcp_rcv_rtt_measure_ts(sk, skb); tcp_rcv_established() 5288 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); tcp_rcv_established() 5293 if (tcp_checksum_complete_user(sk, skb)) tcp_rcv_established() 5296 if ((int)skb->truesize > sk->sk_forward_alloc) tcp_rcv_established() 5308 tcp_rcv_rtt_measure_ts(sk, skb); tcp_rcv_established() 5310 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); tcp_rcv_established() 5313 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, tcp_rcv_established() 5317 tcp_event_data_recv(sk, skb); tcp_rcv_established() 5321 tcp_ack(sk, skb, FLAG_DATA); tcp_rcv_established() 5322 tcp_data_snd_check(sk); tcp_rcv_established() 5323 if (!inet_csk_ack_scheduled(sk)) tcp_rcv_established() 5327 __tcp_ack_snd_check(sk, 0); tcp_rcv_established() 5331 sk->sk_data_ready(sk); tcp_rcv_established() 5337 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) tcp_rcv_established() 5347 if (!tcp_validate_incoming(sk, skb, th, 1)) tcp_rcv_established() 5351 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) tcp_rcv_established() 5354 tcp_rcv_rtt_measure_ts(sk, skb); tcp_rcv_established() 5357 tcp_urg(sk, skb, th); tcp_rcv_established() 5360 tcp_data_queue(sk, skb); tcp_rcv_established() 5362 tcp_data_snd_check(sk); tcp_rcv_established() 5363 tcp_ack_snd_check(sk); tcp_rcv_established() 5367 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); tcp_rcv_established() 5368 
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); tcp_rcv_established() 5375 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) tcp_finish_connect() argument 5377 struct tcp_sock *tp = tcp_sk(sk); tcp_finish_connect() 5378 struct inet_connection_sock *icsk = inet_csk(sk); tcp_finish_connect() 5380 tcp_set_state(sk, TCP_ESTABLISHED); tcp_finish_connect() 5383 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); tcp_finish_connect() 5384 security_inet_conn_established(sk, skb); tcp_finish_connect() 5388 icsk->icsk_af_ops->rebuild_header(sk); tcp_finish_connect() 5390 tcp_init_metrics(sk); tcp_finish_connect() 5392 tcp_init_congestion_control(sk); tcp_finish_connect() 5399 tcp_init_buffer_space(sk); tcp_finish_connect() 5401 if (sock_flag(sk, SOCK_KEEPOPEN)) tcp_finish_connect() 5402 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); tcp_finish_connect() 5409 if (!sock_flag(sk, SOCK_DEAD)) { tcp_finish_connect() 5410 sk->sk_state_change(sk); tcp_finish_connect() 5411 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); tcp_finish_connect() 5415 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, tcp_rcv_fastopen_synack() argument 5418 struct tcp_sock *tp = tcp_sk(sk); tcp_rcv_fastopen_synack() 5419 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; tcp_rcv_fastopen_synack() 5451 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); tcp_rcv_fastopen_synack() 5454 tcp_for_write_queue_from(data, sk) { tcp_for_write_queue_from() 5455 if (data == tcp_send_head(sk) || tcp_for_write_queue_from() 5456 __tcp_retransmit_skb(sk, data)) tcp_for_write_queue_from() 5459 tcp_rearm_rto(sk); 5460 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); 5465 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); 5469 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tcp_rcv_synsent_state_process() argument 5472 struct inet_connection_sock *icsk = inet_csk(sk); tcp_rcv_synsent_state_process() 5473 struct tcp_sock *tp = tcp_sk(sk); tcp_rcv_synsent_state_process() 5497 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); tcp_rcv_synsent_state_process() 5510 tcp_reset(sk); tcp_rcv_synsent_state_process() 5534 tcp_ack(sk, skb, FLAG_SLOWPATH); tcp_rcv_synsent_state_process() 5565 tcp_mtup_init(sk); tcp_rcv_synsent_state_process() 5566 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_rcv_synsent_state_process() 5567 tcp_initialize_rcv_mss(sk); tcp_rcv_synsent_state_process() 5576 tcp_finish_connect(sk, skb); tcp_rcv_synsent_state_process() 5579 tcp_rcv_fastopen_synack(sk, skb, &foc)) tcp_rcv_synsent_state_process() 5582 if (sk->sk_write_pending || tcp_rcv_synsent_state_process() 5592 inet_csk_schedule_ack(sk); tcp_rcv_synsent_state_process() 5594 tcp_enter_quickack_mode(sk); tcp_rcv_synsent_state_process() 5595 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, tcp_rcv_synsent_state_process() 5602 tcp_send_ack(sk); tcp_rcv_synsent_state_process() 5629 tcp_set_state(sk, TCP_SYN_RECV); tcp_rcv_synsent_state_process() 5653 tcp_mtup_init(sk); tcp_rcv_synsent_state_process() 5654 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_rcv_synsent_state_process() 5655 tcp_initialize_rcv_mss(sk); tcp_rcv_synsent_state_process() 5657 tcp_send_synack(sk); tcp_rcv_synsent_state_process() 5697 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, tcp_rcv_state_process() argument 5700 struct tcp_sock *tp = tcp_sk(sk); tcp_rcv_state_process() 5701 struct inet_connection_sock *icsk = inet_csk(sk); tcp_rcv_state_process() 5709 switch 
(sk->sk_state) { tcp_rcv_state_process() 5723 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) tcp_rcv_state_process() 5749 queued = tcp_rcv_synsent_state_process(sk, skb, th, len); tcp_rcv_state_process() 5754 tcp_urg(sk, skb, th); tcp_rcv_state_process() 5756 tcp_data_snd_check(sk); tcp_rcv_state_process() 5762 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && tcp_rcv_state_process() 5763 sk->sk_state != TCP_FIN_WAIT1); tcp_rcv_state_process() 5765 if (!tcp_check_req(sk, skb, req, true)) tcp_rcv_state_process() 5772 if (!tcp_validate_incoming(sk, skb, th, 0)) tcp_rcv_state_process() 5776 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | tcp_rcv_state_process() 5779 switch (sk->sk_state) { tcp_rcv_state_process() 5790 reqsk_fastopen_remove(sk, req, false); tcp_rcv_state_process() 5794 icsk->icsk_af_ops->rebuild_header(sk); tcp_rcv_state_process() 5795 tcp_init_congestion_control(sk); tcp_rcv_state_process() 5797 tcp_mtup_init(sk); tcp_rcv_state_process() 5799 tcp_init_buffer_space(sk); tcp_rcv_state_process() 5802 tcp_set_state(sk, TCP_ESTABLISHED); tcp_rcv_state_process() 5803 sk->sk_state_change(sk); tcp_rcv_state_process() 5807 * sk->sk_sleep == NULL and sk->sk_socket == NULL. tcp_rcv_state_process() 5809 if (sk->sk_socket) tcp_rcv_state_process() 5810 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); tcp_rcv_state_process() 5815 tcp_synack_rtt_meas(sk, synack_stamp); tcp_rcv_state_process() 5829 tcp_rearm_rto(sk); tcp_rcv_state_process() 5831 tcp_init_metrics(sk); tcp_rcv_state_process() 5833 tcp_update_pacing_rate(sk); tcp_rcv_state_process() 5838 tcp_initialize_rcv_mss(sk); tcp_rcv_state_process() 5861 reqsk_fastopen_remove(sk, req, false); tcp_rcv_state_process() 5862 tcp_rearm_rto(sk); tcp_rcv_state_process() 5867 tcp_set_state(sk, TCP_FIN_WAIT2); tcp_rcv_state_process() 5868 sk->sk_shutdown |= SEND_SHUTDOWN; tcp_rcv_state_process() 5870 dst = __sk_dst_get(sk); tcp_rcv_state_process() 5874 if (!sock_flag(sk, SOCK_DEAD)) { tcp_rcv_state_process() 5876 sk->sk_state_change(sk); tcp_rcv_state_process() 5883 tcp_done(sk); tcp_rcv_state_process() 5884 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_rcv_state_process() 5888 tmo = tcp_fin_time(sk); tcp_rcv_state_process() 5890 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); tcp_rcv_state_process() 5891 } else if (th->fin || sock_owned_by_user(sk)) { tcp_rcv_state_process() 5898 inet_csk_reset_keepalive_timer(sk, tmo); tcp_rcv_state_process() 5900 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); tcp_rcv_state_process() 5908 tcp_time_wait(sk, TCP_TIME_WAIT, 0); tcp_rcv_state_process() 5915 tcp_update_metrics(sk); tcp_rcv_state_process() 5916 tcp_done(sk); tcp_rcv_state_process() 5923 tcp_urg(sk, skb, th); tcp_rcv_state_process() 5926 switch (sk->sk_state) { tcp_rcv_state_process() 5938 if (sk->sk_shutdown & RCV_SHUTDOWN) { tcp_rcv_state_process() 5941 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_rcv_state_process() 5942 tcp_reset(sk); tcp_rcv_state_process() 5948 tcp_data_queue(sk, skb); tcp_rcv_state_process() 5954 if (sk->sk_state != TCP_CLOSE) { tcp_rcv_state_process() 5955 tcp_data_snd_check(sk); tcp_rcv_state_process() 5956 tcp_ack_snd_check(sk); tcp_rcv_state_process() 6015 struct sk_buff *skb, const struct sock *sk) tcp_openreq_init() 6035 ireq->ir_mark = inet_request_mark(sk, skb); tcp_openreq_init() 6061 static bool tcp_syn_flood_action(struct sock *sk, tcp_syn_flood_action() argument 6073 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); tcp_syn_flood_action() 6076 NET_INC_STATS_BH(sock_net(sk), 
LINUX_MIB_TCPREQQFULLDROP); tcp_syn_flood_action() 6078 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; tcp_syn_flood_action() 6089 struct sock *sk, struct sk_buff *skb) tcp_conn_request() 6093 struct tcp_sock *tp = tcp_sk(sk); tcp_conn_request() 6107 inet_csk_reqsk_queue_is_full(sk)) && !isn) { tcp_conn_request() 6108 want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); tcp_conn_request() 6119 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { tcp_conn_request() 6120 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); tcp_conn_request() 6124 req = inet_reqsk_alloc(rsk_ops, sk); tcp_conn_request() 6139 tcp_openreq_init(req, &tmp_opt, skb, sk); tcp_conn_request() 6142 inet_rsk(req)->ir_iif = sk->sk_bound_dev_if; tcp_conn_request() 6144 af_ops->init_req(req, sk, skb); tcp_conn_request() 6146 if (security_inet_conn_request(sk, skb, req)) tcp_conn_request() 6162 dst = af_ops->route_req(sk, &fl, req, &strict); tcp_conn_request() 6167 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); tcp_conn_request() 6173 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < tcp_conn_request() 6192 dst = af_ops->route_req(sk, &fl, req, NULL); tcp_conn_request() 6197 tcp_ecn_create_request(req, skb, sk, dst); tcp_conn_request() 6200 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); tcp_conn_request() 6207 tcp_openreq_init_rwin(req, sk, dst); tcp_conn_request() 6209 tcp_try_fastopen(sk, skb, req, &foc, dst); tcp_conn_request() 6210 err = af_ops->send_synack(sk, dst, &fl, req, tcp_conn_request() 6217 af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT); tcp_conn_request() 6227 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); tcp_conn_request() 1611 tcp_maybe_skipping_dsack(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 skip_to_seq) tcp_maybe_skipping_dsack() argument 6013 tcp_openreq_init(struct request_sock *req, const struct tcp_options_received *rx_opt, struct sk_buff *skb, const struct sock *sk) tcp_openreq_init() argument 6087 tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb) tcp_conn_request() argument
|
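tcp_rtt_estimator() and tcp_set_rto() above apply the RFC 6298 scheme in fixed point: srtt = 7/8*srtt + 1/8*m, a mean-deviation estimate in place of the variance, and RTO = srtt + 4*rttvar, with the deviation clamped against tcp_rto_min_us(). Below is a minimal floating-point sketch of the same smoothing, for intuition only — rtt_state and rtt_sample are illustrative names, not kernel APIs, and the 200 ms floor merely stands in for the kernel's rto_min clamp:

#include <stdio.h>

/* RFC 6298-style RTT smoothing; the kernel does the same arithmetic in
 * fixed point, keeping srtt scaled by 8 and mdev scaled by 4. */
struct rtt_state {
	double srtt;   /* smoothed RTT */
	double rttvar; /* mean deviation of the RTT */
	int    seen;   /* first-sample flag */
};

static double rtt_sample(struct rtt_state *s, double m)
{
	if (!s->seen) {
		/* First measurement (RFC 6298, section 2.2). */
		s->srtt = m;
		s->rttvar = m / 2.0;
		s->seen = 1;
	} else {
		/* rttvar = 3/4*rttvar + 1/4*|srtt - m|
		 * srtt   = 7/8*srtt   + 1/8*m           */
		double err = s->srtt - m;
		s->rttvar = 0.75 * s->rttvar + 0.25 * (err < 0 ? -err : err);
		s->srtt = 0.875 * s->srtt + 0.125 * m;
	}
	/* RTO = srtt + 4*rttvar, floored the way tcp_set_rto() clamps
	 * against tcp_rto_min_us() (about 200 ms by default). */
	double rto = s->srtt + 4.0 * s->rttvar;
	return rto < 0.2 ? 0.2 : rto;
}

int main(void)
{
	struct rtt_state s = { 0 };
	double samples[] = { 0.100, 0.120, 0.090, 0.300, 0.110 };
	for (int i = 0; i < 5; i++)
		printf("rtt=%.3f -> rto=%.3f\n", samples[i],
		       rtt_sample(&s, samples[i]));
	return 0;
}

Feeding the 0.300 s outlier shows how the deviation term inflates the RTO well above the smoothed RTT, which is exactly the behavior that keeps spurious retransmits rare on jittery paths.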
H A D | udp_diag.c | 20 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, sk_diag_dump() argument 25 if (!inet_diag_bc_sk(bc, sk)) sk_diag_dump() 28 return inet_sk_diag_fill(sk, NULL, skb, req, sk_diag_dump() 29 sk_user_ns(NETLINK_CB(cb->skb).sk), sk_diag_dump() 39 struct sock *sk; udp_dump_one() local 41 struct net *net = sock_net(in_skb->sk); udp_dump_one() 44 sk = __udp4_lib_lookup(net, udp_dump_one() 50 sk = __udp6_lib_lookup(net, udp_dump_one() 61 if (!sk) udp_dump_one() 64 err = sock_diag_check_cookie(sk, req->id.idiag_cookie); udp_dump_one() 75 err = inet_sk_diag_fill(sk, NULL, rep, req, udp_dump_one() 76 sk_user_ns(NETLINK_CB(in_skb).sk), udp_dump_one() 89 if (sk) udp_dump_one() 90 sock_put(sk); udp_dump_one() 100 struct net *net = sock_net(skb->sk); udp_dump() 106 struct sock *sk; udp_dump() local 116 sk_nulls_for_each(sk, node, &hslot->head) { udp_dump() 117 struct inet_sock *inet = inet_sk(sk); udp_dump() 119 if (!net_eq(sock_net(sk), net)) udp_dump() 123 if (!(r->idiag_states & (1 << sk->sk_state))) udp_dump() 126 sk->sk_family != r->sdiag_family) udp_dump() 135 if (sk_diag_dump(sk, skb, cb, r, bc) < 0) { udp_dump() 161 static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, udp_diag_get_info() argument 164 r->idiag_rqueue = sk_rmem_alloc_get(sk); udp_diag_get_info() 165 r->idiag_wqueue = sk_wmem_alloc_get(sk); udp_diag_get_info()
|
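udp_dump() above walks every hash slot and reports only sockets that pass the request filter: the idiag_states bitmask, the requested address family, and the bytecode checks run by inet_diag_bc_sk(). Below is a standalone sketch of that per-socket match — sock_view and diag_match are hypothetical names, and the port checks (done by bytecode in the kernel) are collapsed into plain zero-is-wildcard comparisons:

#include <stdio.h>

/* Hypothetical stand-in for the fields udp_dump() reads off each
 * struct sock; not a kernel structure. */
struct sock_view {
	unsigned int state;          /* TCP_* state index, reused by UDP */
	int family;                  /* AF_INET or AF_INET6 */
	unsigned short sport, dport; /* host byte order for simplicity */
};

/* A socket is dumped when its state bit is set in the request mask and
 * every non-zero selector matches; zero acts as a wildcard. */
static int diag_match(const struct sock_view *s, unsigned int states,
		      int family, unsigned short sport, unsigned short dport)
{
	if (!(states & (1u << s->state)))
		return 0;
	if (family && s->family != family)
		return 0;
	if (sport && s->sport != sport)
		return 0;
	if (dport && s->dport != dport)
		return 0;
	return 1;
}

int main(void)
{
	struct sock_view s = { .state = 7, /* TCP_CLOSE: an idle UDP socket */
			       .family = 2, /* AF_INET */
			       .sport = 53, .dport = 0 };
	/* All states, any family, any port: matches. */
	printf("%d\n", diag_match(&s, ~0u, 0, 0, 0));
	/* Only sport 80: no match. */
	printf("%d\n", diag_match(&s, ~0u, 0, 80, 0));
	return 0;
}

An all-ones state mask with zeroed selectors matches every socket in the table, which is essentially what a plain `ss -u -a` dump requests.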
H A D | af_inet.c | 131 void inet_sock_destruct(struct sock *sk) inet_sock_destruct() argument 133 struct inet_sock *inet = inet_sk(sk); inet_sock_destruct() 135 __skb_queue_purge(&sk->sk_receive_queue); inet_sock_destruct() 136 __skb_queue_purge(&sk->sk_error_queue); inet_sock_destruct() 138 sk_mem_reclaim(sk); inet_sock_destruct() 140 if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { inet_sock_destruct() 142 sk->sk_state, sk); inet_sock_destruct() 145 if (!sock_flag(sk, SOCK_DEAD)) { inet_sock_destruct() 146 pr_err("Attempt to release alive inet socket %p\n", sk); inet_sock_destruct() 150 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); inet_sock_destruct() 151 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); inet_sock_destruct() 152 WARN_ON(sk->sk_wmem_queued); inet_sock_destruct() 153 WARN_ON(sk->sk_forward_alloc); inet_sock_destruct() 156 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); inet_sock_destruct() 157 dst_release(sk->sk_rx_dst); inet_sock_destruct() 158 sk_refcnt_debug_dec(sk); inet_sock_destruct() 172 static int inet_autobind(struct sock *sk) inet_autobind() argument 176 lock_sock(sk); inet_autobind() 177 inet = inet_sk(sk); inet_autobind() 179 if (sk->sk_prot->get_port(sk, 0)) { inet_autobind() 180 release_sock(sk); inet_autobind() 185 release_sock(sk); inet_autobind() 194 struct sock *sk = sock->sk; inet_listen() local 198 lock_sock(sk); inet_listen() 204 old_state = sk->sk_state; inet_listen() 220 !inet_csk(sk)->icsk_accept_queue.fastopenq) { inet_listen() 222 err = fastopen_init_queue(sk, backlog); inet_listen() 225 err = fastopen_init_queue(sk, inet_listen() 234 err = inet_csk_listen_start(sk, backlog); inet_listen() 238 sk->sk_max_ack_backlog = backlog; inet_listen() 242 release_sock(sk); inet_listen() 254 struct sock *sk; inet_create() local 325 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot); inet_create() 326 if (!sk) inet_create() 331 sk->sk_reuse = SK_CAN_REUSE; inet_create() 333 inet = inet_sk(sk); inet_create() 351 sock_init_data(sock, sk); inet_create() 353 sk->sk_destruct = inet_sock_destruct; inet_create() 354 sk->sk_protocol = protocol; inet_create() 355 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; inet_create() 365 sk_refcnt_debug_inc(sk); inet_create() 375 sk->sk_prot->hash(sk); inet_create() 378 if (sk->sk_prot->init) { inet_create() 379 err = sk->sk_prot->init(sk); inet_create() 381 sk_common_release(sk); inet_create() 398 struct sock *sk = sock->sk; inet_release() local 400 if (sk) { inet_release() 404 ip_mc_drop_socket(sk); inet_release() 414 if (sock_flag(sk, SOCK_LINGER) && inet_release() 416 timeout = sk->sk_lingertime; inet_release() 417 sock->sk = NULL; inet_release() 418 sk->sk_prot->close(sk, timeout); inet_release() 427 struct sock *sk = sock->sk; inet_bind() local 428 struct inet_sock *inet = inet_sk(sk); inet_bind() 429 struct net *net = sock_net(sk); inet_bind() 435 if (sk->sk_prot->bind) { inet_bind() 436 err = sk->sk_prot->bind(sk, uaddr, addr_len); inet_bind() 484 lock_sock(sk); inet_bind() 488 if (sk->sk_state != TCP_CLOSE || inet->inet_num) inet_bind() 496 if (sk->sk_prot->get_port(sk, snum)) { inet_bind() 503 sk->sk_userlocks |= SOCK_BINDADDR_LOCK; inet_bind() 505 sk->sk_userlocks |= SOCK_BINDPORT_LOCK; inet_bind() 509 sk_dst_reset(sk); inet_bind() 512 release_sock(sk); inet_bind() 521 struct sock *sk = sock->sk; inet_dgram_connect() local 526 return sk->sk_prot->disconnect(sk, flags); inet_dgram_connect() 528 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) inet_dgram_connect() 530 return sk->sk_prot->connect(sk, uaddr, 
addr_len); inet_dgram_connect() 534 static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) inet_wait_for_connect() argument 538 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); inet_wait_for_connect() 539 sk->sk_write_pending += writebias; inet_wait_for_connect() 541 /* Basic assumption: if someone sets sk->sk_err, he _must_ inet_wait_for_connect() 546 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { inet_wait_for_connect() 547 release_sock(sk); inet_wait_for_connect() 549 lock_sock(sk); inet_wait_for_connect() 552 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); inet_wait_for_connect() 554 finish_wait(sk_sleep(sk), &wait); inet_wait_for_connect() 555 sk->sk_write_pending -= writebias; inet_wait_for_connect() 566 struct sock *sk = sock->sk; __inet_stream_connect() local 574 err = sk->sk_prot->disconnect(sk, flags); __inet_stream_connect() 592 if (sk->sk_state != TCP_CLOSE) __inet_stream_connect() 595 err = sk->sk_prot->connect(sk, uaddr, addr_len); __inet_stream_connect() 609 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); __inet_stream_connect() 611 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { __inet_stream_connect() 612 int writebias = (sk->sk_protocol == IPPROTO_TCP) && __inet_stream_connect() 613 tcp_sk(sk)->fastopen_req && __inet_stream_connect() 614 tcp_sk(sk)->fastopen_req->data ? 1 : 0; __inet_stream_connect() 617 if (!timeo || !inet_wait_for_connect(sk, timeo, writebias)) __inet_stream_connect() 628 if (sk->sk_state == TCP_CLOSE) __inet_stream_connect() 631 /* sk->sk_err may be not zero now, if RECVERR was ordered by user __inet_stream_connect() 642 err = sock_error(sk) ? : -ECONNABORTED; __inet_stream_connect() 644 if (sk->sk_prot->disconnect(sk, flags)) __inet_stream_connect() 655 lock_sock(sock->sk); inet_stream_connect() 657 release_sock(sock->sk); inet_stream_connect() 668 struct sock *sk1 = sock->sk; inet_accept() 699 struct sock *sk = sock->sk; inet_getname() local 700 struct inet_sock *inet = inet_sk(sk); inet_getname() 706 (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && inet_getname() 726 struct sock *sk = sock->sk; inet_sendmsg() local 728 sock_rps_record_flow(sk); inet_sendmsg() 731 if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && inet_sendmsg() 732 inet_autobind(sk)) inet_sendmsg() 735 return sk->sk_prot->sendmsg(sk, msg, size); inet_sendmsg() 742 struct sock *sk = sock->sk; inet_sendpage() local 744 sock_rps_record_flow(sk); inet_sendpage() 747 if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && inet_sendpage() 748 inet_autobind(sk)) inet_sendpage() 751 if (sk->sk_prot->sendpage) inet_sendpage() 752 return sk->sk_prot->sendpage(sk, page, offset, size, flags); inet_sendpage() 760 struct sock *sk = sock->sk; inet_recvmsg() local 764 sock_rps_record_flow(sk); inet_recvmsg() 766 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, inet_recvmsg() 776 struct sock *sk = sock->sk; inet_shutdown() local 788 lock_sock(sk); inet_shutdown() 790 if ((1 << sk->sk_state) & inet_shutdown() 797 switch (sk->sk_state) { inet_shutdown() 803 sk->sk_shutdown |= how; inet_shutdown() 804 if (sk->sk_prot->shutdown) inet_shutdown() 805 sk->sk_prot->shutdown(sk, how); inet_shutdown() 817 err = sk->sk_prot->disconnect(sk, O_NONBLOCK); inet_shutdown() 823 sk->sk_state_change(sk); inet_shutdown() 824 release_sock(sk); inet_shutdown() 841 struct sock *sk = sock->sk; inet_ioctl() local 843 struct net *net = sock_net(sk); inet_ioctl() 847 err = sock_get_timestamp(sk, (struct timeval __user 
*)arg); inet_ioctl() 850 err = sock_get_timestampns(sk, (struct timespec __user *)arg); inet_ioctl() 876 if (sk->sk_prot->ioctl) inet_ioctl() 877 err = sk->sk_prot->ioctl(sk, cmd, arg); inet_ioctl() 889 struct sock *sk = sock->sk; inet_compat_ioctl() local 892 if (sk->sk_prot->compat_ioctl) inet_compat_ioctl() 893 err = sk->sk_prot->compat_ioctl(sk, cmd, arg); inet_compat_ioctl() 1105 static int inet_sk_reselect_saddr(struct sock *sk) inet_sk_reselect_saddr() argument 1107 struct inet_sock *inet = inet_sk(sk); inet_sk_reselect_saddr() 1116 sock_owned_by_user(sk)); inet_sk_reselect_saddr() 1122 rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk), inet_sk_reselect_saddr() 1123 sk->sk_bound_dev_if, sk->sk_protocol, inet_sk_reselect_saddr() 1124 inet->inet_sport, inet->inet_dport, sk); inet_sk_reselect_saddr() 1128 sk_setup_caps(sk, &rt->dst); inet_sk_reselect_saddr() 1150 __sk_prot_rehash(sk); inet_sk_reselect_saddr() 1154 int inet_sk_rebuild_header(struct sock *sk) inet_sk_rebuild_header() argument 1156 struct inet_sock *inet = inet_sk(sk); inet_sk_rebuild_header() 1157 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); inet_sk_rebuild_header() 1175 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr, inet_sk_rebuild_header() 1177 sk->sk_protocol, RT_CONN_FLAGS(sk), inet_sk_rebuild_header() 1178 sk->sk_bound_dev_if); inet_sk_rebuild_header() 1181 sk_setup_caps(sk, &rt->dst); inet_sk_rebuild_header() 1186 sk->sk_route_caps = 0; inet_sk_rebuild_header() 1192 sk->sk_state != TCP_SYN_SENT || inet_sk_rebuild_header() 1193 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || inet_sk_rebuild_header() 1194 (err = inet_sk_reselect_saddr(sk)) != 0) inet_sk_rebuild_header() 1195 sk->sk_err_soft = -err; inet_sk_rebuild_header() 1391 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) inet_recv_error() argument 1393 if (sk->sk_family == AF_INET) inet_recv_error() 1394 return ip_recv_error(sk, msg, len, addr_len); inet_recv_error() 1396 if (sk->sk_family == AF_INET6) inet_recv_error() 1397 return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len); inet_recv_error() 1433 int inet_ctl_sock_create(struct sock **sk, unsigned short family, inet_ctl_sock_create() argument 1441 *sk = sock->sk; inet_ctl_sock_create() 1442 (*sk)->sk_allocation = GFP_ATOMIC; inet_ctl_sock_create() 1447 (*sk)->sk_prot->unhash(*sk); inet_ctl_sock_create() 1449 sk_change_net(*sk, net); inet_ctl_sock_create()
|
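The blocking side of __inet_stream_connect() above sleeps in inet_wait_for_connect() while the socket sits in SYN_SENT/SYN_RECV and surfaces the handshake result through sock_error(). With O_NONBLOCK the same path returns EINPROGRESS instead, and userspace finishes the job by polling for writability and reading back SO_ERROR. A self-contained sketch of that classic pattern — connect_timeout is an illustrative helper, not a libc function:

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>

/* Non-blocking connect with a timeout: the userspace mirror of the
 * kernel's inet_wait_for_connect() loop. Returns 0 on success, -1 on
 * failure or timeout with errno set. */
int connect_timeout(int fd, const struct sockaddr *sa,
		    socklen_t salen, int timeout_ms)
{
	int flags = fcntl(fd, F_GETFL, 0);

	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
	if (connect(fd, sa, salen) == 0)
		return 0;               /* connected immediately */
	if (errno != EINPROGRESS)
		return -1;              /* immediate failure */

	/* The socket becomes writable once the handshake finishes,
	 * successfully or not. */
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;              /* timeout or poll error */

	/* Fetch the deferred result, as sock_error() does in-kernel. */
	int err = 0;
	socklen_t len = sizeof(err);
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0 || err) {
		if (err)
			errno = err;
		return -1;              /* handshake failed */
	}
	return 0;
}

On timeout the caller should close the descriptor; otherwise the handshake may still complete in the background on a socket the program has already given up on.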
H A D | raw.c | 18 * Alan Cox : Checks sk->broadcast. 96 void raw_hash_sk(struct sock *sk) raw_hash_sk() argument 98 struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; raw_hash_sk() 101 head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; raw_hash_sk() 104 sk_add_node(sk, head); raw_hash_sk() 105 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); raw_hash_sk() 110 void raw_unhash_sk(struct sock *sk) raw_unhash_sk() argument 112 struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; raw_unhash_sk() 115 if (sk_del_node_init(sk)) raw_unhash_sk() 116 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); raw_unhash_sk() 121 static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, __raw_v4_lookup() argument 124 sk_for_each_from(sk) { sk_for_each_from() 125 struct inet_sock *inet = inet_sk(sk); sk_for_each_from() 127 if (net_eq(sock_net(sk), net) && inet->inet_num == num && sk_for_each_from() 130 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) sk_for_each_from() 133 sk = NULL; 135 return sk; 142 static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) icmp_filter() argument 153 __u32 data = raw_sk(sk)->filter.data; icmp_filter() 170 struct sock *sk; raw_v4_input() local 181 sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol, raw_v4_input() 185 while (sk) { raw_v4_input() 187 if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) && raw_v4_input() 188 ip_mc_sf_allow(sk, iph->daddr, iph->saddr, raw_v4_input() 194 raw_rcv(sk, clone); raw_v4_input() 196 sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol, raw_v4_input() 223 static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info) raw_err() argument 225 struct inet_sock *inet = inet_sk(sk); raw_err() 232 ipv4_sk_update_pmtu(skb, sk, info); raw_err() 234 ipv4_sk_redirect(skb, sk); raw_err() 243 if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED) raw_err() 275 ip_icmp_error(sk, skb, err, 0, info, payload); raw_err() 279 sk->sk_err = err; raw_err() 280 sk->sk_error_report(sk); raw_err() 310 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) raw_rcv_skb() argument 314 ipv4_pktinfo_prepare(sk, skb); raw_rcv_skb() 315 if (sock_queue_rcv_skb(sk, skb) < 0) { raw_rcv_skb() 323 int raw_rcv(struct sock *sk, struct sk_buff *skb) raw_rcv() argument 325 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { raw_rcv() 326 atomic_inc(&sk->sk_drops); raw_rcv() 334 raw_rcv_skb(sk, skb); raw_rcv() 338 static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, raw_send_hdrinc() argument 343 struct inet_sock *inet = inet_sk(sk); raw_send_hdrinc() 344 struct net *net = sock_net(sk); raw_send_hdrinc() 353 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, raw_send_hdrinc() 362 skb = sock_alloc_send_skb(sk, raw_send_hdrinc() 369 skb->priority = sk->sk_priority; raw_send_hdrinc() 370 skb->mark = sk->sk_mark; raw_send_hdrinc() 380 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); raw_send_hdrinc() 414 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, raw_send_hdrinc() 483 static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) raw_sendmsg() argument 485 struct inet_sock *inet = inet_sk(sk); raw_sendmsg() 532 if (sk->sk_state != TCP_ESTABLISHED) raw_sendmsg() 542 ipc.oif = sk->sk_bound_dev_if; raw_sendmsg() 545 err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); raw_sendmsg() 583 tos = get_rtconn_flags(&ipc, sk); raw_sendmsg() 595 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, raw_sendmsg() 597 inet->hdrincl ? 
IPPROTO_RAW : sk->sk_protocol, raw_sendmsg() 598 inet_sk_flowi_flags(sk) | raw_sendmsg() 611 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); raw_sendmsg() 612 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); raw_sendmsg() 620 if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST)) raw_sendmsg() 628 err = raw_send_hdrinc(sk, &fl4, msg, len, raw_sendmsg() 632 sock_tx_timestamp(sk, &ipc.tx_flags); raw_sendmsg() 636 lock_sock(sk); raw_sendmsg() 637 err = ip_append_data(sk, &fl4, raw_getfrag, raw_sendmsg() 641 ip_flush_pending_frames(sk); raw_sendmsg() 643 err = ip_push_pending_frames(sk, &fl4); raw_sendmsg() 647 release_sock(sk); raw_sendmsg() 667 static void raw_close(struct sock *sk, long timeout) raw_close() argument 672 ip_ra_control(sk, 0, NULL); raw_close() 674 sk_common_release(sk); raw_close() 677 static void raw_destroy(struct sock *sk) raw_destroy() argument 679 lock_sock(sk); raw_destroy() 680 ip_flush_pending_frames(sk); raw_destroy() 681 release_sock(sk); raw_destroy() 685 static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) raw_bind() argument 687 struct inet_sock *inet = inet_sk(sk); raw_bind() 692 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) raw_bind() 694 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); raw_bind() 702 sk_dst_reset(sk); raw_bind() 712 static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, raw_recvmsg() argument 715 struct inet_sock *inet = inet_sk(sk); raw_recvmsg() 725 err = ip_recv_error(sk, msg, len, addr_len); raw_recvmsg() 729 skb = skb_recv_datagram(sk, flags, noblock, &err); raw_recvmsg() 743 sock_recv_ts_and_drops(msg, sk, skb); raw_recvmsg() 758 skb_free_datagram(sk, skb); raw_recvmsg() 765 static int raw_init(struct sock *sk) raw_init() argument 767 struct raw_sock *rp = raw_sk(sk); raw_init() 769 if (inet_sk(sk)->inet_num == IPPROTO_ICMP) raw_init() 774 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) raw_seticmpfilter() argument 778 if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) raw_seticmpfilter() 783 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen) raw_geticmpfilter() argument 796 copy_to_user(optval, &raw_sk(sk)->filter, len)) raw_geticmpfilter() 802 static int do_raw_setsockopt(struct sock *sk, int level, int optname, do_raw_setsockopt() argument 806 if (inet_sk(sk)->inet_num != IPPROTO_ICMP) do_raw_setsockopt() 809 return raw_seticmpfilter(sk, optval, optlen); do_raw_setsockopt() 814 static int raw_setsockopt(struct sock *sk, int level, int optname, raw_setsockopt() argument 818 return ip_setsockopt(sk, level, optname, optval, optlen); raw_setsockopt() 819 return do_raw_setsockopt(sk, level, optname, optval, optlen); raw_setsockopt() 823 static int compat_raw_setsockopt(struct sock *sk, int level, int optname, compat_raw_setsockopt() argument 827 return compat_ip_setsockopt(sk, level, optname, optval, optlen); compat_raw_setsockopt() 828 return do_raw_setsockopt(sk, level, optname, optval, optlen); compat_raw_setsockopt() 832 static int do_raw_getsockopt(struct sock *sk, int level, int optname, do_raw_getsockopt() argument 836 if (inet_sk(sk)->inet_num != IPPROTO_ICMP) do_raw_getsockopt() 839 return raw_geticmpfilter(sk, optval, optlen); do_raw_getsockopt() 844 static int raw_getsockopt(struct sock *sk, int level, int optname, raw_getsockopt() argument 848 return ip_getsockopt(sk, level, optname, optval, optlen); raw_getsockopt() 849 return do_raw_getsockopt(sk, 
level, optname, optval, optlen); raw_getsockopt() 853 static int compat_raw_getsockopt(struct sock *sk, int level, int optname, compat_raw_getsockopt() argument 857 return compat_ip_getsockopt(sk, level, optname, optval, optlen); compat_raw_getsockopt() 858 return do_raw_getsockopt(sk, level, optname, optval, optlen); compat_raw_getsockopt() 862 static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) raw_ioctl() argument 866 int amount = sk_wmem_alloc_get(sk); raw_ioctl() 874 spin_lock_bh(&sk->sk_receive_queue.lock); raw_ioctl() 875 skb = skb_peek(&sk->sk_receive_queue); raw_ioctl() 878 spin_unlock_bh(&sk->sk_receive_queue.lock); raw_ioctl() 884 return ipmr_ioctl(sk, cmd, (void __user *)arg); raw_ioctl() 892 static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) compat_raw_ioctl() argument 900 return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg)); compat_raw_ioctl() 938 struct sock *sk; raw_get_first() local 943 sk_for_each(sk, &state->h->ht[state->bucket]) raw_get_first() 944 if (sock_net(sk) == seq_file_net(seq)) raw_get_first() 947 sk = NULL; raw_get_first() 949 return sk; raw_get_first() 952 static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk) raw_get_next() argument 957 sk = sk_next(sk); raw_get_next() 960 } while (sk && sock_net(sk) != seq_file_net(seq)); raw_get_next() 962 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { raw_get_next() 963 sk = sk_head(&state->h->ht[state->bucket]); raw_get_next() 966 return sk; raw_get_next() 971 struct sock *sk = raw_get_first(seq); raw_get_idx() local 973 if (sk) raw_get_idx() 974 while (pos && (sk = raw_get_next(seq, sk)) != NULL) raw_get_idx() 976 return pos ? NULL : sk; raw_get_idx() 990 struct sock *sk; raw_seq_next() local 993 sk = raw_get_first(seq); raw_seq_next() 995 sk = raw_get_next(seq, v); raw_seq_next() 997 return sk; raw_seq_next()
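raw_seticmpfilter() above copies a single 32-bit mask into raw_sk(sk)->filter, and icmp_filter() then rejects any incoming ICMP message whose type has its bit set (types >= 32 always pass). The userspace side is a plain setsockopt() at SOL_RAW level; a small sketch that keeps only echo replies:

/* Userspace sketch: install the mask that raw_seticmpfilter() stores.
 * Each set bit in .data blocks the matching ICMP type on this socket.
 * Opening a raw socket needs CAP_NET_RAW. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/icmp.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
    struct icmp_filter f;

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&f, 0xff, sizeof(f));          /* block every type... */
    f.data &= ~(1U << ICMP_ECHOREPLY);    /* ...except echo replies */
    if (setsockopt(fd, SOL_RAW, ICMP_FILTER, &f, sizeof(f)) < 0)
        perror("setsockopt(ICMP_FILTER)");
    return 0;
}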
|
H A D | udp.c | 32 * Fred Van Kempen : Net2e support for sk->broadcast. 139 struct sock *sk, udp_lib_lport_inuse() 146 kuid_t uid = sock_i_uid(sk); udp_lib_lport_inuse() 150 sk2 != sk && udp_lib_lport_inuse() 152 (!sk2->sk_reuse || !sk->sk_reuse) && udp_lib_lport_inuse() 153 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || udp_lib_lport_inuse() 154 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && udp_lib_lport_inuse() 155 (!sk2->sk_reuseport || !sk->sk_reuseport || udp_lib_lport_inuse() 157 saddr_comp(sk, sk2)) { udp_lib_lport_inuse() 172 struct sock *sk, udp_lib_lport_inuse2() 178 kuid_t uid = sock_i_uid(sk); udp_lib_lport_inuse2() 184 sk2 != sk && udp_lib_lport_inuse2() 186 (!sk2->sk_reuse || !sk->sk_reuse) && udp_lib_lport_inuse2() 187 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || udp_lib_lport_inuse2() 188 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && udp_lib_lport_inuse2() 189 (!sk2->sk_reuseport || !sk->sk_reuseport || udp_lib_lport_inuse2() 191 saddr_comp(sk, sk2)) { udp_lib_lport_inuse2() 203 * @sk: socket struct in question 209 int udp_lib_get_port(struct sock *sk, unsigned short snum, udp_lib_get_port() argument 215 struct udp_table *udptable = sk->sk_prot->h.udp_table; udp_lib_get_port() 217 struct net *net = sock_net(sk); udp_lib_get_port() 239 udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, udp_lib_get_port() 263 unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; udp_lib_get_port() 273 sk, saddr_comp); udp_lib_get_port() 277 sk, saddr_comp); udp_lib_get_port() 285 if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, udp_lib_get_port() 290 inet_sk(sk)->inet_num = snum; udp_lib_get_port() 291 udp_sk(sk)->udp_port_hash = snum; udp_lib_get_port() 292 udp_sk(sk)->udp_portaddr_hash ^= snum; udp_lib_get_port() 293 if (sk_unhashed(sk)) { udp_lib_get_port() 294 sk_nulls_add_node_rcu(sk, &hslot->head); udp_lib_get_port() 296 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); udp_lib_get_port() 298 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); udp_lib_get_port() 300 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, udp_lib_get_port() 328 int udp_v4_get_port(struct sock *sk, unsigned short snum) udp_v4_get_port() argument 331 udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); udp_v4_get_port() 333 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); udp_v4_get_port() 336 udp_sk(sk)->udp_portaddr_hash = hash2_partial; udp_v4_get_port() 337 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); udp_v4_get_port() 340 static inline int compute_score(struct sock *sk, struct net *net, compute_score() argument 347 if (!net_eq(sock_net(sk), net) || compute_score() 348 udp_sk(sk)->udp_port_hash != hnum || compute_score() 349 ipv6_only_sock(sk)) compute_score() 352 score = (sk->sk_family == PF_INET) ? 2 : 1; compute_score() 353 inet = inet_sk(sk); compute_score() 373 if (sk->sk_bound_dev_if) { compute_score() 374 if (sk->sk_bound_dev_if != dif) compute_score() 385 static inline int compute_score2(struct sock *sk, struct net *net, compute_score2() argument 392 if (!net_eq(sock_net(sk), net) || compute_score2() 393 ipv6_only_sock(sk)) compute_score2() 396 inet = inet_sk(sk); compute_score2() 402 score = (sk->sk_family == PF_INET) ? 
2 : 1; compute_score2() 416 if (sk->sk_bound_dev_if) { compute_score2() 417 if (sk->sk_bound_dev_if != dif) compute_score2() 443 struct sock *sk, *result; udp4_lib_lookup2() local 451 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { udp4_lib_lookup2() 452 score = compute_score2(sk, net, saddr, sport, udp4_lib_lookup2() 455 result = sk; udp4_lib_lookup2() 457 reuseport = sk->sk_reuseport; udp4_lib_lookup2() 466 result = sk; udp4_lib_lookup2() 496 struct sock *sk, *result; __udp4_lib_lookup() local 532 sk_nulls_for_each_rcu(sk, node, &hslot->head) { __udp4_lib_lookup() 533 score = compute_score(sk, net, saddr, hnum, sport, __udp4_lib_lookup() 536 result = sk; __udp4_lib_lookup() 538 reuseport = sk->sk_reuseport; __udp4_lib_lookup() 547 result = sk; __udp4_lib_lookup() 591 static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, __udp_is_mcast_sock() argument 596 struct inet_sock *inet = inet_sk(sk); __udp_is_mcast_sock() 598 if (!net_eq(sock_net(sk), net) || __udp_is_mcast_sock() 599 udp_sk(sk)->udp_port_hash != hnum || __udp_is_mcast_sock() 603 ipv6_only_sock(sk) || __udp_is_mcast_sock() 604 (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) __udp_is_mcast_sock() 606 if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) __udp_is_mcast_sock() 629 struct sock *sk; __udp4_lib_err() local 634 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, __udp4_lib_err() 636 if (!sk) { __udp4_lib_err() 643 inet = inet_sk(sk); __udp4_lib_err() 658 ipv4_sk_update_pmtu(skb, sk, info); __udp4_lib_err() 673 ipv4_sk_redirect(skb, sk); __udp4_lib_err() 682 if (!harderr || sk->sk_state != TCP_ESTABLISHED) __udp4_lib_err() 685 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); __udp4_lib_err() 687 sk->sk_err = err; __udp4_lib_err() 688 sk->sk_error_report(sk); __udp4_lib_err() 690 sock_put(sk); __udp4_lib_err() 701 void udp_flush_pending_frames(struct sock *sk) udp_flush_pending_frames() argument 703 struct udp_sock *up = udp_sk(sk); udp_flush_pending_frames() 708 ip_flush_pending_frames(sk); udp_flush_pending_frames() 798 struct sock *sk = skb->sk; udp_send_skb() local 799 struct inet_sock *inet = inet_sk(sk); udp_send_skb() 802 int is_udplite = IS_UDPLITE(sk); udp_send_skb() 819 else if (sk->sk_no_check_tx) { /* UDP csum disabled */ udp_send_skb() 834 sk->sk_protocol, csum); udp_send_skb() 839 err = ip_send_skb(sock_net(sk), skb); udp_send_skb() 842 UDP_INC_STATS_USER(sock_net(sk), udp_send_skb() 847 UDP_INC_STATS_USER(sock_net(sk), udp_send_skb() 855 int udp_push_pending_frames(struct sock *sk) udp_push_pending_frames() argument 857 struct udp_sock *up = udp_sk(sk); udp_push_pending_frames() 858 struct inet_sock *inet = inet_sk(sk); udp_push_pending_frames() 863 skb = ip_finish_skb(sk, fl4); udp_push_pending_frames() 876 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) udp_sendmsg() argument 878 struct inet_sock *inet = inet_sk(sk); udp_sendmsg() 879 struct udp_sock *up = udp_sk(sk); udp_sendmsg() 890 int err, is_udplite = IS_UDPLITE(sk); udp_sendmsg() 919 lock_sock(sk); udp_sendmsg() 922 release_sock(sk); udp_sendmsg() 927 release_sock(sk); udp_sendmsg() 948 if (sk->sk_state != TCP_ESTABLISHED) udp_sendmsg() 959 ipc.oif = sk->sk_bound_dev_if; udp_sendmsg() 961 sock_tx_timestamp(sk, &ipc.tx_flags); udp_sendmsg() 964 err = ip_cmsg_send(sock_net(sk), msg, &ipc, udp_sendmsg() 965 sk->sk_family == AF_INET6); udp_sendmsg() 997 if (sock_flag(sk, SOCK_LOCALROUTE) || udp_sendmsg() 1014 rt = (struct rtable *)sk_dst_check(sk, 0); udp_sendmsg() 1017 struct net *net = 
sock_net(sk); udp_sendmsg() 1020 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, udp_sendmsg() 1021 RT_SCOPE_UNIVERSE, sk->sk_protocol, udp_sendmsg() 1022 inet_sk_flowi_flags(sk), udp_sendmsg() 1025 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); udp_sendmsg() 1026 rt = ip_route_output_flow(net, fl4, sk); udp_sendmsg() 1037 !sock_flag(sk, SOCK_BROADCAST)) udp_sendmsg() 1040 sk_dst_set(sk, dst_clone(&rt->dst)); udp_sendmsg() 1053 skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, udp_sendmsg() 1062 lock_sock(sk); udp_sendmsg() 1066 release_sock(sk); udp_sendmsg() 1084 err = ip_append_data(sk, fl4, getfrag, msg, ulen, udp_sendmsg() 1088 udp_flush_pending_frames(sk); udp_sendmsg() 1090 err = udp_push_pending_frames(sk); udp_sendmsg() 1091 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) udp_sendmsg() 1093 release_sock(sk); udp_sendmsg() 1108 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { udp_sendmsg() 1109 UDP_INC_STATS_USER(sock_net(sk), udp_sendmsg() 1123 int udp_sendpage(struct sock *sk, struct page *page, int offset, udp_sendpage() argument 1126 struct inet_sock *inet = inet_sk(sk); udp_sendpage() 1127 struct udp_sock *up = udp_sk(sk); udp_sendpage() 1140 ret = udp_sendmsg(sk, &msg, 0); udp_sendpage() 1145 lock_sock(sk); udp_sendpage() 1148 release_sock(sk); udp_sendpage() 1154 ret = ip_append_page(sk, &inet->cork.fl.u.ip4, udp_sendpage() 1157 release_sock(sk); udp_sendpage() 1158 return sock_no_sendpage(sk->sk_socket, page, offset, udp_sendpage() 1162 udp_flush_pending_frames(sk); udp_sendpage() 1168 ret = udp_push_pending_frames(sk); udp_sendpage() 1172 release_sock(sk); udp_sendpage() 1178 * @sk: socket 1183 static unsigned int first_packet_length(struct sock *sk) first_packet_length() argument 1185 struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; first_packet_length() 1194 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, first_packet_length() 1195 IS_UDPLITE(sk)); first_packet_length() 1196 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, first_packet_length() 1197 IS_UDPLITE(sk)); first_packet_length() 1198 atomic_inc(&sk->sk_drops); first_packet_length() 1206 bool slow = lock_sock_fast(sk); first_packet_length() 1209 sk_mem_reclaim_partial(sk); first_packet_length() 1210 unlock_sock_fast(sk, slow); first_packet_length() 1219 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) udp_ioctl() argument 1224 int amount = sk_wmem_alloc_get(sk); udp_ioctl() 1231 unsigned int amount = first_packet_length(sk); udp_ioctl() 1257 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, udp_recvmsg() argument 1260 struct inet_sock *inet = inet_sk(sk); udp_recvmsg() 1266 int is_udplite = IS_UDPLITE(sk); udp_recvmsg() 1270 return ip_recv_error(sk, msg, len, addr_len); udp_recvmsg() 1273 skb = __skb_recv_datagram(sk, flags | (noblock ? 
MSG_DONTWAIT : 0), udp_recvmsg() 1310 atomic_inc(&sk->sk_drops); udp_recvmsg() 1311 UDP_INC_STATS_USER(sock_net(sk), udp_recvmsg() 1318 UDP_INC_STATS_USER(sock_net(sk), udp_recvmsg() 1321 sock_recv_ts_and_drops(msg, sk, skb); udp_recvmsg() 1339 skb_free_datagram_locked(sk, skb); udp_recvmsg() 1344 slow = lock_sock_fast(sk); udp_recvmsg() 1345 if (!skb_kill_datagram(sk, skb, flags)) { udp_recvmsg() 1346 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); udp_recvmsg() 1347 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); udp_recvmsg() 1349 unlock_sock_fast(sk, slow); udp_recvmsg() 1357 int udp_disconnect(struct sock *sk, int flags) udp_disconnect() argument 1359 struct inet_sock *inet = inet_sk(sk); udp_disconnect() 1364 sk->sk_state = TCP_CLOSE; udp_disconnect() 1367 sock_rps_reset_rxhash(sk); udp_disconnect() 1368 sk->sk_bound_dev_if = 0; udp_disconnect() 1369 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) udp_disconnect() 1370 inet_reset_saddr(sk); udp_disconnect() 1372 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { udp_disconnect() 1373 sk->sk_prot->unhash(sk); udp_disconnect() 1376 sk_dst_reset(sk); udp_disconnect() 1381 void udp_lib_unhash(struct sock *sk) udp_lib_unhash() argument 1383 if (sk_hashed(sk)) { udp_lib_unhash() 1384 struct udp_table *udptable = sk->sk_prot->h.udp_table; udp_lib_unhash() 1387 hslot = udp_hashslot(udptable, sock_net(sk), udp_lib_unhash() 1388 udp_sk(sk)->udp_port_hash); udp_lib_unhash() 1389 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); udp_lib_unhash() 1392 if (sk_nulls_del_node_init_rcu(sk)) { udp_lib_unhash() 1394 inet_sk(sk)->inet_num = 0; udp_lib_unhash() 1395 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); udp_lib_unhash() 1398 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); udp_lib_unhash() 1410 void udp_lib_rehash(struct sock *sk, u16 newhash) udp_lib_rehash() argument 1412 if (sk_hashed(sk)) { udp_lib_rehash() 1413 struct udp_table *udptable = sk->sk_prot->h.udp_table; udp_lib_rehash() 1416 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); udp_lib_rehash() 1418 udp_sk(sk)->udp_portaddr_hash = newhash; udp_lib_rehash() 1420 hslot = udp_hashslot(udptable, sock_net(sk), udp_lib_rehash() 1421 udp_sk(sk)->udp_port_hash); udp_lib_rehash() 1426 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); udp_lib_rehash() 1431 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, udp_lib_rehash() 1442 static void udp_v4_rehash(struct sock *sk) udp_v4_rehash() argument 1444 u16 new_hash = udp4_portaddr_hash(sock_net(sk), udp_v4_rehash() 1445 inet_sk(sk)->inet_rcv_saddr, udp_v4_rehash() 1446 inet_sk(sk)->inet_num); udp_v4_rehash() 1447 udp_lib_rehash(sk, new_hash); udp_v4_rehash() 1450 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) __udp_queue_rcv_skb() argument 1454 if (inet_sk(sk)->inet_daddr) { __udp_queue_rcv_skb() 1455 sock_rps_save_rxhash(sk, skb); __udp_queue_rcv_skb() 1456 sk_mark_napi_id(sk, skb); __udp_queue_rcv_skb() 1457 sk_incoming_cpu_update(sk); __udp_queue_rcv_skb() 1460 rc = sock_queue_rcv_skb(sk, skb); __udp_queue_rcv_skb() 1462 int is_udplite = IS_UDPLITE(sk); __udp_queue_rcv_skb() 1466 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, __udp_queue_rcv_skb() 1468 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); __udp_queue_rcv_skb() 1470 trace_udp_fail_queue_rcv_skb(rc, sk); __udp_queue_rcv_skb() 1494 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) udp_queue_rcv_skb() argument 1496 struct udp_sock *up = udp_sk(sk); 
udp_queue_rcv_skb() 1498 int is_udplite = IS_UDPLITE(sk); udp_queue_rcv_skb() 1503 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) udp_queue_rcv_skb() 1508 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); udp_queue_rcv_skb() 1530 ret = encap_rcv(sk, skb); udp_queue_rcv_skb() 1532 UDP_INC_STATS_BH(sock_net(sk), udp_queue_rcv_skb() 1576 if (rcu_access_pointer(sk->sk_filter) && udp_queue_rcv_skb() 1580 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { udp_queue_rcv_skb() 1581 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, udp_queue_rcv_skb() 1588 ipv4_pktinfo_prepare(sk, skb); udp_queue_rcv_skb() 1589 bh_lock_sock(sk); udp_queue_rcv_skb() 1590 if (!sock_owned_by_user(sk)) udp_queue_rcv_skb() 1591 rc = __udp_queue_rcv_skb(sk, skb); udp_queue_rcv_skb() 1592 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { udp_queue_rcv_skb() 1593 bh_unlock_sock(sk); udp_queue_rcv_skb() 1596 bh_unlock_sock(sk); udp_queue_rcv_skb() 1601 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); udp_queue_rcv_skb() 1603 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); udp_queue_rcv_skb() 1604 atomic_inc(&sk->sk_drops); udp_queue_rcv_skb() 1614 struct sock *sk; flush_stack() local 1617 sk = stack[i]; flush_stack() 1622 atomic_inc(&sk->sk_drops); flush_stack() 1623 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, flush_stack() 1624 IS_UDPLITE(sk)); flush_stack() 1625 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, flush_stack() 1626 IS_UDPLITE(sk)); flush_stack() 1629 if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) flush_stack() 1632 sock_put(sk); flush_stack() 1641 static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) udp_sk_rx_dst_set() argument 1646 old = xchg(&sk->sk_rx_dst, dst); udp_sk_rx_dst_set() 1661 struct sock *sk, *stack[256 / sizeof(struct sock *)]; __udp4_lib_mcast_deliver() local 1666 unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); __udp4_lib_mcast_deliver() 1676 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); __udp4_lib_mcast_deliver() 1680 sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) { __udp4_lib_mcast_deliver() 1681 if (__udp_is_mcast_sock(net, sk, __udp4_lib_mcast_deliver() 1690 stack[count++] = sk; __udp4_lib_mcast_deliver() 1691 sock_hold(sk); __udp4_lib_mcast_deliver() 1747 struct sock *sk; __udp4_lib_rcv() local 1778 sk = skb_steal_sock(skb); __udp4_lib_rcv() 1779 if (sk) { __udp4_lib_rcv() 1783 if (unlikely(sk->sk_rx_dst != dst)) __udp4_lib_rcv() 1784 udp_sk_rx_dst_set(sk, dst); __udp4_lib_rcv() 1786 ret = udp_queue_rcv_skb(sk, skb); __udp4_lib_rcv() 1787 sock_put(sk); __udp4_lib_rcv() 1800 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); __udp4_lib_rcv() 1801 if (sk) { __udp4_lib_rcv() 1804 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) __udp4_lib_rcv() 1808 ret = udp_queue_rcv_skb(sk, skb); __udp4_lib_rcv() 1809 sock_put(sk); __udp4_lib_rcv() 1869 struct sock *sk, *result; __udp4_lib_mcast_demux_lookup() local 1883 sk_nulls_for_each_rcu(sk, node, &hslot->head) { __udp4_lib_mcast_demux_lookup() 1884 if (__udp_is_mcast_sock(net, sk, __udp4_lib_mcast_demux_lookup() 1888 result = sk; __udp4_lib_mcast_demux_lookup() 1925 struct sock *sk, *result; __udp4_lib_demux_lookup() local 1936 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { __udp4_lib_demux_lookup() 1937 if (INET_MATCH(sk, net, acookie, __udp4_lib_demux_lookup() 1939 result = sk; __udp4_lib_demux_lookup() 1947 else if (unlikely(!INET_MATCH(sk, net, acookie, __udp4_lib_demux_lookup() 1963 struct 
sock *sk; udp_v4_early_demux() local 1986 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, udp_v4_early_demux() 1989 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, udp_v4_early_demux() 1995 if (!sk) udp_v4_early_demux() 1998 skb->sk = sk; udp_v4_early_demux() 2000 dst = READ_ONCE(sk->sk_rx_dst); udp_v4_early_demux() 2020 void udp_destroy_sock(struct sock *sk) udp_destroy_sock() argument 2022 struct udp_sock *up = udp_sk(sk); udp_destroy_sock() 2023 bool slow = lock_sock_fast(sk); udp_destroy_sock() 2024 udp_flush_pending_frames(sk); udp_destroy_sock() 2025 unlock_sock_fast(sk, slow); udp_destroy_sock() 2027 void (*encap_destroy)(struct sock *sk); udp_destroy_sock() 2030 encap_destroy(sk); udp_destroy_sock() 2037 int udp_lib_setsockopt(struct sock *sk, int level, int optname, udp_lib_setsockopt() argument 2041 struct udp_sock *up = udp_sk(sk); udp_lib_setsockopt() 2044 int is_udplite = IS_UDPLITE(sk); udp_lib_setsockopt() 2060 lock_sock(sk); udp_lib_setsockopt() 2061 push_pending_frames(sk); udp_lib_setsockopt() 2062 release_sock(sk); udp_lib_setsockopt() 2130 int udp_setsockopt(struct sock *sk, int level, int optname, udp_setsockopt() argument 2134 return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_setsockopt() 2136 return ip_setsockopt(sk, level, optname, optval, optlen); udp_setsockopt() 2140 int compat_udp_setsockopt(struct sock *sk, int level, int optname, compat_udp_setsockopt() argument 2144 return udp_lib_setsockopt(sk, level, optname, optval, optlen, compat_udp_setsockopt() 2146 return compat_ip_setsockopt(sk, level, optname, optval, optlen); compat_udp_setsockopt() 2150 int udp_lib_getsockopt(struct sock *sk, int level, int optname, udp_lib_getsockopt() argument 2153 struct udp_sock *up = udp_sk(sk); udp_lib_getsockopt() 2203 int udp_getsockopt(struct sock *sk, int level, int optname, udp_getsockopt() argument 2207 return udp_lib_getsockopt(sk, level, optname, optval, optlen); udp_getsockopt() 2208 return ip_getsockopt(sk, level, optname, optval, optlen); udp_getsockopt() 2212 int compat_udp_getsockopt(struct sock *sk, int level, int optname, compat_udp_getsockopt() argument 2216 return udp_lib_getsockopt(sk, level, optname, optval, optlen); compat_udp_getsockopt() 2217 return compat_ip_getsockopt(sk, level, optname, optval, optlen); compat_udp_getsockopt() 2236 struct sock *sk = sock->sk; udp_poll() local 2238 sock_rps_record_flow(sk); udp_poll() 2242 !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) udp_poll() 2289 struct sock *sk; udp_get_first() local 2302 sk_nulls_for_each(sk, node, &hslot->head) { udp_get_first() 2303 if (!net_eq(sock_net(sk), net)) udp_get_first() 2305 if (sk->sk_family == state->family) udp_get_first() 2310 sk = NULL; udp_get_first() 2312 return sk; udp_get_first() 2315 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) udp_get_next() argument 2321 sk = sk_nulls_next(sk); udp_get_next() 2322 } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); udp_get_next() 2324 if (!sk) { udp_get_next() 2329 return sk; udp_get_next() 2334 struct sock *sk = udp_get_first(seq, 0); udp_get_idx() local 2336 if (sk) udp_get_idx() 2337 while (pos && (sk = udp_get_next(seq, sk)) != NULL) udp_get_idx() 2339 return pos ? 
NULL : sk; udp_get_idx() 2352 struct sock *sk; udp_seq_next() local 2355 sk = udp_get_idx(seq, 0); udp_seq_next() 2357 sk = udp_get_next(seq, v); udp_seq_next() 2360 return sk; udp_seq_next() 136 udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, unsigned long *bitmap, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2), unsigned int log) udp_lib_lport_inuse() argument 170 udp_lib_lport_inuse2(struct net *net, __u16 num, struct udp_hslot *hslot2, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2)) udp_lib_lport_inuse2() argument
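Two things in the udp.c excerpt are worth connecting: udp_lib_lport_inuse() only reports a bind conflict when one of the clashing sockets lacks SO_REUSEPORT or the owners' uids differ (the sock_i_uid() comparison), and the reuseport branch of __udp4_lib_lookup() then spreads incoming datagrams across the tied sockets by flow hash. From userspace that means several sockets can share one UDP port; a minimal sketch:

/* Userspace sketch of the SO_REUSEPORT group that the in-use check
 * permits: the option must be set on every socket before bind(), and
 * all binders must run under the same effective uid. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int bound_udp_socket(unsigned short port)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int one = 1;
    struct sockaddr_in a;

    if (fd < 0)
        return -1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
    memset(&a, 0, sizeof(a));
    a.sin_family = AF_INET;
    a.sin_addr.s_addr = htonl(INADDR_ANY);
    a.sin_port = htons(port);
    if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}

int main(void)
{
    int a = bound_udp_socket(5555);
    int b = bound_udp_socket(5555);  /* succeeds thanks to SO_REUSEPORT */
    printf("fds: %d %d\n", a, b);
    return 0;
}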
|
H A D | tcp_scalable.c | 18 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) tcp_scalable_cong_avoid() argument 20 struct tcp_sock *tp = tcp_sk(sk); tcp_scalable_cong_avoid() 22 if (!tcp_is_cwnd_limited(sk)) tcp_scalable_cong_avoid() 32 static u32 tcp_scalable_ssthresh(struct sock *sk) tcp_scalable_ssthresh() argument 34 const struct tcp_sock *tp = tcp_sk(sk); tcp_scalable_ssthresh()
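The two Scalable TCP hooks elide their bodies here, but the algorithm is compact: during congestion avoidance cwnd grows by one segment per min(cwnd, 50) ACKed segments, and on loss ssthresh is cut to cwnd - cwnd/8. A standalone sketch of those rules (plain C, not the kernel code; the constants follow the module's TCP_SCALABLE_AI_CNT and TCP_SCALABLE_MD_SCALE):

struct scalable_state { unsigned cwnd, ssthresh, cnt; };

#define SCALABLE_AI_CNT   50u  /* +1 MSS per 50 ACKed segments */
#define SCALABLE_MD_SHIFT 3    /* decrease: cwnd -= cwnd / 8 */

static void scalable_on_ack(struct scalable_state *s)
{
    if (s->cwnd <= s->ssthresh) {   /* slow start: ~1 MSS per ACK */
        s->cwnd++;
        return;
    }
    /* congestion avoidance: count ACKs against min(cwnd, 50) */
    if (++s->cnt >= (s->cwnd < SCALABLE_AI_CNT ? s->cwnd
                                               : SCALABLE_AI_CNT)) {
        s->cwnd++;
        s->cnt = 0;
    }
}

static void scalable_on_loss(struct scalable_state *s)
{
    unsigned w = s->cwnd - (s->cwnd >> SCALABLE_MD_SHIFT);
    s->ssthresh = w > 2 ? w : 2;    /* floor of 2, as in the ssthresh hook */
    s->cwnd = s->ssthresh;
}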
|
H A D | tcp_vegas.h | 18 void tcp_vegas_init(struct sock *sk); 19 void tcp_vegas_state(struct sock *sk, u8 ca_state); 20 void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us); 21 void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event); 22 size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
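tcp_vegas.h only exports the hook prototypes so that derived congestion modules (YeAH, for one) can reuse them; the substance lives in tcp_vegas.c. The quantity those hooks maintain is an estimate of how many segments the flow keeps queued in the network, derived from the minimum RTT seen on the path. A sketch of that estimate in the Vegas paper's formulation (the kernel's arithmetic differs slightly in its choice of denominator):

/* Illustrative only: expected = cwnd / base_rtt, actual = cwnd / rtt;
 * the difference, scaled back to segments, is the estimated backlog.
 * Vegas adjusts cwnd so this stays between alpha and beta (2 and 4 by
 * default in the kernel's module parameters). */
static unsigned vegas_queue_segments(unsigned cwnd,
                                     unsigned rtt_us, unsigned base_rtt_us)
{
    return (unsigned)((unsigned long long)cwnd *
                      (rtt_us - base_rtt_us) / rtt_us);
}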
|
H A D | tcp_highspeed.c | 99 static void hstcp_init(struct sock *sk) hstcp_init() argument 101 struct tcp_sock *tp = tcp_sk(sk); hstcp_init() 102 struct hstcp *ca = inet_csk_ca(sk); hstcp_init() 111 static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) hstcp_cong_avoid() argument 113 struct tcp_sock *tp = tcp_sk(sk); hstcp_cong_avoid() 114 struct hstcp *ca = inet_csk_ca(sk); hstcp_cong_avoid() 116 if (!tcp_is_cwnd_limited(sk)) hstcp_cong_avoid() 150 static u32 hstcp_ssthresh(struct sock *sk) hstcp_ssthresh() argument 152 const struct tcp_sock *tp = tcp_sk(sk); hstcp_ssthresh() 153 const struct hstcp *ca = inet_csk_ca(sk); hstcp_ssthresh()
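hstcp_cong_avoid() elides the interesting part: hstcp_aimd_vals[], a table mapping cwnd bands to an additive-increase value ai and a decrease factor md scaled by 2^8. ca->ai is walked up or down until cwnd sits in its band; cwnd then grows by roughly (ai + 1)/cwnd per ACK, and hstcp_ssthresh() returns cwnd - cwnd*md/256 on loss. A sketch of the row-selection walk; the rows shown are a from-memory abbreviation of the roughly 70-entry table derived from RFC 3649:

struct aimd_row { unsigned cwnd; unsigned md; };  /* md scaled by 256 */

static const struct aimd_row rows[] = {
    {  38, 128 },  /* lowest band acts like Reno: cut by 128/256 = 1/2 */
    { 118, 112 },
    { 221, 104 },
    /* ... many further rows, ending near cwnd = 84000 ... */
};

#define N_ROWS (sizeof(rows) / sizeof(rows[0]))

static unsigned pick_row(unsigned cwnd, unsigned cur)
{
    while (cur < N_ROWS - 1 && cwnd > rows[cur].cwnd)
        cur++;                       /* cwnd outgrew this band */
    while (cur > 0 && cwnd <= rows[cur - 1].cwnd)
        cur--;                       /* cwnd fell back below it */
    return cur;
}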
|
H A D | xfrm4_output.c | 33 if (skb->sk) xfrm4_tunnel_check_size() 72 int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb) xfrm4_output_finish() argument 80 return xfrm_output(sk, skb); xfrm4_output_finish() 83 static int __xfrm4_output(struct sock *sk, struct sk_buff *skb) __xfrm4_output() argument 90 return dst_output_sk(sk, skb); __xfrm4_output() 94 return x->outer_mode->afinfo->output_finish(sk, skb); __xfrm4_output() 97 int xfrm4_output(struct sock *sk, struct sk_buff *skb) xfrm4_output() argument 99 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, xfrm4_output() 109 ip_local_error(skb->sk, EMSGSIZE, hdr->daddr, xfrm4_local_error() 110 inet_sk(skb->sk)->inet_dport, mtu); xfrm4_local_error()
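xfrm4_output() above is a one-call wrapper: it runs the skb through the NF_INET_POST_ROUTING netfilter chain and continues in __xfrm4_output(), except for packets flagged IPSKB_REROUTED, which skip the chain. Filling in the elided arguments, the body reads approximately as follows (from memory of this tree; the 4.1-era NF_HOOK_COND signature with the sk argument matches what the excerpt shows):

int xfrm4_output(struct sock *sk, struct sk_buff *skb)
{
    /* Traverse the POST_ROUTING hooks only when the final condition
     * argument is true, i.e. the packet has not already been rerouted
     * through them; either way, __xfrm4_output() runs afterwards. */
    return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
                        NULL, skb_dst(skb)->dev, __xfrm4_output,
                        !(IPCB(skb)->flags & IPSKB_REROUTED));
}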
|
H A D | tcp_output.c | 68 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 72 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) tcp_event_new_data_sent() argument 74 struct inet_connection_sock *icsk = inet_csk(sk); tcp_event_new_data_sent() 75 struct tcp_sock *tp = tcp_sk(sk); tcp_event_new_data_sent() 78 tcp_advance_send_head(sk, skb); tcp_event_new_data_sent() 84 tcp_rearm_rto(sk); tcp_event_new_data_sent() 87 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, tcp_event_new_data_sent() 97 static inline __u32 tcp_acceptable_seq(const struct sock *sk) tcp_acceptable_seq() argument 99 const struct tcp_sock *tp = tcp_sk(sk); tcp_acceptable_seq() 121 static __u16 tcp_advertise_mss(struct sock *sk) tcp_advertise_mss() argument 123 struct tcp_sock *tp = tcp_sk(sk); tcp_advertise_mss() 124 const struct dst_entry *dst = __sk_dst_get(sk); tcp_advertise_mss() 141 static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst) tcp_cwnd_restart() argument 143 struct tcp_sock *tp = tcp_sk(sk); tcp_cwnd_restart() 148 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); tcp_cwnd_restart() 150 tp->snd_ssthresh = tcp_current_ssthresh(sk); tcp_cwnd_restart() 153 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) tcp_cwnd_restart() 162 struct sock *sk) tcp_event_data_sent() 164 struct inet_connection_sock *icsk = inet_csk(sk); tcp_event_data_sent() 166 const struct dst_entry *dst = __sk_dst_get(sk); tcp_event_data_sent() 170 tcp_cwnd_restart(sk, __sk_dst_get(sk)); tcp_event_data_sent() 183 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) tcp_event_ack_sent() argument 185 tcp_dec_quickack_mode(sk, pkts); tcp_event_ack_sent() 186 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); tcp_event_ack_sent() 269 static u16 tcp_select_window(struct sock *sk) tcp_select_window() argument 271 struct tcp_sock *tp = tcp_sk(sk); tcp_select_window() 274 u32 new_win = __tcp_select_window(sk); tcp_select_window() 286 NET_INC_STATS(sock_net(sk), tcp_select_window() 308 NET_INC_STATS(sock_net(sk), tcp_select_window() 311 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV); tcp_select_window() 318 static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) tcp_ecn_send_synack() argument 320 const struct tcp_sock *tp = tcp_sk(sk); tcp_ecn_send_synack() 325 else if (tcp_ca_needs_ecn(sk)) tcp_ecn_send_synack() 326 INET_ECN_xmit(sk); tcp_ecn_send_synack() 330 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) tcp_ecn_send_syn() argument 332 struct tcp_sock *tp = tcp_sk(sk); tcp_ecn_send_syn() 333 bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || tcp_ecn_send_syn() 334 tcp_ca_needs_ecn(sk); tcp_ecn_send_syn() 337 const struct dst_entry *dst = __sk_dst_get(sk); tcp_ecn_send_syn() 348 if (tcp_ca_needs_ecn(sk)) tcp_ecn_send_syn() 349 INET_ECN_xmit(sk); tcp_ecn_send_syn() 355 struct sock *sk) tcp_ecn_make_synack() 359 if (tcp_ca_needs_ecn(sk)) tcp_ecn_make_synack() 360 INET_ECN_xmit(sk); tcp_ecn_make_synack() 367 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, tcp_ecn_send() argument 370 struct tcp_sock *tp = tcp_sk(sk); tcp_ecn_send() 376 INET_ECN_xmit(sk); tcp_ecn_send() 382 } else if (!tcp_ca_needs_ecn(sk)) { tcp_ecn_send() 384 INET_ECN_dontxmit(sk); tcp_ecn_send() 547 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, tcp_syn_options() argument 551 struct tcp_sock *tp = tcp_sk(sk); tcp_syn_options() 556 *md5 = tp->af_specific->md5_lookup(sk, sk); 
tcp_syn_options() 574 opts->mss = tcp_advertise_mss(sk); tcp_syn_options() 613 static unsigned int tcp_synack_options(struct sock *sk, tcp_synack_options() argument 676 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, tcp_established_options() argument 680 struct tcp_sock *tp = tcp_sk(sk); tcp_established_options() 687 *md5 = tp->af_specific->md5_lookup(sk, sk); tcp_established_options() 726 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc 738 static void tcp_tsq_handler(struct sock *sk) tcp_tsq_handler() argument 740 if ((1 << sk->sk_state) & tcp_tsq_handler() 743 tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle, tcp_tsq_handler() 759 struct sock *sk; tcp_tasklet_func() local 769 sk = (struct sock *)tp; tcp_tasklet_func() 770 bh_lock_sock(sk); tcp_tasklet_func() 772 if (!sock_owned_by_user(sk)) { tcp_tasklet_func() 773 tcp_tsq_handler(sk); tcp_tasklet_func() 778 bh_unlock_sock(sk); tcp_tasklet_func() 781 sk_free(sk); tcp_tasklet_func() 791 * @sk: socket 796 void tcp_release_cb(struct sock *sk) tcp_release_cb() argument 798 struct tcp_sock *tp = tcp_sk(sk); tcp_release_cb() 810 tcp_tsq_handler(sk); tcp_release_cb() 816 * 3) socket owned by us (sk->sk_lock.owned == 1) tcp_release_cb() 821 sock_release_ownership(sk); tcp_release_cb() 824 tcp_write_timer_handler(sk); tcp_release_cb() 825 __sock_put(sk); tcp_release_cb() 828 tcp_delack_timer_handler(sk); tcp_release_cb() 829 __sock_put(sk); tcp_release_cb() 832 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); tcp_release_cb() 833 __sock_put(sk); tcp_release_cb() 859 struct sock *sk = skb->sk; tcp_wfree() local 860 struct tcp_sock *tp = tcp_sk(sk); tcp_wfree() 866 wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc); tcp_wfree() 892 sk_free(sk); tcp_wfree() 906 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tcp_transmit_skb() argument 909 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_transmit_skb() 932 inet = inet_sk(sk); tcp_transmit_skb() 933 tp = tcp_sk(sk); tcp_transmit_skb() 938 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); tcp_transmit_skb() 940 tcp_options_size = tcp_established_options(sk, skb, &opts, tcp_transmit_skb() 945 tcp_ca_event(sk, CA_EVENT_TX_START); tcp_transmit_skb() 954 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); tcp_transmit_skb() 960 skb->sk = sk; tcp_transmit_skb() 962 skb_set_hash_from_sk(skb, sk); tcp_transmit_skb() 963 atomic_add(skb->truesize, &sk->sk_wmem_alloc); tcp_transmit_skb() 980 th->window = htons(tcp_select_window(sk)); tcp_transmit_skb() 998 tcp_ecn_send(sk, skb, tcp_header_size); tcp_transmit_skb() 1003 sk_nocaps_add(sk, NETIF_F_GSO_MASK); tcp_transmit_skb() 1005 md5, sk, skb); tcp_transmit_skb() 1009 icsk->icsk_af_ops->send_check(sk, skb); tcp_transmit_skb() 1012 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); tcp_transmit_skb() 1015 tcp_event_data_sent(tp, sk); tcp_transmit_skb() 1018 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_transmit_skb() 1031 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); tcp_transmit_skb() 1036 tcp_enter_cwr(sk); tcp_transmit_skb() 1046 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) tcp_queue_skb() argument 1048 struct tcp_sock *tp = tcp_sk(sk); tcp_queue_skb() 1053 tcp_add_write_queue_tail(sk, skb); tcp_queue_skb() 1054 sk->sk_wmem_queued += skb->truesize; tcp_queue_skb() 1055 sk_mem_charge(sk, skb->truesize); tcp_queue_skb() 1059 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, 
tcp_set_skb_tso_segs() argument 1077 shinfo->gso_type = sk->sk_gso_type; tcp_set_skb_tso_segs() 1084 static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, tcp_adjust_fackets_out() argument 1087 struct tcp_sock *tp = tcp_sk(sk); tcp_adjust_fackets_out() 1099 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) tcp_adjust_pcount() argument 1101 struct tcp_sock *tp = tcp_sk(sk); tcp_adjust_pcount() 1116 tcp_adjust_fackets_out(sk, skb, decr); tcp_adjust_pcount() 1146 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, tcp_fragment() argument 1149 struct tcp_sock *tp = tcp_sk(sk); tcp_fragment() 1166 buff = sk_stream_alloc_skb(sk, nsize, gfp); tcp_fragment() 1170 sk->sk_wmem_queued += buff->truesize; tcp_fragment() 1171 sk_mem_charge(sk, buff->truesize); tcp_fragment() 1209 tcp_set_skb_tso_segs(sk, skb, mss_now); tcp_fragment() 1210 tcp_set_skb_tso_segs(sk, buff, mss_now); tcp_fragment() 1220 tcp_adjust_pcount(sk, skb, diff); tcp_fragment() 1225 tcp_insert_write_queue_after(skb, buff, sk); tcp_fragment() 1273 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) tcp_trim_head() argument 1284 sk->sk_wmem_queued -= len; tcp_trim_head() 1285 sk_mem_uncharge(sk, len); tcp_trim_head() 1286 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); tcp_trim_head() 1290 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); tcp_trim_head() 1296 static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) __tcp_mtu_to_mss() argument 1298 const struct tcp_sock *tp = tcp_sk(sk); __tcp_mtu_to_mss() 1299 const struct inet_connection_sock *icsk = inet_csk(sk); __tcp_mtu_to_mss() 1309 const struct dst_entry *dst = __sk_dst_get(sk); __tcp_mtu_to_mss() 1329 int tcp_mtu_to_mss(struct sock *sk, int pmtu) tcp_mtu_to_mss() argument 1332 return __tcp_mtu_to_mss(sk, pmtu) - tcp_mtu_to_mss() 1333 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); tcp_mtu_to_mss() 1337 int tcp_mss_to_mtu(struct sock *sk, int mss) tcp_mss_to_mtu() argument 1339 const struct tcp_sock *tp = tcp_sk(sk); tcp_mss_to_mtu() 1340 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_mss_to_mtu() 1350 const struct dst_entry *dst = __sk_dst_get(sk); tcp_mss_to_mtu() 1359 void tcp_mtup_init(struct sock *sk) tcp_mtup_init() argument 1361 struct tcp_sock *tp = tcp_sk(sk); tcp_mtup_init() 1362 struct inet_connection_sock *icsk = inet_csk(sk); tcp_mtup_init() 1363 struct net *net = sock_net(sk); tcp_mtup_init() 1368 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); tcp_mtup_init() 1384 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. 1394 NOTE2. 
inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1397 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) tcp_sync_mss() argument 1399 struct tcp_sock *tp = tcp_sk(sk); tcp_sync_mss() 1400 struct inet_connection_sock *icsk = inet_csk(sk); tcp_sync_mss() 1406 mss_now = tcp_mtu_to_mss(sk, pmtu); tcp_sync_mss() 1412 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); tcp_sync_mss() 1422 unsigned int tcp_current_mss(struct sock *sk) tcp_current_mss() argument 1424 const struct tcp_sock *tp = tcp_sk(sk); tcp_current_mss() 1425 const struct dst_entry *dst = __sk_dst_get(sk); tcp_current_mss() 1435 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) tcp_current_mss() 1436 mss_now = tcp_sync_mss(sk, mtu); tcp_current_mss() 1439 header_len = tcp_established_options(sk, NULL, &opts, &md5) + tcp_current_mss() 1457 static void tcp_cwnd_application_limited(struct sock *sk) tcp_cwnd_application_limited() argument 1459 struct tcp_sock *tp = tcp_sk(sk); tcp_cwnd_application_limited() 1461 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && tcp_cwnd_application_limited() 1462 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { tcp_cwnd_application_limited() 1464 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); tcp_cwnd_application_limited() 1467 tp->snd_ssthresh = tcp_current_ssthresh(sk); tcp_cwnd_application_limited() 1475 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) tcp_cwnd_validate() argument 1477 struct tcp_sock *tp = tcp_sk(sk); tcp_cwnd_validate() 1489 if (tcp_is_cwnd_limited(sk)) { tcp_cwnd_validate() 1499 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) tcp_cwnd_validate() 1500 tcp_cwnd_application_limited(sk); tcp_cwnd_validate() 1544 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now) tcp_tso_autosize() argument 1548 bytes = min(sk->sk_pacing_rate >> 10, tcp_tso_autosize() 1549 sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); tcp_tso_autosize() 1558 return min_t(u32, segs, sk->sk_gso_max_segs); tcp_tso_autosize() 1562 static unsigned int tcp_mss_split_point(const struct sock *sk, tcp_mss_split_point() argument 1568 const struct tcp_sock *tp = tcp_sk(sk); tcp_mss_split_point() 1574 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) tcp_mss_split_point() 1622 static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, tcp_init_tso_segs() argument 1628 tcp_set_skb_tso_segs(sk, skb, mss_now); tcp_init_tso_segs() 1673 /* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1677 static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, tcp_snd_test() argument 1680 const struct tcp_sock *tp = tcp_sk(sk); tcp_snd_test() 1683 tcp_init_tso_segs(sk, skb, cur_mss); tcp_snd_test() 1696 bool tcp_may_send_now(struct sock *sk) tcp_may_send_now() argument 1698 const struct tcp_sock *tp = tcp_sk(sk); tcp_may_send_now() 1699 struct sk_buff *skb = tcp_send_head(sk); tcp_may_send_now() 1702 tcp_snd_test(sk, skb, tcp_current_mss(sk), tcp_may_send_now() 1703 (tcp_skb_is_last(sk, skb) ? 
tcp_may_send_now() 1714 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, tso_fragment() argument 1723 return tcp_fragment(sk, skb, len, mss_now, gfp); tso_fragment() 1725 buff = sk_stream_alloc_skb(sk, 0, gfp); tso_fragment() 1729 sk->sk_wmem_queued += buff->truesize; tso_fragment() 1730 sk_mem_charge(sk, buff->truesize); tso_fragment() 1752 tcp_set_skb_tso_segs(sk, skb, mss_now); tso_fragment() 1753 tcp_set_skb_tso_segs(sk, buff, mss_now); tso_fragment() 1757 tcp_insert_write_queue_after(skb, buff, sk); tso_fragment() 1767 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, tcp_tso_should_defer() argument 1770 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_tso_should_defer() 1772 struct tcp_sock *tp = tcp_sk(sk); tcp_tso_should_defer() 1805 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) tcp_tso_should_defer() 1828 head = tcp_write_queue_head(sk); tcp_tso_should_defer() 1846 static inline void tcp_mtu_check_reprobe(struct sock *sk) tcp_mtu_check_reprobe() argument 1848 struct inet_connection_sock *icsk = inet_csk(sk); tcp_mtu_check_reprobe() 1849 struct tcp_sock *tp = tcp_sk(sk); tcp_mtu_check_reprobe() 1850 struct net *net = sock_net(sk); tcp_mtu_check_reprobe() 1857 int mss = tcp_current_mss(sk); tcp_mtu_check_reprobe() 1864 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); tcp_mtu_check_reprobe() 1880 static int tcp_mtu_probe(struct sock *sk) tcp_mtu_probe() argument 1882 struct tcp_sock *tp = tcp_sk(sk); tcp_mtu_probe() 1883 struct inet_connection_sock *icsk = inet_csk(sk); tcp_mtu_probe() 1885 struct net *net = sock_net(sk); tcp_mtu_probe() 1899 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || tcp_mtu_probe() 1908 mss_now = tcp_current_mss(sk); tcp_mtu_probe() 1909 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + tcp_mtu_probe() 1917 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || tcp_mtu_probe() 1922 tcp_mtu_check_reprobe(sk); tcp_mtu_probe() 1944 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC); tcp_mtu_probe() 1947 sk->sk_wmem_queued += nskb->truesize; tcp_mtu_probe() 1948 sk_mem_charge(sk, nskb->truesize); tcp_mtu_probe() 1950 skb = tcp_send_head(sk); tcp_mtu_probe() 1959 tcp_insert_write_queue_before(nskb, skb, sk); tcp_mtu_probe() 1962 tcp_for_write_queue_from_safe(skb, next, sk) { tcp_for_write_queue_from_safe() 1975 tcp_unlink_write_queue(skb, sk); tcp_for_write_queue_from_safe() 1976 sk_wmem_free_skb(sk, skb); tcp_for_write_queue_from_safe() 1987 tcp_set_skb_tso_segs(sk, skb, mss_now); tcp_for_write_queue_from_safe() 1997 tcp_init_tso_segs(sk, nskb, nskb->len); 2002 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 2006 tcp_event_new_data_sent(sk, nskb); 2008 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 2032 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, tcp_write_xmit() argument 2035 struct tcp_sock *tp = tcp_sk(sk); tcp_write_xmit() 2047 result = tcp_mtu_probe(sk); tcp_write_xmit() 2055 max_segs = tcp_tso_autosize(sk, mss_now); tcp_write_xmit() 2056 while ((skb = tcp_send_head(sk))) { tcp_write_xmit() 2059 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); tcp_write_xmit() 2083 (tcp_skb_is_last(sk, skb) ? 
tcp_write_xmit() 2088 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, tcp_write_xmit() 2095 limit = tcp_mss_split_point(sk, skb, mss_now, tcp_write_xmit() 2102 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) tcp_write_xmit() 2115 limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10); tcp_write_xmit() 2118 if (atomic_read(&sk->sk_wmem_alloc) > limit) { tcp_write_xmit() 2125 if (atomic_read(&sk->sk_wmem_alloc) > limit) tcp_write_xmit() 2129 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) tcp_write_xmit() 2136 tcp_event_new_data_sent(sk, skb); tcp_write_xmit() 2146 if (tcp_in_cwnd_reduction(sk)) tcp_write_xmit() 2151 tcp_schedule_loss_probe(sk); tcp_write_xmit() 2152 tcp_cwnd_validate(sk, is_cwnd_limited); tcp_write_xmit() 2155 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); tcp_write_xmit() 2158 bool tcp_schedule_loss_probe(struct sock *sk) tcp_schedule_loss_probe() argument 2160 struct inet_connection_sock *icsk = inet_csk(sk); tcp_schedule_loss_probe() 2161 struct tcp_sock *tp = tcp_sk(sk); tcp_schedule_loss_probe() 2169 tcp_rearm_rto(sk); tcp_schedule_loss_probe() 2175 if (sk->sk_state == TCP_SYN_RECV) tcp_schedule_loss_probe() 2186 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) tcp_schedule_loss_probe() 2190 tcp_send_head(sk)) tcp_schedule_loss_probe() 2204 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; tcp_schedule_loss_probe() 2211 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, tcp_schedule_loss_probe() 2221 static bool skb_still_in_host_queue(const struct sock *sk, skb_still_in_host_queue() argument 2224 if (unlikely(skb_fclone_busy(sk, skb))) { skb_still_in_host_queue() 2225 NET_INC_STATS_BH(sock_net(sk), skb_still_in_host_queue() 2235 void tcp_send_loss_probe(struct sock *sk) tcp_send_loss_probe() argument 2237 struct tcp_sock *tp = tcp_sk(sk); tcp_send_loss_probe() 2240 int mss = tcp_current_mss(sk); tcp_send_loss_probe() 2243 if (tcp_send_head(sk)) { tcp_send_loss_probe() 2244 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); tcp_send_loss_probe() 2253 skb = tcp_write_queue_tail(sk); tcp_send_loss_probe() 2257 if (skb_still_in_host_queue(sk, skb)) tcp_send_loss_probe() 2265 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss, tcp_send_loss_probe() 2268 skb = tcp_write_queue_tail(sk); tcp_send_loss_probe() 2274 err = __tcp_retransmit_skb(sk, skb); tcp_send_loss_probe() 2281 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, tcp_send_loss_probe() 2282 inet_csk(sk)->icsk_rto, tcp_send_loss_probe() 2286 NET_INC_STATS_BH(sock_net(sk), tcp_send_loss_probe() 2294 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, __tcp_push_pending_frames() argument 2301 if (unlikely(sk->sk_state == TCP_CLOSE)) __tcp_push_pending_frames() 2304 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, __tcp_push_pending_frames() 2305 sk_gfp_atomic(sk, GFP_ATOMIC))) __tcp_push_pending_frames() 2306 tcp_check_probe_timer(sk); __tcp_push_pending_frames() 2312 void tcp_push_one(struct sock *sk, unsigned int mss_now) tcp_push_one() argument 2314 struct sk_buff *skb = tcp_send_head(sk); tcp_push_one() 2318 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); tcp_push_one() 2373 u32 __tcp_select_window(struct sock *sk) __tcp_select_window() argument 2375 struct inet_connection_sock *icsk = inet_csk(sk); __tcp_select_window() 2376 struct tcp_sock *tp = tcp_sk(sk); __tcp_select_window() 2384 int free_space = tcp_space(sk); __tcp_select_window() 2385 int allowed_space = tcp_full_space(sk); __tcp_select_window() 2395 if 
(sk_under_memory_pressure(sk)) __tcp_select_window() 2452 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) tcp_collapse_retrans() argument 2454 struct tcp_sock *tp = tcp_sk(sk); tcp_collapse_retrans() 2455 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); tcp_collapse_retrans() 2463 tcp_highest_sack_combine(sk, next_skb, skb); tcp_collapse_retrans() 2465 tcp_unlink_write_queue(next_skb, sk); tcp_collapse_retrans() 2492 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); tcp_collapse_retrans() 2494 sk_wmem_free_skb(sk, next_skb); tcp_collapse_retrans() 2498 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) tcp_can_collapse() argument 2507 if (skb == tcp_send_head(sk)) tcp_can_collapse() 2519 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, tcp_retrans_try_collapse() argument 2522 struct tcp_sock *tp = tcp_sk(sk); tcp_retrans_try_collapse() 2531 tcp_for_write_queue_from_safe(skb, tmp, sk) { tcp_for_write_queue_from_safe() 2532 if (!tcp_can_collapse(sk, skb)) tcp_for_write_queue_from_safe() 2553 tcp_collapse_retrans(sk, to); tcp_for_write_queue_from_safe() 2561 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) __tcp_retransmit_skb() argument 2563 struct tcp_sock *tp = tcp_sk(sk); __tcp_retransmit_skb() 2564 struct inet_connection_sock *icsk = inet_csk(sk); __tcp_retransmit_skb() 2576 if (atomic_read(&sk->sk_wmem_alloc) > __tcp_retransmit_skb() 2577 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) __tcp_retransmit_skb() 2580 if (skb_still_in_host_queue(sk, skb)) __tcp_retransmit_skb() 2586 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) __tcp_retransmit_skb() 2590 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) __tcp_retransmit_skb() 2593 cur_mss = tcp_current_mss(sk); __tcp_retransmit_skb() 2605 if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC)) __tcp_retransmit_skb() 2613 tcp_init_tso_segs(sk, skb, cur_mss); __tcp_retransmit_skb() 2614 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); __tcp_retransmit_skb() 2618 tcp_retrans_try_collapse(sk, skb, cur_mss); __tcp_retransmit_skb() 2632 err = nskb ? 
tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : __tcp_retransmit_skb() 2635 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); __tcp_retransmit_skb() 2641 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); __tcp_retransmit_skb() 2643 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); __tcp_retransmit_skb() 2649 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) tcp_retransmit_skb() argument 2651 struct tcp_sock *tp = tcp_sk(sk); tcp_retransmit_skb() 2652 int err = __tcp_retransmit_skb(sk, skb); tcp_retransmit_skb() 2674 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); tcp_retransmit_skb() 2686 static bool tcp_can_forward_retransmit(struct sock *sk) tcp_can_forward_retransmit() argument 2688 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_can_forward_retransmit() 2689 const struct tcp_sock *tp = tcp_sk(sk); tcp_can_forward_retransmit() 2707 if (tcp_may_send_now(sk)) tcp_can_forward_retransmit() 2721 void tcp_xmit_retransmit_queue(struct sock *sk) tcp_xmit_retransmit_queue() argument 2723 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_xmit_retransmit_queue() 2724 struct tcp_sock *tp = tcp_sk(sk); tcp_xmit_retransmit_queue() 2743 skb = tcp_write_queue_head(sk); tcp_xmit_retransmit_queue() 2747 tcp_for_write_queue_from(skb, sk) { tcp_for_write_queue_from() 2750 if (skb == tcp_send_head(sk)) tcp_for_write_queue_from() 2774 if (!tcp_can_forward_retransmit(sk)) tcp_for_write_queue_from() 2800 if (tcp_retransmit_skb(sk, skb)) tcp_for_write_queue_from() 2803 NET_INC_STATS_BH(sock_net(sk), mib_idx); tcp_for_write_queue_from() 2805 if (tcp_in_cwnd_reduction(sk)) tcp_for_write_queue_from() 2808 if (skb == tcp_write_queue_head(sk)) tcp_for_write_queue_from() 2809 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, tcp_for_write_queue_from() 2810 inet_csk(sk)->icsk_rto, tcp_for_write_queue_from() 2820 static void sk_forced_wmem_schedule(struct sock *sk, int size) sk_forced_wmem_schedule() argument 2824 if (size <= sk->sk_forward_alloc) sk_forced_wmem_schedule() 2827 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; sk_forced_wmem_schedule() 2828 sk_memory_allocated_add(sk, amt, &status); sk_forced_wmem_schedule() 2834 void tcp_send_fin(struct sock *sk) tcp_send_fin() argument 2836 struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); tcp_send_fin() 2837 struct tcp_sock *tp = tcp_sk(sk); tcp_send_fin() 2844 if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) { tcp_send_fin() 2849 if (!tcp_send_head(sk)) { tcp_send_fin() 2860 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); tcp_send_fin() 2867 sk_forced_wmem_schedule(sk, skb->truesize); tcp_send_fin() 2871 tcp_queue_skb(sk, skb); tcp_send_fin() 2873 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); tcp_send_fin() 2881 void tcp_send_active_reset(struct sock *sk, gfp_t priority) tcp_send_active_reset() argument 2888 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); tcp_send_active_reset() 2894 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), tcp_send_active_reset() 2898 if (tcp_transmit_skb(sk, skb, 0, priority)) tcp_send_active_reset() 2899 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); tcp_send_active_reset() 2901 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); tcp_send_active_reset() 2910 int tcp_send_synack(struct sock *sk) tcp_send_synack() argument 2914 skb = tcp_write_queue_head(sk); tcp_send_synack() 2924 tcp_unlink_write_queue(skb, sk); tcp_send_synack() 2926 __tcp_add_write_queue_head(sk, nskb); tcp_send_synack() 2927 sk_wmem_free_skb(sk, skb); 
tcp_send_synack() 2928 sk->sk_wmem_queued += nskb->truesize; tcp_send_synack() 2929 sk_mem_charge(sk, nskb->truesize); tcp_send_synack() 2934 tcp_ecn_send_synack(sk, skb); tcp_send_synack() 2936 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); tcp_send_synack() 2941 * sk: listener socket 2948 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, tcp_make_synack() argument 2954 struct tcp_sock *tp = tcp_sk(sk); tcp_make_synack() 2961 skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC); tcp_make_synack() 2985 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); tcp_make_synack() 2987 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, tcp_make_synack() 2997 tcp_ecn_make_synack(req, th, sk); tcp_make_synack() 3014 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS); tcp_make_synack() 3030 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) tcp_ca_dst_init() argument 3032 struct inet_connection_sock *icsk = inet_csk(sk); tcp_ca_dst_init() 3050 static void tcp_connect_init(struct sock *sk) tcp_connect_init() argument 3052 const struct dst_entry *dst = __sk_dst_get(sk); tcp_connect_init() 3053 struct tcp_sock *tp = tcp_sk(sk); tcp_connect_init() 3063 if (tp->af_specific->md5_lookup(sk, sk)) tcp_connect_init() 3071 tcp_mtup_init(sk); tcp_connect_init() 3072 tcp_sync_mss(sk, dst_mtu(dst)); tcp_connect_init() 3074 tcp_ca_dst_init(sk, dst); tcp_connect_init() 3082 tcp_initialize_rcv_mss(sk); tcp_connect_init() 3085 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && tcp_connect_init() 3086 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) tcp_connect_init() 3087 tp->window_clamp = tcp_full_space(sk); tcp_connect_init() 3089 tcp_select_initial_window(tcp_full_space(sk), tcp_connect_init() 3100 sk->sk_err = 0; tcp_connect_init() 3101 sock_reset_flag(sk, SOCK_DONE); tcp_connect_init() 3116 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; tcp_connect_init() 3117 inet_csk(sk)->icsk_retransmits = 0; tcp_connect_init() 3121 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) tcp_connect_queue_skb() argument 3123 struct tcp_sock *tp = tcp_sk(sk); tcp_connect_queue_skb() 3128 __tcp_add_write_queue_tail(sk, skb); tcp_connect_queue_skb() 3129 sk->sk_wmem_queued += skb->truesize; tcp_connect_queue_skb() 3130 sk_mem_charge(sk, skb->truesize); tcp_connect_queue_skb() 3142 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) tcp_send_syn_data() argument 3144 struct tcp_sock *tp = tcp_sk(sk); tcp_send_syn_data() 3151 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, tcp_send_syn_data() 3171 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - tcp_send_syn_data() 3179 syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation); tcp_send_syn_data() 3201 tcp_connect_queue_skb(sk, syn_data); tcp_send_syn_data() 3203 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); tcp_send_syn_data() 3216 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); tcp_send_syn_data() 3224 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); tcp_send_syn_data() 3233 int tcp_connect(struct sock *sk) tcp_connect() argument 3235 struct tcp_sock *tp = tcp_sk(sk); tcp_connect() 3239 tcp_connect_init(sk); tcp_connect() 3242 tcp_finish_connect(sk, NULL); tcp_connect() 3246 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); tcp_connect() 3252 tcp_connect_queue_skb(sk, buff); tcp_connect() 3253 tcp_ecn_send_syn(sk, buff); tcp_connect() 3256 err = tp->fastopen_req ? 
tcp_send_syn_data(sk, buff) : tcp_connect() 3257 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); tcp_connect() 3266 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); tcp_connect() 3269 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, tcp_connect() 3270 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); tcp_connect() 3279 void tcp_send_delayed_ack(struct sock *sk) tcp_send_delayed_ack() argument 3281 struct inet_connection_sock *icsk = inet_csk(sk); tcp_send_delayed_ack() 3285 tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); tcp_send_delayed_ack() 3288 const struct tcp_sock *tp = tcp_sk(sk); tcp_send_delayed_ack() 3298 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements tcp_send_delayed_ack() 3322 tcp_send_ack(sk); tcp_send_delayed_ack() 3331 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); tcp_send_delayed_ack() 3335 void tcp_send_ack(struct sock *sk) tcp_send_ack() argument 3340 if (sk->sk_state == TCP_CLOSE) tcp_send_ack() 3343 tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); tcp_send_ack() 3349 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); tcp_send_ack() 3351 inet_csk_schedule_ack(sk); tcp_send_ack() 3352 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; tcp_send_ack() 3353 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, tcp_send_ack() 3360 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); tcp_send_ack() 3372 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); tcp_send_ack() 3387 static int tcp_xmit_probe_skb(struct sock *sk, int urgent) tcp_xmit_probe_skb() argument 3389 struct tcp_sock *tp = tcp_sk(sk); tcp_xmit_probe_skb() 3393 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); tcp_xmit_probe_skb() 3405 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); tcp_xmit_probe_skb() 3408 void tcp_send_window_probe(struct sock *sk) tcp_send_window_probe() argument 3410 if (sk->sk_state == TCP_ESTABLISHED) { tcp_send_window_probe() 3411 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; tcp_send_window_probe() 3412 tcp_xmit_probe_skb(sk, 0); tcp_send_window_probe() 3417 int tcp_write_wakeup(struct sock *sk) tcp_write_wakeup() argument 3419 struct tcp_sock *tp = tcp_sk(sk); tcp_write_wakeup() 3422 if (sk->sk_state == TCP_CLOSE) tcp_write_wakeup() 3425 skb = tcp_send_head(sk); tcp_write_wakeup() 3428 unsigned int mss = tcp_current_mss(sk); tcp_write_wakeup() 3442 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC)) tcp_write_wakeup() 3445 tcp_set_skb_tso_segs(sk, skb, mss); tcp_write_wakeup() 3448 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); tcp_write_wakeup() 3450 tcp_event_new_data_sent(sk, skb); tcp_write_wakeup() 3454 tcp_xmit_probe_skb(sk, 1); tcp_write_wakeup() 3455 return tcp_xmit_probe_skb(sk, 0); tcp_write_wakeup() 3462 void tcp_send_probe0(struct sock *sk) tcp_send_probe0() argument 3464 struct inet_connection_sock *icsk = inet_csk(sk); tcp_send_probe0() 3465 struct tcp_sock *tp = tcp_sk(sk); tcp_send_probe0() 3469 err = tcp_write_wakeup(sk); tcp_send_probe0() 3471 if (tp->packets_out || !tcp_send_head(sk)) { tcp_send_probe0() 3494 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, tcp_send_probe0() 3499 int tcp_rtx_synack(struct sock *sk, struct request_sock *req) tcp_rtx_synack() argument 3505 res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL); tcp_rtx_synack() 3507 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); tcp_rtx_synack() 3508 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); tcp_rtx_synack() 161 tcp_event_data_sent(struct tcp_sock *tp, struct sock *sk) tcp_event_data_sent() argument 354 tcp_ecn_make_synack(const 
struct request_sock *req, struct tcphdr *th, struct sock *sk) tcp_ecn_make_synack() argument
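The tcp_output.c hits above are the retransmission and connection-setup paths: __tcp_retransmit_skb()/tcp_xmit_retransmit_queue() resend data and bump TCP_MIB_RETRANSSEGS, tcp_send_fin()/tcp_send_active_reset() close or abort, and tcp_connect() arms the ICSK_TIME_RETRANS timer for the SYN. The per-connection effect of those counters is visible from user space through getsockopt(TCP_INFO); a minimal sketch, assuming an already-connected TCP fd:

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>   /* TCP_INFO, struct tcp_info */
    #include <sys/socket.h>

    /* Print retransmission state for a connected TCP socket; the
     * tcpi_total_retrans counter tracks the segments this connection
     * contributed to TCP_MIB_RETRANSSEGS. */
    static void print_retrans(int fd)
    {
            struct tcp_info ti;
            socklen_t len = sizeof(ti);

            memset(&ti, 0, sizeof(ti));
            if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
                    printf("retrans: %u total, %u in flight, rto %u us\n",
                           ti.tcpi_total_retrans, ti.tcpi_retrans, ti.tcpi_rto);
    }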
|
H A D | tcp_westwood.c | 60 static void tcp_westwood_init(struct sock *sk) tcp_westwood_init() argument 62 struct westwood *w = inet_csk_ca(sk); tcp_westwood_init() 72 w->snd_una = tcp_sk(sk)->snd_una; tcp_westwood_init() 102 static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt) tcp_westwood_pkts_acked() argument 104 struct westwood *w = inet_csk_ca(sk); tcp_westwood_pkts_acked() 115 static void westwood_update_window(struct sock *sk) westwood_update_window() argument 117 struct westwood *w = inet_csk_ca(sk); westwood_update_window() 125 w->snd_una = tcp_sk(sk)->snd_una; westwood_update_window() 161 static inline void westwood_fast_bw(struct sock *sk) westwood_fast_bw() argument 163 const struct tcp_sock *tp = tcp_sk(sk); westwood_fast_bw() 164 struct westwood *w = inet_csk_ca(sk); westwood_fast_bw() 166 westwood_update_window(sk); westwood_fast_bw() 178 static inline u32 westwood_acked_count(struct sock *sk) westwood_acked_count() argument 180 const struct tcp_sock *tp = tcp_sk(sk); westwood_acked_count() 181 struct westwood *w = inet_csk_ca(sk); westwood_acked_count() 215 static u32 tcp_westwood_bw_rttmin(const struct sock *sk) tcp_westwood_bw_rttmin() argument 217 const struct tcp_sock *tp = tcp_sk(sk); tcp_westwood_bw_rttmin() 218 const struct westwood *w = inet_csk_ca(sk); tcp_westwood_bw_rttmin() 223 static void tcp_westwood_ack(struct sock *sk, u32 ack_flags) tcp_westwood_ack() argument 226 struct westwood *w = inet_csk_ca(sk); tcp_westwood_ack() 228 westwood_update_window(sk); tcp_westwood_ack() 229 w->bk += westwood_acked_count(sk); tcp_westwood_ack() 235 westwood_fast_bw(sk); tcp_westwood_ack() 238 static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) tcp_westwood_event() argument 240 struct tcp_sock *tp = tcp_sk(sk); tcp_westwood_event() 241 struct westwood *w = inet_csk_ca(sk); tcp_westwood_event() 245 tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); tcp_westwood_event() 248 tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); tcp_westwood_event() 259 static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr, tcp_westwood_info() argument 262 const struct westwood *ca = inet_csk_ca(sk); tcp_westwood_info()
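tcp_westwood.c is one of the pluggable congestion-control modules indexed in this section; any of them can be selected per socket with the TCP_CONGESTION option, provided the module is built in or loaded. A runnable sketch (setsockopt fails with ENOENT if "westwood" is unavailable):

    #include <stdio.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>   /* TCP_CONGESTION */
    #include <sys/socket.h>

    int main(void)
    {
            char cur[16];
            socklen_t len = sizeof(cur);
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0)
                    return 1;
            /* request westwood for this socket only */
            if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "westwood", 8))
                    perror("TCP_CONGESTION");
            if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cur, &len) == 0)
                    printf("using %.*s\n", (int)len, cur);
            return 0;
    }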
|
H A D | ip_sockglue.c | 157 struct inet_sock *inet = inet_sk(skb->sk); ip_cmsg_recv_offset() 317 int ip_ra_control(struct sock *sk, unsigned char on, ip_ra_control() argument 323 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW) ip_ra_control() 333 if (ra->sk == sk) { ip_ra_control() 339 /* dont let ip_call_ra_chain() use sk again */ ip_ra_control() 340 ra->sk = NULL; ip_ra_control() 345 ra->destructor(sk); ip_ra_control() 347 * Delay sock_put(sk) and kfree(ra) after one rcu grace ip_ra_control() 351 ra->saved_sk = sk; ip_ra_control() 360 new_ra->sk = sk; ip_ra_control() 365 sock_hold(sk); ip_ra_control() 371 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, ip_icmp_error() argument 394 if (sock_queue_err_skb(sk, skb) == 0) ip_icmp_error() 400 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info) ip_local_error() argument 402 struct inet_sock *inet = inet_sk(sk); ip_local_error() 433 if (sock_queue_err_skb(sk, skb)) ip_local_error() 451 static bool ipv4_datagram_support_cmsg(const struct sock *sk, ipv4_datagram_support_cmsg() argument 467 if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || ipv4_datagram_support_cmsg() 480 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) ip_recv_error() argument 492 WARN_ON_ONCE(sk->sk_family == AF_INET6); ip_recv_error() 495 skb = sock_dequeue_err_skb(sk); ip_recv_error() 508 sock_recv_timestamp(msg, sk, skb); ip_recv_error() 525 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) { ip_recv_error() 528 if (inet_sk(sk)->cmsg_flags) ip_recv_error() 572 static int do_ip_setsockopt(struct sock *sk, int level, do_ip_setsockopt() argument 575 struct inet_sock *inet = inet_sk(sk); do_ip_setsockopt() 617 return ip_mroute_setsockopt(sk, optname, optval, optlen); do_ip_setsockopt() 622 lock_sock(sk); do_ip_setsockopt() 631 err = ip_options_get_from_user(sock_net(sk), &opt, do_ip_setsockopt() 636 sock_owned_by_user(sk)); do_ip_setsockopt() 638 struct inet_connection_sock *icsk = inet_csk(sk); do_ip_setsockopt() 640 if (sk->sk_family == PF_INET || do_ip_setsockopt() 641 (!((1 << sk->sk_state) & do_ip_setsockopt() 649 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); do_ip_setsockopt() 704 inet_inc_convert_csum(sk); do_ip_setsockopt() 709 inet_dec_convert_csum(sk); do_ip_setsockopt() 715 if (sk->sk_type == SOCK_STREAM) { do_ip_setsockopt() 721 sk->sk_priority = rt_tos2priority(val); do_ip_setsockopt() 722 sk_dst_reset(sk); do_ip_setsockopt() 733 if (sk->sk_type != SOCK_RAW) { do_ip_setsockopt() 740 if (sk->sk_type != SOCK_RAW) { do_ip_setsockopt() 754 skb_queue_purge(&sk->sk_error_queue); do_ip_setsockopt() 757 if (sk->sk_type == SOCK_STREAM) do_ip_setsockopt() 787 dev = dev_get_by_index(sock_net(sk), ifindex); do_ip_setsockopt() 794 if (sk->sk_bound_dev_if) do_ip_setsockopt() 806 if (sk->sk_type == SOCK_STREAM) do_ip_setsockopt() 839 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr); do_ip_setsockopt() 843 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex); do_ip_setsockopt() 852 if (sk->sk_bound_dev_if && do_ip_setsockopt() 853 mreq.imr_ifindex != sk->sk_bound_dev_if) do_ip_setsockopt() 868 if (inet_sk(sk)->is_icsk) do_ip_setsockopt() 884 err = ip_mc_join_group(sk, &mreq); do_ip_setsockopt() 886 err = ip_mc_leave_group(sk, &mreq); do_ip_setsockopt() 921 err = ip_mc_msfilter(sk, msf, 0); do_ip_setsockopt() 951 err = ip_mc_join_group(sk, &mreq); do_ip_setsockopt() 960 err = ip_mc_source(add, omode, sk, &mreqs, 0); do_ip_setsockopt() 983 err = 
ip_mc_join_group(sk, &mreq); do_ip_setsockopt() 985 err = ip_mc_leave_group(sk, &mreq); do_ip_setsockopt() 1028 err = ip_mc_join_group(sk, &mreq); do_ip_setsockopt() 1038 err = ip_mc_source(add, omode, sk, &mreqs, do_ip_setsockopt() 1101 err = ip_mc_msfilter(sk, msf, ifindex); do_ip_setsockopt() 1115 err = ip_ra_control(sk, val ? 1 : 0, NULL); do_ip_setsockopt() 1127 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) do_ip_setsockopt() 1129 err = xfrm_user_policy(sk, optname, optval, optlen); do_ip_setsockopt() 1133 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && do_ip_setsockopt() 1134 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { do_ip_setsockopt() 1155 release_sock(sk); do_ip_setsockopt() 1161 release_sock(sk); do_ip_setsockopt() 1169 * @sk: socket 1176 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) ipv4_pktinfo_prepare() argument 1179 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) || ipv4_pktinfo_prepare() 1180 ipv6_sk_rxinfo(sk); ipv4_pktinfo_prepare() 1192 int ip_setsockopt(struct sock *sk, int level, ip_setsockopt() argument 1200 err = do_ip_setsockopt(sk, level, optname, optval, optlen); ip_setsockopt() 1207 lock_sock(sk); ip_setsockopt() 1208 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); ip_setsockopt() 1209 release_sock(sk); ip_setsockopt() 1217 int compat_ip_setsockopt(struct sock *sk, int level, int optname, compat_ip_setsockopt() argument 1226 return compat_mc_setsockopt(sk, level, optname, optval, optlen, compat_ip_setsockopt() 1229 err = do_ip_setsockopt(sk, level, optname, optval, optlen); compat_ip_setsockopt() 1236 lock_sock(sk); compat_ip_setsockopt() 1237 err = compat_nf_setsockopt(sk, PF_INET, optname, compat_ip_setsockopt() 1239 release_sock(sk); compat_ip_setsockopt() 1252 static int do_ip_getsockopt(struct sock *sk, int level, int optname, do_ip_getsockopt() argument 1255 struct inet_sock *inet = inet_sk(sk); do_ip_getsockopt() 1263 return ip_mroute_getsockopt(sk, optname, optval, optlen); do_ip_getsockopt() 1270 lock_sock(sk); do_ip_getsockopt() 1280 sock_owned_by_user(sk)); do_ip_getsockopt() 1286 release_sock(sk); do_ip_getsockopt() 1345 dst = sk_dst_get(sk); do_ip_getsockopt() 1351 release_sock(sk); do_ip_getsockopt() 1373 release_sock(sk); do_ip_getsockopt() 1387 release_sock(sk); do_ip_getsockopt() 1391 release_sock(sk); do_ip_getsockopt() 1394 err = ip_mc_msfget(sk, &msf, do_ip_getsockopt() 1396 release_sock(sk); do_ip_getsockopt() 1405 release_sock(sk); do_ip_getsockopt() 1409 release_sock(sk); do_ip_getsockopt() 1412 err = ip_mc_gsfget(sk, &gsf, do_ip_getsockopt() 1415 release_sock(sk); do_ip_getsockopt() 1425 release_sock(sk); do_ip_getsockopt() 1427 if (sk->sk_type != SOCK_STREAM) do_ip_getsockopt() 1463 release_sock(sk); do_ip_getsockopt() 1466 release_sock(sk); do_ip_getsockopt() 1485 int ip_getsockopt(struct sock *sk, int level, ip_getsockopt() argument 1490 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0); ip_getsockopt() 1500 lock_sock(sk); ip_getsockopt() 1501 err = nf_getsockopt(sk, PF_INET, optname, optval, ip_getsockopt() 1503 release_sock(sk); ip_getsockopt() 1514 int compat_ip_getsockopt(struct sock *sk, int level, int optname, compat_ip_getsockopt() argument 1520 return compat_mc_getsockopt(sk, level, optname, optval, optlen, compat_ip_getsockopt() 1523 err = do_ip_getsockopt(sk, level, optname, optval, optlen, compat_ip_getsockopt() 1535 lock_sock(sk); compat_ip_getsockopt() 1536 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); 
compat_ip_getsockopt() 1537 release_sock(sk); compat_ip_getsockopt()
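ip_sockglue.c implements the IP-level socket options and the error queue: ip_icmp_error()/ip_local_error() queue errors, and ip_recv_error() hands them back once IP_RECVERR is set. A minimal sketch of the user-space side, assuming a UDP socket on which IP_RECVERR was enabled before a send failed (e.g. with an ICMP port-unreachable):

    #include <stdio.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/errqueue.h>   /* struct sock_extended_err */

    /* Drain one queued error from the socket's error queue. */
    static void read_one_error(int fd)
    {
            char cbuf[512], data[256];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cm;

            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return;
            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
                    if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
                            struct sock_extended_err *ee =
                                    (struct sock_extended_err *)CMSG_DATA(cm);
                            printf("err %d origin %u type %u code %u\n",
                                   ee->ee_errno, ee->ee_origin,
                                   ee->ee_type, ee->ee_code);
                    }
    }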
|
H A D | ip_output.c | 25 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit 94 int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb) __ip_local_out_sk() argument 100 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL, __ip_local_out_sk() 106 return __ip_local_out_sk(skb->sk, skb); __ip_local_out() 109 int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) ip_local_out_sk() argument 115 err = dst_output_sk(sk, skb); ip_local_out_sk() 134 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, ip_build_and_send_pkt() argument 137 struct inet_sock *inet = inet_sk(sk); ip_build_and_send_pkt() 148 if (ip_dont_fragment(sk, &rt->dst)) ip_build_and_send_pkt() 155 iph->protocol = sk->sk_protocol; ip_build_and_send_pkt() 156 ip_select_ident(sock_net(sk), skb, sk); ip_build_and_send_pkt() local 163 skb->priority = sk->sk_priority; ip_build_and_send_pkt() 164 skb->mark = sk->sk_mark; ip_build_and_send_pkt() 171 static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb) ip_finish_output2() argument 194 if (skb->sk) ip_finish_output2() 195 skb_set_owner_w(skb2, skb->sk); ip_finish_output2() 219 static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) ip_finish_output_gso() argument 228 return ip_finish_output2(sk, skb); ip_finish_output_gso() 252 err = ip_fragment(sk, segs, ip_finish_output2); ip_finish_output_gso() 262 static int ip_finish_output(struct sock *sk, struct sk_buff *skb) ip_finish_output() argument 268 return dst_output_sk(sk, skb); ip_finish_output() 272 return ip_finish_output_gso(sk, skb); ip_finish_output() 275 return ip_fragment(sk, skb, ip_finish_output2); ip_finish_output() 277 return ip_finish_output2(sk, skb); ip_finish_output() 280 int ip_mc_output(struct sock *sk, struct sk_buff *skb) ip_mc_output() argument 298 if (sk_mc_loop(sk) ip_mc_output() 316 sk, newskb, NULL, newskb->dev, ip_mc_output() 331 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, newskb, ip_mc_output() 335 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL, ip_mc_output() 340 int ip_output(struct sock *sk, struct sk_buff *skb) ip_output() argument 349 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, ip_output() 369 /* Note: skb->sk can be different from sk, in case of tunnels */ ip_queue_xmit() 370 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) ip_queue_xmit() argument 372 struct inet_sock *inet = inet_sk(sk); ip_queue_xmit() 390 rt = (struct rtable *)__sk_dst_check(sk, 0); ip_queue_xmit() 403 rt = ip_route_output_ports(sock_net(sk), fl4, sk, ip_queue_xmit() 407 sk->sk_protocol, ip_queue_xmit() 408 RT_CONN_FLAGS(sk), ip_queue_xmit() 409 sk->sk_bound_dev_if); ip_queue_xmit() 412 sk_setup_caps(sk, &rt->dst); ip_queue_xmit() 425 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) ip_queue_xmit() 430 iph->protocol = sk->sk_protocol; ip_queue_xmit() 440 ip_select_ident_segs(sock_net(sk), skb, sk, ip_queue_xmit() local 443 /* TODO : should we use skb->sk here instead of sk ? 
*/ ip_queue_xmit() 444 skb->priority = sk->sk_priority; ip_queue_xmit() 445 skb->mark = sk->sk_mark; ip_queue_xmit() 453 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); ip_queue_xmit() 489 int ip_fragment(struct sock *sk, struct sk_buff *skb, ip_fragment() argument 561 BUG_ON(frag->sk); skb_walk_frags() 562 if (skb->sk) { skb_walk_frags() 563 frag->sk = skb->sk; skb_walk_frags() 603 err = output(sk, skb); 632 frag2->sk = NULL; skb_walk_frags() 693 if (skb->sk) 694 skb_set_owner_w(skb2, skb->sk); 740 err = output(sk, skb2); 786 static inline int ip_ufo_append_data(struct sock *sk, ip_ufo_append_data() argument 802 skb = sock_alloc_send_skb(sk, ip_ufo_append_data() 834 return skb_append_datato_frags(sk, skb, getfrag, from, ip_ufo_append_data() 838 static int __ip_append_data(struct sock *sk, __ip_append_data() argument 848 struct inet_sock *inet = inet_sk(sk); __ip_append_data() 868 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) __ip_append_data() 869 tskey = sk->sk_tskey++; __ip_append_data() 875 maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu; __ip_append_data() 878 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, __ip_append_data() 895 (sk->sk_protocol == IPPROTO_UDP) && __ip_append_data() 897 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { __ip_append_data() 898 err = ip_ufo_append_data(sk, queue, getfrag, from, length, __ip_append_data() 961 skb = sock_alloc_send_skb(sk, __ip_append_data() 966 if (atomic_read(&sk->sk_wmem_alloc) <= __ip_append_data() 967 2 * sk->sk_sndbuf) __ip_append_data() 968 skb = sock_wmalloc(sk, __ip_append_data() 970 sk->sk_allocation); __ip_append_data() 1046 if (!sk_page_frag_refill(sk, pfrag)) __ip_append_data() 1071 atomic_add(copy, &sk->sk_wmem_alloc); __ip_append_data() 1083 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); __ip_append_data() 1087 static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, ip_setup_cork() argument 1100 sk->sk_allocation); ip_setup_cork() 1115 cork->fragsize = ip_sk_use_pmtu(sk) ? ip_setup_cork() 1138 int ip_append_data(struct sock *sk, struct flowi4 *fl4, ip_append_data() argument 1145 struct inet_sock *inet = inet_sk(sk); ip_append_data() 1151 if (skb_queue_empty(&sk->sk_write_queue)) { ip_append_data() 1152 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp); ip_append_data() 1159 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, ip_append_data() 1160 sk_page_frag(sk), getfrag, ip_append_data() 1164 ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, ip_append_page() argument 1167 struct inet_sock *inet = inet_sk(sk); ip_append_page() 1184 if (skb_queue_empty(&sk->sk_write_queue)) ip_append_page() 1200 maxnonfragsize = ip_sk_ignore_df(sk) ? 
0xFFFF : mtu; ip_append_page() 1203 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, ip_append_page() 1208 skb = skb_peek_tail(&sk->sk_write_queue); ip_append_page() 1214 (sk->sk_protocol == IPPROTO_UDP) && ip_append_page() 1240 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); ip_append_page() 1273 __skb_queue_tail(&sk->sk_write_queue, skb); ip_append_page() 1299 atomic_add(len, &sk->sk_wmem_alloc); ip_append_page() 1307 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); ip_append_page() 1324 struct sk_buff *__ip_make_skb(struct sock *sk, __ip_make_skb() argument 1331 struct inet_sock *inet = inet_sk(sk); __ip_make_skb() 1332 struct net *net = sock_net(sk); __ip_make_skb() 1355 tmp_skb->sk = NULL; __ip_make_skb() 1362 skb->ignore_df = ip_sk_ignore_df(sk); __ip_make_skb() 1370 ip_dont_fragment(sk, &rt->dst))) __ip_make_skb() 1389 iph->protocol = sk->sk_protocol; __ip_make_skb() 1391 ip_select_ident(net, skb, sk); __ip_make_skb() 1398 skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority; __ip_make_skb() 1399 skb->mark = sk->sk_mark; __ip_make_skb() 1431 int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4) ip_push_pending_frames() argument 1435 skb = ip_finish_skb(sk, fl4); ip_push_pending_frames() 1440 return ip_send_skb(sock_net(sk), skb); ip_push_pending_frames() 1446 static void __ip_flush_pending_frames(struct sock *sk, __ip_flush_pending_frames() argument 1458 void ip_flush_pending_frames(struct sock *sk) ip_flush_pending_frames() argument 1460 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base); ip_flush_pending_frames() 1463 struct sk_buff *ip_make_skb(struct sock *sk, ip_make_skb() argument 1483 err = ip_setup_cork(sk, &cork, ipc, rtp); ip_make_skb() 1487 err = __ip_append_data(sk, fl4, &queue, &cork, ip_make_skb() 1491 __ip_flush_pending_frames(sk, &queue, &cork); ip_make_skb() 1495 return __ip_make_skb(sk, fl4, &queue, &cork); ip_make_skb() 1515 void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, ip_send_unicast_reply() argument 1525 struct net *net = sock_net(sk); ip_send_unicast_reply() 1557 inet_sk(sk)->tos = arg->tos; ip_send_unicast_reply() 1559 sk->sk_priority = skb->priority; ip_send_unicast_reply() 1560 sk->sk_protocol = ip_hdr(skb)->protocol; ip_send_unicast_reply() 1561 sk->sk_bound_dev_if = arg->bound_dev_if; ip_send_unicast_reply() 1562 sk->sk_sndbuf = sysctl_wmem_default; ip_send_unicast_reply() 1563 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, ip_send_unicast_reply() 1566 ip_flush_pending_frames(sk); ip_send_unicast_reply() 1570 nskb = skb_peek(&sk->sk_write_queue); ip_send_unicast_reply() 1578 ip_push_pending_frames(sk, &fl4); ip_send_unicast_reply()
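ip_output.c builds and emits IPv4 packets; ip_mc_output() in particular consults sk_mc_loop(sk) to decide whether a multicast datagram is also looped back to local receivers, and both that and the TTL are plain per-socket options. A runnable sketch that sends one datagram to an illustrative group (239.1.2.3:5000 is an arbitrary choice):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            unsigned char ttl = 4;    /* hop limit for the group traffic */
            unsigned char loop = 0;   /* don't deliver a copy to this host */
            struct sockaddr_in grp = {
                    .sin_family = AF_INET,
                    .sin_port = htons(5000),
            };

            if (fd < 0)
                    return 1;
            inet_pton(AF_INET, "239.1.2.3", &grp.sin_addr);
            setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
            setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop));
            sendto(fd, "ping", 4, 0, (struct sockaddr *)&grp, sizeof(grp));
            close(fd);
            return 0;
    }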
|
H A D | tcp_dctcp.c | 84 static void dctcp_init(struct sock *sk) dctcp_init() argument 86 const struct tcp_sock *tp = tcp_sk(sk); dctcp_init() 89 (sk->sk_state == TCP_LISTEN || dctcp_init() 90 sk->sk_state == TCP_CLOSE)) { dctcp_init() 91 struct dctcp *ca = inet_csk_ca(sk); dctcp_init() 106 * ECT from sk since it is set during 3WHS for DCTCP. dctcp_init() 108 inet_csk(sk)->icsk_ca_ops = &dctcp_reno; dctcp_init() 109 INET_ECN_dontxmit(sk); dctcp_init() 112 static u32 dctcp_ssthresh(struct sock *sk) dctcp_ssthresh() argument 114 const struct dctcp *ca = inet_csk_ca(sk); dctcp_ssthresh() 115 struct tcp_sock *tp = tcp_sk(sk); dctcp_ssthresh() 126 static void dctcp_ce_state_0_to_1(struct sock *sk) dctcp_ce_state_0_to_1() argument 128 struct dctcp *ca = inet_csk_ca(sk); dctcp_ce_state_0_to_1() 129 struct tcp_sock *tp = tcp_sk(sk); dctcp_ce_state_0_to_1() 144 tcp_send_ack(sk); dctcp_ce_state_0_to_1() 156 static void dctcp_ce_state_1_to_0(struct sock *sk) dctcp_ce_state_1_to_0() argument 158 struct dctcp *ca = inet_csk_ca(sk); dctcp_ce_state_1_to_0() 159 struct tcp_sock *tp = tcp_sk(sk); dctcp_ce_state_1_to_0() 174 tcp_send_ack(sk); dctcp_ce_state_1_to_0() 186 static void dctcp_update_alpha(struct sock *sk, u32 flags) dctcp_update_alpha() argument 188 const struct tcp_sock *tp = tcp_sk(sk); dctcp_update_alpha() 189 struct dctcp *ca = inet_csk_ca(sk); dctcp_update_alpha() 196 acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss; dctcp_update_alpha() 225 static void dctcp_state(struct sock *sk, u8 new_state) dctcp_state() argument 228 struct dctcp *ca = inet_csk_ca(sk); dctcp_state() 242 static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev) dctcp_update_ack_reserved() argument 244 struct dctcp *ca = inet_csk_ca(sk); dctcp_update_ack_reserved() 261 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) dctcp_cwnd_event() argument 265 dctcp_ce_state_0_to_1(sk); dctcp_cwnd_event() 268 dctcp_ce_state_1_to_0(sk); dctcp_cwnd_event() 272 dctcp_update_ack_reserved(sk, ev); dctcp_cwnd_event() 280 static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, dctcp_get_info() argument 283 const struct dctcp *ca = inet_csk_ca(sk); dctcp_get_info() 291 if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) { dctcp_get_info()
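dctcp_update_alpha() maintains DCTCP's moving estimate of the ECN-marked fraction, alpha <- (1 - g)*alpha + g*F, and dctcp_ssthresh() then cuts cwnd by alpha/2 instead of the Reno half. A plain fixed-point restatement of that arithmetic; the scale and gain constants mirror the usual defaults (alpha scaled to 1024, g = 1/16) but are illustrative, not the module's internals:

    /* alpha <- (1 - g)*alpha + g*F, with F the fraction of acked bytes
     * that carried an ECN mark, in fixed point where 1024 == 1.0. */
    static unsigned int dctcp_alpha_step(unsigned int alpha,
                                         unsigned int marked_bytes,
                                         unsigned int acked_bytes)
    {
            unsigned int f = acked_bytes ?
                    (unsigned int)((unsigned long long)marked_bytes * 1024 /
                                   acked_bytes) : 0;

            return alpha - (alpha >> 4) + (f >> 4);   /* g = 1/16 */
    }

    /* On congestion: cwnd <- cwnd * (1 - alpha/2); the module also
     * clamps the result to at least 2 segments. */
    static unsigned int dctcp_cwnd_after_loss(unsigned int cwnd,
                                              unsigned int alpha)
    {
            return cwnd - (unsigned int)
                   (((unsigned long long)cwnd * alpha) >> 11);  /* /1024/2 */
    }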
|
H A D | tcp_htcp.c | 67 static u32 htcp_cwnd_undo(struct sock *sk) htcp_cwnd_undo() argument 69 const struct tcp_sock *tp = tcp_sk(sk); htcp_cwnd_undo() 70 struct htcp *ca = inet_csk_ca(sk); htcp_cwnd_undo() 82 static inline void measure_rtt(struct sock *sk, u32 srtt) measure_rtt() argument 84 const struct inet_connection_sock *icsk = inet_csk(sk); measure_rtt() 85 struct htcp *ca = inet_csk_ca(sk); measure_rtt() 101 static void measure_achieved_throughput(struct sock *sk, measure_achieved_throughput() argument 104 const struct inet_connection_sock *icsk = inet_csk(sk); measure_achieved_throughput() 105 const struct tcp_sock *tp = tcp_sk(sk); measure_achieved_throughput() 106 struct htcp *ca = inet_csk_ca(sk); measure_achieved_throughput() 113 measure_rtt(sk, usecs_to_jiffies(rtt)); measure_achieved_throughput() 208 static void htcp_param_update(struct sock *sk) htcp_param_update() argument 210 struct htcp *ca = inet_csk_ca(sk); htcp_param_update() 222 static u32 htcp_recalc_ssthresh(struct sock *sk) htcp_recalc_ssthresh() argument 224 const struct tcp_sock *tp = tcp_sk(sk); htcp_recalc_ssthresh() 225 const struct htcp *ca = inet_csk_ca(sk); htcp_recalc_ssthresh() 227 htcp_param_update(sk); htcp_recalc_ssthresh() 231 static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) htcp_cong_avoid() argument 233 struct tcp_sock *tp = tcp_sk(sk); htcp_cong_avoid() 234 struct htcp *ca = inet_csk_ca(sk); htcp_cong_avoid() 236 if (!tcp_is_cwnd_limited(sk)) htcp_cong_avoid() 257 static void htcp_init(struct sock *sk) htcp_init() argument 259 struct htcp *ca = inet_csk_ca(sk); htcp_init() 268 static void htcp_state(struct sock *sk, u8 new_state) htcp_state() argument 273 struct htcp *ca = inet_csk_ca(sk); htcp_state() 284 htcp_reset(inet_csk_ca(sk)); htcp_state()
|
H A D | inet_diag.c | 69 static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk) inet_diag_msg_common_fill() argument 71 r->idiag_family = sk->sk_family; inet_diag_msg_common_fill() 73 r->id.idiag_sport = htons(sk->sk_num); inet_diag_msg_common_fill() 74 r->id.idiag_dport = sk->sk_dport; inet_diag_msg_common_fill() 75 r->id.idiag_if = sk->sk_bound_dev_if; inet_diag_msg_common_fill() 76 sock_diag_save_cookie(sk, r->id.idiag_cookie); inet_diag_msg_common_fill() 79 if (sk->sk_family == AF_INET6) { inet_diag_msg_common_fill() 80 *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr; inet_diag_msg_common_fill() 81 *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr; inet_diag_msg_common_fill() 88 r->id.idiag_src[0] = sk->sk_rcv_saddr; inet_diag_msg_common_fill() 89 r->id.idiag_dst[0] = sk->sk_daddr; inet_diag_msg_common_fill() 107 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, inet_sk_diag_fill() argument 113 const struct inet_sock *inet = inet_sk(sk); inet_sk_diag_fill() 131 BUG_ON(!sk_fullsock(sk)); inet_sk_diag_fill() 133 inet_diag_msg_common_fill(r, sk); inet_sk_diag_fill() 134 r->idiag_state = sk->sk_state; inet_sk_diag_fill() 138 if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown)) inet_sk_diag_fill() 152 inet6_sk(sk)->tclass) < 0) inet_sk_diag_fill() 157 r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); inet_sk_diag_fill() 158 r->idiag_inode = sock_i_ino(sk); inet_sk_diag_fill() 162 .idiag_rmem = sk_rmem_alloc_get(sk), inet_sk_diag_fill() 163 .idiag_wmem = sk->sk_wmem_queued, inet_sk_diag_fill() 164 .idiag_fmem = sk->sk_forward_alloc, inet_sk_diag_fill() 165 .idiag_tmem = sk_wmem_alloc_get(sk), inet_sk_diag_fill() 173 if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO)) inet_sk_diag_fill() 177 handler->idiag_get_info(sk, r, NULL); inet_sk_diag_fill() 193 } else if (timer_pending(&sk->sk_timer)) { inet_sk_diag_fill() 196 r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires); inet_sk_diag_fill() 224 handler->idiag_get_info(sk, r, info); inet_sk_diag_fill() 226 if (sk->sk_state < TCP_TIME_WAIT) { inet_sk_diag_fill() 234 sz = ca_ops->get_info(sk, ext, &attr, &info); inet_sk_diag_fill() 250 static int inet_csk_diag_fill(struct sock *sk, inet_csk_diag_fill() argument 257 return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, inet_csk_diag_fill() 261 static int inet_twsk_diag_fill(struct sock *sk, inet_twsk_diag_fill() argument 266 struct inet_timewait_sock *tw = inet_twsk(sk); inet_twsk_diag_fill() 283 inet_diag_msg_common_fill(r, sk); inet_twsk_diag_fill() 298 static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, inet_req_diag_fill() argument 312 inet_diag_msg_common_fill(r, sk); inet_req_diag_fill() 315 r->idiag_retrans = inet_reqsk(sk)->num_retrans; inet_req_diag_fill() 320 tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies; inet_req_diag_fill() 331 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, sk_diag_fill() argument 337 if (sk->sk_state == TCP_TIME_WAIT) sk_diag_fill() 338 return inet_twsk_diag_fill(sk, skb, portid, seq, sk_diag_fill() 341 if (sk->sk_state == TCP_NEW_SYN_RECV) sk_diag_fill() 342 return inet_req_diag_fill(sk, skb, portid, seq, sk_diag_fill() 345 return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, sk_diag_fill() 354 struct net *net = sock_net(in_skb->sk); inet_diag_dump_one_icsk() 356 struct sock *sk; inet_diag_dump_one_icsk() local 361 sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0], inet_diag_dump_one_icsk() 366 sk = inet6_lookup(net, hashinfo, 
inet_diag_dump_one_icsk() 377 if (!sk) inet_diag_dump_one_icsk() 380 err = sock_diag_check_cookie(sk, req->id.idiag_cookie); inet_diag_dump_one_icsk() 390 err = sk_diag_fill(sk, rep, req, inet_diag_dump_one_icsk() 391 sk_user_ns(NETLINK_CB(in_skb).sk), inet_diag_dump_one_icsk() 405 if (sk) inet_diag_dump_one_icsk() 406 sock_gen_put(sk); inet_diag_dump_one_icsk() 544 const struct sock *sk) entry_fill_addrs() 547 if (sk->sk_family == AF_INET6) { entry_fill_addrs() 548 entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32; entry_fill_addrs() 549 entry->daddr = sk->sk_v6_daddr.s6_addr32; entry_fill_addrs() 553 entry->saddr = &sk->sk_rcv_saddr; entry_fill_addrs() 554 entry->daddr = &sk->sk_daddr; entry_fill_addrs() 558 int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) inet_diag_bc_sk() argument 560 struct inet_sock *inet = inet_sk(sk); inet_diag_bc_sk() 566 entry.family = sk->sk_family; inet_diag_bc_sk() 567 entry_fill_addrs(&entry, sk); inet_diag_bc_sk() 570 entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0; inet_diag_bc_sk() 688 static int inet_csk_diag_dump(struct sock *sk, inet_csk_diag_dump() argument 694 if (!inet_diag_bc_sk(bc, sk)) inet_csk_diag_dump() 697 return inet_csk_diag_fill(sk, skb, r, inet_csk_diag_dump() 698 sk_user_ns(NETLINK_CB(cb->skb).sk), inet_csk_diag_dump() 729 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, inet_diag_dump_reqs() argument 734 struct inet_connection_sock *icsk = inet_csk(sk); inet_diag_dump_reqs() 735 struct inet_sock *inet = inet_sk(sk); inet_diag_dump_reqs() 747 entry.family = sk->sk_family; inet_diag_dump_reqs() 757 entry.userlocks = sk->sk_userlocks; inet_diag_dump_reqs() 806 struct net *net = sock_net(skb->sk); inet_diag_dump_icsk() 819 struct sock *sk; inet_diag_dump_icsk() local 824 sk_nulls_for_each(sk, node, &ilb->head) { inet_diag_dump_icsk() 825 struct inet_sock *inet = inet_sk(sk); inet_diag_dump_icsk() 827 if (!net_eq(sock_net(sk), net)) inet_diag_dump_icsk() 836 sk->sk_family != r->sdiag_family) inet_diag_dump_icsk() 848 if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) { inet_diag_dump_icsk() 857 if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) { inet_diag_dump_icsk() 885 struct sock *sk; inet_diag_dump_icsk() local 896 sk_nulls_for_each(sk, node, &head->chain) { inet_diag_dump_icsk() 899 if (!net_eq(sock_net(sk), net)) inet_diag_dump_icsk() 903 state = (sk->sk_state == TCP_TIME_WAIT) ? inet_diag_dump_icsk() 904 inet_twsk(sk)->tw_substate : sk->sk_state; inet_diag_dump_icsk() 908 sk->sk_family != r->sdiag_family) inet_diag_dump_icsk() 910 if (r->id.idiag_sport != htons(sk->sk_num) && inet_diag_dump_icsk() 913 if (r->id.idiag_dport != sk->sk_dport && inet_diag_dump_icsk() 918 if (!inet_diag_bc_sk(bc, sk)) inet_diag_dump_icsk() 921 res = sk_diag_fill(sk, skb, r, inet_diag_dump_icsk() 922 sk_user_ns(NETLINK_CB(cb->skb).sk), inet_diag_dump_icsk() 1023 struct net *net = sock_net(skb->sk); inet_diag_rcv_msg_compat() 1054 struct net *net = sock_net(skb->sk); inet_diag_handler_dump() 543 entry_fill_addrs(struct inet_diag_entry *entry, const struct sock *sk) entry_fill_addrs() argument
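inet_diag.c is the kernel half of ss(8): inet_diag_dump_icsk() walks the listening and established hashes and sk_diag_fill() serialises each socket into an inet_diag_msg. The matching user-space query goes over NETLINK_SOCK_DIAG; a minimal sketch that requests all established IPv4 TCP sockets (parsing of the returned records is left out):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/netlink.h>
    #include <linux/sock_diag.h>
    #include <linux/inet_diag.h>

    int main(void)
    {
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
            struct {
                    struct nlmsghdr nlh;
                    struct inet_diag_req_v2 req;
            } q = {
                    .nlh = {
                            .nlmsg_len   = sizeof(q),
                            .nlmsg_type  = SOCK_DIAG_BY_FAMILY,
                            .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
                    },
                    .req = {
                            .sdiag_family   = AF_INET,
                            .sdiag_protocol = IPPROTO_TCP,
                            .idiag_states   = 1 << 1,  /* 1 << TCP_ESTABLISHED */
                    },
            };
            char buf[8192];
            ssize_t n;

            if (fd < 0 || send(fd, &q, sizeof(q), 0) < 0)
                    return 1;
            n = recv(fd, buf, sizeof(buf), 0);  /* nlmsghdr + inet_diag_msg stream */
            printf("got %zd bytes of diag records\n", n);
            close(fd);
            return 0;
    }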
|
H A D | tcp_hybla.c | 33 static inline void hybla_recalc_param (struct sock *sk) hybla_recalc_param() argument 35 struct hybla *ca = inet_csk_ca(sk); hybla_recalc_param() 38 tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC), hybla_recalc_param() 45 static void hybla_init(struct sock *sk) hybla_init() argument 47 struct tcp_sock *tp = tcp_sk(sk); hybla_init() 48 struct hybla *ca = inet_csk_ca(sk); hybla_init() 60 hybla_recalc_param(sk); hybla_init() 67 static void hybla_state(struct sock *sk, u8 ca_state) hybla_state() argument 69 struct hybla *ca = inet_csk_ca(sk); hybla_state() 89 static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked) hybla_cong_avoid() argument 91 struct tcp_sock *tp = tcp_sk(sk); hybla_cong_avoid() 92 struct hybla *ca = inet_csk_ca(sk); hybla_cong_avoid() 98 hybla_recalc_param(sk); hybla_cong_avoid() 102 if (!tcp_is_cwnd_limited(sk)) hybla_cong_avoid() 106 tcp_reno_cong_avoid(sk, ack, acked); hybla_cong_avoid() 111 hybla_recalc_param(sk); hybla_cong_avoid()
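hybla_recalc_param() computes Hybla's scaling factor rho, essentially max(srtt/rtt0, 1), where rtt0 is the reference round-trip the algorithm normalises to (the module default is 25 ms); a larger rho lets long-RTT connections grow cwnd as fast as a 25 ms one would. A plain restatement of that ratio:

    /* rho = max(srtt / rtt0, 1): how many reference RTTs fit in the
     * measured smoothed RTT.  srtt_us in microseconds, rtt0 in ms. */
    static unsigned int hybla_rho(unsigned int srtt_us, unsigned int rtt0_ms)
    {
            unsigned int rho = srtt_us / (rtt0_ms * 1000);

            return rho ? rho : 1;
    }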
|
H A D | tcp_illinois.c | 56 static void rtt_reset(struct sock *sk) rtt_reset() argument 58 struct tcp_sock *tp = tcp_sk(sk); rtt_reset() 59 struct illinois *ca = inet_csk_ca(sk); rtt_reset() 68 static void tcp_illinois_init(struct sock *sk) tcp_illinois_init() argument 70 struct illinois *ca = inet_csk_ca(sk); tcp_illinois_init() 81 rtt_reset(sk); tcp_illinois_init() 85 static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt) tcp_illinois_acked() argument 87 struct illinois *ca = inet_csk_ca(sk); tcp_illinois_acked() 220 static void update_params(struct sock *sk) update_params() argument 222 struct tcp_sock *tp = tcp_sk(sk); update_params() 223 struct illinois *ca = inet_csk_ca(sk); update_params() 236 rtt_reset(sk); update_params() 242 static void tcp_illinois_state(struct sock *sk, u8 new_state) tcp_illinois_state() argument 244 struct illinois *ca = inet_csk_ca(sk); tcp_illinois_state() 251 rtt_reset(sk); tcp_illinois_state() 258 static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked) tcp_illinois_cong_avoid() argument 260 struct tcp_sock *tp = tcp_sk(sk); tcp_illinois_cong_avoid() 261 struct illinois *ca = inet_csk_ca(sk); tcp_illinois_cong_avoid() 264 update_params(sk); tcp_illinois_cong_avoid() 267 if (!tcp_is_cwnd_limited(sk)) tcp_illinois_cong_avoid() 293 static u32 tcp_illinois_ssthresh(struct sock *sk) tcp_illinois_ssthresh() argument 295 struct tcp_sock *tp = tcp_sk(sk); tcp_illinois_ssthresh() 296 struct illinois *ca = inet_csk_ca(sk); tcp_illinois_ssthresh() 303 static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr, tcp_illinois_info() argument 306 const struct illinois *ca = inet_csk_ca(sk); tcp_illinois_info()
|
H A D | tcp_diag.c | 19 static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, tcp_diag_get_info() argument 22 const struct tcp_sock *tp = tcp_sk(sk); tcp_diag_get_info() 25 if (sk->sk_state == TCP_LISTEN) { tcp_diag_get_info() 26 r->idiag_rqueue = sk->sk_ack_backlog; tcp_diag_get_info() 27 r->idiag_wqueue = sk->sk_max_ack_backlog; tcp_diag_get_info() 33 tcp_get_info(sk, info); tcp_diag_get_info()
|
H A D | tcp_veno.c | 44 static inline void veno_enable(struct sock *sk) veno_enable() argument 46 struct veno *veno = inet_csk_ca(sk); veno_enable() 54 static inline void veno_disable(struct sock *sk) veno_disable() argument 56 struct veno *veno = inet_csk_ca(sk); veno_disable() 62 static void tcp_veno_init(struct sock *sk) tcp_veno_init() argument 64 struct veno *veno = inet_csk_ca(sk); tcp_veno_init() 68 veno_enable(sk); tcp_veno_init() 72 static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us) tcp_veno_pkts_acked() argument 74 struct veno *veno = inet_csk_ca(sk); tcp_veno_pkts_acked() 94 static void tcp_veno_state(struct sock *sk, u8 ca_state) tcp_veno_state() argument 97 veno_enable(sk); tcp_veno_state() 99 veno_disable(sk); tcp_veno_state() 111 static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event) tcp_veno_cwnd_event() argument 114 tcp_veno_init(sk); tcp_veno_cwnd_event() 117 static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) tcp_veno_cong_avoid() argument 119 struct tcp_sock *tp = tcp_sk(sk); tcp_veno_cong_avoid() 120 struct veno *veno = inet_csk_ca(sk); tcp_veno_cong_avoid() 123 tcp_reno_cong_avoid(sk, ack, acked); tcp_veno_cong_avoid() 128 if (!tcp_is_cwnd_limited(sk)) tcp_veno_cong_avoid() 136 tcp_reno_cong_avoid(sk, ack, acked); tcp_veno_cong_avoid() 190 static u32 tcp_veno_ssthresh(struct sock *sk) tcp_veno_ssthresh() argument 192 const struct tcp_sock *tp = tcp_sk(sk); tcp_veno_ssthresh() 193 struct veno *veno = inet_csk_ca(sk); tcp_veno_ssthresh()
|
H A D | tcp_cubic.c | 129 static inline void bictcp_hystart_reset(struct sock *sk) bictcp_hystart_reset() argument 131 struct tcp_sock *tp = tcp_sk(sk); bictcp_hystart_reset() 132 struct bictcp *ca = inet_csk_ca(sk); bictcp_hystart_reset() 140 static void bictcp_init(struct sock *sk) bictcp_init() argument 142 struct bictcp *ca = inet_csk_ca(sk); bictcp_init() 148 bictcp_hystart_reset(sk); bictcp_init() 151 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; bictcp_init() 154 static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) bictcp_cwnd_event() argument 157 struct bictcp *ca = inet_csk_ca(sk); bictcp_cwnd_event() 161 delta = now - tcp_sk(sk)->lsndtime; bictcp_cwnd_event() 336 static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) bictcp_cong_avoid() argument 338 struct tcp_sock *tp = tcp_sk(sk); bictcp_cong_avoid() 339 struct bictcp *ca = inet_csk_ca(sk); bictcp_cong_avoid() 341 if (!tcp_is_cwnd_limited(sk)) bictcp_cong_avoid() 346 bictcp_hystart_reset(sk); bictcp_cong_avoid() 355 static u32 bictcp_recalc_ssthresh(struct sock *sk) bictcp_recalc_ssthresh() argument 357 const struct tcp_sock *tp = tcp_sk(sk); bictcp_recalc_ssthresh() 358 struct bictcp *ca = inet_csk_ca(sk); bictcp_recalc_ssthresh() 374 static u32 bictcp_undo_cwnd(struct sock *sk) bictcp_undo_cwnd() argument 376 struct bictcp *ca = inet_csk_ca(sk); bictcp_undo_cwnd() 378 return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); bictcp_undo_cwnd() 381 static void bictcp_state(struct sock *sk, u8 new_state) bictcp_state() argument 384 bictcp_reset(inet_csk_ca(sk)); bictcp_state() 385 bictcp_hystart_reset(sk); bictcp_state() 389 static void hystart_update(struct sock *sk, u32 delay) hystart_update() argument 391 struct tcp_sock *tp = tcp_sk(sk); hystart_update() 392 struct bictcp *ca = inet_csk_ca(sk); hystart_update() 405 NET_INC_STATS_BH(sock_net(sk), hystart_update() 407 NET_ADD_STATS_BH(sock_net(sk), hystart_update() 426 NET_INC_STATS_BH(sock_net(sk), hystart_update() 428 NET_ADD_STATS_BH(sock_net(sk), hystart_update() 440 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) bictcp_acked() argument 442 const struct tcp_sock *tp = tcp_sk(sk); bictcp_acked() 443 struct bictcp *ca = inet_csk_ca(sk); bictcp_acked() 465 hystart_update(sk, delay); bictcp_acked()
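bictcp_cong_avoid() in tcp_cubic.c follows the cubic curve W(t) = C*(t - K)^3 + Wmax with K = cbrt(Wmax*beta/C): concave growth up to the pre-loss window Wmax, a plateau around it, then convex probing beyond. The module does this in fixed point; a floating-point restatement (link with -lm; here beta is the fractional reduction on loss, roughly 0.3 for the in-kernel default of scaling cwnd by 717/1024, and C = 0.4):

    #include <math.h>

    /* CUBIC window as a function of seconds since the last loss. */
    static double cubic_window(double t, double wmax, double beta, double c)
    {
            double k = cbrt(wmax * beta / c);  /* time to return to wmax */

            return c * pow(t - k, 3.0) + wmax;
    }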
|
H A D | tcp_bic.c | 70 static void bictcp_init(struct sock *sk) bictcp_init() argument 72 struct bictcp *ca = inet_csk_ca(sk); bictcp_init() 78 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; bictcp_init() 141 static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) bictcp_cong_avoid() argument 143 struct tcp_sock *tp = tcp_sk(sk); bictcp_cong_avoid() 144 struct bictcp *ca = inet_csk_ca(sk); bictcp_cong_avoid() 146 if (!tcp_is_cwnd_limited(sk)) bictcp_cong_avoid() 161 static u32 bictcp_recalc_ssthresh(struct sock *sk) bictcp_recalc_ssthresh() argument 163 const struct tcp_sock *tp = tcp_sk(sk); bictcp_recalc_ssthresh() 164 struct bictcp *ca = inet_csk_ca(sk); bictcp_recalc_ssthresh() 183 static u32 bictcp_undo_cwnd(struct sock *sk) bictcp_undo_cwnd() argument 185 const struct tcp_sock *tp = tcp_sk(sk); bictcp_undo_cwnd() 186 const struct bictcp *ca = inet_csk_ca(sk); bictcp_undo_cwnd() 191 static void bictcp_state(struct sock *sk, u8 new_state) bictcp_state() argument 194 bictcp_reset(inet_csk_ca(sk)); bictcp_state() 200 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt) bictcp_acked() argument 202 const struct inet_connection_sock *icsk = inet_csk(sk); bictcp_acked() 205 struct bictcp *ca = inet_csk_ca(sk); bictcp_acked()
|
H A D | tcp_vegas.c | 70 static void vegas_enable(struct sock *sk) vegas_enable() argument 72 const struct tcp_sock *tp = tcp_sk(sk); vegas_enable() 73 struct vegas *vegas = inet_csk_ca(sk); vegas_enable() 86 static inline void vegas_disable(struct sock *sk) vegas_disable() argument 88 struct vegas *vegas = inet_csk_ca(sk); vegas_disable() 93 void tcp_vegas_init(struct sock *sk) tcp_vegas_init() argument 95 struct vegas *vegas = inet_csk_ca(sk); tcp_vegas_init() 98 vegas_enable(sk); tcp_vegas_init() 110 void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us) tcp_vegas_pkts_acked() argument 112 struct vegas *vegas = inet_csk_ca(sk); tcp_vegas_pkts_acked() 133 void tcp_vegas_state(struct sock *sk, u8 ca_state) tcp_vegas_state() argument 136 vegas_enable(sk); tcp_vegas_state() 138 vegas_disable(sk); tcp_vegas_state() 151 void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event) tcp_vegas_cwnd_event() argument 155 tcp_vegas_init(sk); tcp_vegas_cwnd_event() 164 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) tcp_vegas_cong_avoid() argument 166 struct tcp_sock *tp = tcp_sk(sk); tcp_vegas_cong_avoid() 167 struct vegas *vegas = inet_csk_ca(sk); tcp_vegas_cong_avoid() 170 tcp_reno_cong_avoid(sk, ack, acked); tcp_vegas_cong_avoid() 195 tcp_reno_cong_avoid(sk, ack, acked); tcp_vegas_cong_avoid() 276 tp->snd_ssthresh = tcp_current_ssthresh(sk); tcp_vegas_cong_avoid() 289 size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr, tcp_vegas_get_info() argument 292 const struct vegas *ca = inet_csk_ca(sk); tcp_vegas_get_info()
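tcp_vegas_cong_avoid() steers on an estimate of how many segments the connection has queued in the network, diff = cwnd*(rtt - base_rtt)/rtt, growing or shrinking cwnd to keep diff between the alpha and beta thresholds. A plain restatement (assumes rtt >= base_rtt, which the minimum filter guarantees):

    /* Vegas backlog estimate: the part of cwnd in excess of what the
     * pipe can hold without queueing (cwnd * base_rtt / rtt). */
    static unsigned int vegas_queued(unsigned int cwnd,
                                     unsigned int rtt, unsigned int base_rtt)
    {
            return cwnd - (unsigned int)
                   ((unsigned long long)cwnd * base_rtt / rtt);
    }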
|
H A D | inet_timewait_sock.c | 131 void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, __inet_twsk_hashdance() argument 134 const struct inet_sock *inet = inet_sk(sk); __inet_twsk_hashdance() 135 const struct inet_connection_sock *icsk = inet_csk(sk); __inet_twsk_hashdance() 136 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash); __inet_twsk_hashdance() 137 spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); __inet_twsk_hashdance() 168 if (__sk_nulls_del_node_init_rcu(sk)) __inet_twsk_hashdance() 169 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); __inet_twsk_hashdance() 186 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, inet_twsk_alloc() argument 195 tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, inet_twsk_alloc() 198 const struct inet_sock *inet = inet_sk(sk); inet_twsk_alloc() 206 tw->tw_bound_dev_if = sk->sk_bound_dev_if; inet_twsk_alloc() 213 tw->tw_family = sk->sk_family; inet_twsk_alloc() 214 tw->tw_reuse = sk->sk_reuse; inet_twsk_alloc() 215 tw->tw_hash = sk->sk_hash; inet_twsk_alloc() 218 tw->tw_prot = sk->sk_prot_creator; inet_twsk_alloc() 219 atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie)); inet_twsk_alloc() 220 twsk_net_set(tw, sock_net(sk)); inet_twsk_alloc() 289 struct sock *sk; inet_twsk_purge() local 299 sk_nulls_for_each_rcu(sk, node, &head->chain) { inet_twsk_purge() 300 if (sk->sk_state != TCP_TIME_WAIT) inet_twsk_purge() 302 tw = inet_twsk(sk); inet_twsk_purge()
|
H A D | tcp_cong.c | 150 void tcp_assign_congestion_control(struct sock *sk) tcp_assign_congestion_control() argument 152 struct inet_connection_sock *icsk = inet_csk(sk); tcp_assign_congestion_control() 175 void tcp_init_congestion_control(struct sock *sk) tcp_init_congestion_control() argument 177 const struct inet_connection_sock *icsk = inet_csk(sk); tcp_init_congestion_control() 180 icsk->icsk_ca_ops->init(sk); tcp_init_congestion_control() 183 static void tcp_reinit_congestion_control(struct sock *sk, tcp_reinit_congestion_control() argument 186 struct inet_connection_sock *icsk = inet_csk(sk); tcp_reinit_congestion_control() 188 tcp_cleanup_congestion_control(sk); tcp_reinit_congestion_control() 192 if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init) tcp_reinit_congestion_control() 193 icsk->icsk_ca_ops->init(sk); tcp_reinit_congestion_control() 197 void tcp_cleanup_congestion_control(struct sock *sk) tcp_cleanup_congestion_control() argument 199 struct inet_connection_sock *icsk = inet_csk(sk); tcp_cleanup_congestion_control() 202 icsk->icsk_ca_ops->release(sk); tcp_cleanup_congestion_control() 327 int tcp_set_congestion_control(struct sock *sk, const char *name) tcp_set_congestion_control() argument 329 struct inet_connection_sock *icsk = inet_csk(sk); tcp_set_congestion_control() 346 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) tcp_set_congestion_control() 351 tcp_reinit_congestion_control(sk, ca); tcp_set_congestion_control() 408 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked) tcp_reno_cong_avoid() argument 410 struct tcp_sock *tp = tcp_sk(sk); tcp_reno_cong_avoid() 412 if (!tcp_is_cwnd_limited(sk)) tcp_reno_cong_avoid() 427 u32 tcp_reno_ssthresh(struct sock *sk) tcp_reno_ssthresh() argument 429 const struct tcp_sock *tp = tcp_sk(sk); tcp_reno_ssthresh()
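tcp_cong.c keeps the registry behind tcp_assign_congestion_control() and tcp_set_congestion_control(), and exports the Reno baselines (tcp_reno_cong_avoid(), tcp_reno_ssthresh()) that other modules reuse. A new algorithm hooks in by registering a struct tcp_congestion_ops; a minimal module sketch against this 4.1-era API, with the Reno callbacks standing in for real logic (the "demo" name and empty init are illustrative):

    #include <linux/module.h>
    #include <net/tcp.h>

    static void demo_init(struct sock *sk)
    {
            /* per-connection private state lives in inet_csk_ca(sk),
             * up to ICSK_CA_PRIV_SIZE bytes */
    }

    static struct tcp_congestion_ops demo_cong __read_mostly = {
            .init       = demo_init,
            .ssthresh   = tcp_reno_ssthresh,    /* halve cwnd on loss */
            .cong_avoid = tcp_reno_cong_avoid,  /* standard AIMD growth */
            .owner      = THIS_MODULE,
            .name       = "demo",
    };

    static int __init demo_register(void)
    {
            return tcp_register_congestion_control(&demo_cong);
    }

    static void __exit demo_unregister(void)
    {
            tcp_unregister_congestion_control(&demo_cong);
    }

    module_init(demo_register);
    module_exit(demo_unregister);
    MODULE_LICENSE("GPL");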
|
/linux-4.1.27/net/llc/ |
H A D | llc_c_ac.c | 32 static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb); 33 static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb); 34 static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *ev); 36 static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb); 38 static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, 41 static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb); 45 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb) llc_conn_ac_clear_remote_busy() argument 47 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_clear_remote_busy() 56 llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); llc_conn_ac_clear_remote_busy() 61 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) llc_conn_ac_conn_ind() argument 69 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb) llc_conn_ac_conn_confirm() argument 77 static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *skb) llc_conn_ac_data_confirm() argument 85 int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb) llc_conn_ac_data_ind() argument 87 llc_conn_rtn_pdu(sk, skb); llc_conn_ac_data_ind() 91 int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb) llc_conn_ac_disc_ind() argument 119 int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb) llc_conn_ac_disc_confirm() argument 128 int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb) llc_conn_ac_rst_ind() argument 134 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_rst_ind() 167 int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb) llc_conn_ac_rst_confirm() argument 176 int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk, llc_conn_ac_clear_remote_busy_if_f_eq_1() argument 183 LLC_I_PF_IS_1(pdu) && llc_sk(sk)->ack_pf) llc_conn_ac_clear_remote_busy_if_f_eq_1() 184 llc_conn_ac_clear_remote_busy(sk, skb); llc_conn_ac_clear_remote_busy_if_f_eq_1() 188 int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2() argument 191 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2() 198 int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_disc_cmd_p_set_x() argument 201 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_disc_cmd_p_set_x() 202 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_conn_ac_send_disc_cmd_p_set_x() 213 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_disc_cmd_p_set_x() 214 llc_conn_ac_set_p_flag_1(sk, skb); llc_conn_ac_send_disc_cmd_p_set_x() 223 int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_dm_rsp_f_set_p() argument 226 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_dm_rsp_f_set_p() 227 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_conn_ac_send_dm_rsp_f_set_p() 240 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_dm_rsp_f_set_p() 249 int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_dm_rsp_f_set_1() argument 252 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_dm_rsp_f_set_1() 253 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_conn_ac_send_dm_rsp_f_set_1() 264 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_dm_rsp_f_set_1() 273 int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_frmr_rsp_f_set_x() argument 279 struct llc_sock *llc = llc_sk(sk); 
llc_conn_ac_send_frmr_rsp_f_set_x() 286 nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, llc_conn_ac_send_frmr_rsp_f_set_x() 298 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_frmr_rsp_f_set_x() 307 int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_frmr_rsp_f_set_0() argument 310 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_resend_frmr_rsp_f_set_0() 311 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, llc_conn_ac_resend_frmr_rsp_f_set_0() 325 llc_conn_send_pdu(sk, nskb); llc_conn_ac_resend_frmr_rsp_f_set_0() 334 int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_frmr_rsp_f_set_p() argument 339 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_resend_frmr_rsp_f_set_p() 342 nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, llc_conn_ac_resend_frmr_rsp_f_set_p() 355 llc_conn_send_pdu(sk, nskb); llc_conn_ac_resend_frmr_rsp_f_set_p() 364 int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_cmd_p_set_1() argument 367 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_i_cmd_p_set_1() 375 llc_conn_send_pdu(sk, skb); llc_conn_ac_send_i_cmd_p_set_1() 376 llc_conn_ac_inc_vs_by_1(sk, skb); llc_conn_ac_send_i_cmd_p_set_1() 381 static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_cmd_p_set_0() argument 384 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_i_cmd_p_set_0() 392 llc_conn_send_pdu(sk, skb); llc_conn_ac_send_i_cmd_p_set_0() 393 llc_conn_ac_inc_vs_by_1(sk, skb); llc_conn_ac_send_i_cmd_p_set_0() 398 int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_xxx_x_set_0() argument 401 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_i_xxx_x_set_0() 409 llc_conn_send_pdu(sk, skb); llc_conn_ac_send_i_xxx_x_set_0() 410 llc_conn_ac_inc_vs_by_1(sk, skb); llc_conn_ac_send_i_xxx_x_set_0() 415 int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_i_xxx_x_set_0() argument 420 llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); llc_conn_ac_resend_i_xxx_x_set_0() 424 int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr() argument 430 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr() 431 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr() 441 llc_conn_send_pdu(sk, nskb); llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr() 448 llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr() 453 int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_resend_i_rsp_f_set_1() argument 458 llc_conn_resend_i_pdu_as_rsp(sk, nr, 1); llc_conn_ac_resend_i_rsp_f_set_1() 462 int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rej_cmd_p_set_1() argument 465 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rej_cmd_p_set_1() 466 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rej_cmd_p_set_1() 477 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rej_cmd_p_set_1() 486 int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rej_rsp_f_set_1() argument 489 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rej_rsp_f_set_1() 490 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rej_rsp_f_set_1() 501 
llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rej_rsp_f_set_1() 510 int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rej_xxx_x_set_0() argument 513 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rej_xxx_x_set_0() 514 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rej_xxx_x_set_0() 525 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rej_xxx_x_set_0() 534 int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rnr_cmd_p_set_1() argument 537 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rnr_cmd_p_set_1() 538 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rnr_cmd_p_set_1() 549 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rnr_cmd_p_set_1() 558 int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rnr_rsp_f_set_1() argument 561 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rnr_rsp_f_set_1() 562 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rnr_rsp_f_set_1() 573 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rnr_rsp_f_set_1() 582 int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rnr_xxx_x_set_0() argument 585 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rnr_xxx_x_set_0() 586 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rnr_xxx_x_set_0() 597 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rnr_xxx_x_set_0() 606 int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_remote_busy() argument 608 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_set_remote_busy() 618 int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_opt_send_rnr_xxx_x_set_0() argument 621 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_opt_send_rnr_xxx_x_set_0() 622 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_opt_send_rnr_xxx_x_set_0() 633 llc_conn_send_pdu(sk, nskb); llc_conn_ac_opt_send_rnr_xxx_x_set_0() 642 int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rr_cmd_p_set_1() argument 645 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rr_cmd_p_set_1() 646 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rr_cmd_p_set_1() 657 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rr_cmd_p_set_1() 666 int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rr_rsp_f_set_1() argument 669 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rr_rsp_f_set_1() 670 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rr_rsp_f_set_1() 682 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rr_rsp_f_set_1() 691 int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_ack_rsp_f_set_1() argument 694 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_ack_rsp_f_set_1() 695 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_ack_rsp_f_set_1() 706 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_ack_rsp_f_set_1() 715 int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_rr_xxx_x_set_0() argument 718 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rr_xxx_x_set_0() 719 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); 
llc_conn_ac_send_rr_xxx_x_set_0() 730 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rr_xxx_x_set_0() 739 int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_ack_xxx_x_set_0() argument 742 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_ack_xxx_x_set_0() 743 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_ack_xxx_x_set_0() 754 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_ack_xxx_x_set_0() 763 void llc_conn_set_p_flag(struct sock *sk, u8 value) llc_conn_set_p_flag() argument 765 int state_changed = llc_sk(sk)->p_flag && !value; llc_conn_set_p_flag() 767 llc_sk(sk)->p_flag = value; llc_conn_set_p_flag() 770 sk->sk_state_change(sk); llc_conn_set_p_flag() 773 int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_sabme_cmd_p_set_x() argument 776 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_sabme_cmd_p_set_x() 777 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_conn_ac_send_sabme_cmd_p_set_x() 791 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_sabme_cmd_p_set_x() 792 llc_conn_set_p_flag(sk, 1); llc_conn_ac_send_sabme_cmd_p_set_x() 801 int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_ua_rsp_f_set_p() argument 805 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_ua_rsp_f_set_p() 806 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_conn_ac_send_ua_rsp_f_set_p() 819 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_ua_rsp_f_set_p() 828 int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_s_flag_0() argument 830 llc_sk(sk)->s_flag = 0; llc_conn_ac_set_s_flag_0() 834 int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_s_flag_1() argument 836 llc_sk(sk)->s_flag = 1; llc_conn_ac_set_s_flag_1() 840 int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_start_p_timer() argument 842 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_start_p_timer() 844 llc_conn_set_p_flag(sk, 1); llc_conn_ac_start_p_timer() 852 * @sk: current connection structure 860 int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_ack_if_needed() argument 863 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_ack_if_needed() 874 llc_conn_ac_send_rr_rsp_f_set_ackpf(sk, skb); llc_conn_ac_send_ack_if_needed() 877 llc_conn_ac_inc_npta_value(sk, skb); llc_conn_ac_send_ack_if_needed() 884 * @sk: current connection structure 891 int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb) llc_conn_ac_rst_sendack_flag() argument 893 llc_sk(sk)->ack_must_be_send = llc_sk(sk)->ack_pf = 0; llc_conn_ac_rst_sendack_flag() 899 * @sk: current connection structure 907 static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, llc_conn_ac_send_i_rsp_f_set_ackpf() argument 911 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_i_rsp_f_set_ackpf() 919 llc_conn_send_pdu(sk, skb); llc_conn_ac_send_i_rsp_f_set_ackpf() 920 llc_conn_ac_inc_vs_by_1(sk, skb); llc_conn_ac_send_i_rsp_f_set_ackpf() 927 * @sk: current connection structure. 935 int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) llc_conn_ac_send_i_as_ack() argument 937 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_i_as_ack() 940 llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); llc_conn_ac_send_i_as_ack() 944 llc_conn_ac_send_i_cmd_p_set_0(sk, skb); llc_conn_ac_send_i_as_ack() 950 * @sk: current connection structure. 
958 static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, llc_conn_ac_send_rr_rsp_f_set_ackpf() argument 962 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_send_rr_rsp_f_set_ackpf() 963 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); llc_conn_ac_send_rr_rsp_f_set_ackpf() 974 llc_conn_send_pdu(sk, nskb); llc_conn_ac_send_rr_rsp_f_set_ackpf() 985 * @sk: current connection structure. 993 static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_npta_value() argument 995 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_inc_npta_value() 1010 * @sk: current connection structure. 1016 int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb) llc_conn_ac_adjust_npta_by_rr() argument 1018 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_adjust_npta_by_rr() 1036 * @sk: current connection structure. 1042 int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb) llc_conn_ac_adjust_npta_by_rnr() argument 1044 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_adjust_npta_by_rnr() 1060 * @sk: current connection structure. 1067 int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb) llc_conn_ac_dec_tx_win_size() argument 1069 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_dec_tx_win_size() 1081 * @sk: current connection structure. 1087 int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_tx_win_size() argument 1089 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_inc_tx_win_size() 1097 int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_all_timers() argument 1099 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_stop_all_timers() 1110 int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_other_timers() argument 1112 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_stop_other_timers() 1122 int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_start_ack_timer() argument 1124 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_start_ack_timer() 1130 int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_start_rej_timer() argument 1132 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_start_rej_timer() 1139 int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, llc_conn_ac_start_ack_tmr_if_not_running() argument 1142 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_start_ack_tmr_if_not_running() 1150 int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_ack_timer() argument 1152 del_timer(&llc_sk(sk)->ack_timer.timer); llc_conn_ac_stop_ack_timer() 1156 int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_p_timer() argument 1158 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_stop_p_timer() 1161 llc_conn_set_p_flag(sk, 0); llc_conn_ac_stop_p_timer() 1165 int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb) llc_conn_ac_stop_rej_timer() argument 1167 del_timer(&llc_sk(sk)->rej_sent_timer.timer); llc_conn_ac_stop_rej_timer() 1171 int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb) llc_conn_ac_upd_nr_received() argument 1176 struct llc_sock *llc = llc_sk(sk); llc_conn_ac_upd_nr_received() 1179 acked = llc_conn_remove_acked_pdus(sk, llc->last_nr, &unacked); llc_conn_ac_upd_nr_received() 1190 llc_conn_ac_data_confirm(sk, skb); llc_conn_ac_upd_nr_received() 1201 llc_conn_ac_data_confirm(sk, skb); llc_conn_ac_upd_nr_received() 1207 int llc_conn_ac_upd_p_flag(struct sock *sk, 
struct sk_buff *skb) llc_conn_ac_upd_p_flag() argument 1216 llc_conn_set_p_flag(sk, 0); llc_conn_ac_upd_p_flag() 1217 llc_conn_ac_stop_p_timer(sk, skb); llc_conn_ac_upd_p_flag() 1223 int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_data_flag_2() argument 1225 llc_sk(sk)->data_flag = 2; llc_conn_ac_set_data_flag_2() 1229 int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_data_flag_0() argument 1231 llc_sk(sk)->data_flag = 0; llc_conn_ac_set_data_flag_0() 1235 int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_data_flag_1() argument 1237 llc_sk(sk)->data_flag = 1; llc_conn_ac_set_data_flag_1() 1241 int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk, llc_conn_ac_set_data_flag_1_if_data_flag_eq_0() argument 1244 if (!llc_sk(sk)->data_flag) llc_conn_ac_set_data_flag_1_if_data_flag_eq_0() 1245 llc_sk(sk)->data_flag = 1; llc_conn_ac_set_data_flag_1_if_data_flag_eq_0() 1249 int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_p_flag_0() argument 1251 llc_conn_set_p_flag(sk, 0); llc_conn_ac_set_p_flag_0() 1255 static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_p_flag_1() argument 1257 llc_conn_set_p_flag(sk, 1); llc_conn_ac_set_p_flag_1() 1261 int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_remote_busy_0() argument 1263 llc_sk(sk)->remote_busy_flag = 0; llc_conn_ac_set_remote_busy_0() 1267 int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_cause_flag_0() argument 1269 llc_sk(sk)->cause_flag = 0; llc_conn_ac_set_cause_flag_0() 1273 int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_cause_flag_1() argument 1275 llc_sk(sk)->cause_flag = 1; llc_conn_ac_set_cause_flag_1() 1279 int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_retry_cnt_0() argument 1281 llc_sk(sk)->retry_count = 0; llc_conn_ac_set_retry_cnt_0() 1285 int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_retry_cnt_by_1() argument 1287 llc_sk(sk)->retry_count++; llc_conn_ac_inc_retry_cnt_by_1() 1291 int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_vr_0() argument 1293 llc_sk(sk)->vR = 0; llc_conn_ac_set_vr_0() 1297 int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_vr_by_1() argument 1299 llc_sk(sk)->vR = PDU_GET_NEXT_Vr(llc_sk(sk)->vR); llc_conn_ac_inc_vr_by_1() 1303 int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_vs_0() argument 1305 llc_sk(sk)->vS = 0; llc_conn_ac_set_vs_0() 1309 int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb) llc_conn_ac_set_vs_nr() argument 1311 llc_sk(sk)->vS = llc_sk(sk)->last_nr; llc_conn_ac_set_vs_nr() 1315 static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb) llc_conn_ac_inc_vs_by_1() argument 1317 llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % LLC_2_SEQ_NBR_MODULO; llc_conn_ac_inc_vs_by_1() 1323 struct sock *sk = (struct sock *)timeout_data; llc_conn_tmr_common_cb() local 1326 bh_lock_sock(sk); llc_conn_tmr_common_cb() 1330 skb_set_owner_r(skb, sk); llc_conn_tmr_common_cb() 1332 llc_process_tmr_ev(sk, skb); llc_conn_tmr_common_cb() 1334 bh_unlock_sock(sk); llc_conn_tmr_common_cb() 1357 int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb) llc_conn_ac_rst_vs() argument 1359 llc_sk(sk)->X = llc_sk(sk)->vS; 
llc_conn_ac_rst_vs() 1360 llc_conn_ac_set_vs_nr(sk, skb); llc_conn_ac_rst_vs() 1364 int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb) llc_conn_ac_upd_vs() argument 1369 if (llc_circular_between(llc_sk(sk)->vS, nr, llc_sk(sk)->X)) llc_conn_ac_upd_vs() 1370 llc_conn_ac_set_vs_nr(sk, skb); llc_conn_ac_upd_vs() 1380 * @sk: closed connection 1383 int llc_conn_disc(struct sock *sk, struct sk_buff *skb) llc_conn_disc() argument 1391 * @sk: resetting connection. 1396 int llc_conn_reset(struct sock *sk, struct sk_buff *skb) llc_conn_reset() argument 1398 llc_sk_reset(sk); llc_conn_reset() 1421 * @sk: active connection 1430 static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb) llc_process_tmr_ev() argument 1432 if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) { llc_process_tmr_ev() 1437 if (!sock_owned_by_user(sk)) llc_process_tmr_ev() 1438 llc_conn_state_process(sk, skb); llc_process_tmr_ev() 1441 __sk_add_backlog(sk, skb); llc_process_tmr_ev()
|
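Each llc_conn_ac_send_*() excerpt above instantiates the same action template: allocate an S-format frame with llc_alloc_frame(), fill in the LLC and MAC headers, and hand the result to llc_conn_send_pdu(). A minimal sketch of that template follows; the function name is hypothetical, and the error convention (-ENOBUFS when allocation fails) matches the surrounding code.

static int llc_conn_ac_send_rr_sketch(struct sock *sk, struct sk_buff *skb)
{
	int rc = -ENOBUFS;
	struct llc_sock *llc = llc_sk(sk);
	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);

	if (nskb) {
		struct llc_sap *sap = llc->sap;

		/* S-format response PDU carrying the receive sequence number */
		llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap,
				    llc->daddr.lsap, LLC_PDU_RSP);
		llc_pdu_init_as_rr_rsp(nskb, 1, llc->vR);
		rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
		if (likely(!rc))
			llc_conn_send_pdu(sk, nskb);
		else
			kfree_skb(nskb);
	}
	return rc;
}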
H A D | llc_c_ev.c | 67 * @sk: current connection. 75 static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr) llc_util_nr_inside_tx_window() argument 80 struct llc_sock *llc = llc_sk(sk); llc_util_nr_inside_tx_window() 99 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb) llc_conn_ev_conn_req() argument 107 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb) llc_conn_ev_data_req() argument 115 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb) llc_conn_ev_disc_req() argument 123 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rst_req() argument 131 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb) llc_conn_ev_local_busy_detected() argument 139 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb) llc_conn_ev_local_busy_cleared() argument 147 int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_bad_pdu() argument 152 int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_disc_cmd_pbit_set_x() argument 160 int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_dm_rsp_fbit_set_x() argument 168 int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_frmr_rsp_fbit_set_x() argument 176 int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_0() argument 180 return llc_conn_space(sk, skb) && llc_conn_ev_rx_i_cmd_pbit_set_0() 183 LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1; llc_conn_ev_rx_i_cmd_pbit_set_0() 186 int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_cmd_pbit_set_1() argument 190 return llc_conn_space(sk, skb) && llc_conn_ev_rx_i_cmd_pbit_set_1() 193 LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1; llc_conn_ev_rx_i_cmd_pbit_set_1() 196 int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns() argument 200 const u8 vr = llc_sk(sk)->vR; llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns() 205 !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns() 208 int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns() argument 212 const u8 vr = llc_sk(sk)->vR; llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns() 217 !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns() 220 int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns() argument 224 const u8 vr = llc_sk(sk)->vR; llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns() 228 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns() 231 __func__, llc_sk(sk)->state, ns, vr); llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns() 235 int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_0() argument 239 return llc_conn_space(sk, skb) && llc_conn_ev_rx_i_rsp_fbit_set_0() 242 LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1; llc_conn_ev_rx_i_rsp_fbit_set_0() 245 int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_1() argument 251 LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 
0 : 1; llc_conn_ev_rx_i_rsp_fbit_set_1() 254 int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_i_rsp_fbit_set_x() argument 258 return llc_conn_space(sk, skb) && llc_conn_ev_rx_i_rsp_fbit_set_x() 260 LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1; llc_conn_ev_rx_i_rsp_fbit_set_x() 263 int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns() argument 267 const u8 vr = llc_sk(sk)->vR; llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns() 272 !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns() 275 int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns() argument 279 const u8 vr = llc_sk(sk)->vR; llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns() 284 !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns() 287 int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns() argument 291 const u8 vr = llc_sk(sk)->vR; llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns() 295 !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns() 298 int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns() argument 302 const u8 vr = llc_sk(sk)->vR; llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns() 306 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns() 309 __func__, llc_sk(sk)->state, ns, vr); llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns() 313 int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_cmd_pbit_set_0() argument 322 int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_cmd_pbit_set_1() argument 331 int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_rsp_fbit_set_0() argument 340 int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_rsp_fbit_set_1() argument 349 int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rej_rsp_fbit_set_x() argument 357 int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rnr_cmd_pbit_set_0() argument 366 int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rnr_cmd_pbit_set_1() argument 375 int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rnr_rsp_fbit_set_0() argument 384 int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rnr_rsp_fbit_set_1() argument 393 int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rr_cmd_pbit_set_0() argument 402 int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rr_cmd_pbit_set_1() argument 411 int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rr_rsp_fbit_set_0() argument 415 return llc_conn_space(sk, skb) && llc_conn_ev_rx_rr_rsp_fbit_set_0() 421 int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_rr_rsp_fbit_set_1() argument 425 return llc_conn_space(sk, skb) && llc_conn_ev_rx_rr_rsp_fbit_set_1() 431 int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_sabme_cmd_pbit_set_x() argument 439 int 
llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_ua_rsp_fbit_set_x() argument 447 int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_xxx_cmd_pbit_set_1() argument 462 int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_xxx_cmd_pbit_set_x() argument 481 int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_xxx_rsp_fbit_set_x() argument 502 int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr() argument 507 const u8 vs = llc_sk(sk)->vS; llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr() 512 nr != vs && llc_util_nr_inside_tx_window(sk, nr)) { llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr() 514 __func__, llc_sk(sk)->state, vs, nr); llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr() 520 int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr() argument 525 const u8 vs = llc_sk(sk)->vS; llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr() 530 nr != vs && llc_util_nr_inside_tx_window(sk, nr)) { llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr() 533 __func__, llc_sk(sk)->state, vs, nr); llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr() 538 int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rx_any_frame() argument 543 int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb) llc_conn_ev_p_tmr_exp() argument 550 int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb) llc_conn_ev_ack_tmr_exp() argument 557 int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb) llc_conn_ev_rej_tmr_exp() argument 564 int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb) llc_conn_ev_busy_tmr_exp() argument 571 int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb) llc_conn_ev_init_p_f_cycle() argument 576 int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb) llc_conn_ev_tx_buffer_full() argument 590 int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_data_flag_eq_1() argument 592 return llc_sk(sk)->data_flag != 1; llc_conn_ev_qlfy_data_flag_eq_1() 595 int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_data_flag_eq_0() argument 597 return llc_sk(sk)->data_flag; llc_conn_ev_qlfy_data_flag_eq_0() 600 int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_data_flag_eq_2() argument 602 return llc_sk(sk)->data_flag != 2; llc_conn_ev_qlfy_data_flag_eq_2() 605 int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_p_flag_eq_1() argument 607 return llc_sk(sk)->p_flag != 1; llc_conn_ev_qlfy_p_flag_eq_1() 612 * @sk: current connection structure. 621 int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_last_frame_eq_1() argument 623 return !(skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k); llc_conn_ev_qlfy_last_frame_eq_1() 628 * @sk: current connection structure. 
635 int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_last_frame_eq_0() argument 637 return skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k; llc_conn_ev_qlfy_last_frame_eq_0() 640 int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_p_flag_eq_0() argument 642 return llc_sk(sk)->p_flag; llc_conn_ev_qlfy_p_flag_eq_0() 645 int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_p_flag_eq_f() argument 650 return llc_sk(sk)->p_flag == f_bit ? 0 : 1; llc_conn_ev_qlfy_p_flag_eq_f() 653 int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_remote_busy_eq_0() argument 655 return llc_sk(sk)->remote_busy_flag; llc_conn_ev_qlfy_remote_busy_eq_0() 658 int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_remote_busy_eq_1() argument 660 return !llc_sk(sk)->remote_busy_flag; llc_conn_ev_qlfy_remote_busy_eq_1() 663 int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_retry_cnt_lt_n2() argument 665 return !(llc_sk(sk)->retry_count < llc_sk(sk)->n2); llc_conn_ev_qlfy_retry_cnt_lt_n2() 668 int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_retry_cnt_gte_n2() argument 670 return !(llc_sk(sk)->retry_count >= llc_sk(sk)->n2); llc_conn_ev_qlfy_retry_cnt_gte_n2() 673 int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_s_flag_eq_1() argument 675 return !llc_sk(sk)->s_flag; llc_conn_ev_qlfy_s_flag_eq_1() 678 int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_s_flag_eq_0() argument 680 return llc_sk(sk)->s_flag; llc_conn_ev_qlfy_s_flag_eq_0() 683 int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_cause_flag_eq_1() argument 685 return !llc_sk(sk)->cause_flag; llc_conn_ev_qlfy_cause_flag_eq_1() 688 int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_cause_flag_eq_0() argument 690 return llc_sk(sk)->cause_flag; llc_conn_ev_qlfy_cause_flag_eq_0() 693 int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_conn() argument 701 int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_disc() argument 709 int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_failed() argument 717 int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk, llc_conn_ev_qlfy_set_status_remote_busy() argument 726 int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_refuse() argument 734 int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_conflict() argument 742 int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb) llc_conn_ev_qlfy_set_status_rst_done() argument
|
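One convention in llc_c_ev.c that is easy to misread: despite the int return type, these event recognizers return 0 when the received PDU matches the event and non-zero when it does not, because the state-machine walker in llc_conn.c treats 0 as "event recognized". A sketch in that style, using the predicate macros from include/net/llc_pdu.h; the function name is hypothetical.

static int llc_conn_ev_rx_rr_cmd_sketch(struct sock *sk, struct sk_buff *skb)
{
	const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);

	/* 0 means "this is an RR command with the P bit set" */
	return LLC_PDU_IS_CMD(pdu) &&
	       LLC_PDU_TYPE_IS_S(pdu) &&
	       LLC_S_PF_IS_1(pdu) &&
	       LLC_S_PDU_CMD(pdu) == LLC_2_PDU_CMD_RR ? 0 : 1;
}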
H A D | af_llc.c | 41 static int llc_ui_wait_for_conn(struct sock *sk, long timeout); 42 static int llc_ui_wait_for_disc(struct sock *sk, long timeout); 43 static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout); 88 * @sk: Socket which contains a valid llc socket type. 95 static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr) llc_ui_header_len() argument 101 else if (sk->sk_type == SOCK_STREAM) llc_ui_header_len() 108 * @sk: Connection the socket is using. 115 static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock) llc_ui_send_data() argument 117 struct llc_sock* llc = llc_sk(sk); llc_ui_send_data() 123 long timeout = sock_sndtimeo(sk, noblock); llc_ui_send_data() 125 rc = llc_ui_wait_for_busy_core(sk, timeout); llc_ui_send_data() 128 rc = llc_build_and_send_pkt(sk, skb); llc_ui_send_data() 132 static void llc_ui_sk_init(struct socket *sock, struct sock *sk) llc_ui_sk_init() argument 134 sock_graft(sk, sock); llc_ui_sk_init() 135 sk->sk_type = sock->type; llc_ui_sk_init() 149 * @sock: Socket to initialize and attach allocated sk to. 160 struct sock *sk; llc_ui_create() local 171 sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto); llc_ui_create() 172 if (sk) { llc_ui_create() 174 llc_ui_sk_init(sock, sk); llc_ui_create() 188 struct sock *sk = sock->sk; llc_ui_release() local 191 if (unlikely(sk == NULL)) llc_ui_release() 193 sock_hold(sk); llc_ui_release() 194 lock_sock(sk); llc_ui_release() 195 llc = llc_sk(sk); llc_ui_release() 198 if (!llc_send_disc(sk)) llc_ui_release() 199 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); llc_ui_release() 200 if (!sock_flag(sk, SOCK_ZAPPED)) llc_ui_release() 201 llc_sap_remove_socket(llc->sap, sk); llc_ui_release() 202 release_sock(sk); llc_ui_release() 205 sock_put(sk); llc_ui_release() 206 llc_sk_free(sk); llc_ui_release() 253 struct sock *sk = sock->sk; llc_ui_autobind() local 254 struct llc_sock *llc = llc_sk(sk); llc_ui_autobind() 258 if (!sock_flag(sk, SOCK_ZAPPED)) llc_ui_autobind() 261 if (sk->sk_bound_dev_if) { llc_ui_autobind() 262 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); llc_ui_autobind() 282 llc_sap_add_socket(sap, sk); llc_ui_autobind() 283 sock_reset_flag(sk, SOCK_ZAPPED); llc_ui_autobind() 306 struct sock *sk = sock->sk; llc_ui_bind() local 307 struct llc_sock *llc = llc_sk(sk); llc_ui_bind() 312 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) llc_ui_bind() 319 if (sk->sk_bound_dev_if) { llc_ui_bind() 320 llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); llc_ui_bind() 377 llc_sap_add_socket(sap, sk); llc_ui_bind() 378 sock_reset_flag(sk, SOCK_ZAPPED); llc_ui_bind() 399 struct sock *sk = sock->sk; llc_ui_shutdown() local 402 lock_sock(sk); llc_ui_shutdown() 403 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) llc_ui_shutdown() 408 rc = llc_send_disc(sk); llc_ui_shutdown() 410 rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); llc_ui_shutdown() 412 sk->sk_state_change(sk); llc_ui_shutdown() 414 release_sock(sk); llc_ui_shutdown() 435 struct sock *sk = sock->sk; llc_ui_connect() local 436 struct llc_sock *llc = llc_sk(sk); llc_ui_connect() 440 lock_sock(sk); llc_ui_connect() 446 if (unlikely(sk->sk_type != SOCK_STREAM)) llc_ui_connect() 452 if (sock_flag(sk, SOCK_ZAPPED)) { llc_ui_connect() 461 sk->sk_state = TCP_SYN_SENT; llc_ui_connect() 463 rc = llc_establish_connection(sk, llc->dev->dev_addr, llc_ui_connect() 468 sk->sk_state = TCP_CLOSE; llc_ui_connect() 472 if (sk->sk_state == TCP_SYN_SENT) { llc_ui_connect() 473 const long 
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); llc_ui_connect() 475 if (!timeo || !llc_ui_wait_for_conn(sk, timeo)) llc_ui_connect() 483 if (sk->sk_state == TCP_CLOSE) llc_ui_connect() 489 release_sock(sk); llc_ui_connect() 492 rc = sock_error(sk) ? : -ECONNABORTED; llc_ui_connect() 507 struct sock *sk = sock->sk; llc_ui_listen() local 510 lock_sock(sk); llc_ui_listen() 514 if (unlikely(sk->sk_type != SOCK_STREAM)) llc_ui_listen() 517 if (sock_flag(sk, SOCK_ZAPPED)) llc_ui_listen() 522 sk->sk_max_ack_backlog = backlog; llc_ui_listen() 523 if (sk->sk_state != TCP_LISTEN) { llc_ui_listen() 524 sk->sk_ack_backlog = 0; llc_ui_listen() 525 sk->sk_state = TCP_LISTEN; llc_ui_listen() 527 sk->sk_socket->flags |= __SO_ACCEPTCON; llc_ui_listen() 529 release_sock(sk); llc_ui_listen() 533 static int llc_ui_wait_for_disc(struct sock *sk, long timeout) llc_ui_wait_for_disc() argument 539 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); llc_ui_wait_for_disc() 540 if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE)) llc_ui_wait_for_disc() 550 finish_wait(sk_sleep(sk), &wait); llc_ui_wait_for_disc() 554 static int llc_ui_wait_for_conn(struct sock *sk, long timeout) llc_ui_wait_for_conn() argument 559 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); llc_ui_wait_for_conn() 560 if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT)) llc_ui_wait_for_conn() 565 finish_wait(sk_sleep(sk), &wait); llc_ui_wait_for_conn() 569 static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) llc_ui_wait_for_busy_core() argument 572 struct llc_sock *llc = llc_sk(sk); llc_ui_wait_for_busy_core() 576 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); llc_ui_wait_for_busy_core() 578 if (sk_wait_event(sk, &timeout, llc_ui_wait_for_busy_core() 579 (sk->sk_shutdown & RCV_SHUTDOWN) || llc_ui_wait_for_busy_core() 591 finish_wait(sk_sleep(sk), &wait); llc_ui_wait_for_busy_core() 595 static int llc_wait_data(struct sock *sk, long timeo) llc_wait_data() argument 603 rc = sock_error(sk); llc_wait_data() 607 if (sk->sk_shutdown & RCV_SHUTDOWN) llc_wait_data() 616 if (sk_wait_data(sk, &timeo)) llc_wait_data() 624 struct llc_sock *llc = llc_sk(skb->sk); llc_cmsg_rcv() 629 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex; llc_cmsg_rcv() 647 struct sock *sk = sock->sk, *newsk; llc_ui_accept() local 653 llc_sk(sk)->laddr.lsap); llc_ui_accept() 654 lock_sock(sk); llc_ui_accept() 655 if (unlikely(sk->sk_type != SOCK_STREAM)) llc_ui_accept() 659 sk->sk_state != TCP_LISTEN)) llc_ui_accept() 662 if (skb_queue_empty(&sk->sk_receive_queue)) { llc_ui_accept() 663 rc = llc_wait_data(sk, sk->sk_rcvtimeo); llc_ui_accept() 668 llc_sk(sk)->laddr.lsap); llc_ui_accept() 669 skb = skb_dequeue(&sk->sk_receive_queue); llc_ui_accept() 671 if (!skb->sk) llc_ui_accept() 674 newsk = skb->sk; llc_ui_accept() 680 llc = llc_sk(sk); llc_ui_accept() 686 sk->sk_state = TCP_LISTEN; llc_ui_accept() 687 sk->sk_ack_backlog--; llc_ui_accept() 689 llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap); llc_ui_accept() 693 release_sock(sk); llc_ui_accept() 713 struct sock *sk = sock->sk; llc_ui_recvmsg() local 714 struct llc_sock *llc = llc_sk(sk); llc_ui_recvmsg() 723 lock_sock(sk); llc_ui_recvmsg() 725 if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) llc_ui_recvmsg() 728 timeo = sock_rcvtimeo(sk, nonblock); llc_ui_recvmsg() 736 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); llc_ui_recvmsg() 756 skb = skb_peek(&sk->sk_receive_queue); llc_ui_recvmsg() 763 if (copied >= target && !sk->sk_backlog.tail) 
llc_ui_recvmsg() 767 if (sk->sk_err || llc_ui_recvmsg() 768 sk->sk_state == TCP_CLOSE || llc_ui_recvmsg() 769 (sk->sk_shutdown & RCV_SHUTDOWN) || llc_ui_recvmsg() 774 if (sock_flag(sk, SOCK_DONE)) llc_ui_recvmsg() 777 if (sk->sk_err) { llc_ui_recvmsg() 778 copied = sock_error(sk); llc_ui_recvmsg() 781 if (sk->sk_shutdown & RCV_SHUTDOWN) llc_ui_recvmsg() 784 if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) { llc_ui_recvmsg() 785 if (!sock_flag(sk, SOCK_DONE)) { llc_ui_recvmsg() 802 release_sock(sk); llc_ui_recvmsg() 803 lock_sock(sk); llc_ui_recvmsg() 805 sk_wait_data(sk, &timeo); llc_ui_recvmsg() 836 if (sk->sk_type != SOCK_STREAM) llc_ui_recvmsg() 840 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); llc_ui_recvmsg() 841 sk_eat_skb(sk, skb); llc_ui_recvmsg() 842 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); llc_ui_recvmsg() 852 release_sock(sk); llc_ui_recvmsg() 859 if (llc_sk(sk)->cmsg_flags) llc_ui_recvmsg() 863 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); llc_ui_recvmsg() 864 sk_eat_skb(sk, skb); llc_ui_recvmsg() 865 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); llc_ui_recvmsg() 883 struct sock *sk = sock->sk; llc_ui_sendmsg() local 884 struct llc_sock *llc = llc_sk(sk); llc_ui_sendmsg() 894 lock_sock(sk); llc_ui_sendmsg() 904 if (sock_flag(sk, SOCK_ZAPPED)) { llc_ui_sendmsg() 910 hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr); llc_ui_sendmsg() 915 release_sock(sk); llc_ui_sendmsg() 916 skb = sock_alloc_send_skb(sk, size, noblock, &rc); llc_ui_sendmsg() 917 lock_sock(sk); llc_ui_sendmsg() 926 if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) { llc_ui_sendmsg() 942 if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua)) llc_ui_sendmsg() 944 rc = llc_ui_send_data(sk, skb, noblock); llc_ui_sendmsg() 952 release_sock(sk); llc_ui_sendmsg() 969 struct sock *sk = sock->sk; llc_ui_getname() local 970 struct llc_sock *llc = llc_sk(sk); llc_ui_getname() 974 lock_sock(sk); llc_ui_getname() 975 if (sock_flag(sk, SOCK_ZAPPED)) llc_ui_getname() 980 if (sk->sk_state != TCP_ESTABLISHED) llc_ui_getname() 1002 release_sock(sk); llc_ui_getname() 1033 struct sock *sk = sock->sk; llc_ui_setsockopt() local 1034 struct llc_sock *llc = llc_sk(sk); llc_ui_setsockopt() 1038 lock_sock(sk); llc_ui_setsockopt() 1098 release_sock(sk); llc_ui_setsockopt() 1115 struct sock *sk = sock->sk; llc_ui_getsockopt() local 1116 struct llc_sock *llc = llc_sk(sk); llc_ui_getsockopt() 1119 lock_sock(sk); llc_ui_getsockopt() 1156 release_sock(sk); llc_ui_getsockopt()
|
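Since af_llc.c implements the user-visible PF_LLC API, a small user-space sketch may help. The SAP value is a placeholder, the zeroed sllc_mac would in practice be filled with the local interface's hardware address before binding, and the socket call only succeeds on kernels built with LLC socket support.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <net/if_arp.h>		/* ARPHRD_ETHER */
#include <linux/llc.h>		/* struct sockaddr_llc */

int main(void)
{
	struct sockaddr_llc addr;
	int fd = socket(PF_LLC, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket(PF_LLC)");
		return 1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sllc_family = AF_LLC;
	addr.sllc_arphrd = ARPHRD_ETHER;
	addr.sllc_sap    = 0x42;	/* placeholder SAP */
	/* addr.sllc_mac left zeroed here; use the local MAC in practice */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");
	return 0;
}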
H A D | llc_conn.c | 33 static void llc_conn_send_pdus(struct sock *sk); 34 static int llc_conn_service(struct sock *sk, struct sk_buff *skb); 35 static int llc_exec_conn_trans_actions(struct sock *sk, 38 static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, 51 * @sk: connection 59 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) llc_conn_state_process() argument 62 struct llc_sock *llc = llc_sk(skb->sk); llc_conn_state_process() 75 rc = llc_conn_service(skb->sk, skb); llc_conn_state_process() 93 llc_save_primitive(sk, skb, LLC_DATA_PRIM); llc_conn_state_process() 94 if (unlikely(sock_queue_rcv_skb(sk, skb))) { llc_conn_state_process() 106 * skb->sk pointing to the newly created struct sock in llc_conn_state_process() 109 skb_queue_tail(&sk->sk_receive_queue, skb); llc_conn_state_process() 110 sk->sk_state_change(sk); llc_conn_state_process() 113 sock_hold(sk); llc_conn_state_process() 114 if (sk->sk_type == SOCK_STREAM && llc_conn_state_process() 115 sk->sk_state == TCP_ESTABLISHED) { llc_conn_state_process() 116 sk->sk_shutdown = SHUTDOWN_MASK; llc_conn_state_process() 117 sk->sk_socket->state = SS_UNCONNECTED; llc_conn_state_process() 118 sk->sk_state = TCP_CLOSE; llc_conn_state_process() 119 if (!sock_flag(sk, SOCK_DEAD)) { llc_conn_state_process() 120 sock_set_flag(sk, SOCK_DEAD); llc_conn_state_process() 121 sk->sk_state_change(sk); llc_conn_state_process() 125 sock_put(sk); llc_conn_state_process() 148 sk->sk_write_space(sk); llc_conn_state_process() 153 if (sk->sk_type == SOCK_STREAM && llc_conn_state_process() 154 sk->sk_state == TCP_SYN_SENT) { llc_conn_state_process() 156 sk->sk_socket->state = SS_UNCONNECTED; llc_conn_state_process() 157 sk->sk_state = TCP_CLOSE; llc_conn_state_process() 159 sk->sk_socket->state = SS_CONNECTED; llc_conn_state_process() 160 sk->sk_state = TCP_ESTABLISHED; llc_conn_state_process() 162 sk->sk_state_change(sk); llc_conn_state_process() 166 sock_hold(sk); llc_conn_state_process() 167 if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSING) { llc_conn_state_process() 168 sk->sk_socket->state = SS_UNCONNECTED; llc_conn_state_process() 169 sk->sk_state = TCP_CLOSE; llc_conn_state_process() 170 sk->sk_state_change(sk); llc_conn_state_process() 172 sock_put(sk); llc_conn_state_process() 196 void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) llc_conn_send_pdu() argument 199 skb_queue_tail(&sk->sk_write_queue, skb); llc_conn_send_pdu() 200 llc_conn_send_pdus(sk); llc_conn_send_pdu() 205 * @sk: Active connection 212 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb) llc_conn_rtn_pdu() argument 221 * @sk: active connection 229 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit) llc_conn_resend_i_pdu_as_cmd() argument 237 llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus); llc_conn_resend_i_pdu_as_cmd() 244 llc = llc_sk(sk); llc_conn_resend_i_pdu_as_cmd() 250 skb_queue_tail(&sk->sk_write_queue, skb); llc_conn_resend_i_pdu_as_cmd() 258 llc_conn_send_pdus(sk); llc_conn_resend_i_pdu_as_cmd() 264 * @sk: active connection. 
272 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit) llc_conn_resend_i_pdu_as_rsp() argument 276 struct llc_sock *llc = llc_sk(sk); llc_conn_resend_i_pdu_as_rsp() 279 llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus); llc_conn_resend_i_pdu_as_rsp() 291 skb_queue_tail(&sk->sk_write_queue, skb); llc_conn_resend_i_pdu_as_rsp() 299 llc_conn_send_pdus(sk); llc_conn_resend_i_pdu_as_rsp() 305 * @sk: active connection 312 int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked) llc_conn_remove_acked_pdus() argument 318 struct llc_sock *llc = llc_sk(sk); llc_conn_remove_acked_pdus() 342 * @sk: active connection 346 static void llc_conn_send_pdus(struct sock *sk) llc_conn_send_pdus() argument 350 while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { llc_conn_send_pdus() 357 skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); llc_conn_send_pdus() 368 * @sk: connection 375 static int llc_conn_service(struct sock *sk, struct sk_buff *skb) llc_conn_service() argument 378 struct llc_sock *llc = llc_sk(sk); llc_conn_service() 384 trans = llc_qualify_conn_ev(sk, skb); llc_conn_service() 386 rc = llc_exec_conn_trans_actions(sk, trans, skb); llc_conn_service() 390 sk->sk_state_change(sk); llc_conn_service() 399 * @sk: connection 405 static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, llc_qualify_conn_ev() argument 411 struct llc_sock *llc = llc_sk(sk); llc_qualify_conn_ev() 421 if (!((*next_trans)->ev)(sk, skb)) { llc_qualify_conn_ev() 430 !(*next_qualifier)(sk, skb); next_qualifier++) llc_qualify_conn_ev() 445 * @sk: connection 452 static int llc_exec_conn_trans_actions(struct sock *sk, llc_exec_conn_trans_actions() argument 461 int rc2 = (*next_action)(sk, skb); llc_exec_conn_trans_actions() 475 const struct sock *sk) llc_estab_match() 477 struct llc_sock *llc = llc_sk(sk); llc_estab_match() 537 struct sock *sk; llc_lookup_established() local 540 sk = __llc_lookup_established(sap, daddr, laddr); llc_lookup_established() 542 return sk; llc_lookup_established() 547 const struct sock *sk) llc_listener_match() 549 struct llc_sock *llc = llc_sk(sk); llc_listener_match() 551 return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN && llc_listener_match() 618 struct sock *sk = __llc_lookup_established(sap, daddr, laddr); __llc_lookup() local 620 return sk ? : llc_lookup_listener(sap, laddr); __llc_lookup() 709 * @sk: socket 713 void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) llc_sap_add_socket() argument 715 struct llc_sock *llc = llc_sk(sk); llc_sap_add_socket() 720 llc_sk(sk)->sap = sap; llc_sap_add_socket() 724 sk_nulls_add_node_rcu(sk, laddr_hb); llc_sap_add_socket() 732 * @sk: socket 737 void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk) llc_sap_remove_socket() argument 739 struct llc_sock *llc = llc_sk(sk); llc_sap_remove_socket() 742 sk_nulls_del_node_init_rcu(sk); llc_sap_remove_socket() 751 * @sk: current connection structure. 
756 static int llc_conn_rcv(struct sock *sk, struct sk_buff *skb) llc_conn_rcv() argument 762 return llc_conn_state_process(sk, skb); llc_conn_rcv() 765 static struct sock *llc_create_incoming_sock(struct sock *sk, llc_create_incoming_sock() argument 770 struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC, llc_create_incoming_sock() 771 sk->sk_prot); llc_create_incoming_sock() 772 struct llc_sock *newllc, *llc = llc_sk(sk); llc_create_incoming_sock() 790 struct sock *sk; llc_conn_handler() local 797 sk = __llc_lookup(sap, &saddr, &daddr); llc_conn_handler() 798 if (!sk) llc_conn_handler() 801 bh_lock_sock(sk); llc_conn_handler() 810 if (unlikely(sk->sk_state == TCP_LISTEN)) { llc_conn_handler() 811 struct sock *newsk = llc_create_incoming_sock(sk, skb->dev, llc_conn_handler() 824 skb->sk = sk; llc_conn_handler() 826 if (!sock_owned_by_user(sk)) llc_conn_handler() 827 llc_conn_rcv(sk, skb); llc_conn_handler() 831 if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) llc_conn_handler() 835 bh_unlock_sock(sk); llc_conn_handler() 836 sock_put(sk); llc_conn_handler() 853 * @sk: LLC sock (p8022 connection) 861 static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb) llc_backlog_rcv() argument 864 struct llc_sock *llc = llc_sk(sk); llc_backlog_rcv() 868 rc = llc_conn_rcv(sk, skb); llc_backlog_rcv() 874 rc = llc_conn_state_process(sk, skb); llc_backlog_rcv() 890 * @sk: socket to initialize. 894 static void llc_sk_init(struct sock *sk) llc_sk_init() argument 896 struct llc_sock *llc = llc_sk(sk); llc_sk_init() 903 (unsigned long)sk); llc_sk_init() 907 (unsigned long)sk); llc_sk_init() 911 (unsigned long)sk); llc_sk_init() 915 (unsigned long)sk); llc_sk_init() 923 sk->sk_backlog_rcv = llc_backlog_rcv; llc_sk_init() 936 struct sock *sk = sk_alloc(net, family, priority, prot); llc_sk_alloc() local 938 if (!sk) llc_sk_alloc() 940 llc_sk_init(sk); llc_sk_alloc() 941 sock_init_data(NULL, sk); llc_sk_alloc() 944 printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk, llc_sk_alloc() 948 return sk; llc_sk_alloc() 953 * @sk - socket to free 957 void llc_sk_free(struct sock *sk) llc_sk_free() argument 959 struct llc_sock *llc = llc_sk(sk); llc_sk_free() 963 llc_conn_ac_stop_all_timers(sk, NULL); llc_sk_free() 967 skb_queue_len(&sk->sk_write_queue)); llc_sk_free() 969 skb_queue_purge(&sk->sk_receive_queue); llc_sk_free() 970 skb_queue_purge(&sk->sk_write_queue); llc_sk_free() 973 if (atomic_read(&sk->sk_refcnt) != 1) { llc_sk_free() 975 sk, __func__, atomic_read(&sk->sk_refcnt)); llc_sk_free() 980 printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk, llc_sk_free() 984 sock_put(sk); llc_sk_free() 989 * @sk: LLC socket to reset 994 void llc_sk_reset(struct sock *sk) llc_sk_reset() argument 996 struct llc_sock *llc = llc_sk(sk); llc_sk_reset() 998 llc_conn_ac_stop_all_timers(sk, NULL); llc_sk_reset() 999 skb_queue_purge(&sk->sk_write_queue); llc_sk_reset() 1004 llc_conn_set_p_flag(sk, 0); llc_sk_reset() 472 llc_estab_match(const struct llc_sap *sap, const struct llc_addr *daddr, const struct llc_addr *laddr, const struct sock *sk) llc_estab_match() argument 545 llc_listener_match(const struct llc_sap *sap, const struct llc_addr *laddr, const struct sock *sk) llc_listener_match() argument
|
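The receive path in llc_conn_handler() above shows the classic softirq-versus-process locking split: run the state machine directly when no process owns the socket, otherwise defer the skb to the backlog that llc_backlog_rcv() replays under release_sock(). Condensed into one hypothetical helper (written as if file-local to llc_conn.c, since llc_conn_rcv() is static there):

static void llc_rx_dispatch_sketch(struct sock *sk, struct sk_buff *skb)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* fast path: softirq context, socket lock uncontended */
		llc_conn_rcv(sk, skb);
	} else {
		/* slow path: queued skbs are replayed via sk_backlog_rcv()
		 * when the owning process calls release_sock() */
		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
			kfree_skb(skb);
	}
	bh_unlock_sock(sk);
}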
H A D | llc_if.c | 31 * @sk: connection 42 int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb) llc_build_and_send_pkt() argument 46 struct llc_sock *llc = llc_sk(sk); llc_build_and_send_pkt() 61 rc = llc_conn_state_process(sk, skb); llc_build_and_send_pkt() 68 * @sk: connection 79 int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap) llc_establish_connection() argument 84 struct llc_sock *llc = llc_sk(sk); llc_establish_connection() 94 sk = existing; llc_establish_connection() 99 sock_hold(sk); llc_establish_connection() 108 skb_set_owner_w(skb, sk); llc_establish_connection() 109 rc = llc_conn_state_process(sk, skb); llc_establish_connection() 112 sock_put(sk); llc_establish_connection() 118 * @sk: connection to be closed 125 int llc_send_disc(struct sock *sk) llc_send_disc() argument 131 sock_hold(sk); llc_send_disc() 132 if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_ESTABLISHED || llc_send_disc() 133 llc_sk(sk)->state == LLC_CONN_STATE_ADM || llc_send_disc() 134 llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) llc_send_disc() 143 skb_set_owner_w(skb, sk); llc_send_disc() 144 sk->sk_state = TCP_CLOSING; llc_send_disc() 149 rc = llc_conn_state_process(sk, skb); llc_send_disc() 151 sock_put(sk); llc_send_disc()
|
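llc_send_disc() above illustrates how the llc_if.c entry points drive the connection state machine: a primitive is wrapped in an event stored in skb->cb and fed to llc_conn_state_process(). A stripped-down sketch of that step; the helper name is hypothetical and LLC_DISC_PRIM is shown only as an example primitive.

static int llc_send_prim_sketch(struct sock *sk, u8 prim)
{
	struct llc_conn_state_ev *ev;
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);
	ev = llc_conn_ev(skb);		/* event area lives in skb->cb */
	ev->type      = LLC_CONN_EV_TYPE_PRIM;
	ev->prim      = prim;		/* e.g. LLC_DISC_PRIM */
	ev->prim_type = LLC_PRIM_TYPE_REQ;
	return llc_conn_state_process(sk, skb);
}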
H A D | llc_proc.c | 37 struct sock *sk = NULL; llc_get_sk_idx() local 46 sk_nulls_for_each(sk, node, head) { sk_nulls_for_each() 54 sk = NULL; 56 return sk; 70 struct sock *sk = NULL; laddr_hash_next() local 73 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket]) laddr_hash_next() 77 return sk; laddr_hash_next() 82 struct sock* sk, *next; llc_seq_next() local 88 sk = llc_get_sk_idx(0); llc_seq_next() 91 sk = v; llc_seq_next() 92 next = sk_nulls_next(sk); llc_seq_next() 94 sk = next; llc_seq_next() 97 llc = llc_sk(sk); llc_seq_next() 99 sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr)); llc_seq_next() 100 if (sk) llc_seq_next() 105 sk = laddr_hash_next(sap, -1); llc_seq_next() 106 if (sk) llc_seq_next() 111 return sk; llc_seq_next() 117 struct sock *sk = v; llc_seq_stop() local 118 struct llc_sock *llc = llc_sk(sk); llc_seq_stop() 128 struct sock* sk; llc_seq_socket_show() local 136 sk = v; llc_seq_socket_show() 137 llc = llc_sk(sk); llc_seq_socket_show() 140 seq_printf(seq, "%2X %2X ", sk->sk_type, 0); llc_seq_socket_show() 151 sk_wmem_alloc_get(sk), llc_seq_socket_show() 152 sk_rmem_alloc_get(sk) - llc->copied_seq, llc_seq_socket_show() 153 sk->sk_state, llc_seq_socket_show() 154 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), llc_seq_socket_show() 177 struct sock* sk; llc_seq_core_show() local 186 sk = v; llc_seq_core_show() 187 llc = llc_sk(sk); llc_seq_core_show() 198 !!sk->sk_backlog.tail, !!sock_owned_by_user(sk)); llc_seq_core_show()
|
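The seq_file handlers above back /proc/net/llc/socket and /proc/net/llc/core; from user space the tables are plain text, so a trivial reader suffices. The path assumes a kernel with LLC proc support enabled.

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/llc/socket", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one line per LLC socket */
	fclose(f);
	return 0;
}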
/linux-4.1.27/net/iucv/ |
H A D | af_iucv.c | 52 #define __iucv_sock_wait(sk, condition, timeo, ret) \ 57 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \ 67 release_sock(sk); \ 69 lock_sock(sk); \ 70 ret = sock_error(sk); \ 74 finish_wait(sk_sleep(sk), &__wait); \ 77 #define iucv_sock_wait(sk, condition, timeo) \ 81 __iucv_sock_wait(sk, condition, timeo, __ret); \ 85 static void iucv_sock_kill(struct sock *sk); 86 static void iucv_sock_close(struct sock *sk); 152 struct sock *sk; afiucv_pm_freeze() local 159 sk_for_each(sk, &iucv_sk_list.head) { afiucv_pm_freeze() 160 iucv = iucv_sk(sk); afiucv_pm_freeze() 161 switch (sk->sk_state) { afiucv_pm_freeze() 165 iucv_sever_path(sk, 0); afiucv_pm_freeze() 189 struct sock *sk; afiucv_pm_restore_thaw() local 195 sk_for_each(sk, &iucv_sk_list.head) { afiucv_pm_restore_thaw() 196 switch (sk->sk_state) { afiucv_pm_restore_thaw() 198 sk->sk_err = EPIPE; afiucv_pm_restore_thaw() 199 sk->sk_state = IUCV_DISCONN; afiucv_pm_restore_thaw() 200 sk->sk_state_change(sk); afiucv_pm_restore_thaw() 266 * @sk: sock structure 267 * @state: first iucv sk state 268 * @state: second iucv sk state 272 static int iucv_sock_in_state(struct sock *sk, int state, int state2) iucv_sock_in_state() argument 274 return (sk->sk_state == state || sk->sk_state == state2); iucv_sock_in_state() 279 * @sk: sock structure 285 static inline int iucv_below_msglim(struct sock *sk) iucv_below_msglim() argument 287 struct iucv_sock *iucv = iucv_sk(sk); iucv_below_msglim() 289 if (sk->sk_state != IUCV_CONNECTED) iucv_below_msglim() 301 static void iucv_sock_wake_msglim(struct sock *sk) iucv_sock_wake_msglim() argument 306 wq = rcu_dereference(sk->sk_wq); iucv_sock_wake_msglim() 309 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); iucv_sock_wake_msglim() 384 struct sock *sk; __iucv_get_sock_by_name() local 386 sk_for_each(sk, &iucv_sk_list.head) __iucv_get_sock_by_name() 387 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) __iucv_get_sock_by_name() 388 return sk; __iucv_get_sock_by_name() 393 static void iucv_sock_destruct(struct sock *sk) iucv_sock_destruct() argument 395 skb_queue_purge(&sk->sk_receive_queue); iucv_sock_destruct() 396 skb_queue_purge(&sk->sk_error_queue); iucv_sock_destruct() 398 sk_mem_reclaim(sk); iucv_sock_destruct() 400 if (!sock_flag(sk, SOCK_DEAD)) { iucv_sock_destruct() 401 pr_err("Attempt to release alive iucv socket %p\n", sk); iucv_sock_destruct() 405 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); iucv_sock_destruct() 406 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); iucv_sock_destruct() 407 WARN_ON(sk->sk_wmem_queued); iucv_sock_destruct() 408 WARN_ON(sk->sk_forward_alloc); iucv_sock_destruct() 414 struct sock *sk; iucv_sock_cleanup_listen() local 417 while ((sk = iucv_accept_dequeue(parent, NULL))) { iucv_sock_cleanup_listen() 418 iucv_sock_close(sk); iucv_sock_cleanup_listen() 419 iucv_sock_kill(sk); iucv_sock_cleanup_listen() 426 static void iucv_sock_kill(struct sock *sk) iucv_sock_kill() argument 428 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) iucv_sock_kill() 431 iucv_sock_unlink(&iucv_sk_list, sk); iucv_sock_kill() 432 sock_set_flag(sk, SOCK_DEAD); iucv_sock_kill() 433 sock_put(sk); iucv_sock_kill() 437 static void iucv_sever_path(struct sock *sk, int with_user_data) iucv_sever_path() argument 440 struct iucv_sock *iucv = iucv_sk(sk); iucv_sever_path() 457 static int iucv_send_ctrl(struct sock *sk, u8 flags) iucv_send_ctrl() argument 464 skb = sock_alloc_send_skb(sk, blen, 1, &err); iucv_send_ctrl() 467 err = afiucv_hs_send(NULL, sk, skb, flags); iucv_send_ctrl() 473 static 
void iucv_sock_close(struct sock *sk) iucv_sock_close() argument 475 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_close() 479 lock_sock(sk); iucv_sock_close() 481 switch (sk->sk_state) { iucv_sock_close() 483 iucv_sock_cleanup_listen(sk); iucv_sock_close() 488 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); iucv_sock_close() 489 sk->sk_state = IUCV_DISCONN; iucv_sock_close() 490 sk->sk_state_change(sk); iucv_sock_close() 493 sk->sk_state = IUCV_CLOSING; iucv_sock_close() 494 sk->sk_state_change(sk); iucv_sock_close() 497 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) iucv_sock_close() 498 timeo = sk->sk_lingertime; iucv_sock_close() 501 iucv_sock_wait(sk, iucv_sock_close() 502 iucv_sock_in_state(sk, IUCV_CLOSED, 0), iucv_sock_close() 507 sk->sk_state = IUCV_CLOSED; iucv_sock_close() 508 sk->sk_state_change(sk); iucv_sock_close() 510 sk->sk_err = ECONNRESET; iucv_sock_close() 511 sk->sk_state_change(sk); iucv_sock_close() 517 iucv_sever_path(sk, 1); iucv_sock_close() 523 sk->sk_bound_dev_if = 0; iucv_sock_close() 527 sock_set_flag(sk, SOCK_ZAPPED); iucv_sock_close() 529 release_sock(sk); iucv_sock_close() 532 static void iucv_sock_init(struct sock *sk, struct sock *parent) iucv_sock_init() argument 535 sk->sk_type = parent->sk_type; iucv_sock_init() 540 struct sock *sk; iucv_sock_alloc() local 543 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); iucv_sock_alloc() 544 if (!sk) iucv_sock_alloc() 546 iucv = iucv_sk(sk); iucv_sock_alloc() 548 sock_init_data(sock, sk); iucv_sock_alloc() 569 sk->sk_destruct = iucv_sock_destruct; iucv_sock_alloc() 570 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; iucv_sock_alloc() 571 sk->sk_allocation = GFP_DMA; iucv_sock_alloc() 573 sock_reset_flag(sk, SOCK_ZAPPED); iucv_sock_alloc() 575 sk->sk_protocol = proto; iucv_sock_alloc() 576 sk->sk_state = IUCV_OPEN; iucv_sock_alloc() 578 iucv_sock_link(&iucv_sk_list, sk); iucv_sock_alloc() 579 return sk; iucv_sock_alloc() 586 struct sock *sk; iucv_sock_create() local 598 /* currently, proto ops can handle both sk types */ iucv_sock_create() 605 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); iucv_sock_create() 606 if (!sk) iucv_sock_create() 609 iucv_sock_init(sk, NULL); iucv_sock_create() 614 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) iucv_sock_link() argument 617 sk_add_node(sk, &l->head); iucv_sock_link() 621 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) iucv_sock_unlink() argument 624 sk_del_node_init(sk); iucv_sock_unlink() 628 void iucv_accept_enqueue(struct sock *parent, struct sock *sk) iucv_accept_enqueue() argument 633 sock_hold(sk); iucv_accept_enqueue() 635 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); iucv_accept_enqueue() 637 iucv_sk(sk)->parent = parent; iucv_accept_enqueue() 641 void iucv_accept_unlink(struct sock *sk) iucv_accept_unlink() argument 644 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); iucv_accept_unlink() 647 list_del_init(&iucv_sk(sk)->accept_q); iucv_accept_unlink() 649 sk_acceptq_removed(iucv_sk(sk)->parent); iucv_accept_unlink() 650 iucv_sk(sk)->parent = NULL; iucv_accept_unlink() 651 sock_put(sk); iucv_accept_unlink() 657 struct sock *sk; iucv_accept_dequeue() local 660 sk = (struct sock *) isk; iucv_accept_dequeue() 661 lock_sock(sk); iucv_accept_dequeue() 663 if (sk->sk_state == IUCV_CLOSED) { iucv_accept_dequeue() 664 iucv_accept_unlink(sk); iucv_accept_dequeue() 665 release_sock(sk); iucv_accept_dequeue() 669 if (sk->sk_state == IUCV_CONNECTED || iucv_accept_dequeue() 670 sk->sk_state == IUCV_DISCONN || 
iucv_accept_dequeue() 672 iucv_accept_unlink(sk); iucv_accept_dequeue() 674 sock_graft(sk, newsock); iucv_accept_dequeue() 676 release_sock(sk); iucv_accept_dequeue() 677 return sk; iucv_accept_dequeue() 680 release_sock(sk); iucv_accept_dequeue() 702 struct sock *sk = sock->sk; iucv_sock_bind() local 715 lock_sock(sk); iucv_sock_bind() 716 if (sk->sk_state != IUCV_OPEN) { iucv_sock_bind() 723 iucv = iucv_sk(sk); iucv_sock_bind() 748 sk->sk_bound_dev_if = dev->ifindex; iucv_sock_bind() 751 sk->sk_state = IUCV_BOUND; iucv_sock_bind() 765 sk->sk_state = IUCV_BOUND; iucv_sock_bind() 777 release_sock(sk); iucv_sock_bind() 782 static int iucv_sock_autobind(struct sock *sk) iucv_sock_autobind() argument 784 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_autobind() 805 struct sock *sk = sock->sk; afiucv_path_connect() local 806 struct iucv_sock *iucv = iucv_sk(sk); afiucv_path_connect() 823 sk); afiucv_path_connect() 852 struct sock *sk = sock->sk; iucv_sock_connect() local 853 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_connect() 859 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) iucv_sock_connect() 862 if (sk->sk_state == IUCV_OPEN && iucv_sock_connect() 866 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) iucv_sock_connect() 869 if (sk->sk_state == IUCV_OPEN) { iucv_sock_connect() 870 err = iucv_sock_autobind(sk); iucv_sock_connect() 875 lock_sock(sk); iucv_sock_connect() 882 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); iucv_sock_connect() 888 if (sk->sk_state != IUCV_CONNECTED) iucv_sock_connect() 889 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, iucv_sock_connect() 891 sock_sndtimeo(sk, flags & O_NONBLOCK)); iucv_sock_connect() 893 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) iucv_sock_connect() 897 iucv_sever_path(sk, 0); iucv_sock_connect() 900 release_sock(sk); iucv_sock_connect() 907 struct sock *sk = sock->sk; iucv_sock_listen() local 910 lock_sock(sk); iucv_sock_listen() 913 if (sk->sk_state != IUCV_BOUND) iucv_sock_listen() 919 sk->sk_max_ack_backlog = backlog; iucv_sock_listen() 920 sk->sk_ack_backlog = 0; iucv_sock_listen() 921 sk->sk_state = IUCV_LISTEN; iucv_sock_listen() 925 release_sock(sk); iucv_sock_listen() 934 struct sock *sk = sock->sk, *nsk; iucv_sock_accept() local 938 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); iucv_sock_accept() 940 if (sk->sk_state != IUCV_LISTEN) { iucv_sock_accept() 945 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); iucv_sock_accept() 948 add_wait_queue_exclusive(sk_sleep(sk), &wait); iucv_sock_accept() 949 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { iucv_sock_accept() 956 release_sock(sk); iucv_sock_accept() 958 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); iucv_sock_accept() 960 if (sk->sk_state != IUCV_LISTEN) { iucv_sock_accept() 972 remove_wait_queue(sk_sleep(sk), &wait); iucv_sock_accept() 980 release_sock(sk); iucv_sock_accept() 988 struct sock *sk = sock->sk; iucv_sock_getname() local 989 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_getname() 1035 struct sock *sk = sock->sk; iucv_sock_sendmsg() local 1036 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_sendmsg() 1047 err = sock_error(sk); iucv_sock_sendmsg() 1055 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) iucv_sock_sendmsg() 1058 lock_sock(sk); iucv_sock_sendmsg() 1060 if (sk->sk_shutdown & SEND_SHUTDOWN) { iucv_sock_sendmsg() 1066 if (sk->sk_state != IUCV_CONNECTED) { iucv_sock_sendmsg() 1115 skb = sock_alloc_send_skb(sk, 1119 skb = sock_alloc_send_skb(sk, len, noblock, &err); 1130 
timeo = sock_sndtimeo(sk, noblock); 1131 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); 1136 if (sk->sk_state != IUCV_CONNECTED) { 1147 err = afiucv_hs_send(&txmsg, sk, skb, 0); 1195 release_sock(sk); 1201 release_sock(sk); 1209 static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) iucv_fragment_skb() argument 1216 if (dataleft >= sk->sk_rcvbuf / 4) iucv_fragment_skb() 1217 size = sk->sk_rcvbuf / 4; iucv_fragment_skb() 1237 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); iucv_fragment_skb() 1247 static void iucv_process_message(struct sock *sk, struct sk_buff *skb, iucv_process_message() argument 1277 if (sk->sk_type == SOCK_STREAM && iucv_process_message() 1278 skb->truesize >= sk->sk_rcvbuf / 4) { iucv_process_message() 1279 rc = iucv_fragment_skb(sk, skb, len); iucv_process_message() 1286 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); iucv_process_message() 1295 if (sock_queue_rcv_skb(sk, skb)) iucv_process_message() 1296 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); iucv_process_message() 1303 static void iucv_process_message_q(struct sock *sk) iucv_process_message_q() argument 1305 struct iucv_sock *iucv = iucv_sk(sk); iucv_process_message_q() 1313 iucv_process_message(sk, skb, p->path, &p->msg); iucv_process_message_q() 1325 struct sock *sk = sock->sk; iucv_sock_recvmsg() local 1326 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_recvmsg() 1332 if ((sk->sk_state == IUCV_DISCONN) && iucv_sock_recvmsg() 1334 skb_queue_empty(&sk->sk_receive_queue) && iucv_sock_recvmsg() 1343 skb = skb_recv_datagram(sk, flags, noblock, &err); iucv_sock_recvmsg() 1345 if (sk->sk_shutdown & RCV_SHUTDOWN) iucv_sock_recvmsg() 1354 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; iucv_sock_recvmsg() 1359 skb_queue_head(&sk->sk_receive_queue, skb); iucv_sock_recvmsg() 1364 if (sk->sk_type == SOCK_SEQPACKET) { iucv_sock_recvmsg() 1379 skb_queue_head(&sk->sk_receive_queue, skb); iucv_sock_recvmsg() 1387 if (sk->sk_type == SOCK_STREAM) { iucv_sock_recvmsg() 1390 skb_queue_head(&sk->sk_receive_queue, skb); iucv_sock_recvmsg() 1400 iucv_sock_close(sk); iucv_sock_recvmsg() 1410 if (sock_queue_rcv_skb(sk, rskb)) { iucv_sock_recvmsg() 1420 iucv_process_message_q(sk); iucv_sock_recvmsg() 1423 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); iucv_sock_recvmsg() 1425 sk->sk_state = IUCV_DISCONN; iucv_sock_recvmsg() 1426 sk->sk_state_change(sk); iucv_sock_recvmsg() 1435 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) iucv_sock_recvmsg() 1444 struct sock *sk; iucv_accept_poll() local 1447 sk = (struct sock *) isk; iucv_accept_poll() 1449 if (sk->sk_state == IUCV_CONNECTED) iucv_accept_poll() 1459 struct sock *sk = sock->sk; iucv_sock_poll() local 1462 sock_poll_wait(file, sk_sleep(sk), wait); iucv_sock_poll() 1464 if (sk->sk_state == IUCV_LISTEN) iucv_sock_poll() 1465 return iucv_accept_poll(sk); iucv_sock_poll() 1467 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) iucv_sock_poll() 1469 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
POLLPRI : 0); iucv_sock_poll() 1471 if (sk->sk_shutdown & RCV_SHUTDOWN) iucv_sock_poll() 1474 if (sk->sk_shutdown == SHUTDOWN_MASK) iucv_sock_poll() 1477 if (!skb_queue_empty(&sk->sk_receive_queue) || iucv_sock_poll() 1478 (sk->sk_shutdown & RCV_SHUTDOWN)) iucv_sock_poll() 1481 if (sk->sk_state == IUCV_CLOSED) iucv_sock_poll() 1484 if (sk->sk_state == IUCV_DISCONN) iucv_sock_poll() 1487 if (sock_writeable(sk) && iucv_below_msglim(sk)) iucv_sock_poll() 1490 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); iucv_sock_poll() 1497 struct sock *sk = sock->sk; iucv_sock_shutdown() local 1498 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_shutdown() 1507 lock_sock(sk); iucv_sock_shutdown() 1508 switch (sk->sk_state) { iucv_sock_shutdown() 1539 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); iucv_sock_shutdown() 1542 sk->sk_shutdown |= how; iucv_sock_shutdown() 1549 /* skb_queue_purge(&sk->sk_receive_queue); */ iucv_sock_shutdown() 1551 skb_queue_purge(&sk->sk_receive_queue); iucv_sock_shutdown() 1555 sk->sk_state_change(sk); iucv_sock_shutdown() 1558 release_sock(sk); iucv_sock_shutdown() 1564 struct sock *sk = sock->sk; iucv_sock_release() local 1567 if (!sk) iucv_sock_release() 1570 iucv_sock_close(sk); iucv_sock_release() 1572 sock_orphan(sk); iucv_sock_release() 1573 iucv_sock_kill(sk); iucv_sock_release() 1581 struct sock *sk = sock->sk; iucv_sock_setsockopt() local 1582 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_setsockopt() 1597 lock_sock(sk); iucv_sock_setsockopt() 1606 switch (sk->sk_state) { iucv_sock_setsockopt() 1623 release_sock(sk); iucv_sock_setsockopt() 1631 struct sock *sk = sock->sk; iucv_sock_getsockopt() local 1632 struct iucv_sock *iucv = iucv_sk(sk); iucv_sock_getsockopt() 1652 lock_sock(sk); iucv_sock_getsockopt() 1655 release_sock(sk); iucv_sock_getsockopt() 1658 if (sk->sk_state == IUCV_OPEN) iucv_sock_getsockopt() 1684 struct sock *sk, *nsk; iucv_callback_connreq() local 1693 sk = NULL; iucv_callback_connreq() 1694 sk_for_each(sk, &iucv_sk_list.head) iucv_callback_connreq() 1695 if (sk->sk_state == IUCV_LISTEN && iucv_callback_connreq() 1696 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { iucv_callback_connreq() 1701 iucv = iucv_sk(sk); iucv_callback_connreq() 1709 bh_lock_sock(sk); iucv_callback_connreq() 1715 if (sk->sk_state != IUCV_LISTEN) { iucv_callback_connreq() 1722 if (sk_acceptq_is_full(sk)) { iucv_callback_connreq() 1729 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); iucv_callback_connreq() 1737 iucv_sock_init(nsk, sk); iucv_callback_connreq() 1762 iucv_accept_enqueue(sk, nsk); iucv_callback_connreq() 1766 sk->sk_data_ready(sk); iucv_callback_connreq() 1769 bh_unlock_sock(sk); iucv_callback_connreq() 1775 struct sock *sk = path->private; iucv_callback_connack() local 1777 sk->sk_state = IUCV_CONNECTED; iucv_callback_connack() 1778 sk->sk_state_change(sk); iucv_callback_connack() 1783 struct sock *sk = path->private; iucv_callback_rx() local 1784 struct iucv_sock *iucv = iucv_sk(sk); iucv_callback_rx() 1789 if (sk->sk_shutdown & RCV_SHUTDOWN) { iucv_callback_rx() 1800 len = atomic_read(&sk->sk_rmem_alloc); iucv_callback_rx() 1802 if (len > sk->sk_rcvbuf) iucv_callback_rx() 1809 iucv_process_message(sk, skb, path, msg); iucv_callback_rx() 1828 struct sock *sk = path->private; iucv_callback_txdone() local 1830 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; iucv_callback_txdone() 1834 bh_lock_sock(sk); iucv_callback_txdone() 1853 iucv_sock_wake_msglim(sk); iucv_callback_txdone() 1857 if (sk->sk_state == IUCV_CLOSING) { iucv_callback_txdone() 1858 if 
(skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { iucv_callback_txdone() 1859 sk->sk_state = IUCV_CLOSED; iucv_callback_txdone() 1860 sk->sk_state_change(sk); iucv_callback_txdone() 1863 bh_unlock_sock(sk); iucv_callback_txdone() 1869 struct sock *sk = path->private; iucv_callback_connrej() local 1871 if (sk->sk_state == IUCV_CLOSED) iucv_callback_connrej() 1874 bh_lock_sock(sk); iucv_callback_connrej() 1875 iucv_sever_path(sk, 1); iucv_callback_connrej() 1876 sk->sk_state = IUCV_DISCONN; iucv_callback_connrej() 1878 sk->sk_state_change(sk); iucv_callback_connrej() 1879 bh_unlock_sock(sk); iucv_callback_connrej() 1887 struct sock *sk = path->private; iucv_callback_shutdown() local 1889 bh_lock_sock(sk); iucv_callback_shutdown() 1890 if (sk->sk_state != IUCV_CLOSED) { iucv_callback_shutdown() 1891 sk->sk_shutdown |= SEND_SHUTDOWN; iucv_callback_shutdown() 1892 sk->sk_state_change(sk); iucv_callback_shutdown() 1894 bh_unlock_sock(sk); iucv_callback_shutdown() 1922 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) afiucv_hs_callback_syn() argument 1929 iucv = iucv_sk(sk); afiucv_hs_callback_syn() 1939 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); afiucv_hs_callback_syn() 1940 bh_lock_sock(sk); afiucv_hs_callback_syn() 1941 if ((sk->sk_state != IUCV_LISTEN) || afiucv_hs_callback_syn() 1942 sk_acceptq_is_full(sk) || afiucv_hs_callback_syn() 1949 bh_unlock_sock(sk); afiucv_hs_callback_syn() 1954 iucv_sock_init(nsk, sk); afiucv_hs_callback_syn() 1965 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; afiucv_hs_callback_syn() 1974 iucv_accept_enqueue(sk, nsk); afiucv_hs_callback_syn() 1976 sk->sk_data_ready(sk); afiucv_hs_callback_syn() 1979 bh_unlock_sock(sk); afiucv_hs_callback_syn() 1988 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) afiucv_hs_callback_synack() argument 1990 struct iucv_sock *iucv = iucv_sk(sk); afiucv_hs_callback_synack() 1996 if (sk->sk_state != IUCV_BOUND) afiucv_hs_callback_synack() 1998 bh_lock_sock(sk); afiucv_hs_callback_synack() 2000 sk->sk_state = IUCV_CONNECTED; afiucv_hs_callback_synack() 2001 sk->sk_state_change(sk); afiucv_hs_callback_synack() 2002 bh_unlock_sock(sk); afiucv_hs_callback_synack() 2011 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) afiucv_hs_callback_synfin() argument 2013 struct iucv_sock *iucv = iucv_sk(sk); afiucv_hs_callback_synfin() 2017 if (sk->sk_state != IUCV_BOUND) afiucv_hs_callback_synfin() 2019 bh_lock_sock(sk); afiucv_hs_callback_synfin() 2020 sk->sk_state = IUCV_DISCONN; afiucv_hs_callback_synfin() 2021 sk->sk_state_change(sk); afiucv_hs_callback_synfin() 2022 bh_unlock_sock(sk); afiucv_hs_callback_synfin() 2031 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) afiucv_hs_callback_fin() argument 2033 struct iucv_sock *iucv = iucv_sk(sk); afiucv_hs_callback_fin() 2038 bh_lock_sock(sk); afiucv_hs_callback_fin() 2039 if (sk->sk_state == IUCV_CONNECTED) { afiucv_hs_callback_fin() 2040 sk->sk_state = IUCV_DISCONN; afiucv_hs_callback_fin() 2041 sk->sk_state_change(sk); afiucv_hs_callback_fin() 2043 bh_unlock_sock(sk); afiucv_hs_callback_fin() 2052 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) afiucv_hs_callback_win() argument 2054 struct iucv_sock *iucv = iucv_sk(sk); afiucv_hs_callback_win() 2061 if (sk->sk_state != IUCV_CONNECTED) afiucv_hs_callback_win() 2065 iucv_sock_wake_msglim(sk); afiucv_hs_callback_win() 2072 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) afiucv_hs_callback_rx() argument 
2074 struct iucv_sock *iucv = iucv_sk(sk); afiucv_hs_callback_rx() 2081 if (sk->sk_state != IUCV_CONNECTED) { afiucv_hs_callback_rx() 2086 if (sk->sk_shutdown & RCV_SHUTDOWN) { afiucv_hs_callback_rx() 2102 if (sock_queue_rcv_skb(sk, skb)) { afiucv_hs_callback_rx() 2107 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); afiucv_hs_callback_rx() 2120 struct sock *sk; afiucv_hs_rcv() local 2134 sk = NULL; afiucv_hs_rcv() 2136 sk_for_each(sk, &iucv_sk_list.head) { afiucv_hs_rcv() 2138 if ((!memcmp(&iucv_sk(sk)->src_name, afiucv_hs_rcv() 2140 (!memcmp(&iucv_sk(sk)->src_user_id, afiucv_hs_rcv() 2142 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && afiucv_hs_rcv() 2143 (!memcmp(&iucv_sk(sk)->dst_user_id, afiucv_hs_rcv() 2145 iucv = iucv_sk(sk); afiucv_hs_rcv() 2149 if ((!memcmp(&iucv_sk(sk)->src_name, afiucv_hs_rcv() 2151 (!memcmp(&iucv_sk(sk)->src_user_id, afiucv_hs_rcv() 2153 (!memcmp(&iucv_sk(sk)->dst_name, afiucv_hs_rcv() 2155 (!memcmp(&iucv_sk(sk)->dst_user_id, afiucv_hs_rcv() 2157 iucv = iucv_sk(sk); afiucv_hs_rcv() 2164 sk = NULL; afiucv_hs_rcv() 2178 err = afiucv_hs_callback_syn(sk, skb); afiucv_hs_rcv() 2182 err = afiucv_hs_callback_synack(sk, skb); afiucv_hs_rcv() 2186 err = afiucv_hs_callback_synfin(sk, skb); afiucv_hs_rcv() 2190 err = afiucv_hs_callback_fin(sk, skb); afiucv_hs_rcv() 2193 err = afiucv_hs_callback_win(sk, skb); afiucv_hs_rcv() 2205 err = afiucv_hs_callback_rx(sk, skb); afiucv_hs_rcv() 2221 struct sock *isk = skb->sk; afiucv_hs_callback_txnotify() 2222 struct sock *sk = NULL; afiucv_hs_callback_txnotify() local 2230 sk_for_each(sk, &iucv_sk_list.head) afiucv_hs_callback_txnotify() 2231 if (sk == isk) { afiucv_hs_callback_txnotify() 2232 iucv = iucv_sk(sk); afiucv_hs_callback_txnotify() 2237 if (!iucv || sock_flag(sk, SOCK_ZAPPED)) afiucv_hs_callback_txnotify() 2252 iucv_sock_wake_msglim(sk); afiucv_hs_callback_txnotify() 2261 iucv_sock_wake_msglim(sk); afiucv_hs_callback_txnotify() 2271 if (sk->sk_state == IUCV_CONNECTED) { afiucv_hs_callback_txnotify() 2272 sk->sk_state = IUCV_DISCONN; afiucv_hs_callback_txnotify() 2273 sk->sk_state_change(sk); afiucv_hs_callback_txnotify() 2285 if (sk->sk_state == IUCV_CLOSING) { afiucv_hs_callback_txnotify() 2286 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { afiucv_hs_callback_txnotify() 2287 sk->sk_state = IUCV_CLOSED; afiucv_hs_callback_txnotify() 2288 sk->sk_state_change(sk); afiucv_hs_callback_txnotify() 2301 struct sock *sk; afiucv_netdev_event() local 2307 sk_for_each(sk, &iucv_sk_list.head) { afiucv_netdev_event() 2308 iucv = iucv_sk(sk); afiucv_netdev_event() 2310 (sk->sk_state == IUCV_CONNECTED)) { afiucv_netdev_event() 2312 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); afiucv_netdev_event() 2313 sk->sk_state = IUCV_DISCONN; afiucv_netdev_event() 2314 sk->sk_state_change(sk); afiucv_netdev_event()
|
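The af_iucv receive path above (iucv_fragment_skb) never queues a single buffer larger than a quarter of the socket receive buffer: an oversized message is cut into sk_rcvbuf/4-sized pieces and parked on backlog_skb_q until the reader drains them. A minimal user-space sketch of that chunking rule — RCVBUF and queue_fragments() are inventions of this sketch, not kernel API:

#include <stdio.h>
#include <string.h>

#define RCVBUF 32			/* stand-in for sk->sk_rcvbuf */

static void queue_fragments(const char *data, size_t len)
{
	size_t cap = RCVBUF / 4;	/* mirrors "size = sk->sk_rcvbuf / 4" */

	while (len > 0) {
		size_t n = len >= cap ? cap : len;

		printf("queued fragment of %zu bytes: \"%.*s\"\n",
		       n, (int)n, data);
		data += n;		/* next fragment starts here */
		len -= n;
	}
}

int main(void)
{
	const char msg[] = "a message larger than a quarter of rcvbuf";

	queue_fragments(msg, strlen(msg));
	return 0;
}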
/linux-4.1.27/net/ax25/ |
H A D | ax25_std_timer.c | 34 struct sock *sk = ax25->sk; ax25_std_heartbeat_expiry() local 36 if (sk) ax25_std_heartbeat_expiry() 37 bh_lock_sock(sk); ax25_std_heartbeat_expiry() 43 if (!sk || sock_flag(sk, SOCK_DESTROY) || ax25_std_heartbeat_expiry() 44 (sk->sk_state == TCP_LISTEN && ax25_std_heartbeat_expiry() 45 sock_flag(sk, SOCK_DEAD))) { ax25_std_heartbeat_expiry() 46 if (sk) { ax25_std_heartbeat_expiry() 47 sock_hold(sk); ax25_std_heartbeat_expiry() 49 bh_unlock_sock(sk); ax25_std_heartbeat_expiry() 50 sock_put(sk); ax25_std_heartbeat_expiry() 62 if (sk != NULL) { ax25_std_heartbeat_expiry() 63 if (atomic_read(&sk->sk_rmem_alloc) < ax25_std_heartbeat_expiry() 64 (sk->sk_rcvbuf >> 1) && ax25_std_heartbeat_expiry() 74 if (sk) ax25_std_heartbeat_expiry() 75 bh_unlock_sock(sk); ax25_std_heartbeat_expiry() 108 if (ax25->sk != NULL) { ax25_std_idletimer_expiry() 109 bh_lock_sock(ax25->sk); ax25_std_idletimer_expiry() 110 ax25->sk->sk_state = TCP_CLOSE; ax25_std_idletimer_expiry() 111 ax25->sk->sk_err = 0; ax25_std_idletimer_expiry() 112 ax25->sk->sk_shutdown |= SEND_SHUTDOWN; ax25_std_idletimer_expiry() 113 if (!sock_flag(ax25->sk, SOCK_DEAD)) { ax25_std_idletimer_expiry() 114 ax25->sk->sk_state_change(ax25->sk); ax25_std_idletimer_expiry() 115 sock_set_flag(ax25->sk, SOCK_DEAD); ax25_std_idletimer_expiry() 117 bh_unlock_sock(ax25->sk); ax25_std_idletimer_expiry()
|
H A D | ax25_ds_timer.c | 97 struct sock *sk=ax25->sk; ax25_ds_heartbeat_expiry() local 99 if (sk) ax25_ds_heartbeat_expiry() 100 bh_lock_sock(sk); ax25_ds_heartbeat_expiry() 107 if (!sk || sock_flag(sk, SOCK_DESTROY) || ax25_ds_heartbeat_expiry() 108 (sk->sk_state == TCP_LISTEN && ax25_ds_heartbeat_expiry() 109 sock_flag(sk, SOCK_DEAD))) { ax25_ds_heartbeat_expiry() 110 if (sk) { ax25_ds_heartbeat_expiry() 111 sock_hold(sk); ax25_ds_heartbeat_expiry() 113 bh_unlock_sock(sk); ax25_ds_heartbeat_expiry() 114 sock_put(sk); ax25_ds_heartbeat_expiry() 125 if (sk != NULL) { ax25_ds_heartbeat_expiry() 126 if (atomic_read(&sk->sk_rmem_alloc) < ax25_ds_heartbeat_expiry() 127 (sk->sk_rcvbuf >> 1) && ax25_ds_heartbeat_expiry() 137 if (sk) ax25_ds_heartbeat_expiry() 138 bh_unlock_sock(sk); ax25_ds_heartbeat_expiry() 169 if (ax25->sk != NULL) { ax25_ds_idletimer_expiry() 170 bh_lock_sock(ax25->sk); ax25_ds_idletimer_expiry() 171 ax25->sk->sk_state = TCP_CLOSE; ax25_ds_idletimer_expiry() 172 ax25->sk->sk_err = 0; ax25_ds_idletimer_expiry() 173 ax25->sk->sk_shutdown |= SEND_SHUTDOWN; ax25_ds_idletimer_expiry() 174 if (!sock_flag(ax25->sk, SOCK_DEAD)) { ax25_ds_idletimer_expiry() 175 ax25->sk->sk_state_change(ax25->sk); ax25_ds_idletimer_expiry() 176 sock_set_flag(ax25->sk, SOCK_DEAD); ax25_ds_idletimer_expiry() 178 bh_unlock_sock(ax25->sk); ax25_ds_idletimer_expiry()
|
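The two heartbeat handlers above (ax25_std_heartbeat_expiry and ax25_ds_heartbeat_expiry) make the same three-way decision on every tick: destroy a socket flagged SOCK_DESTROY or a dead listener, lift receive-side flow control once sk_rmem_alloc drops below half of sk_rcvbuf, otherwise just re-arm the timer. Modeled as a pure function — the struct and enum are illustrative stand-ins, not kernel types, and the locking is omitted:

#include <stdbool.h>
#include <stdio.h>

enum hb_action { HB_DESTROY, HB_UNCHOKE, HB_REARM };

struct fake_sock {
	bool destroy_flagged;	/* models sock_flag(sk, SOCK_DESTROY) */
	bool dead_listener;	/* models TCP_LISTEN + SOCK_DEAD */
	int rmem_alloc;		/* models sk->sk_rmem_alloc */
	int rcvbuf;		/* models sk->sk_rcvbuf */
};

static enum hb_action heartbeat(const struct fake_sock *s)
{
	if (s->destroy_flagged || s->dead_listener)
		return HB_DESTROY;		/* tear the socket down */
	if (s->rmem_alloc < s->rcvbuf / 2)
		return HB_UNCHOKE;		/* receive window freed up */
	return HB_REARM;			/* nothing to do this tick */
}

int main(void)
{
	struct fake_sock s = { .rmem_alloc = 100, .rcvbuf = 400 };

	printf("action = %d\n", heartbeat(&s));	/* prints 1 (HB_UNCHOKE) */
	return 0;
}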
H A D | af_ax25.c | 59 static void ax25_free_sock(struct sock *sk) ax25_free_sock() argument 61 ax25_cb_put(ax25_sk(sk)); ax25_free_sock() 165 if (s->sk && !ax25cmp(&s->source_addr, addr) && ax25_find_listener() 166 s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) { ax25_find_listener() 169 sock_hold(s->sk); ax25_find_listener() 171 return s->sk; ax25_find_listener() 186 struct sock *sk = NULL; ax25_get_socket() local 191 if (s->sk && !ax25cmp(&s->source_addr, my_addr) && ax25_get_socket() 193 s->sk->sk_type == type) { ax25_get_socket() 194 sk = s->sk; ax25_get_socket() 195 sock_hold(sk); ax25_get_socket() 202 return sk; ax25_get_socket() 216 if (s->sk && s->sk->sk_type != SOCK_SEQPACKET) ax25_find_cb() 250 if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && ax25_send_to_raw() 251 s->sk->sk_type == SOCK_RAW && ax25_send_to_raw() 252 s->sk->sk_protocol == proto && ax25_send_to_raw() 254 atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) { ax25_send_to_raw() 257 if (sock_queue_rcv_skb(s->sk, copy) != 0) ax25_send_to_raw() 275 struct sock *sk; ax25_destroy_timer() local 277 sk=ax25->sk; ax25_destroy_timer() 279 bh_lock_sock(sk); ax25_destroy_timer() 280 sock_hold(sk); ax25_destroy_timer() 282 bh_unlock_sock(sk); ax25_destroy_timer() 283 sock_put(sk); ax25_destroy_timer() 306 if (ax25->sk != NULL) { ax25_destroy_socket() 307 while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) { ax25_destroy_socket() 308 if (skb->sk != ax25->sk) { ax25_destroy_socket() 310 ax25_cb *sax25 = ax25_sk(skb->sk); ax25_destroy_socket() 313 sock_orphan(skb->sk); ax25_destroy_socket() 316 skb->sk->sk_state = TCP_LISTEN; ax25_destroy_socket() 324 skb_queue_purge(&ax25->sk->sk_write_queue); ax25_destroy_socket() 327 if (ax25->sk != NULL) { ax25_destroy_socket() 328 if (sk_has_allocations(ax25->sk)) { ax25_destroy_socket() 335 struct sock *sk=ax25->sk; ax25_destroy_socket() local 336 ax25->sk=NULL; ax25_destroy_socket() 337 sock_put(sk); ax25_destroy_socket() 538 struct sock *sk = sock->sk; ax25_setsockopt() local 554 lock_sock(sk); ax25_setsockopt() 555 ax25 = ax25_sk(sk); ax25_setsockopt() 651 if (sk->sk_type == SOCK_SEQPACKET && ax25_setsockopt() 653 sk->sk_state == TCP_LISTEN)) { ax25_setsockopt() 672 release_sock(sk); ax25_setsockopt() 680 struct sock *sk = sock->sk; ax25_getsockopt() local 700 lock_sock(sk); ax25_getsockopt() 701 ax25 = ax25_sk(sk); ax25_getsockopt() 763 release_sock(sk); ax25_getsockopt() 766 release_sock(sk); ax25_getsockopt() 776 struct sock *sk = sock->sk; ax25_listen() local 779 lock_sock(sk); ax25_listen() 780 if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) { ax25_listen() 781 sk->sk_max_ack_backlog = backlog; ax25_listen() 782 sk->sk_state = TCP_LISTEN; ax25_listen() 788 release_sock(sk); ax25_listen() 806 struct sock *sk; ax25_create() local 861 sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto); ax25_create() 862 if (sk == NULL) ax25_create() 865 ax25 = sk->sk_protinfo = ax25_create_cb(); ax25_create() 867 sk_free(sk); ax25_create() 871 sock_init_data(sock, sk); ax25_create() 873 sk->sk_destruct = ax25_free_sock; ax25_create() 875 sk->sk_protocol = protocol; ax25_create() 877 ax25->sk = sk; ax25_create() 884 struct sock *sk; ax25_make_new() local 887 sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot); ax25_make_new() 888 if (sk == NULL) ax25_make_new() 892 sk_free(sk); ax25_make_new() 902 sk_free(sk); ax25_make_new() 907 sock_init_data(NULL, sk); ax25_make_new() 909 sk->sk_type = osk->sk_type; ax25_make_new() 910 
sk->sk_priority = osk->sk_priority; ax25_make_new() 911 sk->sk_protocol = osk->sk_protocol; ax25_make_new() 912 sk->sk_rcvbuf = osk->sk_rcvbuf; ax25_make_new() 913 sk->sk_sndbuf = osk->sk_sndbuf; ax25_make_new() 914 sk->sk_state = TCP_ESTABLISHED; ax25_make_new() 915 sock_copy_flags(sk, osk); ax25_make_new() 939 sk_free(sk); ax25_make_new() 945 sk->sk_protinfo = ax25; ax25_make_new() 946 sk->sk_destruct = ax25_free_sock; ax25_make_new() 947 ax25->sk = sk; ax25_make_new() 949 return sk; ax25_make_new() 954 struct sock *sk = sock->sk; ax25_release() local 957 if (sk == NULL) ax25_release() 960 sock_hold(sk); ax25_release() 961 sock_orphan(sk); ax25_release() 962 lock_sock(sk); ax25_release() 963 ax25 = ax25_sk(sk); ax25_release() 965 if (sk->sk_type == SOCK_SEQPACKET) { ax25_release() 968 release_sock(sk); ax25_release() 970 lock_sock(sk); ax25_release() 977 release_sock(sk); ax25_release() 979 lock_sock(sk); ax25_release() 1009 sk->sk_state = TCP_CLOSE; ax25_release() 1010 sk->sk_shutdown |= SEND_SHUTDOWN; ax25_release() 1011 sk->sk_state_change(sk); ax25_release() 1012 sock_set_flag(sk, SOCK_DESTROY); ax25_release() 1019 sk->sk_state = TCP_CLOSE; ax25_release() 1020 sk->sk_shutdown |= SEND_SHUTDOWN; ax25_release() 1021 sk->sk_state_change(sk); ax25_release() 1025 sock->sk = NULL; ax25_release() 1026 release_sock(sk); ax25_release() 1027 sock_put(sk); ax25_release() 1040 struct sock *sk = sock->sk; ax25_bind() local 1071 lock_sock(sk); ax25_bind() 1073 ax25 = ax25_sk(sk); ax25_bind() 1074 if (!sock_flag(sk, SOCK_ZAPPED)) { ax25_bind() 1105 sock_reset_flag(sk, SOCK_ZAPPED); ax25_bind() 1108 release_sock(sk); ax25_bind() 1119 struct sock *sk = sock->sk; ax25_connect() local 1120 ax25_cb *ax25 = ax25_sk(sk), *ax25t; ax25_connect() 1146 lock_sock(sk); ax25_connect() 1150 switch (sk->sk_state) { ax25_connect() 1166 if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) { ax25_connect() 1171 sk->sk_state = TCP_CLOSE; ax25_connect() 1214 if (sock_flag(sk, SOCK_ZAPPED)) { ax25_connect() 1233 if (sk->sk_type == SOCK_SEQPACKET && ax25_connect() 1246 if (sk->sk_type != SOCK_SEQPACKET) { ax25_connect() 1248 sk->sk_state = TCP_ESTABLISHED; ax25_connect() 1254 sk->sk_state = TCP_SYN_SENT; ax25_connect() 1279 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { ax25_connect() 1284 if (sk->sk_state == TCP_SYN_SENT) { ax25_connect() 1288 prepare_to_wait(sk_sleep(sk), &wait, ax25_connect() 1290 if (sk->sk_state != TCP_SYN_SENT) ax25_connect() 1293 release_sock(sk); ax25_connect() 1295 lock_sock(sk); ax25_connect() 1301 finish_wait(sk_sleep(sk), &wait); ax25_connect() 1307 if (sk->sk_state != TCP_ESTABLISHED) { ax25_connect() 1310 err = sock_error(sk); /* Always set at this point */ ax25_connect() 1318 release_sock(sk); ax25_connect() 1328 struct sock *sk; ax25_accept() local 1334 if ((sk = sock->sk) == NULL) ax25_accept() 1337 lock_sock(sk); ax25_accept() 1338 if (sk->sk_type != SOCK_SEQPACKET) { ax25_accept() 1343 if (sk->sk_state != TCP_LISTEN) { ax25_accept() 1353 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); ax25_accept() 1354 skb = skb_dequeue(&sk->sk_receive_queue); ax25_accept() 1363 release_sock(sk); ax25_accept() 1365 lock_sock(sk); ax25_accept() 1371 finish_wait(sk_sleep(sk), &wait); ax25_accept() 1376 newsk = skb->sk; ax25_accept() 1381 sk->sk_ack_backlog--; ax25_accept() 1385 release_sock(sk); ax25_accept() 1394 struct sock *sk = sock->sk; ax25_getname() local 1400 lock_sock(sk); ax25_getname() 1401 ax25 = ax25_sk(sk); ax25_getname() 1404 if (sk->sk_state != 
TCP_ESTABLISHED) { ax25_getname() 1433 release_sock(sk); ax25_getname() 1441 struct sock *sk = sock->sk; ax25_sendmsg() local 1452 lock_sock(sk); ax25_sendmsg() 1453 ax25 = ax25_sk(sk); ax25_sendmsg() 1455 if (sock_flag(sk, SOCK_ZAPPED)) { ax25_sendmsg() 1460 if (sk->sk_shutdown & SEND_SHUTDOWN) { ax25_sendmsg() 1519 if (sk->sk_type == SOCK_SEQPACKET && ax25_sendmsg() 1534 if (sk->sk_state != TCP_ESTABLISHED) { ax25_sendmsg() 1547 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err); ax25_sendmsg() 1564 *skb_push(skb, 1) = sk->sk_protocol; ax25_sendmsg() 1566 if (sk->sk_type == SOCK_SEQPACKET) { ax25_sendmsg() 1568 if (sk->sk_state != TCP_ESTABLISHED) { ax25_sendmsg() 1599 release_sock(sk); ax25_sendmsg() 1607 struct sock *sk = sock->sk; ax25_recvmsg() local 1612 lock_sock(sk); ax25_recvmsg() 1617 if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) { ax25_recvmsg() 1623 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, ax25_recvmsg() 1628 if (!ax25_sk(sk)->pidincl) ax25_recvmsg() 1667 skb_free_datagram(sk, skb); ax25_recvmsg() 1671 release_sock(sk); ax25_recvmsg() 1676 static int ax25_shutdown(struct socket *sk, int how) ax25_shutdown() argument 1684 struct sock *sk = sock->sk; ax25_ioctl() local 1688 lock_sock(sk); ax25_ioctl() 1693 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); ax25_ioctl() 1704 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) ax25_ioctl() 1711 res = sock_get_timestamp(sk, argp); ax25_ioctl() 1715 res = sock_get_timestampns(sk, argp); ax25_ioctl() 1769 ax25_cb *ax25 = ax25_sk(sk); ax25_ioctl() 1783 ax25_info.rcv_q = sk_rmem_alloc_get(sk); ax25_ioctl() 1784 ax25_info.snd_q = sk_wmem_alloc_get(sk); ax25_ioctl() 1847 release_sock(sk); ax25_ioctl() 1910 if (ax25->sk != NULL) { ax25_info_show() 1912 sk_wmem_alloc_get(ax25->sk), ax25_info_show() 1913 sk_rmem_alloc_get(ax25->sk), ax25_info_show() 1914 sock_i_ino(ax25->sk)); ax25_info_show()
|
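ax25_connect() above blocks a non-O_NONBLOCK caller in the classic prepare_to_wait / release_sock / schedule / lock_sock loop until the state leaves TCP_SYN_SENT. The same drop-the-lock-while-sleeping shape, as a rough pthread analogue; all names are invented for the sketch, and the signal handling (-ERESTARTSYS) is omitted:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int state = 1;			/* 1 ~ TCP_SYN_SENT, 2 ~ TCP_ESTABLISHED */

static void *responder(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	state = 2;			/* the peer accepted the connection */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, responder, NULL);

	pthread_mutex_lock(&lock);
	while (state == 1)		/* while sk->sk_state == TCP_SYN_SENT */
		pthread_cond_wait(&cond, &lock);	/* drops the lock, like release_sock() around schedule() */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("connected, state = %d\n", state);
	return 0;
}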
H A D | ax25_in.c | 145 if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) { ax25_rx_iframe() 146 if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) || ax25_rx_iframe() 148 if (sock_queue_rcv_skb(ax25->sk, skb) == 0) ax25_rx_iframe() 192 struct sock *make, *sk; ax25_rcv() local 264 sk = ax25_get_socket(&dest, &src, SOCK_DGRAM); ax25_rcv() 265 if (sk != NULL) { ax25_rcv() 266 bh_lock_sock(sk); ax25_rcv() 267 if (atomic_read(&sk->sk_rmem_alloc) >= ax25_rcv() 268 sk->sk_rcvbuf) { ax25_rcv() 275 if (sock_queue_rcv_skb(sk, skb) != 0) ax25_rcv() 278 bh_unlock_sock(sk); ax25_rcv() 279 sock_put(sk); ax25_rcv() 340 sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET); ax25_rcv() 342 sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET); ax25_rcv() 344 if (sk != NULL) { ax25_rcv() 345 bh_lock_sock(sk); ax25_rcv() 346 if (sk_acceptq_is_full(sk) || ax25_rcv() 347 (make = ax25_make_new(sk, ax25_dev)) == NULL) { ax25_rcv() 351 bh_unlock_sock(sk); ax25_rcv() 352 sock_put(sk); ax25_rcv() 359 skb_queue_head(&sk->sk_receive_queue, skb); ax25_rcv() 363 sk->sk_ack_backlog++; ax25_rcv() 364 bh_unlock_sock(sk); ax25_rcv() 387 if (sk) ax25_rcv() 388 sock_put(sk); ax25_rcv() 423 if (sk) { ax25_rcv() 424 if (!sock_flag(sk, SOCK_DEAD)) ax25_rcv() 425 sk->sk_data_ready(sk); ax25_rcv() 426 sock_put(sk); ax25_rcv()
|
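ax25_send_to_raw() above walks every bound raw AX.25 socket, cloning the frame for each matching receiver and skipping sockets whose receive allocation has already reached sk_rcvbuf. Approximately — the admission test below is a simplification of the kernel's atomic_read() comparison, and the types are invented:

#include <stdio.h>

struct rx_sock { int rmem_alloc, rcvbuf; };

static void deliver_to_raw(struct rx_sock *socks, int n, int framelen)
{
	for (int i = 0; i < n; i++) {
		if (socks[i].rmem_alloc + framelen > socks[i].rcvbuf) {
			printf("sock %d: skipped (buffer full)\n", i);
			continue;	/* kernel skips; no clone is made */
		}
		socks[i].rmem_alloc += framelen;	/* models sock_queue_rcv_skb() on a clone */
		printf("sock %d: queued clone, rmem now %d\n",
		       i, socks[i].rmem_alloc);
	}
}

int main(void)
{
	struct rx_sock socks[] = { { 0, 256 }, { 250, 256 } };

	deliver_to_raw(socks, 2, 64);
	return 0;
}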
/linux-4.1.27/drivers/isdn/mISDN/ |
H A D | socket.c | 31 #define _pms(sk) ((struct mISDN_sock *)sk) 55 mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk) mISDN_sock_link() argument 58 sk_add_node(sk, &l->head); mISDN_sock_link() 62 static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk) mISDN_sock_unlink() argument 65 sk_del_node_init(sk); mISDN_sock_unlink() 78 if (msk->sk.sk_state == MISDN_CLOSED) mISDN_send() 81 err = sock_queue_rcv_skb(&msk->sk, skb); mISDN_send() 97 msk->sk.sk_state = MISDN_CLOSED; mISDN_ctrl() 104 mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) mISDN_sock_cmsg() argument 108 if (_pms(sk)->cmask & MISDN_TIME_STAMP) { mISDN_sock_cmsg() 119 struct sock *sk = sock->sk; mISDN_sock_recvmsg() local 125 __func__, (int)len, flags, _pms(sk)->ch.nr, mISDN_sock_recvmsg() 126 sk->sk_protocol); mISDN_sock_recvmsg() 130 if (sk->sk_state == MISDN_CLOSED) mISDN_sock_recvmsg() 133 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); mISDN_sock_recvmsg() 141 maddr->dev = _pms(sk)->dev->id; mISDN_sock_recvmsg() 142 if ((sk->sk_protocol == ISDN_P_LAPD_TE) || mISDN_sock_recvmsg() 143 (sk->sk_protocol == ISDN_P_LAPD_NT)) { mISDN_sock_recvmsg() 148 maddr->channel = _pms(sk)->ch.nr; mISDN_sock_recvmsg() 149 maddr->sapi = _pms(sk)->ch.addr & 0xFF; mISDN_sock_recvmsg() 150 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; mISDN_sock_recvmsg() 160 skb_queue_head(&sk->sk_receive_queue, skb); mISDN_sock_recvmsg() 168 mISDN_sock_cmsg(sk, msg, skb); mISDN_sock_recvmsg() 170 skb_free_datagram(sk, skb); mISDN_sock_recvmsg() 178 struct sock *sk = sock->sk; mISDN_sock_sendmsg() local 184 __func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr, mISDN_sock_sendmsg() 185 sk->sk_protocol); mISDN_sock_sendmsg() 196 if (sk->sk_state != MISDN_BOUND) mISDN_sock_sendmsg() 199 lock_sock(sk); mISDN_sock_sendmsg() 218 if ((sk->sk_protocol == ISDN_P_LAPD_TE) || mISDN_sock_sendmsg() 219 (sk->sk_protocol == ISDN_P_LAPD_NT)) mISDN_sock_sendmsg() 220 mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr; mISDN_sock_sendmsg() 228 if (!_pms(sk)->ch.peer) mISDN_sock_sendmsg() 230 err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb); mISDN_sock_sendmsg() 241 release_sock(sk); mISDN_sock_sendmsg() 248 struct sock *sk = sock->sk; data_sock_release() local 251 printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); data_sock_release() 252 if (!sk) data_sock_release() 254 switch (sk->sk_protocol) { data_sock_release() 259 if (sk->sk_state == MISDN_BOUND) data_sock_release() 260 delete_channel(&_pms(sk)->ch); data_sock_release() 262 mISDN_sock_unlink(&data_sockets, sk); data_sock_release() 272 delete_channel(&_pms(sk)->ch); data_sock_release() 273 mISDN_sock_unlink(&data_sockets, sk); data_sock_release() 277 lock_sock(sk); data_sock_release() 279 sock_orphan(sk); data_sock_release() 280 skb_queue_purge(&sk->sk_receive_queue); data_sock_release() 282 release_sock(sk); data_sock_release() 283 sock_put(sk); data_sock_release() 289 data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p) data_sock_ioctl_bound() argument 295 lock_sock(sk); data_sock_ioctl_bound() 296 if (!_pms(sk)->dev) { data_sock_ioctl_bound() 306 if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) { data_sock_ioctl_bound() 308 &_pms(sk)->dev->bchannels, list) { data_sock_ioctl_bound() 316 err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D, data_sock_ioctl_bound() 324 if (sk->sk_protocol != ISDN_P_LAPD_NT) { data_sock_ioctl_bound() 333 err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr, data_sock_ioctl_bound() 337 if (sk->sk_protocol != 
ISDN_P_LAPD_NT data_sock_ioctl_bound() 338 && sk->sk_protocol != ISDN_P_LAPD_TE) { data_sock_ioctl_bound() 347 err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr, data_sock_ioctl_bound() 355 release_sock(sk); data_sock_ioctl_bound() 363 struct sock *sk = sock->sk; data_sock_ioctl() local 404 if (sk->sk_state == MISDN_BOUND) data_sock_ioctl() 405 err = data_sock_ioctl_bound(sk, cmd, data_sock_ioctl() 416 struct sock *sk = sock->sk; data_sock_setsockopt() local 423 lock_sock(sk); data_sock_setsockopt() 433 _pms(sk)->cmask |= MISDN_TIME_STAMP; data_sock_setsockopt() 435 _pms(sk)->cmask &= ~MISDN_TIME_STAMP; data_sock_setsockopt() 441 release_sock(sk); data_sock_setsockopt() 448 struct sock *sk = sock->sk; data_sock_getsockopt() local 459 if (_pms(sk)->cmask & MISDN_TIME_STAMP) data_sock_getsockopt() 478 struct sock *sk = sock->sk; data_sock_bind() local 483 printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); data_sock_bind() 489 lock_sock(sk); data_sock_bind() 491 if (_pms(sk)->dev) { data_sock_bind() 495 _pms(sk)->dev = get_mdevice(maddr->dev); data_sock_bind() 496 if (!_pms(sk)->dev) { data_sock_bind() 501 if (sk->sk_protocol < ISDN_P_B_START) { data_sock_bind() 504 if (sk == csk) data_sock_bind() 506 if (_pms(csk)->dev != _pms(sk)->dev) data_sock_bind() 511 == IS_ISDN_P_TE(sk->sk_protocol)) data_sock_bind() 520 _pms(sk)->ch.send = mISDN_send; data_sock_bind() 521 _pms(sk)->ch.ctrl = mISDN_ctrl; data_sock_bind() 523 switch (sk->sk_protocol) { data_sock_bind() 528 mISDN_sock_unlink(&data_sockets, sk); data_sock_bind() 529 err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch, data_sock_bind() 530 sk->sk_protocol, maddr); data_sock_bind() 532 mISDN_sock_link(&data_sockets, sk); data_sock_bind() 536 err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch, data_sock_bind() 537 sk->sk_protocol, maddr); data_sock_bind() 545 err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch, data_sock_bind() 546 sk->sk_protocol, maddr); data_sock_bind() 553 sk->sk_state = MISDN_BOUND; data_sock_bind() 554 _pms(sk)->ch.protocol = sk->sk_protocol; data_sock_bind() 557 release_sock(sk); data_sock_bind() 566 struct sock *sk = sock->sk; data_sock_getname() local 568 if (!_pms(sk)->dev) data_sock_getname() 571 lock_sock(sk); data_sock_getname() 575 maddr->dev = _pms(sk)->dev->id; data_sock_getname() 576 maddr->channel = _pms(sk)->ch.nr; data_sock_getname() 577 maddr->sapi = _pms(sk)->ch.addr & 0xff; data_sock_getname() 578 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff; data_sock_getname() 579 release_sock(sk); data_sock_getname() 606 struct sock *sk; data_sock_create() local 611 sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto); data_sock_create() 612 if (!sk) data_sock_create() 615 sock_init_data(sock, sk); data_sock_create() 619 sock_reset_flag(sk, SOCK_ZAPPED); data_sock_create() 621 sk->sk_protocol = protocol; data_sock_create() 622 sk->sk_state = MISDN_OPEN; data_sock_create() 623 mISDN_sock_link(&data_sockets, sk); data_sock_create() 631 struct sock *sk = sock->sk; base_sock_release() local 633 printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); base_sock_release() 634 if (!sk) base_sock_release() 637 mISDN_sock_unlink(&base_sockets, sk); base_sock_release() 638 sock_orphan(sk); base_sock_release() 639 sock_put(sk); base_sock_release() 712 struct sock *sk = sock->sk; base_sock_bind() local 718 lock_sock(sk); base_sock_bind() 720 if (_pms(sk)->dev) { base_sock_bind() 725 _pms(sk)->dev = get_mdevice(maddr->dev); base_sock_bind() 726 if (!_pms(sk)->dev) { base_sock_bind() 730 sk->sk_state = MISDN_BOUND; 
base_sock_bind() 733 release_sock(sk); base_sock_bind() 761 struct sock *sk; base_sock_create() local 766 sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto); base_sock_create() 767 if (!sk) base_sock_create() 770 sock_init_data(sock, sk); base_sock_create() 773 sock_reset_flag(sk, SOCK_ZAPPED); base_sock_create() 774 sk->sk_protocol = protocol; base_sock_create() 775 sk->sk_state = MISDN_OPEN; base_sock_create() 776 mISDN_sock_link(&base_sockets, sk); base_sock_create()
|
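data_sock_setsockopt() above implements MISDN_TIME_STAMP by toggling one bit in a per-socket cmask, which mISDN_sock_cmsg() later consults before emitting a timestamp control message. The generic set/clear idiom, with invented names:

#include <stdbool.h>
#include <stdio.h>

#define OPT_TIME_STAMP 0x1	/* stand-in for MISDN_TIME_STAMP */

static unsigned int cmask;

static void set_opt(unsigned int bit, bool on)
{
	if (on)
		cmask |= bit;	/* _pms(sk)->cmask |= MISDN_TIME_STAMP */
	else
		cmask &= ~bit;	/* _pms(sk)->cmask &= ~MISDN_TIME_STAMP */
}

int main(void)
{
	set_opt(OPT_TIME_STAMP, true);
	printf("timestamping %s\n", (cmask & OPT_TIME_STAMP) ? "on" : "off");
	set_opt(OPT_TIME_STAMP, false);
	printf("timestamping %s\n", (cmask & OPT_TIME_STAMP) ? "on" : "off");
	return 0;
}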
H A D | dsp_dtmf.c | 123 s32 sk, sk1, sk2; dsp_dtmf_goertzel_decode() local 160 sk = (*hfccoeff++) >> 4; dsp_dtmf_goertzel_decode() 161 if (sk > 32767 || sk < -32767 || sk2 > 32767 dsp_dtmf_goertzel_decode() 167 (sk * sk) - dsp_dtmf_goertzel_decode() 168 (((cos2pik[k] * sk) >> 15) * sk2) + dsp_dtmf_goertzel_decode() 185 sk = 0; dsp_dtmf_goertzel_decode() 191 sk = ((cos2pik_ * sk1) >> 15) - sk2 + (*buf++); dsp_dtmf_goertzel_decode() 193 sk1 = sk; dsp_dtmf_goertzel_decode() 195 sk >>= 8; dsp_dtmf_goertzel_decode() 197 if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767) dsp_dtmf_goertzel_decode() 201 (sk * sk) - dsp_dtmf_goertzel_decode() 202 (((cos2pik[k] * sk) >> 15) * sk2) + dsp_dtmf_goertzel_decode()
|
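dsp_dtmf_goertzel_decode() above is a fixed-point Goertzel filter: per sample it computes s_n = 2cos(2*pi*f/fs) * s_{n-1} - s_{n-2} + x_n (cos2pik holds 2cos(2*pi*f/fs) scaled by 2^15, hence the >> 15), and the tone energy at the end of the block is s1^2 + s2^2 - 2cos(2*pi*f/fs) * s1 * s2. A floating-point equivalent for one DTMF frequency at the 8 kHz ISDN sample rate — the block length and amplitude here are illustrative, not taken from the driver:

#include <math.h>
#include <stdio.h>

static double goertzel_power(const short *x, int n, double freq, double fs)
{
	double coeff = 2.0 * cos(2.0 * M_PI * freq / fs);
	double s1 = 0.0, s2 = 0.0;

	for (int i = 0; i < n; i++) {
		double s = coeff * s1 - s2 + x[i];	/* same recurrence as sk above */
		s2 = s1;
		s1 = s;
	}
	return s1 * s1 + s2 * s2 - coeff * s1 * s2;
}

int main(void)
{
	short buf[128];

	for (int i = 0; i < 128; i++)	/* synthesize a 697 Hz DTMF row tone */
		buf[i] = (short)(10000.0 * sin(2.0 * M_PI * 697.0 * i / 8000.0));

	printf("697 Hz energy: %.0f\n", goertzel_power(buf, 128, 697.0, 8000.0));
	printf("941 Hz energy: %.0f\n", goertzel_power(buf, 128, 941.0, 8000.0));
	return 0;
}

The first energy dominates the second by orders of magnitude, which is exactly how the driver picks the row and column frequencies of a digit.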
/linux-4.1.27/net/phonet/ |
H A D | socket.c | 41 struct sock *sk = sock->sk; pn_socket_release() local 43 if (sk) { pn_socket_release() 44 sock->sk = NULL; pn_socket_release() 45 sk->sk_prot->close(sk, 0); pn_socket_release() 143 void pn_sock_hash(struct sock *sk) pn_sock_hash() argument 145 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject); pn_sock_hash() 148 sk_add_node_rcu(sk, hlist); pn_sock_hash() 153 void pn_sock_unhash(struct sock *sk) pn_sock_unhash() argument 156 sk_del_node_init_rcu(sk); pn_sock_unhash() 158 pn_sock_unbind_all_res(sk); pn_sock_unhash() 167 struct sock *sk = sock->sk; pn_socket_bind() local 168 struct pn_sock *pn = pn_sk(sk); pn_socket_bind() 174 if (sk->sk_prot->bind) pn_socket_bind() 175 return sk->sk_prot->bind(sk, addr, len); pn_socket_bind() 184 if (saddr && phonet_address_lookup(sock_net(sk), saddr)) pn_socket_bind() 187 lock_sock(sk); pn_socket_bind() 188 if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) { pn_socket_bind() 192 WARN_ON(sk_hashed(sk)); pn_socket_bind() 194 err = sk->sk_prot->get_port(sk, pn_port(handle)); pn_socket_bind() 203 sk->sk_prot->hash(sk); pn_socket_bind() 207 release_sock(sk); pn_socket_bind() 222 BUG_ON(!pn_port(pn_sk(sock->sk)->sobject)); pn_socket_autobind() 229 struct sock *sk = sock->sk; pn_socket_connect() local 230 struct pn_sock *pn = pn_sk(sk); pn_socket_connect() 233 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); pn_socket_connect() 243 lock_sock(sk); pn_socket_connect() 247 if (sk->sk_state != TCP_CLOSE) { pn_socket_connect() 264 err = sk->sk_prot->connect(sk, addr, len); pn_socket_connect() 271 while (sk->sk_state == TCP_SYN_SENT) { pn_socket_connect() 283 prepare_to_wait_exclusive(sk_sleep(sk), &wait, pn_socket_connect() 285 release_sock(sk); pn_socket_connect() 287 lock_sock(sk); pn_socket_connect() 288 finish_wait(sk_sleep(sk), &wait); pn_socket_connect() 291 if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) pn_socket_connect() 293 else if (sk->sk_state == TCP_CLOSE_WAIT) pn_socket_connect() 299 release_sock(sk); pn_socket_connect() 306 struct sock *sk = sock->sk; pn_socket_accept() local 310 if (unlikely(sk->sk_state != TCP_LISTEN)) pn_socket_accept() 313 newsk = sk->sk_prot->accept(sk, flags, &err); pn_socket_accept() 327 struct sock *sk = sock->sk; pn_socket_getname() local 328 struct pn_sock *pn = pn_sk(sk); pn_socket_getname() 343 struct sock *sk = sock->sk; pn_socket_poll() local 344 struct pep_sock *pn = pep_sk(sk); pn_socket_poll() 347 poll_wait(file, sk_sleep(sk), wait); pn_socket_poll() 349 if (sk->sk_state == TCP_CLOSE) pn_socket_poll() 351 if (!skb_queue_empty(&sk->sk_receive_queue)) pn_socket_poll() 355 if (!mask && sk->sk_state == TCP_CLOSE_WAIT) pn_socket_poll() 358 if (sk->sk_state == TCP_ESTABLISHED && pn_socket_poll() 359 atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf && pn_socket_poll() 369 struct sock *sk = sock->sk; pn_socket_ioctl() local 370 struct pn_sock *pn = pn_sk(sk); pn_socket_ioctl() 380 lock_sock(sk); pn_socket_ioctl() 381 if (sk->sk_bound_dev_if) pn_socket_ioctl() 382 dev = dev_get_by_index(sock_net(sk), pn_socket_ioctl() 383 sk->sk_bound_dev_if); pn_socket_ioctl() 385 dev = phonet_device_get(sock_net(sk)); pn_socket_ioctl() 390 release_sock(sk); pn_socket_ioctl() 401 return sk->sk_prot->ioctl(sk, cmd, arg); pn_socket_ioctl() 406 struct sock *sk = sock->sk; pn_socket_listen() local 412 lock_sock(sk); pn_socket_listen() 418 if (sk->sk_state != TCP_LISTEN) { pn_socket_listen() 419 sk->sk_state = TCP_LISTEN; pn_socket_listen() 420 sk->sk_ack_backlog = 0; pn_socket_listen() 422 
sk->sk_max_ack_backlog = backlog; pn_socket_listen() 424 release_sock(sk); pn_socket_listen() 431 struct sock *sk = sock->sk; pn_socket_sendmsg() local 436 return sk->sk_prot->sendmsg(sk, m, total_len); pn_socket_sendmsg() 491 int pn_sock_get_port(struct sock *sk, unsigned short sport) pn_sock_get_port() argument 494 struct net *net = sock_net(sk); pn_sock_get_port() 495 struct pn_sock *pn = pn_sk(sk); pn_sock_get_port() 560 static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk) pn_sock_get_next() argument 565 sk = sk_next(sk); pn_sock_get_next() 566 while (sk && !net_eq(net, sock_net(sk))); pn_sock_get_next() 568 return sk; pn_sock_get_next() 580 struct sock *sk; pn_sock_seq_next() local 583 sk = pn_sock_get_idx(seq, 0); pn_sock_seq_next() 585 sk = pn_sock_get_next(seq, v); pn_sock_seq_next() 587 return sk; pn_sock_seq_next() 603 struct sock *sk = v; pn_sock_seq_show() local 604 struct pn_sock *pn = pn_sk(sk); pn_sock_seq_show() 608 sk->sk_protocol, pn->sobject, pn->dobject, pn_sock_seq_show() 609 pn->resource, sk->sk_state, pn_sock_seq_show() 610 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), pn_sock_seq_show() 611 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), pn_sock_seq_show() 612 sock_i_ino(sk), pn_sock_seq_show() 613 atomic_read(&sk->sk_refcnt), sk, pn_sock_seq_show() 614 atomic_read(&sk->sk_drops)); pn_sock_seq_show() 643 struct sock *sk[256]; member in struct:__anon14208 651 struct sock *sk; pn_find_sock_by_res() local 657 sk = rcu_dereference(pnres.sk[res]); pn_find_sock_by_res() 658 if (sk) pn_find_sock_by_res() 659 sock_hold(sk); pn_find_sock_by_res() 661 return sk; pn_find_sock_by_res() 666 int pn_sock_bind_res(struct sock *sk, u8 res) pn_sock_bind_res() argument 670 if (!net_eq(sock_net(sk), &init_net)) pn_sock_bind_res() 674 if (pn_socket_autobind(sk->sk_socket)) pn_sock_bind_res() 678 if (pnres.sk[res] == NULL) { pn_sock_bind_res() 679 sock_hold(sk); pn_sock_bind_res() 680 rcu_assign_pointer(pnres.sk[res], sk); pn_sock_bind_res() 687 int pn_sock_unbind_res(struct sock *sk, u8 res) pn_sock_unbind_res() argument 695 if (pnres.sk[res] == sk) { pn_sock_unbind_res() 696 RCU_INIT_POINTER(pnres.sk[res], NULL); pn_sock_unbind_res() 703 sock_put(sk); pn_sock_unbind_res() 708 void pn_sock_unbind_all_res(struct sock *sk) pn_sock_unbind_all_res() argument 714 if (pnres.sk[res] == sk) { pn_sock_unbind_all_res() 715 RCU_INIT_POINTER(pnres.sk[res], NULL); pn_sock_unbind_all_res() 722 __sock_put(sk); pn_sock_unbind_all_res() 738 if (pnres.sk[i] == NULL) pn_res_get_idx() 741 return pnres.sk + i; pn_res_get_idx() 747 static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk) pn_res_get_next() argument 754 for (i = (sk - pnres.sk) + 1; i < 256; i++) pn_res_get_next() 755 if (pnres.sk[i]) pn_res_get_next() 756 return pnres.sk + i; pn_res_get_next() 769 struct sock **sk; pn_res_seq_next() local 772 sk = pn_res_get_idx(seq, 0); pn_res_seq_next() 774 sk = pn_res_get_next(seq, v); pn_res_seq_next() 776 return sk; pn_res_seq_next() 792 struct sock *sk = *psk; pn_res_seq_show() local 795 (int) (psk - pnres.sk), pn_res_seq_show() 796 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), pn_res_seq_show() 797 sock_i_ino(sk)); pn_res_seq_show()
|
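pn_sock_bind_res()/pn_sock_unbind_res() above maintain a 256-slot resource-to-socket table (pnres.sk), written under a mutex and read under RCU with socket refcounts held across lookups. Stripped of the locking, RCU, and refcounting, the ownership rules reduce to this (return values are placeholders, not the kernel's errnos):

#include <stdio.h>

static void *pnres_sk[256];	/* one owning socket per Phonet resource */

static int bind_res(void *sk, unsigned char res)
{
	if (pnres_sk[res] != NULL)
		return -1;	/* resource already owned */
	pnres_sk[res] = sk;	/* models rcu_assign_pointer(pnres.sk[res], sk) */
	return 0;
}

static int unbind_res(void *sk, unsigned char res)
{
	if (pnres_sk[res] != sk)
		return -1;	/* only the owner may unbind */
	pnres_sk[res] = NULL;	/* models RCU_INIT_POINTER(pnres.sk[res], NULL) */
	return 0;
}

int main(void)
{
	int a, b;

	bind_res(&a, 42);
	printf("second bind:  %d\n", bind_res(&b, 42));		/* -1: taken */
	printf("wrong unbind: %d\n", unbind_res(&b, 42));	/* -1: not owner */
	printf("owner unbind: %d\n", unbind_res(&a, 42));	/* 0: released */
	return 0;
}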
H A D | pep.c | 81 static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload, pep_alloc_skb() argument 87 skb_set_owner_w(skb, sk); pep_alloc_skb() 97 static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code, pep_reply() argument 105 skb = pep_alloc_skb(sk, data, len, priority); pep_reply() 116 return pn_skb_send(sk, skb, &peer); pep_reply() 119 static int pep_indicate(struct sock *sk, u8 id, u8 code, pep_indicate() argument 122 struct pep_sock *pn = pep_sk(sk); pep_indicate() 126 skb = pep_alloc_skb(sk, data, len, priority); pep_indicate() 135 return pn_skb_send(sk, skb, NULL); pep_indicate() 140 static int pipe_handler_request(struct sock *sk, u8 id, u8 code, pipe_handler_request() argument 143 struct pep_sock *pn = pep_sk(sk); pipe_handler_request() 147 skb = pep_alloc_skb(sk, data, len, GFP_KERNEL); pipe_handler_request() 156 return pn_skb_send(sk, skb, NULL); pipe_handler_request() 159 static int pipe_handler_send_created_ind(struct sock *sk) pipe_handler_send_created_ind() argument 161 struct pep_sock *pn = pep_sk(sk); pipe_handler_send_created_ind() 167 return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */, pipe_handler_send_created_ind() 171 static int pep_accept_conn(struct sock *sk, struct sk_buff *skb) pep_accept_conn() argument 188 return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data), pep_accept_conn() 192 static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code, pep_reject_conn() argument 197 return pep_reply(sk, skb, code, data, sizeof(data), priority); pep_reject_conn() 202 static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, pep_ctrlreq_error() argument 215 skb = pep_alloc_skb(sk, data, 4, priority); pep_ctrlreq_error() 226 return pn_skb_send(sk, skb, &dst); pep_ctrlreq_error() 229 static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority) pipe_snd_status() argument 233 return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON, pipe_snd_status() 239 static void pipe_grant_credits(struct sock *sk, gfp_t priority) pipe_grant_credits() argument 241 struct pep_sock *pn = pep_sk(sk); pipe_grant_credits() 243 BUG_ON(sk->sk_state != TCP_ESTABLISHED); pipe_grant_credits() 249 if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL, pipe_grant_credits() 256 if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS, pipe_grant_credits() 264 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) pipe_rcv_status() argument 266 struct pep_sock *pn = pep_sk(sk); pipe_rcv_status() 312 sk->sk_write_space(sk); pipe_rcv_status() 316 static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb) pipe_rcv_created() argument 318 struct pep_sock *pn = pep_sk(sk); pipe_rcv_created() 345 static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) pipe_do_rcv() argument 347 struct pep_sock *pn = pep_sk(sk); pipe_do_rcv() 352 BUG_ON(sk->sk_state == TCP_CLOSE_WAIT); pipe_do_rcv() 356 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC); pipe_do_rcv() 360 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); pipe_do_rcv() 361 sk->sk_state = TCP_CLOSE_WAIT; pipe_do_rcv() 362 if (!sock_flag(sk, SOCK_DEAD)) pipe_do_rcv() 363 sk->sk_state_change(sk); pipe_do_rcv() 368 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); pipe_do_rcv() 386 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); pipe_do_rcv() 391 atomic_inc(&sk->sk_drops); pipe_do_rcv() 404 err = sock_queue_rcv_skb(sk, skb); pipe_do_rcv() 412 atomic_inc(&sk->sk_drops); pipe_do_rcv() 417 queue = 
&sk->sk_receive_queue; pipe_do_rcv() 421 pipe_rcv_status(sk, skb); pipe_do_rcv() 425 err = pipe_rcv_created(sk, skb); pipe_do_rcv() 429 err = pipe_rcv_created(sk, skb); pipe_do_rcv() 440 sk->sk_write_space(sk); pipe_do_rcv() 442 if (sk->sk_state == TCP_ESTABLISHED) pipe_do_rcv() 444 sk->sk_state = TCP_ESTABLISHED; pipe_do_rcv() 445 pipe_grant_credits(sk, GFP_ATOMIC); pipe_do_rcv() 449 sk->sk_state = TCP_SYN_RECV; pipe_do_rcv() 464 skb_set_owner_r(skb, sk); pipe_do_rcv() 466 if (!sock_flag(sk, SOCK_DEAD)) pipe_do_rcv() 467 sk->sk_data_ready(sk); pipe_do_rcv() 472 static void pipe_destruct(struct sock *sk) pipe_destruct() argument 474 struct pep_sock *pn = pep_sk(sk); pipe_destruct() 476 skb_queue_purge(&sk->sk_receive_queue); pipe_destruct() 494 static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) pep_connresp_rcv() argument 496 struct pep_sock *pn = pep_sk(sk); pep_connresp_rcv() 533 return pipe_handler_send_created_ind(sk); pep_connresp_rcv() 536 static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb) pep_enableresp_rcv() argument 543 return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */, pep_enableresp_rcv() 548 static void pipe_start_flow_control(struct sock *sk) pipe_start_flow_control() argument 550 struct pep_sock *pn = pep_sk(sk); pipe_start_flow_control() 554 sk->sk_write_space(sk); pipe_start_flow_control() 556 pipe_grant_credits(sk, GFP_ATOMIC); pipe_start_flow_control() 561 static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) pipe_handler_do_rcv() argument 563 struct pep_sock *pn = pep_sk(sk); pipe_handler_do_rcv() 574 err = sock_queue_rcv_skb(sk, skb); pipe_handler_do_rcv() 582 atomic_inc(&sk->sk_drops); pipe_handler_do_rcv() 588 skb_set_owner_r(skb, sk); pipe_handler_do_rcv() 589 skb_queue_tail(&sk->sk_receive_queue, skb); pipe_handler_do_rcv() 590 if (!sock_flag(sk, SOCK_DEAD)) pipe_handler_do_rcv() 591 sk->sk_data_ready(sk); pipe_handler_do_rcv() 595 if (sk->sk_state != TCP_SYN_SENT) pipe_handler_do_rcv() 597 if (!sock_flag(sk, SOCK_DEAD)) pipe_handler_do_rcv() 598 sk->sk_state_change(sk); pipe_handler_do_rcv() 599 if (pep_connresp_rcv(sk, skb)) { pipe_handler_do_rcv() 600 sk->sk_state = TCP_CLOSE_WAIT; pipe_handler_do_rcv() 604 sk->sk_state = TCP_SYN_RECV; pipe_handler_do_rcv() 606 sk->sk_state = TCP_ESTABLISHED; pipe_handler_do_rcv() 607 pipe_start_flow_control(sk); pipe_handler_do_rcv() 612 if (sk->sk_state != TCP_SYN_SENT) pipe_handler_do_rcv() 615 if (pep_enableresp_rcv(sk, skb)) { pipe_handler_do_rcv() 616 sk->sk_state = TCP_CLOSE_WAIT; pipe_handler_do_rcv() 620 sk->sk_state = TCP_ESTABLISHED; pipe_handler_do_rcv() 621 pipe_start_flow_control(sk); pipe_handler_do_rcv() 629 pipe_rcv_status(sk, skb); pipe_handler_do_rcv() 666 static int pep_do_rcv(struct sock *sk, struct sk_buff *skb) pep_do_rcv() argument 668 struct pep_sock *pn = pep_sk(sk); pep_do_rcv() 691 if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) { pep_do_rcv() 692 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, pep_do_rcv() 696 skb_queue_head(&sk->sk_receive_queue, skb); pep_do_rcv() 697 sk_acceptq_added(sk); pep_do_rcv() 698 if (!sock_flag(sk, SOCK_DEAD)) pep_do_rcv() 699 sk->sk_data_ready(sk); pep_do_rcv() 703 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); pep_do_rcv() 707 pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC); pep_do_rcv() 717 if ((1 << sk->sk_state) pep_do_rcv() 720 return pipe_handler_do_rcv(sk, skb); pep_do_rcv() 727 static int pipe_do_remove(struct sock *sk) pipe_do_remove() argument 729 struct pep_sock 
*pn = pep_sk(sk); pipe_do_remove() 733 skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL); pipe_do_remove() 742 return pn_skb_send(sk, skb, NULL); pipe_do_remove() 746 static void pep_sock_close(struct sock *sk, long timeout) pep_sock_close() argument 748 struct pep_sock *pn = pep_sk(sk); pep_sock_close() 751 sock_hold(sk); /* keep a reference after sk_common_release() */ pep_sock_close() 752 sk_common_release(sk); pep_sock_close() 754 lock_sock(sk); pep_sock_close() 755 if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) { pep_sock_close() 756 if (sk->sk_backlog_rcv == pipe_do_rcv) pep_sock_close() 758 pipe_do_remove(sk); pep_sock_close() 760 pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD, pep_sock_close() 763 sk->sk_state = TCP_CLOSE; pep_sock_close() 767 release_sock(sk); pep_sock_close() 770 gprs_detach(sk); pep_sock_close() 771 sock_put(sk); pep_sock_close() 774 static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) pep_sock_accept() argument 776 struct pep_sock *pn = pep_sk(sk), *newpn; pep_sock_accept() 786 skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp); pep_sock_accept() 790 lock_sock(sk); pep_sock_accept() 791 if (sk->sk_state != TCP_LISTEN) { pep_sock_accept() 795 sk_acceptq_removed(sk); pep_sock_accept() 811 pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM, pep_sock_accept() 843 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL); pep_sock_accept() 848 newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot); pep_sock_accept() 850 pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL); pep_sock_accept() 858 newsk->sk_protocol = sk->sk_protocol; pep_sock_accept() 867 sock_hold(sk); pep_sock_accept() 868 newpn->listener = sk; pep_sock_accept() 887 release_sock(sk); pep_sock_accept() 893 static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len) pep_sock_connect() argument 895 struct pep_sock *pn = pep_sk(sk); pep_sock_connect() 902 err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ, pep_sock_connect() 909 sk->sk_state = TCP_SYN_SENT; pep_sock_connect() 914 static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len) pep_sock_enable() argument 918 err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD, pep_sock_enable() 923 sk->sk_state = TCP_SYN_SENT; pep_sock_enable() 928 static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) pep_ioctl() argument 930 struct pep_sock *pn = pep_sk(sk); pep_ioctl() 936 if (sk->sk_state == TCP_LISTEN) { pep_ioctl() 941 lock_sock(sk); pep_ioctl() 942 if (sock_flag(sk, SOCK_URGINLINE) && pep_ioctl() 945 else if (!skb_queue_empty(&sk->sk_receive_queue)) pep_ioctl() 946 answ = skb_peek(&sk->sk_receive_queue)->len; pep_ioctl() 949 release_sock(sk); pep_ioctl() 954 lock_sock(sk); pep_ioctl() 955 if (sk->sk_state == TCP_SYN_SENT) pep_ioctl() 957 else if (sk->sk_state == TCP_ESTABLISHED) pep_ioctl() 960 ret = pep_sock_enable(sk, NULL, 0); pep_ioctl() 961 release_sock(sk); pep_ioctl() 968 static int pep_init(struct sock *sk) pep_init() argument 970 struct pep_sock *pn = pep_sk(sk); pep_init() 972 sk->sk_destruct = pipe_destruct; pep_init() 987 static int pep_setsockopt(struct sock *sk, int level, int optname, pep_setsockopt() argument 990 struct pep_sock *pn = pep_sk(sk); pep_setsockopt() 1000 lock_sock(sk); pep_setsockopt() 1014 release_sock(sk); pep_setsockopt() 1015 err = gprs_attach(sk); pep_setsockopt() 1022 release_sock(sk); pep_setsockopt() 1023 gprs_detach(sk); pep_setsockopt() 1029 if ((sk->sk_state == TCP_CLOSE) && pep_setsockopt() 1043 
release_sock(sk); pep_setsockopt() 1049 static int pep_getsockopt(struct sock *sk, int level, int optname, pep_getsockopt() argument 1052 struct pep_sock *pn = pep_sk(sk); pep_getsockopt() 1091 static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) pipe_skb_send() argument 1093 struct pep_sock *pn = pep_sk(sk); pipe_skb_send() 1113 err = pn_skb_send(sk, skb, NULL); pipe_skb_send() 1121 static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) pep_sendmsg() argument 1123 struct pep_sock *pn = pep_sk(sk); pep_sendmsg() 1137 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, pep_sendmsg() 1147 lock_sock(sk); pep_sendmsg() 1148 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); pep_sendmsg() 1149 if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) { pep_sendmsg() 1153 if (sk->sk_state != TCP_ESTABLISHED) { pep_sendmsg() 1156 err = sk_stream_wait_connect(sk, &timeo); pep_sendmsg() 1160 if (sk->sk_state == TCP_CLOSE_WAIT) { pep_sendmsg() 1165 BUG_ON(sk->sk_state != TCP_ESTABLISHED); pep_sendmsg() 1181 prepare_to_wait(sk_sleep(sk), &wait, pep_sendmsg() 1183 done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits)); pep_sendmsg() 1184 finish_wait(sk_sleep(sk), &wait); pep_sendmsg() 1186 if (sk->sk_state != TCP_ESTABLISHED) pep_sendmsg() 1190 err = pipe_skb_send(sk, skb); pep_sendmsg() 1195 release_sock(sk); pep_sendmsg() 1201 int pep_writeable(struct sock *sk) pep_writeable() argument 1203 struct pep_sock *pn = pep_sk(sk); pep_writeable() 1208 int pep_write(struct sock *sk, struct sk_buff *skb) pep_write() argument 1213 if (pep_sk(sk)->aligned) pep_write() 1214 return pipe_skb_send(sk, skb); pep_write() 1236 return pipe_skb_send(sk, rskb); pep_write() 1239 struct sk_buff *pep_read(struct sock *sk) pep_read() argument 1241 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); pep_read() 1243 if (sk->sk_state == TCP_ESTABLISHED) pep_read() 1244 pipe_grant_credits(sk, GFP_ATOMIC); pep_read() 1248 static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, pep_recvmsg() argument 1258 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE))) pep_recvmsg() 1261 if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) { pep_recvmsg() 1263 struct pep_sock *pn = pep_sk(sk); pep_recvmsg() 1269 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR, pep_recvmsg() 1278 skb = skb_recv_datagram(sk, flags, noblock, &err); pep_recvmsg() 1279 lock_sock(sk); pep_recvmsg() 1281 if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT) pep_recvmsg() 1283 release_sock(sk); pep_recvmsg() 1287 if (sk->sk_state == TCP_ESTABLISHED) pep_recvmsg() 1288 pipe_grant_credits(sk, GFP_KERNEL); pep_recvmsg() 1289 release_sock(sk); pep_recvmsg() 1301 skb_free_datagram(sk, skb); pep_recvmsg() 1305 static void pep_sock_unhash(struct sock *sk) pep_sock_unhash() argument 1307 struct pep_sock *pn = pep_sk(sk); pep_sock_unhash() 1310 lock_sock(sk); pep_sock_unhash() 1315 release_sock(sk); pep_sock_unhash() 1319 sk_del_node_init(sk); pep_sock_unhash() 1320 sk = skparent; pep_sock_unhash() 1326 pn_sock_unhash(&pn->pn_sk.sk); pep_sock_unhash() 1327 release_sock(sk); pep_sock_unhash()
|
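The Phonet pipe above is credit flow controlled: pipe_skb_send() spends one tx_credit per packet, pep_sendmsg() sleeps in sk_wait_event() when the pool is empty, and pipe_rcv_status() refills the pool from a peer PNS_PEP_STATUS_IND and wakes writers through sk->sk_write_space(). A compact model of the accounting — struct pipe and the return convention are inventions of this sketch:

#include <stdio.h>

struct pipe { int tx_credits; };

static int pipe_send(struct pipe *p, const char *msg)
{
	if (p->tx_credits == 0)
		return -1;	/* kernel: block in pep_sendmsg() until credits arrive */
	p->tx_credits--;	/* models atomic_dec(&pn->tx_credits) */
	printf("sent \"%s\", %d credits left\n", msg, p->tx_credits);
	return 0;
}

static void pipe_grant(struct pipe *p, int credits)
{
	p->tx_credits += credits;	/* models a PNS_PEP_STATUS_IND credit grant */
}

int main(void)
{
	struct pipe p = { 1 };

	pipe_send(&p, "first");
	printf("second send: %d\n", pipe_send(&p, "second"));	/* -1: no credits */
	pipe_grant(&p, 2);
	pipe_send(&p, "second");
	return 0;
}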
H A D | pep-gprs.c | 38 struct sock *sk; member in struct:gprs_dev 68 if (pep_writeable(gp->sk)) gprs_writeable() 76 static void gprs_state_change(struct sock *sk) gprs_state_change() argument 78 struct gprs_dev *gp = sk->sk_user_data; gprs_state_change() 80 if (sk->sk_state == TCP_CLOSE_WAIT) { gprs_state_change() 149 static void gprs_data_ready(struct sock *sk) gprs_data_ready() argument 151 struct gprs_dev *gp = sk->sk_user_data; gprs_data_ready() 154 while ((skb = pep_read(sk)) != NULL) { gprs_data_ready() 160 static void gprs_write_space(struct sock *sk) gprs_write_space() argument 162 struct gprs_dev *gp = sk->sk_user_data; gprs_write_space() 189 struct sock *sk = gp->sk; gprs_xmit() local 202 skb_set_owner_w(skb, sk); gprs_xmit() 204 err = pep_write(sk, skb); gprs_xmit() 215 if (pep_writeable(sk)) gprs_xmit() 258 int gprs_attach(struct sock *sk) gprs_attach() argument 265 if (unlikely(sk->sk_type == SOCK_STREAM)) gprs_attach() 273 gp->sk = sk; gprs_attach() 283 lock_sock(sk); gprs_attach() 284 if (unlikely(sk->sk_user_data)) { gprs_attach() 288 if (unlikely((1 << sk->sk_state & (TCPF_CLOSE|TCPF_LISTEN)) || gprs_attach() 289 sock_flag(sk, SOCK_DEAD))) { gprs_attach() 293 sk->sk_user_data = gp; gprs_attach() 294 gp->old_state_change = sk->sk_state_change; gprs_attach() 295 gp->old_data_ready = sk->sk_data_ready; gprs_attach() 296 gp->old_write_space = sk->sk_write_space; gprs_attach() 297 sk->sk_state_change = gprs_state_change; gprs_attach() 298 sk->sk_data_ready = gprs_data_ready; gprs_attach() 299 sk->sk_write_space = gprs_write_space; gprs_attach() 300 release_sock(sk); gprs_attach() 301 sock_hold(sk); gprs_attach() 307 release_sock(sk); gprs_attach() 312 void gprs_detach(struct sock *sk) gprs_detach() argument 314 struct gprs_dev *gp = sk->sk_user_data; gprs_detach() 317 lock_sock(sk); gprs_detach() 318 sk->sk_user_data = NULL; gprs_detach() 319 sk->sk_state_change = gp->old_state_change; gprs_detach() 320 sk->sk_data_ready = gp->old_data_ready; gprs_detach() 321 sk->sk_write_space = gp->old_write_space; gprs_detach() 322 release_sock(sk); gprs_detach() 326 sock_put(sk); gprs_detach()
|
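gprs_attach()/gprs_detach() above splice a network device into an existing PEP socket by saving sk_state_change, sk_data_ready, and sk_write_space and installing their own handlers, restoring the saved pointers on detach. The hook-and-restore pattern in miniature — note that unlike this sketch, the real gprs_data_ready() consumes the data itself rather than chaining to the original callback:

#include <stdio.h>

struct hooked { void (*data_ready)(struct hooked *); };

static void orig_ready(struct hooked *h) { (void)h; puts("original callback"); }

static void (*saved_ready)(struct hooked *);	/* gp->old_data_ready */

static void gprs_ready(struct hooked *h)
{
	puts("hook saw the event first");
	saved_ready(h);		/* optional chaining, for illustration only */
}

int main(void)
{
	struct hooked h = { orig_ready };

	saved_ready = h.data_ready;	/* gp->old_data_ready = sk->sk_data_ready */
	h.data_ready = gprs_ready;	/* sk->sk_data_ready = gprs_data_ready */
	h.data_ready(&h);

	h.data_ready = saved_ready;	/* detach restores the saved pointer */
	h.data_ready(&h);
	return 0;
}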
H A D | datagram.c | 36 static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb); 39 static void pn_sock_close(struct sock *sk, long timeout) pn_sock_close() argument 41 sk_common_release(sk); pn_sock_close() 44 static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg) pn_ioctl() argument 51 lock_sock(sk); pn_ioctl() 52 skb = skb_peek(&sk->sk_receive_queue); pn_ioctl() 54 release_sock(sk); pn_ioctl() 65 return pn_sock_bind_res(sk, res); pn_ioctl() 67 return pn_sock_unbind_res(sk, res); pn_ioctl() 75 static void pn_destruct(struct sock *sk) pn_destruct() argument 77 skb_queue_purge(&sk->sk_receive_queue); pn_destruct() 80 static int pn_init(struct sock *sk) pn_init() argument 82 sk->sk_destruct = pn_destruct; pn_init() 86 static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) pn_sendmsg() argument 105 skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len, pn_sendmsg() 121 err = pn_skb_send(sk, skb, target); pn_sendmsg() 127 static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, pn_recvmsg() argument 139 skb = skb_recv_datagram(sk, flags, noblock, &rval); pn_recvmsg() 166 skb_free_datagram(sk, skb); pn_recvmsg() 173 static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) pn_backlog_rcv() argument 175 int err = sock_queue_rcv_skb(sk, skb); pn_backlog_rcv()
|
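pn_ioctl() above answers SIOCINQ by peeking at the head of sk_receive_queue under lock_sock(); the reported count is the length of the first queued datagram, or zero when the queue is empty (the exact assignment is elided from the listing, so treat this as the usual datagram semantics rather than a quote). In miniature:

#include <stdio.h>

struct dgram { int len; struct dgram *next; };

static int queued_bytes(const struct dgram *head)
{
	return head ? head->len : 0;	/* skb_peek(): head skb length, else 0 */
}

int main(void)
{
	struct dgram second = { 80, NULL };
	struct dgram first = { 120, &second };

	printf("SIOCINQ -> %d\n", queued_bytes(&first));	/* 120, not 200 */
	printf("SIOCINQ -> %d\n", queued_bytes(NULL));		/* 0: queue empty */
	return 0;
}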
/linux-4.1.27/net/ieee802154/ |
H A D | socket.c | 92 struct sock *sk = sock->sk; ieee802154_sock_release() local 94 if (sk) { ieee802154_sock_release() 95 sock->sk = NULL; ieee802154_sock_release() 96 sk->sk_prot->close(sk, 0); ieee802154_sock_release() 104 struct sock *sk = sock->sk; ieee802154_sock_sendmsg() local 106 return sk->sk_prot->sendmsg(sk, msg, len); ieee802154_sock_sendmsg() 112 struct sock *sk = sock->sk; ieee802154_sock_bind() local 114 if (sk->sk_prot->bind) ieee802154_sock_bind() 115 return sk->sk_prot->bind(sk, uaddr, addr_len); ieee802154_sock_bind() 123 struct sock *sk = sock->sk; ieee802154_sock_connect() local 129 return sk->sk_prot->disconnect(sk, flags); ieee802154_sock_connect() 131 return sk->sk_prot->connect(sk, uaddr, addr_len); ieee802154_sock_connect() 134 static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg, ieee802154_dev_ioctl() argument 146 dev_load(sock_net(sk), ifr.ifr_name); ieee802154_dev_ioctl() 147 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); ieee802154_dev_ioctl() 165 struct sock *sk = sock->sk; ieee802154_sock_ioctl() local 169 return sock_get_timestamp(sk, (struct timeval __user *)arg); ieee802154_sock_ioctl() 171 return sock_get_timestampns(sk, (struct timespec __user *)arg); ieee802154_sock_ioctl() 174 return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg, ieee802154_sock_ioctl() 177 if (!sk->sk_prot->ioctl) ieee802154_sock_ioctl() 179 return sk->sk_prot->ioctl(sk, cmd, arg); ieee802154_sock_ioctl() 187 static void raw_hash(struct sock *sk) raw_hash() argument 190 sk_add_node(sk, &raw_head); raw_hash() 191 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); raw_hash() 195 static void raw_unhash(struct sock *sk) raw_unhash() argument 198 if (sk_del_node_init(sk)) raw_unhash() 199 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); raw_unhash() 203 static void raw_close(struct sock *sk, long timeout) raw_close() argument 205 sk_common_release(sk); raw_close() 208 static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len) raw_bind() argument 222 lock_sock(sk); raw_bind() 225 dev = ieee802154_get_dev(sock_net(sk), &addr); raw_bind() 236 sk->sk_bound_dev_if = dev->ifindex; raw_bind() 237 sk_dst_reset(sk); raw_bind() 242 release_sock(sk); raw_bind() 247 static int raw_connect(struct sock *sk, struct sockaddr *uaddr, raw_connect() argument 253 static int raw_disconnect(struct sock *sk, int flags) raw_disconnect() argument 258 static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) raw_sendmsg() argument 271 lock_sock(sk); raw_sendmsg() 272 if (!sk->sk_bound_dev_if) raw_sendmsg() 273 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); raw_sendmsg() 275 dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); raw_sendmsg() 276 release_sock(sk); raw_sendmsg() 295 skb = sock_alloc_send_skb(sk, hlen + tlen + size, raw_sendmsg() 310 skb->sk = sk; raw_sendmsg() 329 static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, raw_recvmsg() argument 336 skb = skb_recv_datagram(sk, flags, noblock, &err); raw_recvmsg() 350 sock_recv_ts_and_drops(msg, sk, skb); raw_recvmsg() 355 skb_free_datagram(sk, skb); raw_recvmsg() 362 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) raw_rcv_skb() argument 368 if (sock_queue_rcv_skb(sk, skb) < 0) { raw_rcv_skb() 378 struct sock *sk; ieee802154_raw_deliver() local 381 sk_for_each(sk, &raw_head) { ieee802154_raw_deliver() 382 bh_lock_sock(sk); ieee802154_raw_deliver() 383 if (!sk->sk_bound_dev_if || ieee802154_raw_deliver() 384 sk->sk_bound_dev_if == dev->ifindex) { 
ieee802154_raw_deliver() 389 raw_rcv_skb(sk, clone); ieee802154_raw_deliver() 391 bh_unlock_sock(sk); ieee802154_raw_deliver() 396 static int raw_getsockopt(struct sock *sk, int level, int optname, raw_getsockopt() argument 402 static int raw_setsockopt(struct sock *sk, int level, int optname, raw_setsockopt() argument 454 struct sock sk; member in struct:dgram_sock 468 static inline struct dgram_sock *dgram_sk(const struct sock *sk) dgram_sk() argument 470 return container_of(sk, struct dgram_sock, sk); dgram_sk() 473 static void dgram_hash(struct sock *sk) dgram_hash() argument 476 sk_add_node(sk, &dgram_head); dgram_hash() 477 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); dgram_hash() 481 static void dgram_unhash(struct sock *sk) dgram_unhash() argument 484 if (sk_del_node_init(sk)) dgram_unhash() 485 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); dgram_unhash() 489 static int dgram_init(struct sock *sk) dgram_init() argument 491 struct dgram_sock *ro = dgram_sk(sk); dgram_init() 497 static void dgram_close(struct sock *sk, long timeout) dgram_close() argument 499 sk_common_release(sk); dgram_close() 502 static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len) dgram_bind() argument 506 struct dgram_sock *ro = dgram_sk(sk); dgram_bind() 510 lock_sock(sk); dgram_bind() 521 dev = ieee802154_get_dev(sock_net(sk), &haddr); dgram_bind() 539 release_sock(sk); dgram_bind() 544 static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg) dgram_ioctl() argument 549 int amount = sk_wmem_alloc_get(sk); dgram_ioctl() 560 spin_lock_bh(&sk->sk_receive_queue.lock); dgram_ioctl() 561 skb = skb_peek(&sk->sk_receive_queue); dgram_ioctl() 569 spin_unlock_bh(&sk->sk_receive_queue.lock); dgram_ioctl() 578 static int dgram_connect(struct sock *sk, struct sockaddr *uaddr, dgram_connect() argument 582 struct dgram_sock *ro = dgram_sk(sk); dgram_connect() 591 lock_sock(sk); dgram_connect() 602 release_sock(sk); dgram_connect() 606 static int dgram_disconnect(struct sock *sk, int flags) dgram_disconnect() argument 608 struct dgram_sock *ro = dgram_sk(sk); dgram_disconnect() 610 lock_sock(sk); dgram_disconnect() 612 release_sock(sk); dgram_disconnect() 617 static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) dgram_sendmsg() argument 623 struct dgram_sock *ro = dgram_sk(sk); dgram_sendmsg() 639 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); dgram_sendmsg() 641 dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr); dgram_sendmsg() 659 skb = sock_alloc_send_skb(sk, hlen + tlen + size, dgram_sendmsg() 697 skb->sk = sk; dgram_sendmsg() 716 static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, dgram_recvmsg() argument 724 skb = skb_recv_datagram(sk, flags, noblock, &err); dgram_recvmsg() 739 sock_recv_ts_and_drops(msg, sk, skb); dgram_recvmsg() 756 skb_free_datagram(sk, skb); dgram_recvmsg() 763 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) dgram_rcv_skb() argument 769 if (sock_queue_rcv_skb(sk, skb) < 0) { dgram_rcv_skb() 798 struct sock *sk, *prev = NULL; ieee802154_dgram_deliver() local 811 sk_for_each(sk, &dgram_head) { ieee802154_dgram_deliver() 813 dgram_sk(sk))) { ieee802154_dgram_deliver() 822 prev = sk; ieee802154_dgram_deliver() 837 static int dgram_getsockopt(struct sock *sk, int level, int optname, dgram_getsockopt() argument 840 struct dgram_sock *ro = dgram_sk(sk); dgram_getsockopt() 881 static int dgram_setsockopt(struct sock *sk, int level, int optname, dgram_setsockopt() argument 884 struct 
dgram_sock *ro = dgram_sk(sk); dgram_setsockopt() 885 struct net *net = sock_net(sk); dgram_setsockopt() 895 lock_sock(sk); dgram_setsockopt() 947 release_sock(sk); dgram_setsockopt() 1000 struct sock *sk; ieee802154_create() local 1023 sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto); ieee802154_create() 1024 if (!sk) ieee802154_create() 1030 sock_init_data(sock, sk); ieee802154_create() 1031 /* FIXME: sk->sk_destruct */ ieee802154_create() 1032 sk->sk_family = PF_IEEE802154; ieee802154_create() 1035 sock_set_flag(sk, SOCK_ZAPPED); ieee802154_create() 1037 if (sk->sk_prot->hash) ieee802154_create() 1038 sk->sk_prot->hash(sk); ieee802154_create() 1040 if (sk->sk_prot->init) { ieee802154_create() 1041 rc = sk->sk_prot->init(sk); ieee802154_create() 1043 sk_common_release(sk); ieee802154_create()
|
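The ieee802154 glue above is almost pure delegation: ieee802154_sock_release(), _sendmsg(), _bind() and _ioctl() all forward to the hooks in sk->sk_prot, and the ioctl path bails out early when sk_prot->ioctl is unset. A minimal userspace sketch of that optional-hook dispatch pattern follows; all names here (proto_ops_sketch, family_ioctl, raw_bind_stub) are invented for illustration, and this is not kernel code.

#include <errno.h>
#include <stdio.h>

/* Stand-ins for per-protocol hooks; either may be NULL, just as a
 * struct proto may leave ->bind or ->ioctl unset. */
struct proto_ops_sketch {
        int (*bind)(int fd, const void *addr, int len);
        int (*ioctl)(int fd, unsigned int cmd, unsigned long arg);
};

static int raw_bind_stub(int fd, const void *addr, int len)
{
        (void)fd; (void)addr; (void)len;
        return 0;                       /* pretend the bind succeeded */
}

static int family_ioctl(const struct proto_ops_sketch *p, int fd,
                        unsigned int cmd, unsigned long arg)
{
        if (!p->ioctl)                  /* no hook installed: refuse */
                return -ENOTTY;
        return p->ioctl(fd, cmd, arg);
}

int main(void)
{
        struct proto_ops_sketch raw = { .bind = raw_bind_stub };

        printf("bind  -> %d\n", raw.bind ? raw.bind(3, NULL, 0) : -1);
        printf("ioctl -> %d (no hook installed)\n",
               family_ioctl(&raw, 3, 0x8900, 0));
        return 0;
}

The point of the indirection is that raw and dgram sockets share one socket-level front end and differ only in the hook table they install (raw_hash/dgram_hash, raw_bind/dgram_bind, and so on).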
/linux-4.1.27/net/caif/ |
H A D | caif_socket.c | 47 struct sock sk; /* must be first member */ member in struct:caifsock 92 static void caif_read_lock(struct sock *sk) caif_read_lock() argument 95 cf_sk = container_of(sk, struct caifsock, sk); caif_read_lock() 99 static void caif_read_unlock(struct sock *sk) caif_read_unlock() argument 102 cf_sk = container_of(sk, struct caifsock, sk); caif_read_unlock() 109 return cf_sk->sk.sk_rcvbuf / 4; sk_rcvbuf_lowwater() 112 static void caif_flow_ctrl(struct sock *sk, int mode) caif_flow_ctrl() argument 115 cf_sk = container_of(sk, struct caifsock, sk); caif_flow_ctrl() 124 static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) caif_queue_rcv_skb() argument 128 struct sk_buff_head *list = &sk->sk_receive_queue; caif_queue_rcv_skb() 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_queue_rcv_skb() 131 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= caif_queue_rcv_skb() 132 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { caif_queue_rcv_skb() 134 atomic_read(&cf_sk->sk.sk_rmem_alloc), caif_queue_rcv_skb() 137 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); caif_queue_rcv_skb() 140 err = sk_filter(sk, skb); caif_queue_rcv_skb() 143 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { caif_queue_rcv_skb() 146 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); caif_queue_rcv_skb() 149 skb_set_owner_r(skb, sk); caif_queue_rcv_skb() 156 if (!sock_flag(sk, SOCK_DEAD)) caif_queue_rcv_skb() 160 if (!sock_flag(sk, SOCK_DEAD)) caif_queue_rcv_skb() 161 sk->sk_data_ready(sk); caif_queue_rcv_skb() 176 if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) { caif_sktrecv_cb() 180 caif_queue_rcv_skb(&cf_sk->sk, skb); caif_sktrecv_cb() 187 sock_hold(&cf_sk->sk); cfsk_hold() 193 sock_put(&cf_sk->sk); cfsk_put() 206 cf_sk->sk.sk_state_change(&cf_sk->sk); caif_ctrl_cb() 212 cf_sk->sk.sk_state_change(&cf_sk->sk); caif_ctrl_cb() 219 cf_sk->sk.sk_state = CAIF_CONNECTED; caif_ctrl_cb() 221 cf_sk->sk.sk_shutdown = 0; caif_ctrl_cb() 222 cf_sk->sk.sk_state_change(&cf_sk->sk); caif_ctrl_cb() 227 cf_sk->sk.sk_state = CAIF_DISCONNECTED; caif_ctrl_cb() 228 cf_sk->sk.sk_state_change(&cf_sk->sk); caif_ctrl_cb() 233 cf_sk->sk.sk_err = ECONNREFUSED; caif_ctrl_cb() 234 cf_sk->sk.sk_state = CAIF_DISCONNECTED; caif_ctrl_cb() 235 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; caif_ctrl_cb() 241 cf_sk->sk.sk_state_change(&cf_sk->sk); caif_ctrl_cb() 246 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; caif_ctrl_cb() 247 cf_sk->sk.sk_err = ECONNRESET; caif_ctrl_cb() 249 cf_sk->sk.sk_error_report(&cf_sk->sk); caif_ctrl_cb() 257 static void caif_check_flow_release(struct sock *sk) caif_check_flow_release() argument 259 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_check_flow_release() 264 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { caif_check_flow_release() 266 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); caif_check_flow_release() 278 struct sock *sk = sock->sk; caif_seqpkt_recvmsg() local 287 skb = skb_recv_datagram(sk, flags, 0 , &ret); caif_seqpkt_recvmsg() 302 skb_free_datagram(sk, skb); caif_seqpkt_recvmsg() 303 caif_check_flow_release(sk); caif_seqpkt_recvmsg() 312 static long caif_stream_data_wait(struct sock *sk, long timeo) caif_stream_data_wait() argument 315 lock_sock(sk); caif_stream_data_wait() 318 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); caif_stream_data_wait() 320 if (!skb_queue_empty(&sk->sk_receive_queue) || caif_stream_data_wait() 321 sk->sk_err || caif_stream_data_wait() 322 sk->sk_state != 
CAIF_CONNECTED || caif_stream_data_wait() 323 sock_flag(sk, SOCK_DEAD) || caif_stream_data_wait() 324 (sk->sk_shutdown & RCV_SHUTDOWN) || caif_stream_data_wait() 329 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); caif_stream_data_wait() 330 release_sock(sk); caif_stream_data_wait() 332 lock_sock(sk); caif_stream_data_wait() 334 if (sock_flag(sk, SOCK_DEAD)) caif_stream_data_wait() 337 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); caif_stream_data_wait() 340 finish_wait(sk_sleep(sk), &wait); caif_stream_data_wait() 341 release_sock(sk); caif_stream_data_wait() 353 struct sock *sk = sock->sk; caif_stream_recvmsg() local 368 if (sk->sk_state == CAIF_CONNECTING) caif_stream_recvmsg() 371 caif_read_lock(sk); caif_stream_recvmsg() 372 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); caif_stream_recvmsg() 373 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT); caif_stream_recvmsg() 379 lock_sock(sk); caif_stream_recvmsg() 380 if (sock_flag(sk, SOCK_DEAD)) { caif_stream_recvmsg() 384 skb = skb_dequeue(&sk->sk_receive_queue); caif_stream_recvmsg() 385 caif_check_flow_release(sk); caif_stream_recvmsg() 393 err = sock_error(sk); caif_stream_recvmsg() 397 if (sk->sk_shutdown & RCV_SHUTDOWN) caif_stream_recvmsg() 401 if (sk->sk_state != CAIF_CONNECTED) caif_stream_recvmsg() 403 if (sock_flag(sk, SOCK_DEAD)) caif_stream_recvmsg() 406 release_sock(sk); caif_stream_recvmsg() 412 caif_read_unlock(sk); caif_stream_recvmsg() 414 timeo = caif_stream_data_wait(sk, timeo); caif_stream_recvmsg() 420 caif_read_lock(sk); caif_stream_recvmsg() 423 release_sock(sk); caif_stream_recvmsg() 426 release_sock(sk); caif_stream_recvmsg() 429 skb_queue_head(&sk->sk_receive_queue, skb); caif_stream_recvmsg() 443 skb_queue_head(&sk->sk_receive_queue, skb); caif_stream_recvmsg() 453 skb_queue_head(&sk->sk_receive_queue, skb); caif_stream_recvmsg() 457 caif_read_unlock(sk); caif_stream_recvmsg() 470 struct sock *sk = &cf_sk->sk; caif_wait_for_flow_on() local 475 (!wait_writeable || sock_writeable(&cf_sk->sk))) caif_wait_for_flow_on() 483 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); caif_wait_for_flow_on() 485 if (sk->sk_shutdown & SHUTDOWN_MASK) caif_wait_for_flow_on() 487 *err = -sk->sk_err; caif_wait_for_flow_on() 488 if (sk->sk_err) caif_wait_for_flow_on() 491 if (cf_sk->sk.sk_state != CAIF_CONNECTED) caif_wait_for_flow_on() 495 finish_wait(sk_sleep(sk), &wait); caif_wait_for_flow_on() 510 cfpkt_set_prio(pkt, cf_sk->sk.sk_priority); transmit_skb() 524 struct sock *sk = sock->sk; caif_seqpkt_sendmsg() local 525 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_seqpkt_sendmsg() 532 ret = sock_error(sk); caif_seqpkt_sendmsg() 549 timeo = sock_sndtimeo(sk, noblock); caif_seqpkt_sendmsg() 550 timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk), caif_seqpkt_sendmsg() 556 if (cf_sk->sk.sk_state != CAIF_CONNECTED || caif_seqpkt_sendmsg() 557 sock_flag(sk, SOCK_DEAD) || caif_seqpkt_sendmsg() 558 (sk->sk_shutdown & RCV_SHUTDOWN)) caif_seqpkt_sendmsg() 563 if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM) caif_seqpkt_sendmsg() 569 skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret); caif_seqpkt_sendmsg() 599 struct sock *sk = sock->sk; caif_stream_sendmsg() local 600 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_stream_sendmsg() 613 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); caif_stream_sendmsg() 616 if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN)) caif_stream_sendmsg() 627 if (size > ((sk->sk_sndbuf >> 1) - 64)) 
caif_stream_sendmsg() 628 size = (sk->sk_sndbuf >> 1) - 64; caif_stream_sendmsg() 633 skb = sock_alloc_send_skb(sk, caif_stream_sendmsg() 678 struct sock *sk = sock->sk; setsockopt() local 679 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); setsockopt() 682 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) setsockopt() 693 lock_sock(&(cf_sk->sk)); setsockopt() 695 release_sock(&cf_sk->sk); setsockopt() 701 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) setsockopt() 703 lock_sock(&(cf_sk->sk)); setsockopt() 706 release_sock(&cf_sk->sk); setsockopt() 710 release_sock(&cf_sk->sk); setsockopt() 750 struct sock *sk = sock->sk; caif_connect() local 751 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_connect() 758 lock_sock(sk); caif_connect() 767 caif_assert(sk->sk_state == CAIF_DISCONNECTED); caif_connect() 770 switch (sk->sk_state) { caif_connect() 786 caif_assert(sk->sk_state == CAIF_CONNECTED || caif_connect() 787 sk->sk_state == CAIF_DISCONNECTED); caif_connect() 788 if (sk->sk_shutdown & SHUTDOWN_MASK) { caif_connect() 790 caif_disconnect_client(sock_net(sk), &cf_sk->layer); caif_connect() 802 sk->sk_state = CAIF_DISCONNECTED; caif_connect() 804 sk_stream_kill_queues(&cf_sk->sk); caif_connect() 815 sk->sk_state = CAIF_CONNECTING; caif_connect() 819 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) caif_connect() 821 else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) caif_connect() 824 cf_sk->conn_req.priority = cf_sk->sk.sk_priority; caif_connect() 827 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; caif_connect() 831 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, caif_connect() 835 cf_sk->sk.sk_socket->state = SS_UNCONNECTED; caif_connect() 836 cf_sk->sk.sk_state = CAIF_DISCONNECTED; caif_connect() 842 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); caif_connect() 862 if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK)) caif_connect() 865 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); caif_connect() 867 release_sock(sk); caif_connect() 869 timeo = wait_event_interruptible_timeout(*sk_sleep(sk), caif_connect() 870 sk->sk_state != CAIF_CONNECTING, caif_connect() 872 lock_sock(sk); caif_connect() 877 if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) caif_connect() 879 if (sk->sk_state != CAIF_CONNECTED) { caif_connect() 881 err = sock_error(sk); caif_connect() 889 release_sock(sk); caif_connect() 899 struct sock *sk = sock->sk; caif_release() local 900 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_release() 902 if (!sk) caif_release() 912 spin_lock_bh(&sk->sk_receive_queue.lock); caif_release() 913 sock_set_flag(sk, SOCK_DEAD); caif_release() 914 spin_unlock_bh(&sk->sk_receive_queue.lock); caif_release() 915 sock->sk = NULL; caif_release() 920 lock_sock(&(cf_sk->sk)); caif_release() 921 sk->sk_state = CAIF_DISCONNECTED; caif_release() 922 sk->sk_shutdown = SHUTDOWN_MASK; caif_release() 924 caif_disconnect_client(sock_net(sk), &cf_sk->layer); caif_release() 925 cf_sk->sk.sk_socket->state = SS_DISCONNECTING; caif_release() 926 wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP); caif_release() 928 sock_orphan(sk); caif_release() 929 sk_stream_kill_queues(&cf_sk->sk); caif_release() 930 release_sock(sk); caif_release() 931 sock_put(sk); caif_release() 939 struct sock *sk = sock->sk; caif_poll() local 941 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_poll() 943 sock_poll_wait(file, sk_sleep(sk), wait); caif_poll() 947 if (sk->sk_err) caif_poll() 949 if (sk->sk_shutdown == SHUTDOWN_MASK) 
caif_poll() 951 if (sk->sk_shutdown & RCV_SHUTDOWN) caif_poll() 955 if (!skb_queue_empty(&sk->sk_receive_queue) || caif_poll() 956 (sk->sk_shutdown & RCV_SHUTDOWN)) caif_poll() 963 if (sock_writeable(sk) && tx_flow_is_on(cf_sk)) caif_poll() 1012 static void caif_sock_destructor(struct sock *sk) caif_sock_destructor() argument 1014 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_sock_destructor() 1015 caif_assert(!atomic_read(&sk->sk_wmem_alloc)); caif_sock_destructor() 1016 caif_assert(sk_unhashed(sk)); caif_sock_destructor() 1017 caif_assert(!sk->sk_socket); caif_sock_destructor() 1018 if (!sock_flag(sk, SOCK_DEAD)) { caif_sock_destructor() 1019 pr_debug("Attempt to release alive CAIF socket: %p\n", sk); caif_sock_destructor() 1022 sk_stream_kill_queues(&cf_sk->sk); caif_sock_destructor() 1029 struct sock *sk = NULL; caif_create() local 1058 sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); caif_create() 1059 if (!sk) caif_create() 1062 cf_sk = container_of(sk, struct caifsock, sk); caif_create() 1065 sk->sk_protocol = (unsigned char) protocol; caif_create() 1070 sk->sk_priority = TC_PRIO_CONTROL; caif_create() 1073 sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; caif_create() 1076 sk->sk_priority = TC_PRIO_BESTEFFORT; caif_create() 1083 lock_sock(&(cf_sk->sk)); caif_create() 1086 sock_init_data(sock, sk); caif_create() 1087 sk->sk_destruct = caif_sock_destructor; caif_create() 1091 cf_sk->sk.sk_socket->state = SS_UNCONNECTED; caif_create() 1092 cf_sk->sk.sk_state = CAIF_DISCONNECTED; caif_create() 1100 release_sock(&cf_sk->sk); caif_create()
|
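The CAIF receive path above implements flow control with two watermarks: caif_queue_rcv_skb() requests FLOW_OFF once queued receive memory reaches sk_rcvbuf, and caif_check_flow_release() requests FLOW_ON again only after the queue drains below a quarter of it (sk_rcvbuf_lowwater()). A self-contained sketch of that hysteresis, with illustrative constants rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct rx_flow {
        unsigned queued;    /* stands in for sk_rmem_alloc */
        unsigned budget;    /* stands in for sk_rcvbuf     */
        bool     on;        /* stands in for rx_flow_is_on */
};

static void rx_enqueue(struct rx_flow *f, unsigned truesize)
{
        if (f->queued + truesize >= f->budget && f->on) {
                f->on = false;          /* CAIF_MODEMCMD_FLOW_OFF_REQ */
                printf("flow OFF at %u bytes\n", f->queued + truesize);
        }
        f->queued += truesize;
}

static void rx_dequeue(struct rx_flow *f, unsigned truesize)
{
        f->queued -= truesize;
        if (!f->on && f->queued <= f->budget / 4) {
                f->on = true;           /* CAIF_MODEMCMD_FLOW_ON_REQ */
                printf("flow ON at %u bytes\n", f->queued);
        }
}

int main(void)
{
        struct rx_flow f = { 0, 4096, true };

        for (int i = 0; i < 5; i++)
                rx_enqueue(&f, 1024);   /* crosses the OFF mark at 4096 */
        while (f.queued)
                rx_dequeue(&f, 1024);   /* re-arms once <= 1024         */
        return 0;
}

Keeping the ON threshold well below the OFF threshold is what prevents the modem from being toggled on every packet while the queue hovers near the limit.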
/linux-4.1.27/net/unix/ |
H A D | diag.c | 11 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) sk_diag_dump_name() argument 13 struct unix_address *addr = unix_sk(sk)->addr; sk_diag_dump_name() 22 static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) sk_diag_dump_vfs() argument 24 struct dentry *dentry = unix_sk(sk)->path.dentry; sk_diag_dump_vfs() 38 static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) sk_diag_dump_peer() argument 43 peer = unix_peer_get(sk); sk_diag_dump_peer() 56 static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb) sk_diag_dump_icons() argument 63 if (sk->sk_state == TCP_LISTEN) { sk_diag_dump_icons() 64 spin_lock(&sk->sk_receive_queue.lock); sk_diag_dump_icons() 67 sk->sk_receive_queue.qlen * sizeof(u32)); sk_diag_dump_icons() 73 skb_queue_walk(&sk->sk_receive_queue, skb) { sk_diag_dump_icons() 76 req = skb->sk; sk_diag_dump_icons() 78 * The state lock is outer for the same sk's sk_diag_dump_icons() 87 spin_unlock(&sk->sk_receive_queue.lock); sk_diag_dump_icons() 93 spin_unlock(&sk->sk_receive_queue.lock); sk_diag_dump_icons() 97 static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) sk_diag_show_rqlen() argument 101 if (sk->sk_state == TCP_LISTEN) { sk_diag_show_rqlen() 102 rql.udiag_rqueue = sk->sk_receive_queue.qlen; sk_diag_show_rqlen() 103 rql.udiag_wqueue = sk->sk_max_ack_backlog; sk_diag_show_rqlen() 105 rql.udiag_rqueue = (u32) unix_inq_len(sk); sk_diag_show_rqlen() 106 rql.udiag_wqueue = (u32) unix_outq_len(sk); sk_diag_show_rqlen() 112 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, sk_diag_fill() argument 125 rep->udiag_type = sk->sk_type; sk_diag_fill() 126 rep->udiag_state = sk->sk_state; sk_diag_fill() 129 sock_diag_save_cookie(sk, rep->udiag_cookie); sk_diag_fill() 132 sk_diag_dump_name(sk, skb)) sk_diag_fill() 136 sk_diag_dump_vfs(sk, skb)) sk_diag_fill() 140 sk_diag_dump_peer(sk, skb)) sk_diag_fill() 144 sk_diag_dump_icons(sk, skb)) sk_diag_fill() 148 sk_diag_show_rqlen(sk, skb)) sk_diag_fill() 152 sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) sk_diag_fill() 155 if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown)) sk_diag_fill() 166 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, sk_diag_dump() argument 171 unix_state_lock(sk); sk_diag_dump() 172 sk_ino = sock_i_ino(sk); sk_diag_dump() 173 unix_state_unlock(sk); sk_diag_dump() 178 return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino); sk_diag_dump() 185 struct net *net = sock_net(skb->sk); unix_diag_dump() 196 struct sock *sk; unix_diag_dump() local 199 sk_for_each(sk, &unix_socket_table[slot]) { unix_diag_dump() 200 if (!net_eq(sock_net(sk), net)) unix_diag_dump() 204 if (!(req->udiag_states & (1 << sk->sk_state))) unix_diag_dump() 206 if (sk_diag_dump(sk, skb, req, unix_diag_dump() 226 struct sock *sk; unix_lookup_by_ino() local 230 sk_for_each(sk, &unix_socket_table[i]) unix_lookup_by_ino() 231 if (ino == sock_i_ino(sk)) { unix_lookup_by_ino() 232 sock_hold(sk); unix_lookup_by_ino() 235 return sk; unix_lookup_by_ino() 248 struct sock *sk; unix_diag_get_exact() local 251 struct net *net = sock_net(in_skb->sk); unix_diag_get_exact() 256 sk = unix_lookup_by_ino(req->udiag_ino); unix_diag_get_exact() 258 if (sk == NULL) unix_diag_get_exact() 261 err = sock_diag_check_cookie(sk, req->udiag_cookie); unix_diag_get_exact() 272 err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid, unix_diag_get_exact() 287 if (sk) unix_diag_get_exact() 288 
sock_put(sk); unix_diag_get_exact() 296 struct net *net = sock_net(skb->sk); unix_diag_handler_dump()
|
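The diag handlers above answer NETLINK_SOCK_DIAG queries, so their natural usage example is a userspace client. A sketch that dumps all AF_UNIX sockets, assuming Linux 3.3+ with <linux/unix_diag.h>; error handling is trimmed to keep it short:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>

int main(void)
{
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
        struct {
                struct nlmsghdr nlh;
                struct unix_diag_req req;
        } msg = {
                .nlh = {
                        .nlmsg_len   = sizeof(msg),
                        .nlmsg_type  = SOCK_DIAG_BY_FAMILY,
                        .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
                },
                .req = {
                        .sdiag_family = AF_UNIX,
                        .udiag_states = -1,     /* every sk_state */
                        .udiag_show   = UDIAG_SHOW_NAME | UDIAG_SHOW_RQLEN,
                },
        };
        char buf[8192];
        ssize_t n;

        if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0)
                return 1;
        while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
                for (struct nlmsghdr *h = (struct nlmsghdr *)buf;
                     NLMSG_OK(h, n); h = NLMSG_NEXT(h, n)) {
                        if (h->nlmsg_type == NLMSG_DONE ||
                            h->nlmsg_type == NLMSG_ERROR) {
                                close(fd);
                                return 0;
                        }
                        struct unix_diag_msg *m = NLMSG_DATA(h);
                        printf("ino %u type %u state %u\n", m->udiag_ino,
                               (unsigned)m->udiag_type,
                               (unsigned)m->udiag_state);
                }
        }
        close(fd);
        return 0;
}

udiag_states is the same per-state bitmask that unix_diag_dump() tests with (1 << sk->sk_state), and udiag_show selects which of the sk_diag_dump_*() attributes the kernel attaches to each reply.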
H A D | af_unix.c | 138 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE) 172 #define unix_peer(sk) (unix_sk(sk)->peer) 174 static inline int unix_our_peer(struct sock *sk, struct sock *osk) unix_our_peer() argument 176 return unix_peer(osk) == sk; unix_our_peer() 179 static inline int unix_may_send(struct sock *sk, struct sock *osk) unix_may_send() argument 181 return unix_peer(osk) == NULL || unix_our_peer(sk, osk); unix_may_send() 184 static inline int unix_recvq_full(struct sock const *sk) unix_recvq_full() argument 186 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; unix_recvq_full() 238 static void __unix_remove_socket(struct sock *sk) __unix_remove_socket() argument 240 sk_del_node_init(sk); __unix_remove_socket() 243 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk) __unix_insert_socket() argument 245 WARN_ON(!sk_unhashed(sk)); __unix_insert_socket() 246 sk_add_node(sk, list); __unix_insert_socket() 249 static inline void unix_remove_socket(struct sock *sk) unix_remove_socket() argument 252 __unix_remove_socket(sk); unix_remove_socket() 256 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk) unix_insert_socket() argument 259 __unix_insert_socket(list, sk); unix_insert_socket() 357 u_sleep = sk_sleep(&u->sk); unix_dgram_peer_wake_relay() 364 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) unix_dgram_peer_wake_connect() argument 369 u = unix_sk(sk); unix_dgram_peer_wake_connect() 385 static void unix_dgram_peer_wake_disconnect(struct sock *sk, unix_dgram_peer_wake_disconnect() argument 390 u = unix_sk(sk); unix_dgram_peer_wake_disconnect() 402 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, unix_dgram_peer_wake_disconnect_wakeup() argument 405 unix_dgram_peer_wake_disconnect(sk, other); unix_dgram_peer_wake_disconnect_wakeup() 406 wake_up_interruptible_poll(sk_sleep(sk), unix_dgram_peer_wake_disconnect_wakeup() 413 * - unix_peer(sk) == other 416 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) unix_dgram_peer_wake_me() argument 420 connected = unix_dgram_peer_wake_connect(sk, other); unix_dgram_peer_wake_me() 426 unix_dgram_peer_wake_disconnect(sk, other); unix_dgram_peer_wake_me() 431 static inline int unix_writable(struct sock *sk) unix_writable() argument 433 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; unix_writable() 436 static void unix_write_space(struct sock *sk) unix_write_space() argument 441 if (unix_writable(sk)) { unix_write_space() 442 wq = rcu_dereference(sk->sk_wq); unix_write_space() 446 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); unix_write_space() 453 * flow control based only on wmem_alloc; second, sk connected to peer 455 static void unix_dgram_disconnected(struct sock *sk, struct sock *other) unix_dgram_disconnected() argument 457 if (!skb_queue_empty(&sk->sk_receive_queue)) { unix_dgram_disconnected() 458 skb_queue_purge(&sk->sk_receive_queue); unix_dgram_disconnected() 459 wake_up_interruptible_all(&unix_sk(sk)->peer_wait); unix_dgram_disconnected() 465 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) { unix_dgram_disconnected() 472 static void unix_sock_destructor(struct sock *sk) unix_sock_destructor() argument 474 struct unix_sock *u = unix_sk(sk); unix_sock_destructor() 476 skb_queue_purge(&sk->sk_receive_queue); unix_sock_destructor() 478 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); unix_sock_destructor() 479 WARN_ON(!sk_unhashed(sk)); unix_sock_destructor() 480 
WARN_ON(sk->sk_socket); unix_sock_destructor() 481 if (!sock_flag(sk, SOCK_DEAD)) { unix_sock_destructor() 482 pr_info("Attempt to release alive unix socket: %p\n", sk); unix_sock_destructor() 491 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); unix_sock_destructor() 494 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk, unix_sock_destructor() 499 static void unix_release_sock(struct sock *sk, int embrion) unix_release_sock() argument 501 struct unix_sock *u = unix_sk(sk); unix_release_sock() 507 unix_remove_socket(sk); unix_release_sock() 510 unix_state_lock(sk); unix_release_sock() 511 sock_orphan(sk); unix_release_sock() 512 sk->sk_shutdown = SHUTDOWN_MASK; unix_release_sock() 516 state = sk->sk_state; unix_release_sock() 517 sk->sk_state = TCP_CLOSE; unix_release_sock() 518 unix_state_unlock(sk); unix_release_sock() 522 skpair = unix_peer(sk); unix_release_sock() 525 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { unix_release_sock() 529 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion) unix_release_sock() 536 unix_dgram_peer_wake_disconnect(sk, skpair); unix_release_sock() 538 unix_peer(sk) = NULL; unix_release_sock() 543 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { unix_release_sock() 545 unix_release_sock(skb->sk, 1); unix_release_sock() 553 sock_put(sk); unix_release_sock() 572 static void init_peercred(struct sock *sk) init_peercred() argument 574 put_pid(sk->sk_peer_pid); init_peercred() 575 if (sk->sk_peer_cred) init_peercred() 576 put_cred(sk->sk_peer_cred); init_peercred() 577 sk->sk_peer_pid = get_pid(task_tgid(current)); init_peercred() 578 sk->sk_peer_cred = get_current_cred(); init_peercred() 581 static void copy_peercred(struct sock *sk, struct sock *peersk) copy_peercred() argument 583 put_pid(sk->sk_peer_pid); copy_peercred() 584 if (sk->sk_peer_cred) copy_peercred() 585 put_cred(sk->sk_peer_cred); copy_peercred() 586 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); copy_peercred() 587 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); copy_peercred() 593 struct sock *sk = sock->sk; unix_listen() local 594 struct unix_sock *u = unix_sk(sk); unix_listen() 603 unix_state_lock(sk); unix_listen() 604 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) unix_listen() 606 if (backlog > sk->sk_max_ack_backlog) unix_listen() 608 sk->sk_max_ack_backlog = backlog; unix_listen() 609 sk->sk_state = TCP_LISTEN; unix_listen() 611 init_peercred(sk); unix_listen() 615 unix_state_unlock(sk); unix_listen() 643 static int unix_set_peek_off(struct sock *sk, int val) unix_set_peek_off() argument 645 struct unix_sock *u = unix_sk(sk); unix_set_peek_off() 650 sk->sk_peek_off = val; unix_set_peek_off() 739 struct sock *sk = NULL; unix_create1() local 746 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto); unix_create1() 747 if (!sk) unix_create1() 750 sock_init_data(sock, sk); unix_create1() 751 lockdep_set_class(&sk->sk_receive_queue.lock, unix_create1() 754 sk->sk_write_space = unix_write_space; unix_create1() 755 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; unix_create1() 756 sk->sk_destruct = unix_sock_destructor; unix_create1() 757 u = unix_sk(sk); unix_create1() 766 unix_insert_socket(unix_sockets_unbound(sk), sk); unix_create1() local 768 if (sk == NULL) unix_create1() 772 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); unix_create1() 775 return sk; unix_create1() 811 struct sock *sk = sock->sk; unix_release() local 813 if (!sk) unix_release() 816 unix_release_sock(sk, 0); unix_release() 817 sock->sk = NULL; 
unix_release() 824 struct sock *sk = sock->sk; unix_autobind() local 825 struct net *net = sock_net(sk); unix_autobind() 826 struct unix_sock *u = unix_sk(sk); unix_autobind() 871 addr->hash ^= sk->sk_type; unix_autobind() 873 __unix_remove_socket(sk); unix_autobind() 875 __unix_insert_socket(&unix_socket_table[addr->hash], sk); unix_autobind() 969 struct sock *sk = sock->sk; unix_bind() local 970 struct net *net = sock_net(sk); unix_bind() 971 struct unix_sock *u = unix_sk(sk); unix_bind() 1008 addr->hash = hash ^ sk->sk_type; unix_bind() 1031 sk->sk_type, hash)) { unix_bind() 1040 __unix_remove_socket(sk); unix_bind() 1042 __unix_insert_socket(list, sk); unix_bind() 1080 struct sock *sk = sock->sk; unix_dgram_connect() local 1081 struct net *net = sock_net(sk); unix_dgram_connect() 1094 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0) unix_dgram_connect() 1102 unix_state_double_lock(sk, other); unix_dgram_connect() 1106 unix_state_double_unlock(sk, other); unix_dgram_connect() 1112 if (!unix_may_send(sk, other)) unix_dgram_connect() 1115 err = security_unix_may_send(sk->sk_socket, other->sk_socket); unix_dgram_connect() 1124 unix_state_double_lock(sk, other); unix_dgram_connect() 1130 if (unix_peer(sk)) { unix_dgram_connect() 1131 struct sock *old_peer = unix_peer(sk); unix_dgram_connect() 1132 unix_peer(sk) = other; unix_dgram_connect() 1133 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); unix_dgram_connect() 1135 unix_state_double_unlock(sk, other); unix_dgram_connect() 1138 unix_dgram_disconnected(sk, old_peer); unix_dgram_connect() 1141 unix_peer(sk) = other; unix_dgram_connect() 1142 unix_state_double_unlock(sk, other); unix_dgram_connect() 1147 unix_state_double_unlock(sk, other); unix_dgram_connect() 1178 struct sock *sk = sock->sk; unix_stream_connect() local 1179 struct net *net = sock_net(sk); unix_stream_connect() 1180 struct unix_sock *u = unix_sk(sk), *newu, *otheru; unix_stream_connect() 1198 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); unix_stream_connect() 1208 newsk = unix_create1(sock_net(sk), NULL); unix_stream_connect() 1219 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err); unix_stream_connect() 1259 state. 
other is TCP_LISTEN, if sk is TCP_LISTEN we unix_stream_connect() 1264 st = sk->sk_state; unix_stream_connect() 1279 unix_state_lock_nested(sk); unix_stream_connect() 1281 if (sk->sk_state != st) { unix_stream_connect() 1282 unix_state_unlock(sk); unix_stream_connect() 1288 err = security_unix_stream_connect(sk, other, newsk); unix_stream_connect() 1290 unix_state_unlock(sk); unix_stream_connect() 1296 sock_hold(sk); unix_stream_connect() 1297 unix_peer(newsk) = sk; unix_stream_connect() 1299 newsk->sk_type = sk->sk_type; unix_stream_connect() 1316 copy_peercred(sk, other); unix_stream_connect() 1319 sk->sk_state = TCP_ESTABLISHED; unix_stream_connect() 1323 unix_peer(sk) = newsk; unix_stream_connect() 1325 unix_state_unlock(sk); unix_stream_connect() 1351 struct sock *ska = socka->sk, *skb = sockb->sk; unix_socketpair() 1381 struct sock *sk = sock->sk; unix_accept() local 1391 if (sk->sk_state != TCP_LISTEN) unix_accept() 1398 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err); unix_accept() 1406 tsk = skb->sk; unix_accept() 1407 skb_free_datagram(sk, skb); unix_accept() 1408 wake_up_interruptible(&unix_sk(sk)->peer_wait); unix_accept() 1425 struct sock *sk = sock->sk; unix_getname() local 1431 sk = unix_peer_get(sk); unix_getname() 1434 if (!sk) unix_getname() 1438 sock_hold(sk); unix_getname() 1441 u = unix_sk(sk); unix_getname() 1442 unix_state_lock(sk); unix_getname() 1453 unix_state_unlock(sk); unix_getname() 1454 sock_put(sk); unix_getname() 1511 struct sock *sk = unix_get_socket(scm->fp->fp[i]); unix_attach_fds() local 1513 if (sk) { unix_attach_fds() 1516 unix_sk(sk)->recursion_level); unix_attach_fds() 1576 struct sock *sk = sock->sk; unix_dgram_sendmsg() local 1577 struct net *net = sock_net(sk); unix_dgram_sendmsg() 1578 struct unix_sock *u = unix_sk(sk); unix_dgram_sendmsg() 1608 other = unix_peer_get(sk); unix_dgram_sendmsg() 1618 if (len > sk->sk_sndbuf - 32) unix_dgram_sendmsg() 1630 skb = sock_alloc_send_pskb(sk, len - data_len, data_len, unix_dgram_sendmsg() 1649 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); unix_dgram_sendmsg() 1657 other = unix_find_other(net, sunaddr, namelen, sk->sk_type, unix_dgram_sendmsg() 1673 if (!unix_may_send(sk, other)) unix_dgram_sendmsg() 1685 unix_state_lock(sk); unix_dgram_sendmsg() 1688 if (unix_peer(sk) == other) { unix_dgram_sendmsg() 1689 unix_peer(sk) = NULL; unix_dgram_sendmsg() 1690 unix_dgram_peer_wake_disconnect_wakeup(sk, other); unix_dgram_sendmsg() 1692 unix_state_unlock(sk); unix_dgram_sendmsg() 1694 unix_dgram_disconnected(sk, other); unix_dgram_sendmsg() 1698 unix_state_unlock(sk); unix_dgram_sendmsg() 1711 if (sk->sk_type != SOCK_SEQPACKET) { unix_dgram_sendmsg() 1712 err = security_unix_may_send(sk->sk_socket, other->sk_socket); unix_dgram_sendmsg() 1717 /* other == sk && unix_peer(other) != sk if unix_dgram_sendmsg() 1718 * - unix_peer(sk) == NULL, destination address bound to sk unix_dgram_sendmsg() 1719 * - unix_peer(sk) == sk by time of get but disconnected before lock unix_dgram_sendmsg() 1721 if (other != sk && unix_dgram_sendmsg() 1722 unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { unix_dgram_sendmsg() 1735 unix_state_double_lock(sk, other); unix_dgram_sendmsg() 1738 if (unix_peer(sk) != other || unix_dgram_sendmsg() 1739 unix_dgram_peer_wake_me(sk, other)) { unix_dgram_sendmsg() 1752 unix_state_unlock(sk); unix_dgram_sendmsg() 1768 unix_state_unlock(sk); unix_dgram_sendmsg() 1787 struct sock *sk = sock->sk; unix_stream_sendmsg() local 1807 err = sk->sk_state == TCP_ESTABLISHED ? 
-EISCONN : -EOPNOTSUPP; unix_stream_sendmsg() 1811 other = unix_peer(sk); unix_stream_sendmsg() 1816 if (sk->sk_shutdown & SEND_SHUTDOWN) unix_stream_sendmsg() 1823 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); unix_stream_sendmsg() 1832 skb = sock_alloc_send_pskb(sk, size - data_len, data_len, unix_stream_sendmsg() 1891 struct sock *sk = sock->sk; unix_seqpacket_sendmsg() local 1893 err = sock_error(sk); unix_seqpacket_sendmsg() 1897 if (sk->sk_state != TCP_ESTABLISHED) unix_seqpacket_sendmsg() 1909 struct sock *sk = sock->sk; unix_seqpacket_recvmsg() local 1911 if (sk->sk_state != TCP_ESTABLISHED) unix_seqpacket_recvmsg() 1917 static void unix_copy_addr(struct msghdr *msg, struct sock *sk) unix_copy_addr() argument 1919 struct unix_sock *u = unix_sk(sk); unix_copy_addr() 1931 struct sock *sk = sock->sk; unix_dgram_recvmsg() local 1932 struct unix_sock *u = unix_sk(sk); unix_dgram_recvmsg() 1951 skip = sk_peek_offset(sk, flags); unix_dgram_recvmsg() 1953 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err); unix_dgram_recvmsg() 1955 unix_state_lock(sk); unix_dgram_recvmsg() 1957 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && unix_dgram_recvmsg() 1958 (sk->sk_shutdown & RCV_SHUTDOWN)) unix_dgram_recvmsg() 1960 unix_state_unlock(sk); unix_dgram_recvmsg() 1968 unix_copy_addr(msg, skb->sk); unix_dgram_recvmsg() 1979 if (sock_flag(sk, SOCK_RCVTSTAMP)) unix_dgram_recvmsg() 1980 __sock_recv_timestamp(msg, sk, skb); unix_dgram_recvmsg() 1991 sk_peek_offset_bwd(sk, skb->len); unix_dgram_recvmsg() 2006 sk_peek_offset_fwd(sk, size); unix_dgram_recvmsg() 2016 skb_free_datagram(sk, skb); unix_dgram_recvmsg() 2026 static long unix_stream_data_wait(struct sock *sk, long timeo, unix_stream_data_wait() argument 2031 unix_state_lock(sk); unix_stream_data_wait() 2034 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); unix_stream_data_wait() 2036 if (skb_peek_tail(&sk->sk_receive_queue) != last || unix_stream_data_wait() 2037 sk->sk_err || unix_stream_data_wait() 2038 (sk->sk_shutdown & RCV_SHUTDOWN) || unix_stream_data_wait() 2043 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); unix_stream_data_wait() 2044 unix_state_unlock(sk); unix_stream_data_wait() 2046 unix_state_lock(sk); unix_stream_data_wait() 2048 if (sock_flag(sk, SOCK_DEAD)) unix_stream_data_wait() 2051 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); unix_stream_data_wait() 2054 finish_wait(sk_sleep(sk), &wait); unix_stream_data_wait() 2055 unix_state_unlock(sk); unix_stream_data_wait() 2068 struct sock *sk = sock->sk; unix_stream_recvmsg() local 2069 struct unix_sock *u = unix_sk(sk); unix_stream_recvmsg() 2080 if (sk->sk_state != TCP_ESTABLISHED) unix_stream_recvmsg() 2087 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); unix_stream_recvmsg() 2088 timeo = sock_rcvtimeo(sk, noblock); unix_stream_recvmsg() 2099 skip = sk_peek_offset(sk, flags); unix_stream_recvmsg() 2107 unix_state_lock(sk); unix_stream_recvmsg() 2108 if (sock_flag(sk, SOCK_DEAD)) { unix_stream_recvmsg() 2112 last = skb = skb_peek(&sk->sk_receive_queue); unix_stream_recvmsg() 2115 unix_sk(sk)->recursion_level = 0; unix_stream_recvmsg() 2123 err = sock_error(sk); unix_stream_recvmsg() 2126 if (sk->sk_shutdown & RCV_SHUTDOWN) unix_stream_recvmsg() 2129 unix_state_unlock(sk); unix_stream_recvmsg() 2135 timeo = unix_stream_data_wait(sk, timeo, last); unix_stream_recvmsg() 2146 unix_state_unlock(sk); unix_stream_recvmsg() 2153 skb = skb_peek_next(skb, &sk->sk_receive_queue); unix_stream_recvmsg() 2158 unix_state_unlock(sk); 
unix_stream_recvmsg() 2174 unix_copy_addr(msg, skb->sk); unix_stream_recvmsg() 2192 sk_peek_offset_bwd(sk, chunk); unix_stream_recvmsg() 2200 skb_unlink(skb, &sk->sk_receive_queue); unix_stream_recvmsg() 2211 sk_peek_offset_fwd(sk, chunk); unix_stream_recvmsg() 2218 unix_state_lock(sk); unix_stream_recvmsg() 2219 skb = skb_peek_next(skb, &sk->sk_receive_queue); unix_stream_recvmsg() 2222 unix_state_unlock(sk); unix_stream_recvmsg() 2235 struct sock *sk = sock->sk; unix_shutdown() local 2247 unix_state_lock(sk); unix_shutdown() 2248 sk->sk_shutdown |= mode; unix_shutdown() 2249 other = unix_peer(sk); unix_shutdown() 2252 unix_state_unlock(sk); unix_shutdown() 2253 sk->sk_state_change(sk); unix_shutdown() 2256 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) { unix_shutdown() 2279 long unix_inq_len(struct sock *sk) unix_inq_len() argument 2284 if (sk->sk_state == TCP_LISTEN) unix_inq_len() 2287 spin_lock(&sk->sk_receive_queue.lock); unix_inq_len() 2288 if (sk->sk_type == SOCK_STREAM || unix_inq_len() 2289 sk->sk_type == SOCK_SEQPACKET) { unix_inq_len() 2290 skb_queue_walk(&sk->sk_receive_queue, skb) unix_inq_len() 2293 skb = skb_peek(&sk->sk_receive_queue); unix_inq_len() 2297 spin_unlock(&sk->sk_receive_queue.lock); unix_inq_len() 2303 long unix_outq_len(struct sock *sk) unix_outq_len() argument 2305 return sk_wmem_alloc_get(sk); unix_outq_len() 2311 struct sock *sk = sock->sk; unix_ioctl() local 2317 amount = unix_outq_len(sk); unix_ioctl() 2321 amount = unix_inq_len(sk); unix_ioctl() 2336 struct sock *sk = sock->sk; unix_poll() local 2339 sock_poll_wait(file, sk_sleep(sk), wait); unix_poll() 2343 if (sk->sk_err) unix_poll() 2345 if (sk->sk_shutdown == SHUTDOWN_MASK) unix_poll() 2347 if (sk->sk_shutdown & RCV_SHUTDOWN) unix_poll() 2351 if (!skb_queue_empty(&sk->sk_receive_queue)) unix_poll() 2355 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && unix_poll() 2356 sk->sk_state == TCP_CLOSE) unix_poll() 2363 if (unix_writable(sk)) unix_poll() 2372 struct sock *sk = sock->sk, *other; unix_dgram_poll() local 2375 sock_poll_wait(file, sk_sleep(sk), wait); unix_dgram_poll() 2379 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) unix_dgram_poll() 2381 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
POLLPRI : 0); unix_dgram_poll() 2383 if (sk->sk_shutdown & RCV_SHUTDOWN) unix_dgram_poll() 2385 if (sk->sk_shutdown == SHUTDOWN_MASK) unix_dgram_poll() 2389 if (!skb_queue_empty(&sk->sk_receive_queue)) unix_dgram_poll() 2393 if (sk->sk_type == SOCK_SEQPACKET) { unix_dgram_poll() 2394 if (sk->sk_state == TCP_CLOSE) unix_dgram_poll() 2397 if (sk->sk_state == TCP_SYN_SENT) unix_dgram_poll() 2405 writable = unix_writable(sk); unix_dgram_poll() 2407 unix_state_lock(sk); unix_dgram_poll() 2409 other = unix_peer(sk); unix_dgram_poll() 2410 if (other && unix_peer(other) != sk && unix_dgram_poll() 2412 unix_dgram_peer_wake_me(sk, other)) unix_dgram_poll() 2415 unix_state_unlock(sk); unix_dgram_poll() 2421 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); unix_dgram_poll() 2438 struct sock *sk; unix_from_bucket() local 2441 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) { unix_from_bucket() 2442 if (sock_net(sk) != seq_file_net(seq)) unix_from_bucket() 2448 return sk; unix_from_bucket() 2452 struct sock *sk, unix_next_socket() 2457 while (sk > (struct sock *)SEQ_START_TOKEN) { unix_next_socket() 2458 sk = sk_next(sk); unix_next_socket() 2459 if (!sk) unix_next_socket() 2461 if (sock_net(sk) == seq_file_net(seq)) unix_next_socket() 2462 return sk; unix_next_socket() 2466 sk = unix_from_bucket(seq, pos); unix_next_socket() 2467 if (sk) unix_next_socket() 2468 return sk; unix_next_socket() 2451 unix_next_socket(struct seq_file *seq, struct sock *sk, loff_t *pos) unix_next_socket() argument
|
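Much of af_unix.c above maintains state that userspace can observe directly. For example, init_peercred()/copy_peercred() record the pid and credentials that SO_PEERCRED later reports. A minimal sketch using socketpair(), where both ends necessarily see the calling process:

#define _GNU_SOURCE             /* struct ucred */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int sv[2];
        struct ucred cred;
        socklen_t len = sizeof(cred);

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
                return 1;
        if (getsockopt(sv[0], SOL_SOCKET, SO_PEERCRED, &cred, &len) < 0)
                return 1;
        /* on a socketpair both ends belong to the calling process */
        printf("peer pid=%d uid=%d gid=%d\n",
               (int)cred.pid, (int)cred.uid, (int)cred.gid);
        return 0;
}

Likewise, the SIOCOUTQ/SIOCINQ branches of unix_ioctl() expose unix_outq_len() and unix_inq_len(), and can be probed the same way with ioctl(2).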
/linux-4.1.27/net/ipv6/ |
H A D | inet6_connection_sock.c | 30 int inet6_csk_bind_conflict(const struct sock *sk, inet6_csk_bind_conflict() argument 34 int reuse = sk->sk_reuse; inet6_csk_bind_conflict() 35 int reuseport = sk->sk_reuseport; inet6_csk_bind_conflict() 36 kuid_t uid = sock_i_uid((struct sock *)sk); inet6_csk_bind_conflict() 44 if (sk != sk2 && inet6_csk_bind_conflict() 45 (!sk->sk_bound_dev_if || inet6_csk_bind_conflict() 47 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { inet6_csk_bind_conflict() 54 if (ipv6_rcv_saddr_equal(sk, sk2)) inet6_csk_bind_conflict() 59 ipv6_rcv_saddr_equal(sk, sk2)) inet6_csk_bind_conflict() 68 struct dst_entry *inet6_csk_route_req(struct sock *sk, inet6_csk_route_req() argument 73 struct ipv6_pinfo *np = inet6_sk(sk); inet6_csk_route_req() 90 dst = ip6_dst_lookup_flow(sk, fl6, final_p); inet6_csk_route_req() 117 struct request_sock *inet6_csk_search_req(struct sock *sk, inet6_csk_search_req() argument 123 struct inet_connection_sock *icsk = inet_csk(sk); inet6_csk_search_req() 139 WARN_ON(req->sk != NULL); inet6_csk_search_req() 149 void inet6_csk_reqsk_queue_hash_add(struct sock *sk, inet6_csk_reqsk_queue_hash_add() argument 153 struct inet_connection_sock *icsk = inet_csk(sk); inet6_csk_reqsk_queue_hash_add() 160 inet_csk_reqsk_queue_added(sk, timeout); inet6_csk_reqsk_queue_hash_add() 164 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) inet6_csk_addr2sockaddr() argument 169 sin6->sin6_addr = sk->sk_v6_daddr; inet6_csk_addr2sockaddr() 170 sin6->sin6_port = inet_sk(sk)->inet_dport; inet6_csk_addr2sockaddr() 174 sk->sk_bound_dev_if); inet6_csk_addr2sockaddr() 179 void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, __inet6_csk_dst_store() argument 183 __ip6_dst_store(sk, dst, daddr, saddr); __inet6_csk_dst_store() 187 struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) __inet6_csk_dst_check() argument 189 return __sk_dst_check(sk, cookie); __inet6_csk_dst_check() 192 static struct dst_entry *inet6_csk_route_socket(struct sock *sk, inet6_csk_route_socket() argument 195 struct inet_sock *inet = inet_sk(sk); inet6_csk_route_socket() 196 struct ipv6_pinfo *np = inet6_sk(sk); inet6_csk_route_socket() 201 fl6->flowi6_proto = sk->sk_protocol; inet6_csk_route_socket() 202 fl6->daddr = sk->sk_v6_daddr; inet6_csk_route_socket() 205 IP6_ECN_flow_xmit(sk, fl6->flowlabel); inet6_csk_route_socket() 206 fl6->flowi6_oif = sk->sk_bound_dev_if; inet6_csk_route_socket() 207 fl6->flowi6_mark = sk->sk_mark; inet6_csk_route_socket() 210 security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); inet6_csk_route_socket() 216 dst = __inet6_csk_dst_check(sk, np->dst_cookie); inet6_csk_route_socket() 218 dst = ip6_dst_lookup_flow(sk, fl6, final_p); inet6_csk_route_socket() 221 __inet6_csk_dst_store(sk, dst, NULL, NULL); inet6_csk_route_socket() 226 int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused) inet6_csk_xmit() argument 228 struct ipv6_pinfo *np = inet6_sk(sk); inet6_csk_xmit() 233 dst = inet6_csk_route_socket(sk, &fl6); inet6_csk_xmit() 235 sk->sk_err_soft = -PTR_ERR(dst); inet6_csk_xmit() 236 sk->sk_route_caps = 0; inet6_csk_xmit() 245 fl6.daddr = sk->sk_v6_daddr; inet6_csk_xmit() 247 res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), inet6_csk_xmit() 254 struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu) inet6_csk_update_pmtu() argument 257 struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6); inet6_csk_update_pmtu() 261 dst->ops->update_pmtu(dst, sk, NULL, mtu); inet6_csk_update_pmtu() 263 
dst = inet6_csk_route_socket(sk, &fl6); inet6_csk_update_pmtu()
|
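inet6_csk_bind_conflict() above is what decides whether a second bind() to an occupied IPv6 port succeeds: sockets owned by the same uid that all set SO_REUSEPORT do not conflict, while plain sockets do. A userspace probe of both cases; the port numbers are arbitrary placeholders and SO_REUSEPORT needs Linux 3.9+:

#define _GNU_SOURCE             /* SO_REUSEPORT on older glibc */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int bound_socket(int reuseport, unsigned short port)
{
        int one = 1;
        int fd = socket(AF_INET6, SOCK_STREAM, 0);
        struct sockaddr_in6 sa = {
                .sin6_family = AF_INET6,
                .sin6_addr   = in6addr_loopback,
                .sin6_port   = htons(port),
        };

        if (reuseport)
                setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
        return bind(fd, (struct sockaddr *)&sa, sizeof(sa)) ? -errno : fd;
}

int main(void)
{
        int a = bound_socket(1, 47123);   /* placeholder ports           */
        int b = bound_socket(1, 47123);   /* ok: both reuseport, same uid */
        int c = bound_socket(0, 47124);
        int d = bound_socket(0, 47124);   /* conflicts: -EADDRINUSE      */

        printf("reuseport pair: %d %d\n", a, b);
        printf("plain pair:     %d %d (EADDRINUSE is %d)\n", c, d, EADDRINUSE);
        return 0;
}

On the conflicting pair the second bind() fails with EADDRINUSE, which is exactly the outcome the conflict walk over the bind bucket reports.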
H A D | tcp_ipv6.c | 73 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); 74 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, tcp_v6_md5_do_lookup() argument 92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) inet6_sk_rx_dst_set() argument 99 sk->sk_rx_dst = dst; inet6_sk_rx_dst_set() 100 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; inet6_sk_rx_dst_set() 102 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; inet6_sk_rx_dst_set() 114 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, tcp_v6_connect() argument 118 struct inet_sock *inet = inet_sk(sk); tcp_v6_connect() 119 struct inet_connection_sock *icsk = inet_csk(sk); tcp_v6_connect() 120 struct ipv6_pinfo *np = inet6_sk(sk); tcp_v6_connect() 121 struct tcp_sock *tp = tcp_sk(sk); tcp_v6_connect() 143 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); tcp_v6_connect() 168 if (sk->sk_bound_dev_if && tcp_v6_connect() 169 sk->sk_bound_dev_if != usin->sin6_scope_id) tcp_v6_connect() 172 sk->sk_bound_dev_if = usin->sin6_scope_id; tcp_v6_connect() 176 if (!sk->sk_bound_dev_if) tcp_v6_connect() 181 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) { tcp_v6_connect() 187 sk->sk_v6_daddr = usin->sin6_addr; tcp_v6_connect() 198 SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); tcp_v6_connect() 200 if (__ipv6_only_sock(sk)) tcp_v6_connect() 208 sk->sk_backlog_rcv = tcp_v4_do_rcv; tcp_v6_connect() 213 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); tcp_v6_connect() 218 sk->sk_backlog_rcv = tcp_v6_do_rcv; tcp_v6_connect() 224 np->saddr = sk->sk_v6_rcv_saddr; tcp_v6_connect() 229 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) tcp_v6_connect() 230 saddr = &sk->sk_v6_rcv_saddr; tcp_v6_connect() 233 fl6.daddr = sk->sk_v6_daddr; tcp_v6_connect() 235 fl6.flowi6_oif = sk->sk_bound_dev_if; tcp_v6_connect() 236 fl6.flowi6_mark = sk->sk_mark; tcp_v6_connect() 240 opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); tcp_v6_connect() 243 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); tcp_v6_connect() 245 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); tcp_v6_connect() 253 sk->sk_v6_rcv_saddr = *saddr; tcp_v6_connect() 260 sk->sk_gso_type = SKB_GSO_TCPV6; tcp_v6_connect() 261 __ip6_dst_store(sk, dst, NULL, NULL); tcp_v6_connect() 266 ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr)) tcp_v6_connect() 267 tcp_fetch_timewait_stamp(sk, dst); tcp_v6_connect() 278 tcp_set_state(sk, TCP_SYN_SENT); tcp_v6_connect() 279 err = inet6_hash_connect(&tcp_death_row, sk); tcp_v6_connect() 283 ip6_set_txhash(sk); tcp_v6_connect() 287 sk->sk_v6_daddr.s6_addr32, tcp_v6_connect() 291 err = tcp_connect(sk); tcp_v6_connect() 298 tcp_set_state(sk, TCP_CLOSE); tcp_v6_connect() 299 __sk_dst_reset(sk); tcp_v6_connect() 302 sk->sk_route_caps = 0; tcp_v6_connect() 306 static void tcp_v6_mtu_reduced(struct sock *sk) tcp_v6_mtu_reduced() argument 310 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) tcp_v6_mtu_reduced() 313 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info); tcp_v6_mtu_reduced() 317 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { tcp_v6_mtu_reduced() 318 tcp_sync_mss(sk, dst_mtu(dst)); tcp_v6_mtu_reduced() 319 tcp_simple_retransmit(sk); tcp_v6_mtu_reduced() 333 struct sock *sk; tcp_v6_err() local 336 sk = __inet6_lookup_established(net, &tcp_hashinfo, tcp_v6_err() 341 if (!sk) { tcp_v6_err() 347 if (sk->sk_state == 
TCP_TIME_WAIT) { tcp_v6_err() 348 inet_twsk_put(inet_twsk(sk)); tcp_v6_err() 352 if (sk->sk_state == TCP_NEW_SYN_RECV) tcp_v6_err() 353 return tcp_req_err(sk, seq); tcp_v6_err() 355 bh_lock_sock(sk); tcp_v6_err() 356 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) tcp_v6_err() 359 if (sk->sk_state == TCP_CLOSE) tcp_v6_err() 362 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { tcp_v6_err() 367 tp = tcp_sk(sk); tcp_v6_err() 371 if (sk->sk_state != TCP_LISTEN && tcp_v6_err() 377 np = inet6_sk(sk); tcp_v6_err() 380 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); tcp_v6_err() 383 dst->ops->redirect(dst, sk, skb); tcp_v6_err() 392 if (sk->sk_state == TCP_LISTEN) tcp_v6_err() 395 if (!ip6_sk_accept_pmtu(sk)) tcp_v6_err() 399 if (!sock_owned_by_user(sk)) tcp_v6_err() 400 tcp_v6_mtu_reduced(sk); tcp_v6_err() 403 sock_hold(sk); tcp_v6_err() 410 switch (sk->sk_state) { tcp_v6_err() 416 if (fastopen && !fastopen->sk) tcp_v6_err() 419 if (!sock_owned_by_user(sk)) { tcp_v6_err() 420 sk->sk_err = err; tcp_v6_err() 421 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ tcp_v6_err() 423 tcp_done(sk); tcp_v6_err() 425 sk->sk_err_soft = err; tcp_v6_err() 429 if (!sock_owned_by_user(sk) && np->recverr) { tcp_v6_err() 430 sk->sk_err = err; tcp_v6_err() 431 sk->sk_error_report(sk); tcp_v6_err() 433 sk->sk_err_soft = err; tcp_v6_err() 436 bh_unlock_sock(sk); tcp_v6_err() 437 sock_put(sk); tcp_v6_err() 441 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, tcp_v6_send_synack() argument 448 struct ipv6_pinfo *np = inet6_sk(sk); tcp_v6_send_synack() 454 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) tcp_v6_send_synack() 457 skb = tcp_make_synack(sk, dst, req, foc); tcp_v6_send_synack() 469 err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), tcp_v6_send_synack() 486 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, tcp_v6_md5_do_lookup() argument 489 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6); tcp_v6_md5_do_lookup() 492 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, tcp_v6_md5_lookup() argument 495 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr); tcp_v6_md5_lookup() 498 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval, tcp_v6_parse_md5_keys() argument 515 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], tcp_v6_parse_md5_keys() 517 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr, tcp_v6_parse_md5_keys() 525 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], tcp_v6_parse_md5_keys() 528 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr, tcp_v6_parse_md5_keys() 585 const struct sock *sk, tcp_v6_md5_hash_skb() 593 if (sk) { /* valid for establish/request sockets */ tcp_v6_md5_hash_skb() 594 saddr = &sk->sk_v6_rcv_saddr; tcp_v6_md5_hash_skb() 595 daddr = &sk->sk_v6_daddr; tcp_v6_md5_hash_skb() 631 static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) tcp_v6_inbound_md5_hash() argument 640 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); tcp_v6_inbound_md5_hash() 648 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); tcp_v6_inbound_md5_hash() 653 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); tcp_v6_inbound_md5_hash() 673 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk, tcp_v6_init_req() argument 677 struct ipv6_pinfo *np = inet6_sk(sk); tcp_v6_init_req() 683 if 
(!sk->sk_bound_dev_if && tcp_v6_init_req() 688 (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || tcp_v6_init_req() 697 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl, tcp_v6_route_req() argument 703 return inet6_csk_route_req(sk, &fl->u.ip6, req); tcp_v6_route_req() 733 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq, tcp_v6_send_response() argument 742 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); tcp_v6_send_response() 832 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) tcp_v6_send_reset() argument 849 /* If sk not NULL, it means we did a successful lookup and incoming tcp_v6_send_reset() 852 if (!sk && !ipv6_unicast_destination(skb)) tcp_v6_send_reset() 857 if (!sk && hash_location) { tcp_v6_send_reset() 881 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL; tcp_v6_send_reset() 891 oif = sk ? sk->sk_bound_dev_if : 0; tcp_v6_send_reset() 892 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); tcp_v6_send_reset() 903 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq, tcp_v6_send_ack() argument 908 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, tcp_v6_send_ack() 912 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) tcp_v6_timewait_ack() argument 914 struct inet_timewait_sock *tw = inet_twsk(sk); tcp_v6_timewait_ack() 915 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); tcp_v6_timewait_ack() 917 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcp_v6_timewait_ack() 926 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, tcp_v6_reqsk_send_ack() argument 929 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV tcp_v6_reqsk_send_ack() 930 * sk->sk_state == TCP_SYN_RECV -> for Fast Open. tcp_v6_reqsk_send_ack() 932 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? 
tcp_v6_reqsk_send_ack() 933 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, tcp_v6_reqsk_send_ack() 935 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_reqsk_send_ack() 936 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), tcp_v6_reqsk_send_ack() 941 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) tcp_v6_hnd_req() argument 948 req = inet6_csk_search_req(sk, th->source, tcp_v6_hnd_req() 952 nsk = tcp_check_req(sk, skb, req, false); tcp_v6_hnd_req() 953 if (!nsk || nsk == sk) tcp_v6_hnd_req() 957 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, tcp_v6_hnd_req() 973 sk = cookie_v6_check(sk, skb); tcp_v6_hnd_req() 975 return sk; tcp_v6_hnd_req() 978 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) tcp_v6_conn_request() argument 981 return tcp_v4_conn_request(sk, skb); tcp_v6_conn_request() 987 &tcp_request_sock_ipv6_ops, sk, skb); tcp_v6_conn_request() 990 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); tcp_v6_conn_request() 994 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, tcp_v6_syn_recv_sock() argument 999 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); tcp_v6_syn_recv_sock() 1015 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst); tcp_v6_syn_recv_sock() 1064 if (sk_acceptq_is_full(sk)) tcp_v6_syn_recv_sock() 1068 dst = inet6_csk_route_req(sk, &fl6, req); tcp_v6_syn_recv_sock() 1073 newsk = tcp_create_openreq_child(sk, req, skb); tcp_v6_syn_recv_sock() 1118 sk_gfp_atomic(sk, GFP_ATOMIC)); tcp_v6_syn_recv_sock() 1151 if (tcp_sk(sk)->rx_opt.user_mss && tcp_v6_syn_recv_sock() 1152 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) tcp_v6_syn_recv_sock() 1153 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_v6_syn_recv_sock() 1162 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr); tcp_v6_syn_recv_sock() 1171 sk_gfp_atomic(sk, GFP_ATOMIC)); tcp_v6_syn_recv_sock() 1175 if (__inet_inherit_port(sk, newsk) < 0) { tcp_v6_syn_recv_sock() 1185 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); tcp_v6_syn_recv_sock() 1189 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); tcp_v6_syn_recv_sock() 1201 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) tcp_v6_do_rcv() argument 1203 struct ipv6_pinfo *np = inet6_sk(sk); tcp_v6_do_rcv() 1216 return tcp_v4_do_rcv(sk, skb); tcp_v6_do_rcv() 1218 if (sk_filter(sk, skb)) tcp_v6_do_rcv() 1240 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC)); tcp_v6_do_rcv() 1242 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ tcp_v6_do_rcv() 1243 struct dst_entry *dst = sk->sk_rx_dst; tcp_v6_do_rcv() 1245 sock_rps_save_rxhash(sk, skb); tcp_v6_do_rcv() 1246 sk_mark_napi_id(sk, skb); tcp_v6_do_rcv() 1248 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || tcp_v6_do_rcv() 1251 sk->sk_rx_dst = NULL; tcp_v6_do_rcv() 1255 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); tcp_v6_do_rcv() 1264 if (sk->sk_state == TCP_LISTEN) { tcp_v6_do_rcv() 1265 struct sock *nsk = tcp_v6_hnd_req(sk, skb); tcp_v6_do_rcv() 1274 if (nsk != sk) { tcp_v6_do_rcv() 1276 sk_mark_napi_id(sk, skb); tcp_v6_do_rcv() 1277 if (tcp_child_process(sk, nsk, skb)) tcp_v6_do_rcv() 1284 sock_rps_save_rxhash(sk, skb); tcp_v6_do_rcv() 1286 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) tcp_v6_do_rcv() 1293 tcp_v6_send_reset(sk, skb); tcp_v6_do_rcv() 1300 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); tcp_v6_do_rcv() 1301 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); tcp_v6_do_rcv() 1313 tp = tcp_sk(sk); tcp_v6_do_rcv() 1315 !((1 << sk->sk_state) & (TCPF_CLOSE | 
TCPF_LISTEN))) { tcp_v6_do_rcv() 1324 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { tcp_v6_do_rcv() 1325 skb_set_owner_r(opt_skb, sk); tcp_v6_do_rcv() 1372 struct sock *sk; tcp_v6_rcv() local 1400 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest, tcp_v6_rcv() 1402 if (!sk) tcp_v6_rcv() 1406 if (sk->sk_state == TCP_TIME_WAIT) tcp_v6_rcv() 1409 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { tcp_v6_rcv() 1414 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) tcp_v6_rcv() 1420 if (tcp_v6_inbound_md5_hash(sk, skb)) tcp_v6_rcv() 1424 if (sk_filter(sk, skb)) tcp_v6_rcv() 1427 sk_incoming_cpu_update(sk); tcp_v6_rcv() 1430 bh_lock_sock_nested(sk); tcp_v6_rcv() 1432 if (!sock_owned_by_user(sk)) { tcp_v6_rcv() 1433 if (!tcp_prequeue(sk, skb)) tcp_v6_rcv() 1434 ret = tcp_v6_do_rcv(sk, skb); tcp_v6_rcv() 1435 } else if (unlikely(sk_add_backlog(sk, skb, tcp_v6_rcv() 1436 sk->sk_rcvbuf + sk->sk_sndbuf))) { tcp_v6_rcv() 1437 bh_unlock_sock(sk); tcp_v6_rcv() 1441 bh_unlock_sock(sk); tcp_v6_rcv() 1443 sock_put(sk); tcp_v6_rcv() 1466 sock_put(sk); tcp_v6_rcv() 1471 inet_twsk_put(inet_twsk(sk)); tcp_v6_rcv() 1478 inet_twsk_put(inet_twsk(sk)); tcp_v6_rcv() 1482 inet_twsk_put(inet_twsk(sk)); tcp_v6_rcv() 1486 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { tcp_v6_rcv() 1496 struct inet_timewait_sock *tw = inet_twsk(sk); tcp_v6_rcv() 1499 sk = sk2; tcp_v6_rcv() 1506 tcp_v6_timewait_ack(sk, skb); tcp_v6_rcv() 1521 struct sock *sk; tcp_v6_early_demux() local 1536 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo, tcp_v6_early_demux() 1540 if (sk) { tcp_v6_early_demux() 1541 skb->sk = sk; tcp_v6_early_demux() 1543 if (sk_fullsock(sk)) { tcp_v6_early_demux() 1544 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); tcp_v6_early_demux() 1547 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); tcp_v6_early_demux() 1549 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) tcp_v6_early_demux() 1624 static int tcp_v6_init_sock(struct sock *sk) tcp_v6_init_sock() argument 1626 struct inet_connection_sock *icsk = inet_csk(sk); tcp_v6_init_sock() 1628 tcp_init_sock(sk); tcp_v6_init_sock() 1633 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; tcp_v6_init_sock() 1639 static void tcp_v6_destroy_sock(struct sock *sk) tcp_v6_destroy_sock() argument 1641 tcp_v4_destroy_sock(sk); tcp_v6_destroy_sock() 1642 inet6_destroy_sock(sk); tcp_v6_destroy_sock() 1764 struct sock *sk = v; tcp6_seq_show() local 1780 if (sk->sk_state == TCP_TIME_WAIT) tcp6_seq_show() 1821 static void tcp_v6_clear_sk(struct sock *sk, int size) tcp_v6_clear_sk() argument 1823 struct inet_sock *inet = inet_sk(sk); tcp_v6_clear_sk() 1826 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6)); tcp_v6_clear_sk() 583 tcp_v6_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, const struct sock *sk, const struct sk_buff *skb) tcp_v6_md5_hash_skb() argument
|
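tcp_v6_parse_md5_keys() above is the setsockopt(TCP_MD5SIG) backend, and tcp_v6_inbound_md5_hash() is the receive-side check that bumps TCPMD5NOTFOUND/TCPMD5UNEXPECTED. A userspace sketch installing an RFC 2385 key for one IPv6 peer; the address and key are placeholders, the kernel must have CONFIG_TCP_MD5SIG, and the include set may need adjusting per libc:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>          /* struct tcp_md5sig, TCP_MD5SIG */

int main(void)
{
        int fd = socket(AF_INET6, SOCK_STREAM, 0);
        struct sockaddr_in6 peer = { .sin6_family = AF_INET6 };
        struct tcp_md5sig md5;

        inet_pton(AF_INET6, "2001:db8::1", &peer.sin6_addr); /* placeholder */

        memset(&md5, 0, sizeof(md5));
        memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
        md5.tcpm_keylen = 6;
        memcpy(md5.tcpm_key, "secret", 6);                   /* placeholder */

        if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0) {
                perror("TCP_MD5SIG");
                return 1;
        }
        puts("key installed; segments to that peer will carry the MD5 option");
        return 0;
}

Both ends must install the same key for the peer's address before the connection is established; segments that fail the per-packet check are dropped, as the lookup-and-compare path above shows.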
H A D | udp.c | 79 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) ipv6_rcv_saddr_equal() argument 83 int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); ipv6_rcv_saddr_equal() 89 (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr || ipv6_rcv_saddr_equal() 90 sk->sk_rcv_saddr == sk2->sk_rcv_saddr)); ipv6_rcv_saddr_equal() 97 !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED)) ipv6_rcv_saddr_equal() 101 ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6)) ipv6_rcv_saddr_equal() 123 int udp_v6_get_port(struct sock *sk, unsigned short snum) udp_v6_get_port() argument 126 udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum); udp_v6_get_port() 128 udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0); udp_v6_get_port() 131 udp_sk(sk)->udp_portaddr_hash = hash2_partial; udp_v6_get_port() 132 return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); udp_v6_get_port() 135 static void udp_v6_rehash(struct sock *sk) udp_v6_rehash() argument 137 u16 new_hash = udp6_portaddr_hash(sock_net(sk), udp_v6_rehash() 138 &sk->sk_v6_rcv_saddr, udp_v6_rehash() 139 inet_sk(sk)->inet_num); udp_v6_rehash() 141 udp_lib_rehash(sk, new_hash); udp_v6_rehash() 144 static inline int compute_score(struct sock *sk, struct net *net, compute_score() argument 153 if (!net_eq(sock_net(sk), net) || compute_score() 154 udp_sk(sk)->udp_port_hash != hnum || compute_score() 155 sk->sk_family != PF_INET6) compute_score() 159 inet = inet_sk(sk); compute_score() 167 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { compute_score() 168 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) compute_score() 173 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { compute_score() 174 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) compute_score() 179 if (sk->sk_bound_dev_if) { compute_score() 180 if (sk->sk_bound_dev_if != dif) compute_score() 189 static inline int compute_score2(struct sock *sk, struct net *net, compute_score2() argument 197 if (!net_eq(sock_net(sk), net) || compute_score2() 198 udp_sk(sk)->udp_port_hash != hnum || compute_score2() 199 sk->sk_family != PF_INET6) compute_score2() 202 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) compute_score2() 206 inet = inet_sk(sk); compute_score2() 214 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { compute_score2() 215 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) compute_score2() 220 if (sk->sk_bound_dev_if) { compute_score2() 221 if (sk->sk_bound_dev_if != dif) compute_score2() 235 struct sock *sk, *result; udp6_lib_lookup2() local 243 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { udp6_lib_lookup2() 244 score = compute_score2(sk, net, saddr, sport, udp6_lib_lookup2() 247 result = sk; udp6_lib_lookup2() 249 reuseport = sk->sk_reuseport; udp6_lib_lookup2() 259 result = sk; udp6_lib_lookup2() 289 struct sock *sk, *result; __udp6_lib_lookup() local 325 sk_nulls_for_each_rcu(sk, node, &hslot->head) { __udp6_lib_lookup() 326 score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); __udp6_lib_lookup() 328 result = sk; __udp6_lib_lookup() 330 reuseport = sk->sk_reuseport; __udp6_lib_lookup() 339 result = sk; __udp6_lib_lookup() 369 struct sock *sk; __udp6_lib_lookup_skb() local 372 sk = skb_steal_sock(skb); __udp6_lib_lookup_skb() 373 if (unlikely(sk)) __udp6_lib_lookup_skb() 374 return sk; __udp6_lib_lookup_skb() 392 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, udpv6_recvmsg() argument 395 struct ipv6_pinfo *np = inet6_sk(sk); udpv6_recvmsg() 396 struct inet_sock *inet = inet_sk(sk); udpv6_recvmsg() 401 int is_udplite = 
IS_UDPLITE(sk); udpv6_recvmsg() 406 return ipv6_recv_error(sk, msg, len, addr_len); udpv6_recvmsg() 409 return ipv6_recv_rxpmtu(sk, msg, len, addr_len); udpv6_recvmsg() 412 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), udpv6_recvmsg() 448 atomic_inc(&sk->sk_drops); udpv6_recvmsg() 450 UDP_INC_STATS_USER(sock_net(sk), udpv6_recvmsg() 454 UDP6_INC_STATS_USER(sock_net(sk), udpv6_recvmsg() 462 UDP_INC_STATS_USER(sock_net(sk), udpv6_recvmsg() 465 UDP6_INC_STATS_USER(sock_net(sk), udpv6_recvmsg() 469 sock_recv_ts_and_drops(msg, sk, skb); udpv6_recvmsg() 492 ip6_datagram_recv_common_ctl(sk, msg, skb); udpv6_recvmsg() 499 ip6_datagram_recv_specific_ctl(sk, msg, skb); udpv6_recvmsg() 507 skb_free_datagram_locked(sk, skb); udpv6_recvmsg() 512 slow = lock_sock_fast(sk); udpv6_recvmsg() 513 if (!skb_kill_datagram(sk, skb, flags)) { udpv6_recvmsg() 515 UDP_INC_STATS_USER(sock_net(sk), udpv6_recvmsg() 517 UDP_INC_STATS_USER(sock_net(sk), udpv6_recvmsg() 520 UDP6_INC_STATS_USER(sock_net(sk), udpv6_recvmsg() 522 UDP6_INC_STATS_USER(sock_net(sk), udpv6_recvmsg() 526 unlock_sock_fast(sk, slow); udpv6_recvmsg() 543 struct sock *sk; __udp6_lib_err() local 547 sk = __udp6_lib_lookup(net, daddr, uh->dest, __udp6_lib_err() 549 if (!sk) { __udp6_lib_err() 556 if (!ip6_sk_accept_pmtu(sk)) __udp6_lib_err() 558 ip6_sk_update_pmtu(skb, sk, info); __udp6_lib_err() 561 ip6_sk_redirect(skb, sk); __udp6_lib_err() 565 np = inet6_sk(sk); __udp6_lib_err() 570 if (sk->sk_state != TCP_ESTABLISHED && !np->recverr) __udp6_lib_err() 574 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); __udp6_lib_err() 576 sk->sk_err = err; __udp6_lib_err() 577 sk->sk_error_report(sk); __udp6_lib_err() 579 sock_put(sk); __udp6_lib_err() 582 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) __udpv6_queue_rcv_skb() argument 586 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { __udpv6_queue_rcv_skb() 587 sock_rps_save_rxhash(sk, skb); __udpv6_queue_rcv_skb() 588 sk_mark_napi_id(sk, skb); __udpv6_queue_rcv_skb() 589 sk_incoming_cpu_update(sk); __udpv6_queue_rcv_skb() 592 rc = sock_queue_rcv_skb(sk, skb); __udpv6_queue_rcv_skb() 594 int is_udplite = IS_UDPLITE(sk); __udpv6_queue_rcv_skb() 598 UDP6_INC_STATS_BH(sock_net(sk), __udpv6_queue_rcv_skb() 600 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); __udpv6_queue_rcv_skb() 622 int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) udpv6_queue_rcv_skb() argument 624 struct udp_sock *up = udp_sk(sk); udpv6_queue_rcv_skb() 626 int is_udplite = IS_UDPLITE(sk); udpv6_queue_rcv_skb() 628 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) udpv6_queue_rcv_skb() 632 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); udpv6_queue_rcv_skb() 654 ret = encap_rcv(sk, skb); udpv6_queue_rcv_skb() 656 UDP_INC_STATS_BH(sock_net(sk), udpv6_queue_rcv_skb() 683 if (rcu_access_pointer(sk->sk_filter)) { udpv6_queue_rcv_skb() 688 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { udpv6_queue_rcv_skb() 689 UDP6_INC_STATS_BH(sock_net(sk), udpv6_queue_rcv_skb() 696 bh_lock_sock(sk); udpv6_queue_rcv_skb() 698 if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb() 699 rc = __udpv6_queue_rcv_skb(sk, skb); udpv6_queue_rcv_skb() 700 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { udpv6_queue_rcv_skb() 701 bh_unlock_sock(sk); udpv6_queue_rcv_skb() 704 bh_unlock_sock(sk); udpv6_queue_rcv_skb() 709 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); udpv6_queue_rcv_skb() 711 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 
udpv6_queue_rcv_skb() 712 atomic_inc(&sk->sk_drops); udpv6_queue_rcv_skb() 717 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk, __udp_v6_is_mcast_sock() argument 722 struct inet_sock *inet = inet_sk(sk); __udp_v6_is_mcast_sock() 724 if (!net_eq(sock_net(sk), net)) __udp_v6_is_mcast_sock() 727 if (udp_sk(sk)->udp_port_hash != hnum || __udp_v6_is_mcast_sock() 728 sk->sk_family != PF_INET6 || __udp_v6_is_mcast_sock() 730 (!ipv6_addr_any(&sk->sk_v6_daddr) && __udp_v6_is_mcast_sock() 731 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) || __udp_v6_is_mcast_sock() 732 (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) || __udp_v6_is_mcast_sock() 733 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && __udp_v6_is_mcast_sock() 734 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))) __udp_v6_is_mcast_sock() 736 if (!inet6_mc_check(sk, loc_addr, rmt_addr)) __udp_v6_is_mcast_sock() 745 struct sock *sk; flush_stack() local 749 sk = stack[i]; flush_stack() 753 atomic_inc(&sk->sk_drops); flush_stack() 754 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, flush_stack() 755 IS_UDPLITE(sk)); flush_stack() 756 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, flush_stack() 757 IS_UDPLITE(sk)); flush_stack() 760 if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0) flush_stack() 762 sock_put(sk); flush_stack() 786 struct sock *sk, *stack[256 / sizeof(struct sock *)]; __udp6_lib_mcast_deliver() local 792 unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); __udp6_lib_mcast_deliver() 802 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); __udp6_lib_mcast_deliver() 806 sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) { __udp6_lib_mcast_deliver() 807 if (__udp_v6_is_mcast_sock(net, sk, __udp6_lib_mcast_deliver() 814 (uh->check || udp_sk(sk)->no_check6_rx)) { __udp6_lib_mcast_deliver() 820 stack[count++] = sk; __udp6_lib_mcast_deliver() 821 sock_hold(sk); __udp6_lib_mcast_deliver() 848 struct sock *sk; __udp6_lib_rcv() local 899 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); __udp6_lib_rcv() 900 if (sk) { __udp6_lib_rcv() 903 if (!uh->check && !udp_sk(sk)->no_check6_rx) { __udp6_lib_rcv() 904 sock_put(sk); __udp6_lib_rcv() 909 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) __udp6_lib_rcv() 913 ret = udpv6_queue_rcv_skb(sk, skb); __udp6_lib_rcv() 914 sock_put(sk); __udp6_lib_rcv() 965 static void udp_v6_flush_pending_frames(struct sock *sk) udp_v6_flush_pending_frames() argument 967 struct udp_sock *up = udp_sk(sk); udp_v6_flush_pending_frames() 970 udp_flush_pending_frames(sk); udp_v6_flush_pending_frames() 974 ip6_flush_pending_frames(sk); udp_v6_flush_pending_frames() 980 * @sk: socket we are sending on 984 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, udp6_hwcsum_outgoing() argument 1026 struct sock *sk = skb->sk; udp_v6_send_skb() local 1029 int is_udplite = IS_UDPLITE(sk); udp_v6_send_skb() 1045 else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */ udp_v6_send_skb() 1049 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len); udp_v6_send_skb() 1063 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { udp_v6_send_skb() 1064 UDP6_INC_STATS_USER(sock_net(sk), udp_v6_send_skb() 1069 UDP6_INC_STATS_USER(sock_net(sk), udp_v6_send_skb() 1074 static int udp_v6_push_pending_frames(struct sock *sk) udp_v6_push_pending_frames() argument 1077 struct udp_sock *up = udp_sk(sk); udp_v6_push_pending_frames() 1082 return udp_push_pending_frames(sk); udp_v6_push_pending_frames() 1087 fl6 = 
inet_sk(sk)->cork.fl.u.ip6; udp_v6_push_pending_frames() 1089 skb = ip6_finish_skb(sk); udp_v6_push_pending_frames() 1101 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) udpv6_sendmsg() argument 1104 struct udp_sock *up = udp_sk(sk); udpv6_sendmsg() 1105 struct inet_sock *inet = inet_sk(sk); udpv6_sendmsg() 1106 struct ipv6_pinfo *np = inet6_sk(sk); udpv6_sendmsg() 1122 int is_udplite = IS_UDPLITE(sk); udpv6_sendmsg() 1147 if (sk->sk_state != TCP_ESTABLISHED) udpv6_sendmsg() 1149 daddr = &sk->sk_v6_daddr; udpv6_sendmsg() 1162 if (__ipv6_only_sock(sk)) udpv6_sendmsg() 1164 return udp_sendmsg(sk, msg, len); udpv6_sendmsg() 1169 return udp_sendmsg(sk, msg, len); udpv6_sendmsg() 1183 lock_sock(sk); udpv6_sendmsg() 1186 release_sock(sk); udpv6_sendmsg() 1192 release_sock(sk); udpv6_sendmsg() 1208 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); udpv6_sendmsg() 1216 * sk->sk_dst_cache. udpv6_sendmsg() 1218 if (sk->sk_state == TCP_ESTABLISHED && udpv6_sendmsg() 1219 ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) udpv6_sendmsg() 1220 daddr = &sk->sk_v6_daddr; udpv6_sendmsg() 1227 if (sk->sk_state != TCP_ESTABLISHED) udpv6_sendmsg() 1231 daddr = &sk->sk_v6_daddr; udpv6_sendmsg() 1237 fl6.flowi6_oif = sk->sk_bound_dev_if; udpv6_sendmsg() 1242 fl6.flowi6_mark = sk->sk_mark; udpv6_sendmsg() 1249 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, udpv6_sendmsg() 1256 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); udpv6_sendmsg() 1272 fl6.flowi6_proto = sk->sk_protocol; udpv6_sendmsg() 1291 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); udpv6_sendmsg() 1293 dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); udpv6_sendmsg() 1314 skb = ip6_make_skb(sk, getfrag, msg, ulen, udpv6_sendmsg() 1324 lock_sock(sk); udpv6_sendmsg() 1328 release_sock(sk); udpv6_sendmsg() 1341 err = ip6_append_data(sk, getfrag, msg, ulen, udpv6_sendmsg() 1346 udp_v6_flush_pending_frames(sk); udpv6_sendmsg() 1348 err = udp_v6_push_pending_frames(sk); udpv6_sendmsg() 1349 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) udpv6_sendmsg() 1354 release_sock(sk); udpv6_sendmsg() 1359 ip6_dst_store(sk, dst, udpv6_sendmsg() 1360 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? 
udpv6_sendmsg() 1361 &sk->sk_v6_daddr : NULL, udpv6_sendmsg() 1386 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { udpv6_sendmsg() 1387 UDP6_INC_STATS_USER(sock_net(sk), udpv6_sendmsg() 1400 void udpv6_destroy_sock(struct sock *sk) udpv6_destroy_sock() argument 1402 struct udp_sock *up = udp_sk(sk); udpv6_destroy_sock() 1403 lock_sock(sk); udpv6_destroy_sock() 1404 udp_v6_flush_pending_frames(sk); udpv6_destroy_sock() 1405 release_sock(sk); udpv6_destroy_sock() 1408 void (*encap_destroy)(struct sock *sk); udpv6_destroy_sock() 1411 encap_destroy(sk); udpv6_destroy_sock() 1414 inet6_destroy_sock(sk); udpv6_destroy_sock() 1420 int udpv6_setsockopt(struct sock *sk, int level, int optname, udpv6_setsockopt() argument 1424 return udp_lib_setsockopt(sk, level, optname, optval, optlen, udpv6_setsockopt() 1426 return ipv6_setsockopt(sk, level, optname, optval, optlen); udpv6_setsockopt() 1430 int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, compat_udpv6_setsockopt() argument 1434 return udp_lib_setsockopt(sk, level, optname, optval, optlen, compat_udpv6_setsockopt() 1436 return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); compat_udpv6_setsockopt() 1440 int udpv6_getsockopt(struct sock *sk, int level, int optname, udpv6_getsockopt() argument 1444 return udp_lib_getsockopt(sk, level, optname, optval, optlen); udpv6_getsockopt() 1445 return ipv6_getsockopt(sk, level, optname, optval, optlen); udpv6_getsockopt() 1449 int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, compat_udpv6_getsockopt() argument 1453 return udp_lib_getsockopt(sk, level, optname, optval, optlen); compat_udpv6_getsockopt() 1454 return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); compat_udpv6_getsockopt() 1508 void udp_v6_clear_sk(struct sock *sk, int size) udp_v6_clear_sk() argument 1510 struct inet_sock *inet = inet_sk(sk); udp_v6_clear_sk() 1513 sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6)); udp_v6_clear_sk()
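
In the udp.c fragment, compute_score()/compute_score2() and the reuseport checks in __udp6_lib_lookup() decide which of several sockets bound to the same port receives a datagram. A hedged userspace view of the SO_REUSEPORT side of that machinery: two IPv6 UDP sockets join one port-sharing group, and the kernel's lookup scoring then spreads incoming datagrams across them. Port 5353 is an example value.

    /* Two IPv6 UDP sockets sharing one port via SO_REUSEPORT; the
     * reuseport branch of __udp6_lib_lookup() selects between them. */
    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    static int bound_udp6(unsigned short port)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            int on = 1;
            struct sockaddr_in6 a;

            setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
            memset(&a, 0, sizeof(a));
            a.sin6_family = AF_INET6;
            a.sin6_addr = in6addr_any;
            a.sin6_port = htons(port);
            return bind(fd, (struct sockaddr *)&a, sizeof(a)) ? -1 : fd;
    }

    int main(void)
    {
            int fd1 = bound_udp6(5353);
            int fd2 = bound_udp6(5353);     /* succeeds: same reuseport group */

            printf("fd1=%d fd2=%d\n", fd1, fd2);
            return fd1 < 0 || fd2 < 0;
    }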
|
H A D | inet6_hashtables.c | 59 struct sock *sk; __inet6_lookup_established() local 72 sk_nulls_for_each_rcu(sk, node, &head->chain) { __inet6_lookup_established() 73 if (sk->sk_hash != hash) __inet6_lookup_established() 75 if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif)) __inet6_lookup_established() 77 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) __inet6_lookup_established() 80 if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) { __inet6_lookup_established() 81 sock_gen_put(sk); __inet6_lookup_established() 89 sk = NULL; __inet6_lookup_established() 92 return sk; __inet6_lookup_established() 96 static inline int compute_score(struct sock *sk, struct net *net, compute_score() argument 103 if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum && compute_score() 104 sk->sk_family == PF_INET6) { compute_score() 107 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { compute_score() 108 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) compute_score() 112 if (sk->sk_bound_dev_if) { compute_score() 113 if (sk->sk_bound_dev_if != dif) compute_score() 126 struct sock *sk; inet6_lookup_listener() local 138 sk_nulls_for_each(sk, node, &ilb->head) { inet6_lookup_listener() 139 score = compute_score(sk, net, hnum, daddr, dif); inet6_lookup_listener() 142 result = sk; inet6_lookup_listener() 143 reuseport = sk->sk_reuseport; inet6_lookup_listener() 152 result = sk; inet6_lookup_listener() 182 struct sock *sk; inet6_lookup() local 185 sk = __inet6_lookup(net, hashinfo, saddr, sport, daddr, ntohs(dport), dif); inet6_lookup() 188 return sk; inet6_lookup() 193 struct sock *sk, const __u16 lport, __inet6_check_established() 197 struct inet_sock *inet = inet_sk(sk); __inet6_check_established() 198 const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr; __inet6_check_established() 199 const struct in6_addr *saddr = &sk->sk_v6_daddr; __inet6_check_established() 200 const int dif = sk->sk_bound_dev_if; __inet6_check_established() 202 struct net *net = sock_net(sk); __inet6_check_established() 221 if (twsk_unique(sk, sk2, twp)) __inet6_check_established() 233 sk->sk_hash = hash; __inet6_check_established() 234 WARN_ON(!sk_unhashed(sk)); __inet6_check_established() 235 __sk_nulls_add_node_rcu(sk, &head->chain); __inet6_check_established() 243 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); __inet6_check_established() 260 static inline u32 inet6_sk_port_offset(const struct sock *sk) inet6_sk_port_offset() argument 262 const struct inet_sock *inet = inet_sk(sk); inet6_sk_port_offset() 264 return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32, inet6_sk_port_offset() 265 sk->sk_v6_daddr.s6_addr32, inet6_sk_port_offset() 270 struct sock *sk) inet6_hash_connect() 272 return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk), inet6_hash_connect() 192 __inet6_check_established(struct inet_timewait_death_row *death_row, struct sock *sk, const __u16 lport, struct inet_timewait_sock **twp) __inet6_check_established() argument 269 inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) inet6_hash_connect() argument
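
__inet6_lookup_established() above runs locklessly: it matches by hash, takes a reference only if atomic_inc_not_zero() succeeds, then re-checks INET6_MATCH and drops the reference if the socket was recycled in between. The following is a generic, hedged sketch of that take-then-revalidate pattern using C11 atomics; it models the idea in userspace and is not the kernel API.

    /* Generic sketch of the "inc-not-zero, then re-check the key" lookup:
     * grab a reference only while the object is live, then verify it
     * still matches; otherwise undo and let the caller keep searching. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
            atomic_int refcnt;      /* 0 means being freed/recycled */
            int key;
    };

    static bool get_ref_if_live(struct entry *e)
    {
            int old = atomic_load(&e->refcnt);

            while (old != 0)
                    if (atomic_compare_exchange_weak(&e->refcnt, &old, old + 1))
                            return true;
            return false;   /* raced with teardown */
    }

    static struct entry *lookup(struct entry *e, int key)
    {
            if (e->key != key)
                    return NULL;
            if (!get_ref_if_live(e))
                    return NULL;
            if (e->key != key) {            /* recycled under us: undo */
                    atomic_fetch_sub(&e->refcnt, 1);
                    return NULL;
            }
            return e;
    }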
|
H A D | raw.c | 72 static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, __raw_v6_lookup() argument 78 sk_for_each_from(sk) sk_for_each_from() 79 if (inet_sk(sk)->inet_num == num) { sk_for_each_from() 81 if (!net_eq(sock_net(sk), net)) sk_for_each_from() 84 if (!ipv6_addr_any(&sk->sk_v6_daddr) && sk_for_each_from() 85 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) sk_for_each_from() 88 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) sk_for_each_from() 91 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { sk_for_each_from() 92 if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)) sk_for_each_from() 95 inet6_mc_check(sk, loc_addr, rmt_addr)) sk_for_each_from() 101 sk = NULL; 103 return sk; 110 static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) icmpv6_filter() argument 121 const __u32 *data = &raw6_sk(sk)->filter.data[0]; icmpv6_filter() 162 struct sock *sk; ipv6_raw_deliver() local 173 sk = sk_head(&raw_v6_hashinfo.ht[hash]); ipv6_raw_deliver() 175 if (!sk) ipv6_raw_deliver() 179 sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, inet6_iif(skb)); ipv6_raw_deliver() 181 while (sk) { ipv6_raw_deliver() 187 filtered = icmpv6_filter(sk, skb); ipv6_raw_deliver() 202 filtered = filter ? (*filter)(sk, skb) : 0; ipv6_raw_deliver() 219 rawv6_rcv(sk, clone); ipv6_raw_deliver() 222 sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr, ipv6_raw_deliver() 242 static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) rawv6_bind() argument 244 struct inet_sock *inet = inet_sk(sk); rawv6_bind() 245 struct ipv6_pinfo *np = inet6_sk(sk); rawv6_bind() 263 lock_sock(sk); rawv6_bind() 266 if (sk->sk_state != TCP_CLOSE) rawv6_bind() 280 sk->sk_bound_dev_if = addr->sin6_scope_id; rawv6_bind() 284 if (!sk->sk_bound_dev_if) rawv6_bind() 288 dev = dev_get_by_index_rcu(sock_net(sk), rawv6_bind() 289 sk->sk_bound_dev_if); rawv6_bind() 300 if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, rawv6_bind() 308 sk->sk_v6_rcv_saddr = addr->sin6_addr; rawv6_bind() 315 release_sock(sk); rawv6_bind() 319 static void rawv6_err(struct sock *sk, struct sk_buff *skb, rawv6_err() argument 323 struct inet_sock *inet = inet_sk(sk); rawv6_err() 324 struct ipv6_pinfo *np = inet6_sk(sk); rawv6_err() 333 if (!np->recverr && sk->sk_state != TCP_ESTABLISHED) rawv6_err() 338 ip6_sk_update_pmtu(skb, sk, info); rawv6_err() 342 ip6_sk_redirect(skb, sk); rawv6_err() 349 ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload); rawv6_err() 353 sk->sk_err = err; rawv6_err() 354 sk->sk_error_report(sk); rawv6_err() 361 struct sock *sk; raw6_icmp_error() local 369 sk = sk_head(&raw_v6_hashinfo.ht[hash]); raw6_icmp_error() 370 if (sk) { raw6_icmp_error() 377 while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr, raw6_icmp_error() 379 rawv6_err(sk, skb, NULL, type, code, raw6_icmp_error() 381 sk = sk_next(sk); raw6_icmp_error() 387 static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb) rawv6_rcv_skb() argument 389 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) && rawv6_rcv_skb() 391 atomic_inc(&sk->sk_drops); rawv6_rcv_skb() 398 if (sock_queue_rcv_skb(sk, skb) < 0) { rawv6_rcv_skb() 413 int rawv6_rcv(struct sock *sk, struct sk_buff *skb) rawv6_rcv() argument 415 struct inet_sock *inet = inet_sk(sk); rawv6_rcv() 416 struct raw6_sock *rp = raw6_sk(sk); rawv6_rcv() 418 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { rawv6_rcv() 419 atomic_inc(&sk->sk_drops); rawv6_rcv() 443 atomic_inc(&sk->sk_drops); rawv6_rcv() 449 rawv6_rcv_skb(sk, skb); 
rawv6_rcv() 459 static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, rawv6_recvmsg() argument 462 struct ipv6_pinfo *np = inet6_sk(sk); rawv6_recvmsg() 472 return ipv6_recv_error(sk, msg, len, addr_len); rawv6_recvmsg() 475 return ipv6_recv_rxpmtu(sk, msg, len, addr_len); rawv6_recvmsg() 477 skb = skb_recv_datagram(sk, flags, noblock, &err); rawv6_recvmsg() 512 sock_recv_ts_and_drops(msg, sk, skb); rawv6_recvmsg() 515 ip6_datagram_recv_ctl(sk, msg, skb); rawv6_recvmsg() 522 skb_free_datagram(sk, skb); rawv6_recvmsg() 527 skb_kill_datagram(sk, skb, flags); rawv6_recvmsg() 536 static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, rawv6_push_pending_frames() argument 550 skb = skb_peek(&sk->sk_write_queue); rawv6_push_pending_frames() 555 total_len = inet_sk(sk)->cork.base.length; rawv6_push_pending_frames() 558 ip6_flush_pending_frames(sk); rawv6_push_pending_frames() 563 if (skb_queue_len(&sk->sk_write_queue) == 1) { rawv6_push_pending_frames() 572 skb_queue_walk(&sk->sk_write_queue, skb) { rawv6_push_pending_frames() 606 err = ip6_push_pending_frames(sk); rawv6_push_pending_frames() 611 static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, rawv6_send_hdrinc() argument 615 struct ipv6_pinfo *np = inet6_sk(sk); rawv6_send_hdrinc() 624 ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu); rawv6_send_hdrinc() 630 skb = sock_alloc_send_skb(sk, rawv6_send_hdrinc() 638 skb->priority = sk->sk_priority; rawv6_send_hdrinc() 639 skb->mark = sk->sk_mark; rawv6_send_hdrinc() 654 IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); rawv6_send_hdrinc() 655 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, rawv6_send_hdrinc() 668 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); rawv6_send_hdrinc() 732 static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) rawv6_sendmsg() argument 738 struct inet_sock *inet = inet_sk(sk); rawv6_sendmsg() 739 struct ipv6_pinfo *np = inet6_sk(sk); rawv6_sendmsg() 740 struct raw6_sock *rp = raw6_sk(sk); rawv6_sendmsg() 768 fl6.flowi6_mark = sk->sk_mark; rawv6_sendmsg() 792 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); rawv6_sendmsg() 800 * sk->sk_dst_cache. 
rawv6_sendmsg() 802 if (sk->sk_state == TCP_ESTABLISHED && rawv6_sendmsg() 803 ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) rawv6_sendmsg() 804 daddr = &sk->sk_v6_daddr; rawv6_sendmsg() 811 if (sk->sk_state != TCP_ESTABLISHED) rawv6_sendmsg() 815 daddr = &sk->sk_v6_daddr; rawv6_sendmsg() 820 fl6.flowi6_oif = sk->sk_bound_dev_if; rawv6_sendmsg() 827 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, rawv6_sendmsg() 834 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); rawv6_sendmsg() 869 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); rawv6_sendmsg() 871 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); rawv6_sendmsg() 890 err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags); rawv6_sendmsg() 892 lock_sock(sk); rawv6_sendmsg() 893 err = ip6_append_data(sk, raw6_getfrag, &rfv, rawv6_sendmsg() 898 ip6_flush_pending_frames(sk); rawv6_sendmsg() 900 err = rawv6_push_pending_frames(sk, &fl6, rp); rawv6_sendmsg() 901 release_sock(sk); rawv6_sendmsg() 917 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, rawv6_seticmpfilter() argument 924 if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) rawv6_seticmpfilter() 934 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, rawv6_geticmpfilter() argument 949 if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) rawv6_geticmpfilter() 960 static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, do_rawv6_setsockopt() argument 963 struct raw6_sock *rp = raw6_sk(sk); do_rawv6_setsockopt() 971 if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 && do_rawv6_setsockopt() 1002 static int rawv6_setsockopt(struct sock *sk, int level, int optname, rawv6_setsockopt() argument 1010 if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) rawv6_setsockopt() 1012 return rawv6_seticmpfilter(sk, level, optname, optval, optlen); rawv6_setsockopt() 1017 return ipv6_setsockopt(sk, level, optname, optval, optlen); rawv6_setsockopt() 1020 return do_rawv6_setsockopt(sk, level, optname, optval, optlen); rawv6_setsockopt() 1024 static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, compat_rawv6_setsockopt() argument 1031 if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) compat_rawv6_setsockopt() 1033 return rawv6_seticmpfilter(sk, level, optname, optval, optlen); compat_rawv6_setsockopt() 1038 return compat_ipv6_setsockopt(sk, level, optname, compat_rawv6_setsockopt() 1041 return do_rawv6_setsockopt(sk, level, optname, optval, optlen); compat_rawv6_setsockopt() 1045 static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, do_rawv6_getsockopt() argument 1048 struct raw6_sock *rp = raw6_sk(sk); do_rawv6_getsockopt() 1080 static int rawv6_getsockopt(struct sock *sk, int level, int optname, rawv6_getsockopt() argument 1088 if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) rawv6_getsockopt() 1090 return rawv6_geticmpfilter(sk, level, optname, optval, optlen); rawv6_getsockopt() 1095 return ipv6_getsockopt(sk, level, optname, optval, optlen); rawv6_getsockopt() 1098 return do_rawv6_getsockopt(sk, level, optname, optval, optlen); rawv6_getsockopt() 1102 static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname, compat_rawv6_getsockopt() argument 1109 if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) compat_rawv6_getsockopt() 1111 return rawv6_geticmpfilter(sk, level, optname, optval, optlen); compat_rawv6_getsockopt() 1116 return compat_ipv6_getsockopt(sk, level, optname, compat_rawv6_getsockopt() 1119 return do_rawv6_getsockopt(sk, level, optname, optval, optlen); compat_rawv6_getsockopt() 
1123 static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) rawv6_ioctl() argument 1127 int amount = sk_wmem_alloc_get(sk); rawv6_ioctl() 1135 spin_lock_bh(&sk->sk_receive_queue.lock); rawv6_ioctl() 1136 skb = skb_peek(&sk->sk_receive_queue); rawv6_ioctl() 1140 spin_unlock_bh(&sk->sk_receive_queue.lock); rawv6_ioctl() 1146 return ip6mr_ioctl(sk, cmd, (void __user *)arg); rawv6_ioctl() 1154 static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) compat_rawv6_ioctl() argument 1162 return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg)); compat_rawv6_ioctl() 1170 static void rawv6_close(struct sock *sk, long timeout) rawv6_close() argument 1172 if (inet_sk(sk)->inet_num == IPPROTO_RAW) rawv6_close() 1173 ip6_ra_control(sk, -1); rawv6_close() 1174 ip6mr_sk_done(sk); rawv6_close() 1175 sk_common_release(sk); rawv6_close() 1178 static void raw6_destroy(struct sock *sk) raw6_destroy() argument 1180 lock_sock(sk); raw6_destroy() 1181 ip6_flush_pending_frames(sk); raw6_destroy() 1182 release_sock(sk); raw6_destroy() 1184 inet6_destroy_sock(sk); raw6_destroy() 1187 static int rawv6_init_sk(struct sock *sk) rawv6_init_sk() argument 1189 struct raw6_sock *rp = raw6_sk(sk); rawv6_init_sk() 1191 switch (inet_sk(sk)->inet_num) { rawv6_init_sk()
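
icmpv6_filter() in the raw.c fragment consults a per-socket bitmap that userspace installs through the ICMP6_FILTER socket option, which rawv6_seticmpfilter() copies in. A hedged example installing a filter that passes only echo replies, using the standard RFC 3542 macros:

    /* Open an ICMPv6 raw socket (needs CAP_NET_RAW) and ask the kernel's
     * icmpv6_filter() to drop everything except echo replies. */
    #include <stdio.h>
    #include <netinet/in.h>
    #include <netinet/icmp6.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
            struct icmp6_filter filt;

            if (fd < 0) {
                    perror("socket");  /* typically EPERM without CAP_NET_RAW */
                    return 1;
            }
            ICMP6_FILTER_SETBLOCKALL(&filt);
            ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
            if (setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt)))
                    perror("setsockopt");
            return 0;
    }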
|
H A D | udp_impl.h | 14 int udp_v6_get_port(struct sock *sk, unsigned short snum); 16 int udpv6_getsockopt(struct sock *sk, int level, int optname, 18 int udpv6_setsockopt(struct sock *sk, int level, int optname, 21 int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, 23 int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, 26 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 27 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 29 int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 30 void udpv6_destroy_sock(struct sock *sk); 32 void udp_v6_clear_sk(struct sock *sk, int size);
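
The prototypes in udp_impl.h are the handlers that net/ipv6/udp.c wires into the IPv6 UDP protocol table. As a hedged, userspace-compilable sketch of that dispatch-table idea (the struct and handlers below are stand-ins, not the kernel's struct proto):

    /* Stand-in dispatch table: udp.c fills a struct of function pointers
     * with the handlers declared in this header. Names here are
     * hypothetical placeholders for udpv6_sendmsg() and friends. */
    #include <stddef.h>

    struct sock;
    struct msghdr;

    struct proto_sketch {
            const char *name;
            int  (*sendmsg)(struct sock *sk, struct msghdr *msg, size_t len);
            int  (*setsockopt)(struct sock *sk, int level, int optname,
                               char *optval, unsigned int optlen);
            void (*destroy)(struct sock *sk);
    };

    static int my_sendmsg(struct sock *sk, struct msghdr *m, size_t l) { return 0; }
    static int my_setsockopt(struct sock *sk, int lv, int on, char *ov,
                             unsigned int ol) { return 0; }
    static void my_destroy(struct sock *sk) { }

    static const struct proto_sketch udpv6_sketch = {
            .name       = "UDPv6",
            .sendmsg    = my_sendmsg,
            .setsockopt = my_setsockopt,
            .destroy    = my_destroy,
    };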
|
H A D | xfrm6_output.c | 33 struct sock *sk = skb->sk; xfrm6_local_dontfrag() local 35 if (sk) { xfrm6_local_dontfrag() 36 if (sk->sk_family != AF_INET6) xfrm6_local_dontfrag() 39 proto = sk->sk_protocol; xfrm6_local_dontfrag() 41 return inet6_sk(sk)->dontfrag; xfrm6_local_dontfrag() 50 struct sock *sk = skb->sk; xfrm6_local_rxpmtu() local 52 fl6.flowi6_oif = sk->sk_bound_dev_if; xfrm6_local_rxpmtu() 55 ipv6_local_rxpmtu(sk, &fl6, mtu); xfrm6_local_rxpmtu() 62 struct sock *sk = skb->sk; xfrm6_local_error() local 65 fl6.fl6_dport = inet_sk(sk)->inet_dport; xfrm6_local_error() 68 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); xfrm6_local_error() 85 else if (skb->sk) xfrm6_tunnel_check_size() 123 int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb) xfrm6_output_finish() argument 131 return xfrm_output(sk, skb); xfrm6_output_finish() 134 static int __xfrm6_output(struct sock *sk, struct sk_buff *skb) __xfrm6_output() argument 143 return dst_output_sk(sk, skb); __xfrm6_output() 155 } else if (!skb->ignore_df && skb->len > mtu && skb->sk) { __xfrm6_output() 163 return ip6_fragment(sk, skb, __xfrm6_output() 166 return x->outer_mode->afinfo->output_finish(sk, skb); __xfrm6_output() 169 int xfrm6_output(struct sock *sk, struct sk_buff *skb) xfrm6_output() argument 171 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb, xfrm6_output()
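
__xfrm6_output() above chooses between fragmenting, raising a local error, and queueing a path-MTU notification, driven by the socket's dontfrag flag and the rxpmtu machinery. The userspace knobs feeding those branches are the RFC 3542 options below; the fallback defines cover libc headers that lack the constants, and the values are the ones Linux exposes.

    /* Opt a UDP socket out of local fragmentation and request path-MTU
     * notifications instead: these flags feed xfrm6_local_dontfrag()
     * and ipv6_local_rxpmtu() on the output path. */
    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef IPV6_RECVPATHMTU
    #define IPV6_RECVPATHMTU 60
    #endif
    #ifndef IPV6_DONTFRAG
    #define IPV6_DONTFRAG 62
    #endif

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            int on = 1;

            setsockopt(fd, IPPROTO_IPV6, IPV6_DONTFRAG, &on, sizeof(on));
            setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPATHMTU, &on, sizeof(on));
            /* oversized sends now fail with EMSGSIZE or raise an RXPMTU
             * event (read via recvmsg()) instead of fragmenting locally */
            return 0;
    }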
|
H A D | ipv6_sockglue.c | 61 int ip6_ra_control(struct sock *sk, int sel) ip6_ra_control() argument 66 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW) ip6_ra_control() 73 if (ra->sk == sk) { ip6_ra_control() 83 sock_put(sk); ip6_ra_control() 92 new_ra->sk = sk; ip6_ra_control() 96 sock_hold(sk); ip6_ra_control() 102 struct ipv6_txoptions *ipv6_update_options(struct sock *sk, ipv6_update_options() argument 105 if (inet_sk(sk)->is_icsk) { ipv6_update_options() 107 !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && ipv6_update_options() 108 inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) { ipv6_update_options() 109 struct inet_connection_sock *icsk = inet_csk(sk); ipv6_update_options() 111 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); ipv6_update_options() 114 opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt, ipv6_update_options() 116 sk_dst_reset(sk); ipv6_update_options() 140 static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, do_ipv6_setsockopt() argument 143 struct ipv6_pinfo *np = inet6_sk(sk); do_ipv6_setsockopt() 144 struct net *net = sock_net(sk); do_ipv6_setsockopt() 162 return ip6_mroute_setsockopt(sk, optname, optval, optlen); do_ipv6_setsockopt() 166 lock_sock(sk); do_ipv6_setsockopt() 177 if (sk->sk_type == SOCK_RAW) do_ipv6_setsockopt() 180 if (sk->sk_protocol == IPPROTO_UDP || do_ipv6_setsockopt() 181 sk->sk_protocol == IPPROTO_UDPLITE) { do_ipv6_setsockopt() 182 struct udp_sock *up = udp_sk(sk); do_ipv6_setsockopt() 187 } else if (sk->sk_protocol != IPPROTO_TCP) do_ipv6_setsockopt() 190 if (sk->sk_state != TCP_ESTABLISHED) { do_ipv6_setsockopt() 195 if (ipv6_only_sock(sk) || do_ipv6_setsockopt() 196 !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) { do_ipv6_setsockopt() 201 fl6_free_socklist(sk); do_ipv6_setsockopt() 202 ipv6_sock_mc_close(sk); do_ipv6_setsockopt() 209 sk_refcnt_debug_dec(sk); do_ipv6_setsockopt() 211 if (sk->sk_protocol == IPPROTO_TCP) { do_ipv6_setsockopt() 212 struct inet_connection_sock *icsk = inet_csk(sk); do_ipv6_setsockopt() 214 sock_prot_inuse_add(net, sk->sk_prot, -1); do_ipv6_setsockopt() 217 sk->sk_prot = &tcp_prot; do_ipv6_setsockopt() 219 sk->sk_socket->ops = &inet_stream_ops; do_ipv6_setsockopt() 220 sk->sk_family = PF_INET; do_ipv6_setsockopt() 221 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); do_ipv6_setsockopt() 225 if (sk->sk_protocol == IPPROTO_UDPLITE) do_ipv6_setsockopt() 228 sock_prot_inuse_add(net, sk->sk_prot, -1); do_ipv6_setsockopt() 231 sk->sk_prot = prot; do_ipv6_setsockopt() 232 sk->sk_socket->ops = &inet_dgram_ops; do_ipv6_setsockopt() 233 sk->sk_family = PF_INET; do_ipv6_setsockopt() 238 atomic_sub(opt->tot_len, &sk->sk_omem_alloc); do_ipv6_setsockopt() 244 sk->sk_destruct = inet_sock_destruct; do_ipv6_setsockopt() 249 sk_refcnt_debug_inc(sk); do_ipv6_setsockopt() 258 inet_sk(sk)->inet_num) do_ipv6_setsockopt() 260 sk->sk_ipv6only = valbool; do_ipv6_setsockopt() 376 inet_sk(sk)->transparent = valbool; do_ipv6_setsockopt() 410 opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); do_ipv6_setsockopt() 411 opt = ipv6_renew_options(sk, opt, optname, do_ipv6_setsockopt() 438 opt = ipv6_update_options(sk, opt); do_ipv6_setsockopt() 441 atomic_sub(opt->tot_len, &sk->sk_omem_alloc); do_ipv6_setsockopt() 460 if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if) do_ipv6_setsockopt() 477 fl6.flowi6_oif = sk->sk_bound_dev_if; do_ipv6_setsockopt() 478 fl6.flowi6_mark = sk->sk_mark; do_ipv6_setsockopt() 490 opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL); 
do_ipv6_setsockopt() 505 retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, do_ipv6_setsockopt() 511 opt = ipv6_update_options(sk, opt); do_ipv6_setsockopt() 514 atomic_sub(opt->tot_len, &sk->sk_omem_alloc); do_ipv6_setsockopt() 529 if (sk->sk_type == SOCK_STREAM) do_ipv6_setsockopt() 570 if (sk->sk_bound_dev_if) do_ipv6_setsockopt() 579 if (sk->sk_type == SOCK_STREAM) do_ipv6_setsockopt() 587 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) do_ipv6_setsockopt() 609 if (inet_sk(sk)->is_icsk) do_ipv6_setsockopt() 617 retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr); do_ipv6_setsockopt() 619 retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr); do_ipv6_setsockopt() 635 retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); do_ipv6_setsockopt() 637 retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); do_ipv6_setsockopt() 658 retv = ipv6_sock_mc_join(sk, greq.gr_interface, do_ipv6_setsockopt() 661 retv = ipv6_sock_mc_drop(sk, greq.gr_interface, do_ipv6_setsockopt() 694 retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, do_ipv6_setsockopt() 705 retv = ip6_mc_source(add, omode, sk, &greqs); do_ipv6_setsockopt() 740 retv = ip6_mc_msfilter(sk, gsf); do_ipv6_setsockopt() 748 retv = ip6_ra_control(sk, val); do_ipv6_setsockopt() 771 skb_queue_purge(&sk->sk_error_queue); do_ipv6_setsockopt() 781 retv = ipv6_flowlabel_opt(sk, optval, optlen); do_ipv6_setsockopt() 788 retv = xfrm_user_policy(sk, optname, optval, optlen); do_ipv6_setsockopt() 871 release_sock(sk); do_ipv6_setsockopt() 878 release_sock(sk); do_ipv6_setsockopt() 884 int ipv6_setsockopt(struct sock *sk, int level, int optname, ipv6_setsockopt() argument 889 if (level == SOL_IP && sk->sk_type != SOCK_RAW) ipv6_setsockopt() 890 return udp_prot.setsockopt(sk, level, optname, optval, optlen); ipv6_setsockopt() 895 err = do_ipv6_setsockopt(sk, level, optname, optval, optlen); ipv6_setsockopt() 900 lock_sock(sk); ipv6_setsockopt() 901 err = nf_setsockopt(sk, PF_INET6, optname, optval, ipv6_setsockopt() 903 release_sock(sk); ipv6_setsockopt() 911 int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, compat_ipv6_setsockopt() argument 916 if (level == SOL_IP && sk->sk_type != SOCK_RAW) { compat_ipv6_setsockopt() 918 return udp_prot.compat_setsockopt(sk, level, optname, compat_ipv6_setsockopt() 920 return udp_prot.setsockopt(sk, level, optname, optval, optlen); compat_ipv6_setsockopt() 927 return compat_mc_setsockopt(sk, level, optname, optval, optlen, compat_ipv6_setsockopt() 930 err = do_ipv6_setsockopt(sk, level, optname, optval, optlen); compat_ipv6_setsockopt() 935 lock_sock(sk); compat_ipv6_setsockopt() 936 err = compat_nf_setsockopt(sk, PF_INET6, optname, compat_ipv6_setsockopt() 938 release_sock(sk); compat_ipv6_setsockopt() 946 static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, ipv6_getsockopt_sticky() argument 980 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, do_ipv6_getsockopt() argument 983 struct ipv6_pinfo *np = inet6_sk(sk); do_ipv6_getsockopt() 988 return ip6_mroute_getsockopt(sk, optname, optval, optlen); do_ipv6_getsockopt() 994 if (sk->sk_protocol != IPPROTO_UDP && do_ipv6_getsockopt() 995 sk->sk_protocol != IPPROTO_UDPLITE && do_ipv6_getsockopt() 996 sk->sk_protocol != IPPROTO_TCP) do_ipv6_getsockopt() 998 if (sk->sk_state != TCP_ESTABLISHED) do_ipv6_getsockopt() 1000 val = sk->sk_family; do_ipv6_getsockopt() 1013 lock_sock(sk); do_ipv6_getsockopt() 1014 err = 
ip6_mc_msfget(sk, &gsf, do_ipv6_getsockopt() 1016 release_sock(sk); do_ipv6_getsockopt() 1025 if (sk->sk_type != SOCK_STREAM) do_ipv6_getsockopt() 1032 lock_sock(sk); do_ipv6_getsockopt() 1035 ip6_datagram_recv_ctl(sk, &msg, skb); do_ipv6_getsockopt() 1036 release_sock(sk); do_ipv6_getsockopt() 1042 src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : np->sticky_pktinfo.ipi6_addr; do_ipv6_getsockopt() 1058 src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : do_ipv6_getsockopt() 1081 dst = __sk_dst_get(sk); do_ipv6_getsockopt() 1091 val = sk->sk_ipv6only; do_ipv6_getsockopt() 1125 lock_sock(sk); do_ipv6_getsockopt() 1126 opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); do_ipv6_getsockopt() 1127 len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len); do_ipv6_getsockopt() 1128 release_sock(sk); do_ipv6_getsockopt() 1179 dst = __sk_dst_get(sk); do_ipv6_getsockopt() 1195 val = inet_sk(sk)->transparent; do_ipv6_getsockopt() 1214 dst = __sk_dst_get(sk); do_ipv6_getsockopt() 1221 val = sock_net(sk)->ipv6.devconf_all->hop_limit; do_ipv6_getsockopt() 1268 val = ipv6_flowlabel_opt_get(sk, &freq, flags); do_ipv6_getsockopt() 1321 int ipv6_getsockopt(struct sock *sk, int level, int optname, ipv6_getsockopt() argument 1326 if (level == SOL_IP && sk->sk_type != SOCK_RAW) ipv6_getsockopt() 1327 return udp_prot.getsockopt(sk, level, optname, optval, optlen); ipv6_getsockopt() 1332 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0); ipv6_getsockopt() 1341 lock_sock(sk); ipv6_getsockopt() 1342 err = nf_getsockopt(sk, PF_INET6, optname, optval, ipv6_getsockopt() 1344 release_sock(sk); ipv6_getsockopt() 1354 int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, compat_ipv6_getsockopt() argument 1359 if (level == SOL_IP && sk->sk_type != SOCK_RAW) { compat_ipv6_getsockopt() 1361 return udp_prot.compat_getsockopt(sk, level, optname, compat_ipv6_getsockopt() 1363 return udp_prot.getsockopt(sk, level, optname, optval, optlen); compat_ipv6_getsockopt() 1370 return compat_mc_getsockopt(sk, level, optname, optval, optlen, compat_ipv6_getsockopt() 1373 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, compat_ipv6_getsockopt() 1383 lock_sock(sk); compat_ipv6_getsockopt() 1384 err = compat_nf_getsockopt(sk, PF_INET6, compat_ipv6_getsockopt() 1386 release_sock(sk); compat_ipv6_getsockopt()
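
The IPV6_V6ONLY branch of do_ipv6_setsockopt() above only flips sk->sk_ipv6only while no local port is assigned (it checks inet_sk(sk)->inet_num). A hedged example of the ordering this imposes on applications; port 8080 is a placeholder.

    /* IPV6_V6ONLY must be set before bind(): do_ipv6_setsockopt()
     * rejects it once the socket has a local port. */
    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_STREAM, 0);
            int on = 1;
            struct sockaddr_in6 a;

            setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)); /* first */

            memset(&a, 0, sizeof(a));
            a.sin6_family = AF_INET6;
            a.sin6_addr = in6addr_any;
            a.sin6_port = htons(8080);
            return bind(fd, (struct sockaddr *)&a, sizeof(a)) ? 1 : 0; /* then bind */
    }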
|
H A D | af_inet6.c | 93 static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) inet6_sk_generic() argument 95 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); inet6_sk_generic() 97 return (struct ipv6_pinfo *)(((u8 *)sk) + offset); inet6_sk_generic() 105 struct sock *sk; inet6_create() local 173 sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot); inet6_create() 174 if (!sk) inet6_create() 177 sock_init_data(sock, sk); inet6_create() 181 sk->sk_reuse = SK_CAN_REUSE; inet6_create() 183 inet = inet_sk(sk); inet6_create() 192 sk->sk_destruct = inet_sock_destruct; inet6_create() 193 sk->sk_family = PF_INET6; inet6_create() 194 sk->sk_protocol = protocol; inet6_create() 196 sk->sk_backlog_rcv = answer->prot->backlog_rcv; inet6_create() 198 inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk); inet6_create() 203 sk->sk_ipv6only = net->ipv6.sysctl.bindv6only; inet6_create() 229 sk_refcnt_debug_inc(sk); inet6_create() 237 sk->sk_prot->hash(sk); inet6_create() 239 if (sk->sk_prot->init) { inet6_create() 240 err = sk->sk_prot->init(sk); inet6_create() 242 sk_common_release(sk); inet6_create() 258 struct sock *sk = sock->sk; inet6_bind() local 259 struct inet_sock *inet = inet_sk(sk); inet6_bind() 260 struct ipv6_pinfo *np = inet6_sk(sk); inet6_bind() 261 struct net *net = sock_net(sk); inet6_bind() 268 if (sk->sk_prot->bind) inet6_bind() 269 return sk->sk_prot->bind(sk, uaddr, addr_len); inet6_bind() 285 lock_sock(sk); inet6_bind() 288 if (sk->sk_state != TCP_CLOSE || inet->inet_num) { inet6_bind() 300 if (sk->sk_ipv6only) { inet6_bind() 328 sk->sk_bound_dev_if = addr->sin6_scope_id; inet6_bind() 332 if (!sk->sk_bound_dev_if) { inet6_bind() 336 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); inet6_bind() 362 sk->sk_v6_rcv_saddr = addr->sin6_addr; inet6_bind() 368 if (sk->sk_prot->get_port(sk, snum)) { inet6_bind() 369 inet_reset_saddr(sk); inet6_bind() 375 sk->sk_userlocks |= SOCK_BINDADDR_LOCK; inet6_bind() 377 sk->sk_ipv6only = 1; inet6_bind() 380 sk->sk_userlocks |= SOCK_BINDPORT_LOCK; inet6_bind() 385 release_sock(sk); inet6_bind() 395 struct sock *sk = sock->sk; inet6_release() local 397 if (!sk) inet6_release() 401 ipv6_sock_mc_close(sk); inet6_release() 404 ipv6_sock_ac_close(sk); inet6_release() 410 void inet6_destroy_sock(struct sock *sk) inet6_destroy_sock() argument 412 struct ipv6_pinfo *np = inet6_sk(sk); inet6_destroy_sock() 427 fl6_free_socklist(sk); inet6_destroy_sock() 433 atomic_sub(opt->tot_len, &sk->sk_omem_alloc); inet6_destroy_sock() 447 struct sock *sk = sock->sk; inet6_getname() local 448 struct inet_sock *inet = inet_sk(sk); inet6_getname() 449 struct ipv6_pinfo *np = inet6_sk(sk); inet6_getname() 457 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && inet6_getname() 461 sin->sin6_addr = sk->sk_v6_daddr; inet6_getname() 465 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) inet6_getname() 468 sin->sin6_addr = sk->sk_v6_rcv_saddr; inet6_getname() 473 sk->sk_bound_dev_if); inet6_getname() 481 struct sock *sk = sock->sk; inet6_ioctl() local 482 struct net *net = sock_net(sk); inet6_ioctl() 486 return sock_get_timestamp(sk, (struct timeval __user *)arg); inet6_ioctl() 489 return sock_get_timestampns(sk, (struct timespec __user *)arg); inet6_ioctl() 503 if (!sk->sk_prot->ioctl) inet6_ioctl() 505 return sk->sk_prot->ioctl(sk, cmd, arg); inet6_ioctl() 641 int inet6_sk_rebuild_header(struct sock *sk) inet6_sk_rebuild_header() argument 643 struct ipv6_pinfo *np = inet6_sk(sk); inet6_sk_rebuild_header() 646 dst = __sk_dst_check(sk, 
np->dst_cookie); inet6_sk_rebuild_header() 649 struct inet_sock *inet = inet_sk(sk); inet6_sk_rebuild_header() 654 fl6.flowi6_proto = sk->sk_protocol; inet6_sk_rebuild_header() 655 fl6.daddr = sk->sk_v6_daddr; inet6_sk_rebuild_header() 658 fl6.flowi6_oif = sk->sk_bound_dev_if; inet6_sk_rebuild_header() 659 fl6.flowi6_mark = sk->sk_mark; inet6_sk_rebuild_header() 662 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); inet6_sk_rebuild_header() 669 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); inet6_sk_rebuild_header() 671 sk->sk_route_caps = 0; inet6_sk_rebuild_header() 672 sk->sk_err_soft = -PTR_ERR(dst); inet6_sk_rebuild_header() 676 __ip6_dst_store(sk, dst, NULL, NULL); inet6_sk_rebuild_header() 683 bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, ipv6_opt_accepted() argument 686 const struct ipv6_pinfo *np = inet6_sk(sk); ipv6_opt_accepted()
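
inet6_bind() in the af_inet6.c fragment derives sk->sk_bound_dev_if from sin6_scope_id and refuses a link-local bind without one. A hedged example of supplying that scope; "eth0", the fe80::1 address and port 7777 are placeholder values.

    /* Binding a link-local address needs an explicit scope: inet6_bind()
     * turns sin6_scope_id into sk->sk_bound_dev_if. */
    #include <net/if.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            struct sockaddr_in6 a;

            memset(&a, 0, sizeof(a));
            a.sin6_family = AF_INET6;
            a.sin6_port = htons(7777);
            inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
            a.sin6_scope_id = if_nametoindex("eth0");   /* required for fe80:: */
            return bind(fd, (struct sockaddr *)&a, sizeof(a)) ? 1 : 0;
    }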
|
H A D | datagram.c | 43 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) __ip6_datagram_connect() argument 46 struct inet_sock *inet = inet_sk(sk); __ip6_datagram_connect() 47 struct ipv6_pinfo *np = inet6_sk(sk); __ip6_datagram_connect() 57 if (__ipv6_only_sock(sk)) __ip6_datagram_connect() 59 err = __ip4_datagram_connect(sk, uaddr, addr_len); __ip6_datagram_connect() 73 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); __ip6_datagram_connect() 93 if (__ipv6_only_sock(sk)) { __ip6_datagram_connect() 101 err = __ip4_datagram_connect(sk, __ip6_datagram_connect() 109 ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr); __ip6_datagram_connect() 115 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) || __ip6_datagram_connect() 116 ipv6_mapped_addr_any(&sk->sk_v6_rcv_saddr)) { __ip6_datagram_connect() 118 &sk->sk_v6_rcv_saddr); __ip6_datagram_connect() 119 if (sk->sk_prot->rehash) __ip6_datagram_connect() 120 sk->sk_prot->rehash(sk); __ip6_datagram_connect() 129 if (sk->sk_bound_dev_if && __ip6_datagram_connect() 130 sk->sk_bound_dev_if != usin->sin6_scope_id) { __ip6_datagram_connect() 134 sk->sk_bound_dev_if = usin->sin6_scope_id; __ip6_datagram_connect() 137 if (!sk->sk_bound_dev_if && (addr_type & IPV6_ADDR_MULTICAST)) __ip6_datagram_connect() 138 sk->sk_bound_dev_if = np->mcast_oif; __ip6_datagram_connect() 141 if (!sk->sk_bound_dev_if) { __ip6_datagram_connect() 147 sk->sk_v6_daddr = *daddr; __ip6_datagram_connect() 157 fl6.flowi6_proto = sk->sk_protocol; __ip6_datagram_connect() 158 fl6.daddr = sk->sk_v6_daddr; __ip6_datagram_connect() 160 fl6.flowi6_oif = sk->sk_bound_dev_if; __ip6_datagram_connect() 161 fl6.flowi6_mark = sk->sk_mark; __ip6_datagram_connect() 171 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); __ip6_datagram_connect() 178 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); __ip6_datagram_connect() 190 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { __ip6_datagram_connect() 191 sk->sk_v6_rcv_saddr = fl6.saddr; __ip6_datagram_connect() 193 if (sk->sk_prot->rehash) __ip6_datagram_connect() 194 sk->sk_prot->rehash(sk); __ip6_datagram_connect() 197 ip6_dst_store(sk, dst, __ip6_datagram_connect() 198 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? 
__ip6_datagram_connect() 199 &sk->sk_v6_daddr : NULL, __ip6_datagram_connect() 206 sk->sk_state = TCP_ESTABLISHED; __ip6_datagram_connect() 207 ip6_set_txhash(sk); __ip6_datagram_connect() 213 int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ip6_datagram_connect() argument 217 lock_sock(sk); ip6_datagram_connect() 218 res = __ip6_datagram_connect(sk, uaddr, addr_len); ip6_datagram_connect() 219 release_sock(sk); ip6_datagram_connect() 224 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, ip6_datagram_connect_v6_only() argument 230 return ip6_datagram_connect(sk, uaddr, addr_len); ip6_datagram_connect_v6_only() 234 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, ipv6_icmp_error() argument 237 struct ipv6_pinfo *np = inet6_sk(sk); ipv6_icmp_error() 265 if (sock_queue_err_skb(sk, skb)) ipv6_icmp_error() 269 void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info) ipv6_local_error() argument 271 struct ipv6_pinfo *np = inet6_sk(sk); ipv6_local_error() 304 if (sock_queue_err_skb(sk, skb)) ipv6_local_error() 308 void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) ipv6_local_rxpmtu() argument 310 struct ipv6_pinfo *np = inet6_sk(sk); ipv6_local_rxpmtu() 386 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) ipv6_recv_error() argument 388 struct ipv6_pinfo *np = inet6_sk(sk); ipv6_recv_error() 400 skb = sock_dequeue_err_skb(sk); ipv6_recv_error() 413 sock_recv_timestamp(msg, sk, skb); ipv6_recv_error() 446 ip6_datagram_recv_common_ctl(sk, msg, skb); ipv6_recv_error() 450 ip6_datagram_recv_specific_ctl(sk, msg, skb); ipv6_recv_error() 457 if (inet_sk(sk)->cmsg_flags) ipv6_recv_error() 479 int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, ipv6_recv_rxpmtu() argument 482 struct ipv6_pinfo *np = inet6_sk(sk); ipv6_recv_rxpmtu() 503 sock_recv_timestamp(msg, sk, skb); ipv6_recv_rxpmtu() 527 void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg, ip6_datagram_recv_common_ctl() argument 530 struct ipv6_pinfo *np = inet6_sk(sk); ip6_datagram_recv_common_ctl() 552 void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, ip6_datagram_recv_specific_ctl() argument 555 struct ipv6_pinfo *np = inet6_sk(sk); ip6_datagram_recv_specific_ctl() 677 void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, ip6_datagram_recv_ctl() argument 680 ip6_datagram_recv_common_ctl(sk, msg, skb); ip6_datagram_recv_ctl() 681 ip6_datagram_recv_specific_ctl(sk, msg, skb); ip6_datagram_recv_ctl() 685 int ip6_datagram_send_ctl(struct net *net, struct sock *sk, ip6_datagram_send_ctl() argument 744 if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) && for_each_cmsghdr()
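
ipv6_icmp_error() above queues ICMPv6 errors on the socket error queue, and ipv6_recv_error() hands them to userspace once IPV6_RECVERR is enabled. A hedged sketch of the reading side: recvmsg(MSG_ERRQUEUE) and a walk over the control messages for the struct sock_extended_err payload. The draining call would normally follow a send that drew an ICMPv6 error.

    /* Drain one queued ICMPv6 error the way ipv6_recv_error() exposes it. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/errqueue.h>

    static void drain_one_error(int fd)
    {
            char data[256], ctl[512];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = ctl, .msg_controllen = sizeof(ctl),
            };
            struct cmsghdr *cm;

            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return;
            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
                    if (cm->cmsg_level == IPPROTO_IPV6 &&
                        cm->cmsg_type == IPV6_RECVERR) {
                            struct sock_extended_err *ee =
                                    (struct sock_extended_err *)CMSG_DATA(cm);
                            printf("err=%d origin=%u type=%u code=%u\n",
                                   ee->ee_errno, ee->ee_origin,
                                   ee->ee_type, ee->ee_code);
                    }
    }

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            int on = 1;

            setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
            drain_one_error(fd);
            return 0;
    }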
|
H A D | syncookies.c | 44 static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, get_cookie_sock() argument 48 struct inet_connection_sock *icsk = inet_csk(sk); get_cookie_sock() 51 child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); get_cookie_sock() 54 inet_csk_reqsk_queue_add(sk, req, child); get_cookie_sock() 134 __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp) cookie_v6_init_sequence() argument 139 tcp_synq_overflow(sk); cookie_v6_init_sequence() 140 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); cookie_v6_init_sequence() 156 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) cookie_v6_check() argument 161 struct ipv6_pinfo *np = inet6_sk(sk); cookie_v6_check() 162 struct tcp_sock *tp = tcp_sk(sk); cookie_v6_check() 165 struct sock *ret = sk; cookie_v6_check() 174 if (tcp_synq_no_recent_overflow(sk)) cookie_v6_check() 179 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); cookie_v6_check() 183 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); cookie_v6_check() 193 req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk); cookie_v6_check() 201 if (security_inet_conn_request(sk, skb, req)) cookie_v6_check() 209 if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || cookie_v6_check() 216 ireq->ir_iif = sk->sk_bound_dev_if; cookie_v6_check() 218 if (!sk->sk_bound_dev_if && cookie_v6_check() 222 ireq->ir_mark = inet_request_mark(sk, skb); cookie_v6_check() 247 fl6.flowi6_oif = sk->sk_bound_dev_if; cookie_v6_check() 250 fl6.fl6_sport = inet_sk(sk)->inet_sport; cookie_v6_check() 253 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); cookie_v6_check() 259 tcp_select_initial_window(tcp_full_space(sk), req->mss, cookie_v6_check() 265 ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst); cookie_v6_check() 267 ret = get_cookie_sock(sk, skb, req, dst); cookie_v6_check()
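
cookie_v6_init_sequence() and cookie_v6_check() above only come into play when SYN cookies are enabled; the switch is the tcp_syncookies sysctl, which despite its net.ipv4 path covers IPv6 as well. A hedged, privileged toggle via procfs:

    /* Enable SYN cookies (0 = off, 1 = on under backlog pressure,
     * 2 = unconditional). Shared by the IPv4 and IPv6 stacks. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("1\n", f);
            return fclose(f) ? 1 : 0;
    }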
|
H A D | ip6_output.c | 59 static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb) ip6_finish_output2() argument 73 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) && ip6_finish_output2() 85 sk, newskb, NULL, newskb->dev, ip6_finish_output2() 125 static int ip6_finish_output(struct sock *sk, struct sk_buff *skb) ip6_finish_output() argument 130 return ip6_fragment(sk, skb, ip6_finish_output2); ip6_finish_output() 132 return ip6_finish_output2(sk, skb); ip6_finish_output() 135 int ip6_output(struct sock *sk, struct sk_buff *skb) ip6_output() argument 146 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb, ip6_output() 156 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, ip6_xmit() argument 159 struct net *net = sock_net(sk); ip6_xmit() 160 struct ipv6_pinfo *np = inet6_sk(sk); ip6_xmit() 189 skb_set_owner_w(skb, sk); ip6_xmit() 220 skb->priority = sk->sk_priority; ip6_xmit() 221 skb->mark = sk->sk_mark; ip6_xmit() 227 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb, ip6_xmit() 232 ipv6_local_error(sk, EMSGSIZE, fl6, mtu); ip6_xmit() 246 struct sock *sk = ra->sk; ip6_call_ra_chain() local 247 if (sk && ra->sel == sel && ip6_call_ra_chain() 248 (!sk->sk_bound_dev_if || ip6_call_ra_chain() 249 sk->sk_bound_dev_if == skb->dev->ifindex)) { ip6_call_ra_chain() 255 last = sk; ip6_call_ra_chain() 320 static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb) ip6_forward_finish() argument 323 return dst_output_sk(sk, skb); ip6_forward_finish() 379 if (unlikely(skb->sk)) ip6_forward() 546 int ip6_fragment(struct sock *sk, struct sk_buff *skb, ip6_fragment() argument 551 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? ip6_fragment() 552 inet6_sk(skb->sk) : NULL; ip6_fragment() 573 if (skb->sk && dst_allfrag(skb_dst(skb))) ip6_fragment() 574 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); ip6_fragment() 610 BUG_ON(frag->sk); skb_walk_frags() 611 if (skb->sk) { skb_walk_frags() 612 frag->sk = skb->sk; skb_walk_frags() 676 err = output(sk, skb); 709 frag2->sk = NULL; skb_walk_frags() 771 if (skb->sk) 772 skb_set_owner_w(frag, skb->sk); 809 err = output(sk, frag); 836 static struct dst_entry *ip6_sk_dst_check(struct sock *sk, ip6_sk_dst_check() argument 840 struct ipv6_pinfo *np = inet6_sk(sk); ip6_sk_dst_check() 882 static int ip6_dst_lookup_tail(struct sock *sk, ip6_dst_lookup_tail() argument 885 struct net *net = sock_net(sk); ip6_dst_lookup_tail() 907 *dst = ip6_route_output(net, sk, fl6); ip6_dst_lookup_tail() 910 sk ? 
inet6_sk(sk)->srcprefs : 0, ip6_dst_lookup_tail() 929 *dst = ip6_route_output_flags(net, sk, fl6, flags); ip6_dst_lookup_tail() 970 *dst = ip6_route_output(net, sk, &fl_gw6); ip6_dst_lookup_tail() 990 * @sk: socket which provides route info 998 int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6) ip6_dst_lookup() argument 1001 return ip6_dst_lookup_tail(sk, dst, fl6); ip6_dst_lookup() 1007 * @sk: socket which provides route info 1016 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, ip6_dst_lookup_flow() argument 1022 err = ip6_dst_lookup_tail(sk, &dst, fl6); ip6_dst_lookup_flow() 1028 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); ip6_dst_lookup_flow() 1034 * @sk: socket which provides the dst cache and route info 1046 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, ip6_sk_dst_lookup_flow() argument 1049 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); ip6_sk_dst_lookup_flow() 1052 dst = ip6_sk_dst_check(sk, dst, fl6); ip6_sk_dst_lookup_flow() 1054 err = ip6_dst_lookup_tail(sk, &dst, fl6); ip6_sk_dst_lookup_flow() 1060 return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); ip6_sk_dst_lookup_flow() 1064 static inline int ip6_ufo_append_data(struct sock *sk, ip6_ufo_append_data() argument 1083 skb = sock_alloc_send_skb(sk, ip6_ufo_append_data() 1116 ipv6_select_ident(sock_net(sk), &fhdr, rt); ip6_ufo_append_data() 1120 return skb_append_datato_frags(sk, skb, getfrag, from, ip6_ufo_append_data() 1160 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, ip6_setup_cork() argument 1165 struct ipv6_pinfo *np = inet6_sk(sk); ip6_setup_cork() 1175 v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation); ip6_setup_cork() 1184 sk->sk_allocation); ip6_setup_cork() 1189 sk->sk_allocation); ip6_setup_cork() 1194 sk->sk_allocation); ip6_setup_cork() 1199 sk->sk_allocation); ip6_setup_cork() 1228 static int __ip6_append_data(struct sock *sk, __ip6_append_data() argument 1278 if (ip6_sk_ignore_df(sk)) __ip6_append_data() 1285 (sk->sk_protocol == IPPROTO_UDP || __ip6_append_data() 1286 sk->sk_protocol == IPPROTO_RAW)) { __ip6_append_data() 1287 ipv6_local_rxpmtu(sk, fl6, mtu - headersize + __ip6_append_data() 1294 ipv6_local_error(sk, EMSGSIZE, fl6, __ip6_append_data() 1301 if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) { __ip6_append_data() 1302 sock_tx_timestamp(sk, &tx_flags); __ip6_append_data() 1304 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) __ip6_append_data() 1305 tskey = sk->sk_tskey++; __ip6_append_data() 1313 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && __ip6_append_data() 1337 (sk->sk_protocol == IPPROTO_UDP) && __ip6_append_data() 1339 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { __ip6_append_data() 1340 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, __ip6_append_data() 1412 skb = sock_alloc_send_skb(sk, __ip6_append_data() 1417 if (atomic_read(&sk->sk_wmem_alloc) <= __ip6_append_data() 1418 2 * sk->sk_sndbuf) __ip6_append_data() 1419 skb = sock_wmalloc(sk, __ip6_append_data() 1421 sk->sk_allocation); __ip6_append_data() 1502 if (!sk_page_frag_refill(sk, pfrag)) __ip6_append_data() 1527 atomic_add(copy, &sk->sk_wmem_alloc); __ip6_append_data() 1539 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); __ip6_append_data() 1543 int ip6_append_data(struct sock *sk, ip6_append_data() argument 1550 struct inet_sock *inet = inet_sk(sk); ip6_append_data() 1551 struct ipv6_pinfo 
*np = inet6_sk(sk); ip6_append_data() 1557 if (skb_queue_empty(&sk->sk_write_queue)) { ip6_append_data() 1561 err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit, ip6_append_data() 1574 return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base, ip6_append_data() 1575 &np->cork, sk_page_frag(sk), getfrag, ip6_append_data() 1600 struct sk_buff *__ip6_make_skb(struct sock *sk, __ip6_make_skb() argument 1608 struct ipv6_pinfo *np = inet6_sk(sk); __ip6_make_skb() 1609 struct net *net = sock_net(sk); __ip6_make_skb() 1632 tmp_skb->sk = NULL; __ip6_make_skb() 1636 skb->ignore_df = ip6_sk_ignore_df(sk); __ip6_make_skb() 1657 skb->priority = sk->sk_priority; __ip6_make_skb() 1658 skb->mark = sk->sk_mark; __ip6_make_skb() 1676 struct net *net = sock_net(skb->sk); ip6_send_skb() 1692 int ip6_push_pending_frames(struct sock *sk) ip6_push_pending_frames() argument 1696 skb = ip6_finish_skb(sk); ip6_push_pending_frames() 1704 static void __ip6_flush_pending_frames(struct sock *sk, __ip6_flush_pending_frames() argument 1713 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)), __ip6_flush_pending_frames() 1721 void ip6_flush_pending_frames(struct sock *sk) ip6_flush_pending_frames() argument 1723 __ip6_flush_pending_frames(sk, &sk->sk_write_queue, ip6_flush_pending_frames() 1724 &inet_sk(sk)->cork, &inet6_sk(sk)->cork); ip6_flush_pending_frames() 1728 struct sk_buff *ip6_make_skb(struct sock *sk, ip6_make_skb() argument 1752 err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6); ip6_make_skb() 1757 dontfrag = inet6_sk(sk)->dontfrag; ip6_make_skb() 1759 err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork, ip6_make_skb() 1764 __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork); ip6_make_skb() 1768 return __ip6_make_skb(sk, &queue, &cork, &v6_cork); ip6_make_skb()
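
ip6_append_data() and ip6_push_pending_frames() in the ip6_output.c fragment implement corking: data accumulates on sk_write_queue until the cork is released, then leaves as one datagram. A hedged userspace view through UDP_CORK; the loopback address and port 9999 are placeholders, and the fallback define covers libc headers without the constant.

    /* Two writes coalesced into one UDP datagram: with UDP_CORK set each
     * send() lands in ip6_append_data()'s queue; clearing the cork runs
     * ip6_push_pending_frames(). */
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef UDP_CORK
    #define UDP_CORK 1
    #endif

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            int on = 1, off = 0;
            struct sockaddr_in6 dst;

            memset(&dst, 0, sizeof(dst));
            dst.sin6_family = AF_INET6;
            dst.sin6_port = htons(9999);
            inet_pton(AF_INET6, "::1", &dst.sin6_addr);
            connect(fd, (struct sockaddr *)&dst, sizeof(dst));

            setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
            send(fd, "part one, ", 10, 0);  /* queued, not yet sent */
            send(fd, "part two", 8, 0);     /* appended to the same datagram */
            setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off)); /* flush */
            return 0;
    }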
|
H A D | ping.c | 59 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, dummy_ipv6_recv_error() argument 64 static void dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, dummy_ip6_datagram_recv_ctl() argument 72 static void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, dummy_ipv6_icmp_error() argument 80 int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ping_v6_sendmsg() argument 82 struct inet_sock *inet = inet_sk(sk); ping_v6_sendmsg() 83 struct ipv6_pinfo *np = inet6_sk(sk); ping_v6_sendmsg() 95 pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); ping_v6_sendmsg() 109 if (sk->sk_bound_dev_if && ping_v6_sendmsg() 110 sk->sk_bound_dev_if != u->sin6_scope_id) { ping_v6_sendmsg() 116 if (sk->sk_state != TCP_ESTABLISHED) ping_v6_sendmsg() 118 daddr = &sk->sk_v6_daddr; ping_v6_sendmsg() 122 iif = sk->sk_bound_dev_if; ping_v6_sendmsg() 137 fl6.flowi6_mark = sk->sk_mark; ping_v6_sendmsg() 140 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); ping_v6_sendmsg() 147 dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr); ping_v6_sendmsg() 152 np = inet6_sk(sk); ping_v6_sendmsg() 172 lock_sock(sk); ping_v6_sendmsg() 173 err = ip6_append_data(sk, ping_getfrag, &pfh, len, ping_v6_sendmsg() 179 ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, ping_v6_sendmsg() 181 ip6_flush_pending_frames(sk); ping_v6_sendmsg() 183 err = icmpv6_push_pending_frames(sk, &fl6, ping_v6_sendmsg() 187 release_sock(sk); ping_v6_sendmsg()
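
ping_v6_sendmsg() above backs the unprivileged "ping socket" type (SOCK_DGRAM with IPPROTO_ICMPV6), available to callers whose group falls inside net.ipv4.ping_group_range; the kernel supplies the echo identifier and checksum. A hedged echo-request sketch with "::1" as a placeholder target:

    /* One ICMPv6 echo request through a ping socket: userspace only
     * builds type and sequence; ping_v6_sendmsg() fills id and checksum. */
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/icmp6.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);
            struct icmp6_hdr hdr;
            struct sockaddr_in6 dst;

            if (fd < 0) {
                    perror("socket");  /* EACCES outside ping_group_range */
                    return 1;
            }
            memset(&hdr, 0, sizeof(hdr));
            hdr.icmp6_type = ICMP6_ECHO_REQUEST;
            hdr.icmp6_seq = htons(1);

            memset(&dst, 0, sizeof(dst));
            dst.sin6_family = AF_INET6;
            inet_pton(AF_INET6, "::1", &dst.sin6_addr);
            return sendto(fd, &hdr, sizeof(hdr), 0,
                          (struct sockaddr *)&dst, sizeof(dst)) < 0;
    }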
|
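ping_v6_sendmsg() above serves unprivileged ICMPv6 "ping" sockets. A minimal userspace sketch, assuming the caller's group falls inside the net.ipv4.ping_group_range sysctl (which also gates IPv6 ping sockets):

        /*
         * Userspace sketch: ICMPv6 echo request over an unprivileged
         * ping socket; the kernel assigns the echo id and checksum.
         */
        #include <arpa/inet.h>
        #include <netinet/icmp6.h>
        #include <netinet/in.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);
                struct sockaddr_in6 dst = { .sin6_family = AF_INET6 };
                struct icmp6_hdr req = { .icmp6_type = ICMP6_ECHO_REQUEST };

                inet_pton(AF_INET6, "::1", &dst.sin6_addr);
                sendto(fd, &req, sizeof(req), 0,
                       (struct sockaddr *)&dst, sizeof(dst));
                close(fd);
                return 0;
        }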
H A D | icmp.c | 113 struct sock *sk; icmpv6_xmit_lock() local 117 sk = icmpv6_sk(net); icmpv6_xmit_lock() 118 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { icmpv6_xmit_lock() 126 return sk; icmpv6_xmit_lock() 129 static __inline__ void icmpv6_xmit_unlock(struct sock *sk) icmpv6_xmit_unlock() argument 131 spin_unlock_bh(&sk->sk_lock.slock); icmpv6_xmit_unlock() 172 static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, icmpv6_xrlim_allow() argument 175 struct net *net = sock_net(sk); icmpv6_xrlim_allow() 192 dst = ip6_route_output(net, sk, fl6); icmpv6_xrlim_allow() 238 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, icmpv6_push_pending_frames() argument 245 skb = skb_peek(&sk->sk_write_queue); icmpv6_push_pending_frames() 253 if (skb_queue_len(&sk->sk_write_queue) == 1) { icmpv6_push_pending_frames() 263 skb_queue_walk(&sk->sk_write_queue, skb) { icmpv6_push_pending_frames() 274 ip6_push_pending_frames(sk); icmpv6_push_pending_frames() 325 struct sock *sk, icmpv6_route_lookup() 332 err = ip6_dst_lookup(sk, &dst, fl6); icmpv6_route_lookup() 349 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0); icmpv6_route_lookup() 364 err = ip6_dst_lookup(sk, &dst2, &fl2); icmpv6_route_lookup() 368 dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP); icmpv6_route_lookup() 395 struct sock *sk; icmp6_send() local 480 sk = icmpv6_xmit_lock(net); icmp6_send() 481 if (!sk) icmp6_send() 483 sk->sk_mark = mark; icmp6_send() 484 np = inet6_sk(sk); icmp6_send() 486 if (!icmpv6_xrlim_allow(sk, type, &fl6)) icmp6_send() 499 dst = icmpv6_route_lookup(net, skb, sk, &fl6); icmp6_send() 519 err = ip6_append_data(sk, icmpv6_getfrag, &msg, icmp6_send() 526 ip6_flush_pending_frames(sk); icmp6_send() 528 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, icmp6_send() 535 icmpv6_xmit_unlock(sk); icmp6_send() 549 struct sock *sk; icmpv6_echo_reply() local 583 sk = icmpv6_xmit_lock(net); icmpv6_echo_reply() 584 if (!sk) icmpv6_echo_reply() 586 sk->sk_mark = mark; icmpv6_echo_reply() 587 np = inet6_sk(sk); icmpv6_echo_reply() 594 err = ip6_dst_lookup(sk, &dst, &fl6); icmpv6_echo_reply() 597 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0); icmpv6_echo_reply() 610 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), icmpv6_echo_reply() 617 ip6_flush_pending_frames(sk); icmpv6_echo_reply() 619 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, icmpv6_echo_reply() 624 icmpv6_xmit_unlock(sk); icmpv6_echo_reply() 813 void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, icmpv6_flow_init() argument 826 security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); icmpv6_flow_init() 836 struct sock *sk; icmpv6_sk_init() local 845 err = inet_ctl_sock_create(&sk, PF_INET6, for_each_possible_cpu() 853 net->ipv6.icmp_sk[i] = sk; for_each_possible_cpu() 856 * Split off their lock-class, because sk->sk_dst_lock for_each_possible_cpu() 861 lockdep_set_class(&sk->sk_dst_lock, for_each_possible_cpu() 867 sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024); for_each_possible_cpu() 323 icmpv6_route_lookup(struct net *net, struct sk_buff *skb, struct sock *sk, struct flowi6 *fl6) icmpv6_route_lookup() argument
|
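For readability, a commented reconstruction of the icmpv6_xmit_lock() fragments above; the local_bh_disable() call is inferred from the spin_unlock_bh() on the unlock side:

        /*
         * local_bh_disable() pins us to this CPU's per-netns ICMPv6
         * socket; if that socket is already locked on this CPU we were
         * re-entered from its own output path, so give up rather than
         * deadlock.
         */
        static struct sock *icmpv6_xmit_lock(struct net *net)
        {
                struct sock *sk;

                local_bh_disable();
                sk = icmpv6_sk(net);
                if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
                        /* dst output path already holds the lock */
                        local_bh_enable();
                        return NULL;
                }
                return sk;
        }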
ip6_udp_tunnel.c |
     26  sk_change_net(sock->sk, net);  udp_sock_create6()
     49  udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums);  udp_sock_create6()
     50  udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums);  udp_sock_create6()
     58  sk_release_kernel(sock->sk);  udp_sock_create6()
     65  int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,  udp_tunnel6_xmit_skb() argument
    101  ip6tunnel_xmit(sk, skb, dev);  udp_tunnel6_xmit_skb()
|
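udp_sock_create6() above is reached through the udp_tunnel helpers. A kernel-side sketch of how a tunnel driver of this era might request such a socket; the udp_port_cfg field names are assumptions based on include/net/udp_tunnel.h, and the port is a placeholder:

        #include <net/udp_tunnel.h>

        /* Ask for a kernel-owned UDPv6 socket with checksums on both
         * directions; udp_sock_create() dispatches to the IPv6 variant
         * based on cfg.family. */
        static struct socket *make_tunnel_sock(struct net *net)
        {
                struct udp_port_cfg cfg = {
                        .family                = AF_INET6,
                        .local_udp_port        = htons(4789),
                        .use_udp6_tx_checksums = true,
                        .use_udp6_rx_checksums = true,
                };
                struct socket *sock;

                if (udp_sock_create(net, &cfg, &sock) < 0)
                        return NULL;
                return sock;
        }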
/linux-4.1.27/net/l2tp/ |
H A D | l2tp_ip.c | 45 static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) l2tp_ip_sk() argument 47 return (struct l2tp_ip_sock *)sk; l2tp_ip_sk() 52 struct sock *sk; __l2tp_ip_bind_lookup() local 54 sk_for_each_bound(sk, &l2tp_ip_bind_table) { __l2tp_ip_bind_lookup() 55 struct inet_sock *inet = inet_sk(sk); __l2tp_ip_bind_lookup() 56 struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); __l2tp_ip_bind_lookup() 62 net_eq(sock_net(sk), net) && __l2tp_ip_bind_lookup() 64 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) __l2tp_ip_bind_lookup() 68 sk = NULL; __l2tp_ip_bind_lookup() 70 return sk; __l2tp_ip_bind_lookup() 75 struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id); l2tp_ip_bind_lookup() local 76 if (sk) l2tp_ip_bind_lookup() 77 sock_hold(sk); l2tp_ip_bind_lookup() 79 return sk; l2tp_ip_bind_lookup() 118 struct sock *sk; l2tp_ip_recv() local 178 sk = tunnel->sock; l2tp_ip_recv() 183 sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); l2tp_ip_recv() 187 if (sk == NULL) l2tp_ip_recv() 190 sock_hold(sk); l2tp_ip_recv() 192 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) l2tp_ip_recv() 197 return sk_receive_skb(sk, skb, 1); l2tp_ip_recv() 200 sock_put(sk); l2tp_ip_recv() 207 static int l2tp_ip_open(struct sock *sk) l2tp_ip_open() argument 210 inet_sk(sk)->inet_num = IPPROTO_L2TP; l2tp_ip_open() 213 sk_add_node(sk, &l2tp_ip_table); l2tp_ip_open() 219 static void l2tp_ip_close(struct sock *sk, long timeout) l2tp_ip_close() argument 222 hlist_del_init(&sk->sk_bind_node); l2tp_ip_close() 223 sk_del_node_init(sk); l2tp_ip_close() 225 sk_common_release(sk); l2tp_ip_close() 228 static void l2tp_ip_destroy_sock(struct sock *sk) l2tp_ip_destroy_sock() argument 231 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); l2tp_ip_destroy_sock() 233 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) l2tp_ip_destroy_sock() 238 sock_put(sk); l2tp_ip_destroy_sock() 241 sk_refcnt_debug_dec(sk); l2tp_ip_destroy_sock() 244 static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) l2tp_ip_bind() argument 246 struct inet_sock *inet = inet_sk(sk); l2tp_ip_bind() 248 struct net *net = sock_net(sk); l2tp_ip_bind() 252 if (!sock_flag(sk, SOCK_ZAPPED)) l2tp_ip_bind() 262 sk->sk_bound_dev_if, addr->l2tp_conn_id)) l2tp_ip_bind() 267 lock_sock(sk); l2tp_ip_bind() 268 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) l2tp_ip_bind() 281 sk_dst_reset(sk); l2tp_ip_bind() 283 l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; l2tp_ip_bind() 286 sk_add_bind_node(sk, &l2tp_ip_bind_table); l2tp_ip_bind() 287 sk_del_node_init(sk); l2tp_ip_bind() 290 sock_reset_flag(sk, SOCK_ZAPPED); l2tp_ip_bind() 293 release_sock(sk); l2tp_ip_bind() 303 static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) l2tp_ip_connect() argument 308 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ l2tp_ip_connect() 317 rc = ip4_datagram_connect(sk, uaddr, addr_len); l2tp_ip_connect() 321 lock_sock(sk); l2tp_ip_connect() 323 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; l2tp_ip_connect() 326 hlist_del_init(&sk->sk_bind_node); l2tp_ip_connect() 327 sk_add_bind_node(sk, &l2tp_ip_bind_table); l2tp_ip_connect() 330 release_sock(sk); l2tp_ip_connect() 334 static int l2tp_ip_disconnect(struct sock *sk, int flags) l2tp_ip_disconnect() argument 336 if (sock_flag(sk, SOCK_ZAPPED)) l2tp_ip_disconnect() 339 return udp_disconnect(sk, flags); l2tp_ip_disconnect() 345 struct sock *sk = sock->sk; l2tp_ip_getname() 
local 346 struct inet_sock *inet = inet_sk(sk); l2tp_ip_getname() 347 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); l2tp_ip_getname() 368 static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) l2tp_ip_backlog_recv() argument 373 rc = sock_queue_rcv_skb(sk, skb); l2tp_ip_backlog_recv() 380 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); l2tp_ip_backlog_recv() 388 static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) l2tp_ip_sendmsg() argument 392 struct inet_sock *inet = inet_sk(sk); l2tp_ip_sendmsg() 398 lock_sock(sk); l2tp_ip_sendmsg() 401 if (sock_flag(sk, SOCK_DEAD)) l2tp_ip_sendmsg() 420 if (sk->sk_state != TCP_ESTABLISHED) l2tp_ip_sendmsg() 429 skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) + l2tp_ip_sendmsg() 452 rt = (struct rtable *) __sk_dst_check(sk, 0); l2tp_ip_sendmsg() 468 rt = ip_route_output_ports(sock_net(sk), fl4, sk, l2tp_ip_sendmsg() 471 sk->sk_protocol, RT_CONN_FLAGS(sk), l2tp_ip_sendmsg() 472 sk->sk_bound_dev_if); l2tp_ip_sendmsg() 476 sk_setup_caps(sk, &rt->dst); l2tp_ip_sendmsg() 490 rc = ip_queue_xmit(sk, skb, &inet->cork.fl); l2tp_ip_sendmsg() 498 release_sock(sk); l2tp_ip_sendmsg() 503 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); l2tp_ip_sendmsg() 509 static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg, l2tp_ip_recvmsg() argument 512 struct inet_sock *inet = inet_sk(sk); l2tp_ip_recvmsg() 521 skb = skb_recv_datagram(sk, flags, noblock, &err); l2tp_ip_recvmsg() 535 sock_recv_timestamp(msg, sk, skb); l2tp_ip_recvmsg() 550 skb_free_datagram(sk, skb); l2tp_ip_recvmsg()
|
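l2tp_ip_bind() above implements bind() for unmanaged L2TPv3-over-IP sockets. A minimal userspace sketch, assuming struct sockaddr_l2tpip from <linux/l2tp.h>; the connection id is a placeholder:

        /*
         * Userspace sketch: bind an L2TPv3-over-IP socket, the path
         * served by l2tp_ip_bind() above.
         */
        #include <linux/l2tp.h>         /* sockaddr_l2tpip, IPPROTO_L2TP */
        #include <netinet/in.h>
        #include <string.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
                struct sockaddr_l2tpip me;

                memset(&me, 0, sizeof(me));
                me.l2tp_family      = AF_INET;
                me.l2tp_addr.s_addr = htonl(INADDR_ANY);
                me.l2tp_conn_id     = 42;   /* local tunnel connection id */

                bind(fd, (struct sockaddr *)&me, sizeof(me));
                close(fd);
                return 0;
        }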
H A D | l2tp_ip6.c | 54 static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk) l2tp_ip6_sk() argument 56 return (struct l2tp_ip6_sock *)sk; l2tp_ip6_sk() 63 struct sock *sk; __l2tp_ip6_bind_lookup() local 65 sk_for_each_bound(sk, &l2tp_ip6_bind_table) { __l2tp_ip6_bind_lookup() 66 const struct in6_addr *addr = inet6_rcv_saddr(sk); __l2tp_ip6_bind_lookup() 67 struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); __l2tp_ip6_bind_lookup() 73 net_eq(sock_net(sk), net) && __l2tp_ip6_bind_lookup() 75 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) __l2tp_ip6_bind_lookup() 79 sk = NULL; __l2tp_ip6_bind_lookup() 81 return sk; __l2tp_ip6_bind_lookup() 88 struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id); l2tp_ip6_bind_lookup() local 89 if (sk) l2tp_ip6_bind_lookup() 90 sock_hold(sk); l2tp_ip6_bind_lookup() 92 return sk; l2tp_ip6_bind_lookup() 130 struct sock *sk; l2tp_ip6_recv() local 190 sk = tunnel->sock; l2tp_ip6_recv() 195 sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, l2tp_ip6_recv() 200 if (sk == NULL) l2tp_ip6_recv() 203 sock_hold(sk); l2tp_ip6_recv() 205 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) l2tp_ip6_recv() 210 return sk_receive_skb(sk, skb, 1); l2tp_ip6_recv() 213 sock_put(sk); l2tp_ip6_recv() 220 static int l2tp_ip6_open(struct sock *sk) l2tp_ip6_open() argument 223 inet_sk(sk)->inet_num = IPPROTO_L2TP; l2tp_ip6_open() 226 sk_add_node(sk, &l2tp_ip6_table); l2tp_ip6_open() 232 static void l2tp_ip6_close(struct sock *sk, long timeout) l2tp_ip6_close() argument 235 hlist_del_init(&sk->sk_bind_node); l2tp_ip6_close() 236 sk_del_node_init(sk); l2tp_ip6_close() 239 sk_common_release(sk); l2tp_ip6_close() 242 static void l2tp_ip6_destroy_sock(struct sock *sk) l2tp_ip6_destroy_sock() argument 244 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); l2tp_ip6_destroy_sock() 246 lock_sock(sk); l2tp_ip6_destroy_sock() 247 ip6_flush_pending_frames(sk); l2tp_ip6_destroy_sock() 248 release_sock(sk); l2tp_ip6_destroy_sock() 252 sock_put(sk); l2tp_ip6_destroy_sock() 255 inet6_destroy_sock(sk); l2tp_ip6_destroy_sock() 258 static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) l2tp_ip6_bind() argument 260 struct inet_sock *inet = inet_sk(sk); l2tp_ip6_bind() 261 struct ipv6_pinfo *np = inet6_sk(sk); l2tp_ip6_bind() 267 if (!sock_flag(sk, SOCK_ZAPPED)) l2tp_ip6_bind() 287 sk->sk_bound_dev_if, addr->l2tp_conn_id)) l2tp_ip6_bind() 291 lock_sock(sk); l2tp_ip6_bind() 294 if (sk->sk_state != TCP_CLOSE) l2tp_ip6_bind() 308 sk->sk_bound_dev_if = addr->l2tp_scope_id; l2tp_ip6_bind() 313 if (!sk->sk_bound_dev_if) l2tp_ip6_bind() 317 dev = dev_get_by_index_rcu(sock_net(sk), l2tp_ip6_bind() 318 sk->sk_bound_dev_if); l2tp_ip6_bind() 328 if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0)) l2tp_ip6_bind() 334 sk->sk_v6_rcv_saddr = addr->l2tp_addr; l2tp_ip6_bind() 337 l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id; l2tp_ip6_bind() 340 sk_add_bind_node(sk, &l2tp_ip6_bind_table); l2tp_ip6_bind() 341 sk_del_node_init(sk); l2tp_ip6_bind() 344 sock_reset_flag(sk, SOCK_ZAPPED); l2tp_ip6_bind() 345 release_sock(sk); l2tp_ip6_bind() 351 release_sock(sk); l2tp_ip6_bind() 359 static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, l2tp_ip6_connect() argument 368 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ l2tp_ip6_connect() 387 rc = ip6_datagram_connect(sk, uaddr, addr_len); l2tp_ip6_connect() 389 lock_sock(sk); l2tp_ip6_connect() 391 l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 
l2tp_ip6_connect() 394 hlist_del_init(&sk->sk_bind_node); l2tp_ip6_connect() 395 sk_add_bind_node(sk, &l2tp_ip6_bind_table); l2tp_ip6_connect() 398 release_sock(sk); l2tp_ip6_connect() 403 static int l2tp_ip6_disconnect(struct sock *sk, int flags) l2tp_ip6_disconnect() argument 405 if (sock_flag(sk, SOCK_ZAPPED)) l2tp_ip6_disconnect() 408 return udp_disconnect(sk, flags); l2tp_ip6_disconnect() 415 struct sock *sk = sock->sk; l2tp_ip6_getname() local 416 struct ipv6_pinfo *np = inet6_sk(sk); l2tp_ip6_getname() 417 struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk); l2tp_ip6_getname() 427 lsa->l2tp_addr = sk->sk_v6_daddr; l2tp_ip6_getname() 431 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) l2tp_ip6_getname() 434 lsa->l2tp_addr = sk->sk_v6_rcv_saddr; l2tp_ip6_getname() 439 lsa->l2tp_scope_id = sk->sk_bound_dev_if; l2tp_ip6_getname() 444 static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb) l2tp_ip6_backlog_recv() argument 449 rc = sock_queue_rcv_skb(sk, skb); l2tp_ip6_backlog_recv() 461 static int l2tp_ip6_push_pending_frames(struct sock *sk) l2tp_ip6_push_pending_frames() argument 467 skb = skb_peek(&sk->sk_write_queue); l2tp_ip6_push_pending_frames() 474 err = ip6_push_pending_frames(sk); l2tp_ip6_push_pending_frames() 483 static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) l2tp_ip6_sendmsg() argument 488 struct ipv6_pinfo *np = inet6_sk(sk); l2tp_ip6_sendmsg() 517 fl6.flowi6_mark = sk->sk_mark; l2tp_ip6_sendmsg() 530 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); l2tp_ip6_sendmsg() 538 * sk->sk_dst_cache. l2tp_ip6_sendmsg() 540 if (sk->sk_state == TCP_ESTABLISHED && l2tp_ip6_sendmsg() 541 ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) l2tp_ip6_sendmsg() 542 daddr = &sk->sk_v6_daddr; l2tp_ip6_sendmsg() 549 if (sk->sk_state != TCP_ESTABLISHED) l2tp_ip6_sendmsg() 552 daddr = &sk->sk_v6_daddr; l2tp_ip6_sendmsg() 557 fl6.flowi6_oif = sk->sk_bound_dev_if; l2tp_ip6_sendmsg() 564 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, l2tp_ip6_sendmsg() 571 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); l2tp_ip6_sendmsg() 587 fl6.flowi6_proto = sk->sk_protocol; l2tp_ip6_sendmsg() 602 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); l2tp_ip6_sendmsg() 604 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); l2tp_ip6_sendmsg() 623 lock_sock(sk); l2tp_ip6_sendmsg() 624 err = ip6_append_data(sk, ip_generic_getfrag, msg, l2tp_ip6_sendmsg() 629 ip6_flush_pending_frames(sk); l2tp_ip6_sendmsg() 631 err = l2tp_ip6_push_pending_frames(sk); l2tp_ip6_sendmsg() 632 release_sock(sk); l2tp_ip6_sendmsg() 649 static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, l2tp_ip6_recvmsg() argument 652 struct ipv6_pinfo *np = inet6_sk(sk); l2tp_ip6_recvmsg() 665 return ipv6_recv_error(sk, msg, len, addr_len); l2tp_ip6_recvmsg() 667 skb = skb_recv_datagram(sk, flags, noblock, &err); l2tp_ip6_recvmsg() 681 sock_recv_timestamp(msg, sk, skb); l2tp_ip6_recvmsg() 696 ip6_datagram_recv_ctl(sk, msg, skb); l2tp_ip6_recvmsg() 701 skb_free_datagram(sk, skb); l2tp_ip6_recvmsg()
|
H A D | l2tp_ppp.c | 143 static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) pppol2tp_sock_to_session() argument 147 if (sk == NULL) pppol2tp_sock_to_session() 150 sock_hold(sk); pppol2tp_sock_to_session() 151 session = (struct l2tp_session *)(sk->sk_user_data); pppol2tp_sock_to_session() 153 sock_put(sk); pppol2tp_sock_to_session() 193 struct sock *sk = sock->sk; pppol2tp_recvmsg() local 196 if (sk->sk_state & PPPOX_BOUND) pppol2tp_recvmsg() 200 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, pppol2tp_recvmsg() 222 struct sock *sk = NULL; pppol2tp_recv() local 227 sk = ps->sock; pppol2tp_recv() 228 if (sk == NULL) pppol2tp_recv() 231 if (sk->sk_state & PPPOX_BOUND) { pppol2tp_recv() 253 po = pppox_sk(sk); pppol2tp_recv() 260 if (sock_queue_rcv_skb(sk, skb) < 0) { pppol2tp_recv() 301 struct sock *sk = sock->sk; pppol2tp_sendmsg() local 310 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) pppol2tp_sendmsg() 315 session = pppol2tp_sock_to_session(sk); pppol2tp_sendmsg() 328 skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + pppol2tp_sendmsg() 359 sock_put(sk); pppol2tp_sendmsg() 366 sock_put(sk); pppol2tp_sendmsg() 388 struct sock *sk = (struct sock *) chan->private; pppol2tp_xmit() local 395 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) pppol2tp_xmit() 399 session = pppol2tp_sock_to_session(sk); pppol2tp_xmit() 430 sock_put(sk); pppol2tp_xmit() 436 sock_put(sk); pppol2tp_xmit() 452 struct sock *sk = ps->sock; pppol2tp_session_close() local 453 struct socket *sock = sk->sk_socket; pppol2tp_session_close() 467 static void pppol2tp_session_destruct(struct sock *sk) pppol2tp_session_destruct() argument 469 struct l2tp_session *session = sk->sk_user_data; pppol2tp_session_destruct() 471 sk->sk_user_data = NULL; pppol2tp_session_destruct() 481 struct sock *sk = sock->sk; pppol2tp_release() local 485 if (!sk) pppol2tp_release() 489 lock_sock(sk); pppol2tp_release() 490 if (sock_flag(sk, SOCK_DEAD) != 0) pppol2tp_release() 493 pppox_unbind_sock(sk); pppol2tp_release() 496 sk->sk_state = PPPOX_DEAD; pppol2tp_release() 497 sock_orphan(sk); pppol2tp_release() 498 sock->sk = NULL; pppol2tp_release() 500 session = pppol2tp_sock_to_session(sk); pppol2tp_release() 506 sock_put(sk); pppol2tp_release() 508 skb_queue_purge(&sk->sk_receive_queue); pppol2tp_release() 509 skb_queue_purge(&sk->sk_write_queue); pppol2tp_release() 511 release_sock(sk); pppol2tp_release() 517 sock_put(sk); pppol2tp_release() 522 release_sock(sk); pppol2tp_release() 532 static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb) pppol2tp_backlog_recv() argument 536 rc = l2tp_udp_encap_recv(sk, skb); pppol2tp_backlog_recv() 548 struct sock *sk; pppol2tp_create() local 550 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto); pppol2tp_create() 551 if (!sk) pppol2tp_create() 554 sock_init_data(sock, sk); pppol2tp_create() 559 sk->sk_backlog_rcv = pppol2tp_backlog_recv; pppol2tp_create() 560 sk->sk_protocol = PX_PROTO_OL2TP; pppol2tp_create() 561 sk->sk_family = PF_PPPOX; pppol2tp_create() 562 sk->sk_state = PPPOX_NONE; pppol2tp_create() 563 sk->sk_type = SOCK_STREAM; pppol2tp_create() 564 sk->sk_destruct = pppol2tp_session_destruct; pppol2tp_create() 591 struct sock *sk = sock->sk; pppol2tp_connect() local 593 struct pppox_sock *po = pppox_sk(sk); pppol2tp_connect() 605 lock_sock(sk); pppol2tp_connect() 613 if (sk->sk_state & PPPOX_CONNECTED) pppol2tp_connect() 618 if (sk->sk_user_data) pppol2tp_connect() 670 tunnel = 
l2tp_tunnel_find(sock_net(sk), tunnel_id); pppol2tp_connect() 682 error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); pppol2tp_connect() 709 session = l2tp_session_find(sock_net(sk), tunnel, session_id); pppol2tp_connect() 738 ps->sock = sk; pppol2tp_connect() 781 po->chan.private = sk; pppol2tp_connect() 785 error = ppp_register_net_channel(sock_net(sk), &po->chan); pppol2tp_connect() 791 sk->sk_user_data = session; pppol2tp_connect() 792 sk->sk_state = PPPOX_CONNECTED; pppol2tp_connect() 797 release_sock(sk); pppol2tp_connect() 867 struct sock *sk = sock->sk; pppol2tp_getname() local 872 if (sk == NULL) pppol2tp_getname() 874 if (sk->sk_state != PPPOX_CONNECTED) pppol2tp_getname() 878 session = pppol2tp_sock_to_session(sk); pppol2tp_getname() 968 sock_put(sk); pppol2tp_getname() 1006 struct sock *sk; pppol2tp_session_ioctl() local 1016 sk = ps->sock; pppol2tp_session_ioctl() 1017 sock_hold(sk); pppol2tp_session_ioctl() 1022 if (!(sk->sk_state & PPPOX_CONNECTED)) pppol2tp_session_ioctl() 1039 if (!(sk->sk_state & PPPOX_CONNECTED)) pppol2tp_session_ioctl() 1055 if (!(sk->sk_state & PPPOX_CONNECTED)) pppol2tp_session_ioctl() 1069 if (!(sk->sk_state & PPPOX_CONNECTED)) pppol2tp_session_ioctl() 1104 if (!(sk->sk_state & PPPOX_CONNECTED)) pppol2tp_session_ioctl() 1124 sock_put(sk); pppol2tp_session_ioctl() 1139 struct sock *sk; pppol2tp_tunnel_ioctl() local 1146 sk = tunnel->sock; pppol2tp_tunnel_ioctl() 1147 sock_hold(sk); pppol2tp_tunnel_ioctl() 1152 if (!(sk->sk_state & PPPOX_CONNECTED)) pppol2tp_tunnel_ioctl() 1163 l2tp_session_find(sock_net(sk), tunnel, stats.session_id); pppol2tp_tunnel_ioctl() 1171 stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0; pppol2tp_tunnel_ioctl() 1188 sock_put(sk); pppol2tp_tunnel_ioctl() 1199 struct sock *sk = sock->sk; pppol2tp_ioctl() local 1205 if (!sk) pppol2tp_ioctl() 1209 if (sock_flag(sk, SOCK_DEAD) != 0) pppol2tp_ioctl() 1213 if ((sk->sk_user_data == NULL) || pppol2tp_ioctl() 1214 (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)))) pppol2tp_ioctl() 1219 session = pppol2tp_sock_to_session(sk); pppol2tp_ioctl() 1242 sock_put(sk); pppol2tp_ioctl() 1259 static int pppol2tp_tunnel_setsockopt(struct sock *sk, pppol2tp_tunnel_setsockopt() argument 1282 static int pppol2tp_session_setsockopt(struct sock *sk, pppol2tp_session_setsockopt() argument 1359 struct sock *sk = sock->sk; pppol2tp_setsockopt() local 1376 if (sk->sk_user_data == NULL) pppol2tp_setsockopt() 1381 session = pppol2tp_sock_to_session(sk); pppol2tp_setsockopt() 1395 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); pppol2tp_setsockopt() 1398 err = pppol2tp_session_setsockopt(sk, session, optname, val); pppol2tp_setsockopt() 1403 sock_put(sk); pppol2tp_setsockopt() 1410 static int pppol2tp_tunnel_getsockopt(struct sock *sk, pppol2tp_tunnel_getsockopt() argument 1433 static int pppol2tp_session_getsockopt(struct sock *sk, pppol2tp_session_getsockopt() argument 1485 struct sock *sk = sock->sk; pppol2tp_getsockopt() local 1504 if (sk->sk_user_data == NULL) pppol2tp_getsockopt() 1509 session = pppol2tp_sock_to_session(sk); pppol2tp_getsockopt() 1522 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); pppol2tp_getsockopt() 1525 err = pppol2tp_session_getsockopt(sk, session, optname, &val); pppol2tp_getsockopt() 1537 sock_put(sk); pppol2tp_getsockopt()
|
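pppol2tp_connect() above is driven by an ordinary connect() on a PPPoX socket. A hedged userspace sketch, assuming the sockaddr_pppol2tp layout from <linux/if_pppox.h>; tunnel_fd must already be a connected UDP socket carrying the tunnel, and every id here is a placeholder:

        #include <linux/if_pppox.h>     /* sockaddr_pppol2tp, PX_PROTO_OL2TP */
        #include <netinet/in.h>
        #include <string.h>
        #include <sys/socket.h>

        /* Attach a PPPoL2TP session socket to an existing tunnel. */
        int attach_session(int tunnel_fd, const struct sockaddr_in *peer)
        {
                int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OL2TP);
                struct sockaddr_pppol2tp sax;

                memset(&sax, 0, sizeof(sax));
                sax.sa_family          = AF_PPPOX;
                sax.sa_protocol        = PX_PROTO_OL2TP;
                sax.pppol2tp.fd        = tunnel_fd;
                sax.pppol2tp.addr      = *peer;
                sax.pppol2tp.s_tunnel  = 1;     /* local tunnel id */
                sax.pppol2tp.s_session = 1;     /* local session id */
                sax.pppol2tp.d_tunnel  = 2;     /* peer tunnel id */
                sax.pppol2tp.d_session = 2;     /* peer session id */

                return connect(fd, (struct sockaddr *)&sax, sizeof(sax));
        }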
/linux-4.1.27/include/trace/events/ |
udp.h |
     12  TP_PROTO(int rc, struct sock *sk),
     14  TP_ARGS(rc, sk),
     23  __entry->lport = inet_sk(sk)->inet_num;
|
sock.h |
     12  TP_PROTO(struct sock *sk, struct sk_buff *skb),
     14  TP_ARGS(sk, skb),
     23  __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
     25  __entry->sk_rcvbuf = sk->sk_rcvbuf;
     34  TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
     36  TP_ARGS(sk, prot, allocated),
     51  __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
|
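Both headers above define tracepoints. A minimal userspace sketch that enables the udp one through tracefs; the debugfs mount point is an assumption (newer kernels also expose the same tree under /sys/kernel/tracing):

        #include <fcntl.h>
        #include <unistd.h>

        int main(void)
        {
                /* Writing "1" arms the tracepoint; hits then appear in
                 * the "trace" file of the same directory tree. */
                int fd = open("/sys/kernel/debug/tracing/events/udp/"
                              "udp_fail_queue_rcv_skb/enable", O_WRONLY);

                if (fd >= 0) {
                        write(fd, "1", 1);
                        close(fd);
                }
                return 0;
        }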
/linux-4.1.27/net/decnet/ |
dn_timer.c |
     14  *  Steve Whitehouse : Added checks for sk->sock_readers
     38  void dn_start_slow_timer(struct sock *sk)  dn_start_slow_timer() argument
     40  setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);  dn_start_slow_timer()
     41  sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);  dn_start_slow_timer()
     44  void dn_stop_slow_timer(struct sock *sk)  dn_stop_slow_timer() argument
     46  sk_stop_timer(sk, &sk->sk_timer);  dn_stop_slow_timer()
     51  struct sock *sk = (struct sock *)arg;  dn_slow_timer() local
     52  struct dn_scp *scp = DN_SK(sk);  dn_slow_timer()
     54  bh_lock_sock(sk);  dn_slow_timer()
     56  if (sock_owned_by_user(sk)) {  dn_slow_timer()
     57  sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);  dn_slow_timer()
     76  if (scp->persist_fxn(sk))  dn_slow_timer()
     96  scp->keepalive_fxn(sk);  dn_slow_timer()
     99  sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);  dn_slow_timer()
    101  bh_unlock_sock(sk);  dn_slow_timer()
    102  sock_put(sk);  dn_slow_timer()
|
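dn_slow_timer() above shows the standard socket-timer discipline. A simplified sketch of its control flow (the persist/keepalive interval bookkeeping is elided; SLOW_INTERVAL is a DECnet-internal constant):

        /*
         * When a process owns the socket, back off briefly rather than
         * race it for the state; otherwise run the periodic work and
         * re-arm at the slow interval.
         */
        static void dn_slow_timer(unsigned long arg)
        {
                struct sock *sk = (struct sock *)arg;
                struct dn_scp *scp = DN_SK(sk);

                bh_lock_sock(sk);
                if (sock_owned_by_user(sk)) {
                        /* sk_reset_timer() takes a socket ref for us */
                        sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
                        goto out;
                }
                if (scp->persist_fxn(sk))       /* retransmit/teardown work */
                        goto out;
                scp->keepalive_fxn(sk);         /* idle-link probe */
                sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
        out:
                bh_unlock_sock(sk);
                sock_put(sk);
        }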
H A D | af_decnet.c | 36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation 39 * Steve Whitehouse: Fixed local port allocation, hashed sk list 143 struct sock sk; member in struct:dn_sock 147 static void dn_keepalive(struct sock *sk); 163 static struct hlist_head *dn_find_list(struct sock *sk) dn_find_list() argument 165 struct dn_scp *scp = DN_SK(sk); dn_find_list() 178 struct sock *sk; check_port() local 183 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { check_port() 184 struct dn_scp *scp = DN_SK(sk); check_port() 191 static unsigned short port_alloc(struct sock *sk) port_alloc() argument 193 struct dn_scp *scp = DN_SK(sk); port_alloc() 212 static int dn_hash_sock(struct sock *sk) dn_hash_sock() argument 214 struct dn_scp *scp = DN_SK(sk); dn_hash_sock() 218 BUG_ON(sk_hashed(sk)); dn_hash_sock() 222 if (!scp->addrloc && !port_alloc(sk)) dn_hash_sock() 226 if ((list = dn_find_list(sk)) == NULL) dn_hash_sock() 229 sk_add_node(sk, list); dn_hash_sock() 236 static void dn_unhash_sock(struct sock *sk) dn_unhash_sock() argument 239 sk_del_node_init(sk); dn_unhash_sock() 243 static void dn_unhash_sock_bh(struct sock *sk) dn_unhash_sock_bh() argument 246 sk_del_node_init(sk); dn_unhash_sock_bh() 271 static void dn_rehash_sock(struct sock *sk) dn_rehash_sock() argument 274 struct dn_scp *scp = DN_SK(sk); dn_rehash_sock() 280 sk_del_node_init(sk); dn_rehash_sock() 281 DN_SK(sk)->addrloc = 0; dn_rehash_sock() 282 list = listen_hash(&DN_SK(sk)->addr); dn_rehash_sock() 283 sk_add_node(sk, list); dn_rehash_sock() 377 struct sock *sk; dn_sklist_find_listener() local 380 sk_for_each(sk, list) { sk_for_each() 381 struct dn_scp *scp = DN_SK(sk); sk_for_each() 382 if (sk->sk_state != TCP_LISTEN) sk_for_each() 395 sock_hold(sk); sk_for_each() 397 return sk; sk_for_each() 400 sk = sk_head(&dn_wild_sk); 401 if (sk) { 402 if (sk->sk_state == TCP_LISTEN) 403 sock_hold(sk); 405 sk = NULL; 409 return sk; 415 struct sock *sk; dn_find_by_skb() local 419 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { dn_find_by_skb() 420 scp = DN_SK(sk); dn_find_by_skb() 427 sock_hold(sk); dn_find_by_skb() 430 sk = NULL; dn_find_by_skb() 433 return sk; dn_find_by_skb() 438 static void dn_destruct(struct sock *sk) dn_destruct() argument 440 struct dn_scp *scp = DN_SK(sk); dn_destruct() 446 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); dn_destruct() 451 static void dn_enter_memory_pressure(struct sock *sk) dn_enter_memory_pressure() argument 474 struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto); dn_alloc_sock() local 476 if (!sk) dn_alloc_sock() 481 sock_init_data(sock, sk); dn_alloc_sock() 483 sk->sk_backlog_rcv = dn_nsp_backlog_rcv; dn_alloc_sock() 484 sk->sk_destruct = dn_destruct; dn_alloc_sock() 485 sk->sk_no_check_tx = 1; dn_alloc_sock() 486 sk->sk_family = PF_DECnet; dn_alloc_sock() 487 sk->sk_protocol = 0; dn_alloc_sock() 488 sk->sk_allocation = gfp; dn_alloc_sock() 489 sk->sk_sndbuf = sysctl_decnet_wmem[1]; dn_alloc_sock() 490 sk->sk_rcvbuf = sysctl_decnet_rmem[1]; dn_alloc_sock() 493 scp = DN_SK(sk); dn_alloc_sock() 539 dn_start_slow_timer(sk); dn_alloc_sock() 541 return sk; dn_alloc_sock() 548 static void dn_keepalive(struct sock *sk) dn_keepalive() argument 550 struct dn_scp *scp = DN_SK(sk); dn_keepalive() 558 dn_nsp_send_link(sk, DN_NOCHANGE, 0); dn_keepalive() 569 int dn_destroy_timer(struct sock *sk) dn_destroy_timer() argument 571 struct dn_scp *scp = DN_SK(sk); dn_destroy_timer() 573 scp->persist = dn_nsp_persist(sk); 
dn_destroy_timer() 577 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC); dn_destroy_timer() 583 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC); dn_destroy_timer() 591 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, dn_destroy_timer() 599 if (sk->sk_socket) dn_destroy_timer() 603 dn_unhash_sock(sk); dn_destroy_timer() 604 sock_put(sk); dn_destroy_timer() 611 static void dn_destroy_sock(struct sock *sk) dn_destroy_sock() argument 613 struct dn_scp *scp = DN_SK(sk); dn_destroy_sock() 617 if (sk->sk_socket) { dn_destroy_sock() 618 if (sk->sk_socket->state != SS_UNCONNECTED) dn_destroy_sock() 619 sk->sk_socket->state = SS_DISCONNECTING; dn_destroy_sock() 622 sk->sk_state = TCP_CLOSE; dn_destroy_sock() 626 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, dn_destroy_sock() 627 sk->sk_allocation); dn_destroy_sock() 629 scp->persist = dn_nsp_persist(sk); dn_destroy_sock() 639 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation); dn_destroy_sock() 649 scp->persist = dn_nsp_persist(sk); dn_destroy_sock() 654 dn_stop_slow_timer(sk); dn_destroy_sock() 656 dn_unhash_sock_bh(sk); dn_destroy_sock() 657 sock_put(sk); dn_destroy_sock() 679 struct sock *sk; dn_create() local 699 if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL) dn_create() 702 sk->sk_protocol = protocol; dn_create() 711 struct sock *sk = sock->sk; dn_release() local 713 if (sk) { dn_release() 714 sock_orphan(sk); dn_release() 715 sock_hold(sk); dn_release() 716 lock_sock(sk); dn_release() 717 dn_destroy_sock(sk); dn_release() 718 release_sock(sk); dn_release() 719 sock_put(sk); dn_release() 727 struct sock *sk = sock->sk; dn_bind() local 728 struct dn_scp *scp = DN_SK(sk); dn_bind() 771 lock_sock(sk); dn_bind() 772 if (sock_flag(sk, SOCK_ZAPPED)) { dn_bind() 774 sock_reset_flag(sk, SOCK_ZAPPED); dn_bind() 776 rv = dn_hash_sock(sk); dn_bind() 778 sock_set_flag(sk, SOCK_ZAPPED); dn_bind() 780 release_sock(sk); dn_bind() 788 struct sock *sk = sock->sk; dn_auto_bind() local 789 struct dn_scp *scp = DN_SK(sk); dn_auto_bind() 792 sock_reset_flag(sk, SOCK_ZAPPED); dn_auto_bind() 815 rv = dn_hash_sock(sk); dn_auto_bind() 817 sock_set_flag(sk, SOCK_ZAPPED); dn_auto_bind() 823 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) dn_confirm_accept() argument 825 struct dn_scp *scp = DN_SK(sk); dn_confirm_accept() 833 scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk)); dn_confirm_accept() 834 dn_send_conn_conf(sk, allocation); dn_confirm_accept() 836 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dn_confirm_accept() 838 release_sock(sk); dn_confirm_accept() 841 lock_sock(sk); dn_confirm_accept() 845 err = sock_error(sk); dn_confirm_accept() 854 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dn_confirm_accept() 856 finish_wait(sk_sleep(sk), &wait); dn_confirm_accept() 858 sk->sk_socket->state = SS_CONNECTED; dn_confirm_accept() 860 sk->sk_socket->state = SS_UNCONNECTED; dn_confirm_accept() 865 static int dn_wait_run(struct sock *sk, long *timeo) dn_wait_run() argument 867 struct dn_scp *scp = DN_SK(sk); dn_wait_run() 877 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dn_wait_run() 879 release_sock(sk); dn_wait_run() 882 lock_sock(sk); dn_wait_run() 886 err = sock_error(sk); dn_wait_run() 895 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dn_wait_run() 897 finish_wait(sk_sleep(sk), &wait); dn_wait_run() 900 sk->sk_socket->state = SS_CONNECTED; dn_wait_run() 902 sk->sk_socket->state = SS_UNCONNECTED; dn_wait_run() 907 static int __dn_connect(struct sock *sk, struct 
sockaddr_dn *addr, int addrlen, long *timeo, int flags) __dn_connect() argument 909 struct socket *sock = sk->sk_socket; __dn_connect() 910 struct dn_scp *scp = DN_SK(sk); __dn_connect() 929 return dn_wait_run(sk, timeo); __dn_connect() 943 if (sock_flag(sk, SOCK_ZAPPED)) { __dn_connect() 944 err = dn_auto_bind(sk->sk_socket); __dn_connect() 953 fld.flowidn_oif = sk->sk_bound_dev_if; __dn_connect() 958 if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0) __dn_connect() 960 dst = __sk_dst_get(sk); __dn_connect() 961 sk->sk_route_caps = dst->dev->features; __dn_connect() 966 dn_nsp_send_conninit(sk, NSP_CI); __dn_connect() 969 err = dn_wait_run(sk, timeo); __dn_connect() 978 struct sock *sk = sock->sk; dn_connect() local 980 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); dn_connect() 982 lock_sock(sk); dn_connect() 983 err = __dn_connect(sk, addr, addrlen, &timeo, 0); dn_connect() 984 release_sock(sk); dn_connect() 989 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags) dn_check_state() argument 991 struct dn_scp *scp = DN_SK(sk); dn_check_state() 997 return dn_confirm_accept(sk, timeo, sk->sk_allocation); dn_check_state() 1000 return dn_wait_run(sk, timeo); dn_check_state() 1002 return __dn_connect(sk, addr, addrlen, timeo, flags); dn_check_state() 1040 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) dn_wait_for_connect() argument 1046 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dn_wait_for_connect() 1048 release_sock(sk); dn_wait_for_connect() 1049 skb = skb_dequeue(&sk->sk_receive_queue); dn_wait_for_connect() 1052 skb = skb_dequeue(&sk->sk_receive_queue); dn_wait_for_connect() 1054 lock_sock(sk); dn_wait_for_connect() 1058 if (sk->sk_state != TCP_LISTEN) dn_wait_for_connect() 1066 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dn_wait_for_connect() 1068 finish_wait(sk_sleep(sk), &wait); dn_wait_for_connect() 1075 struct sock *sk = sock->sk, *newsk; dn_accept() local 1081 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); dn_accept() 1084 lock_sock(sk); dn_accept() 1086 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) { dn_accept() 1087 release_sock(sk); dn_accept() 1091 skb = skb_dequeue(&sk->sk_receive_queue); dn_accept() 1093 skb = dn_wait_for_connect(sk, &timeo); dn_accept() 1095 release_sock(sk); dn_accept() 1101 sk->sk_ack_backlog--; dn_accept() 1102 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation); dn_accept() 1104 release_sock(sk); dn_accept() 1108 release_sock(sk); dn_accept() 1119 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode; dn_accept() 1128 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn)); dn_accept() 1158 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out), dn_accept() 1160 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out), dn_accept() 1170 * Here we use sk->sk_allocation since although the conn conf is dn_accept() 1175 sk->sk_allocation); dn_accept() 1185 struct sock *sk = sock->sk; dn_getname() local 1186 struct dn_scp *scp = DN_SK(sk); dn_getname() 1190 lock_sock(sk); dn_getname() 1196 release_sock(sk); dn_getname() 1205 release_sock(sk); dn_getname() 1213 struct sock *sk = sock->sk; dn_poll() local 1214 struct dn_scp *scp = DN_SK(sk); dn_poll() 1225 struct sock *sk = sock->sk; dn_ioctl() local 1226 struct dn_scp *scp = DN_SK(sk); dn_ioctl() 1239 lock_sock(sk); dn_ioctl() 1243 release_sock(sk); dn_ioctl() 1247 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 
dn_ioctl() 1254 lock_sock(sk); dn_ioctl() 1259 skb_queue_walk(&sk->sk_receive_queue, skb) dn_ioctl() 1262 release_sock(sk); dn_ioctl() 1276 struct sock *sk = sock->sk; dn_listen() local 1279 lock_sock(sk); dn_listen() 1281 if (sock_flag(sk, SOCK_ZAPPED)) dn_listen() 1284 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN)) dn_listen() 1287 sk->sk_max_ack_backlog = backlog; dn_listen() 1288 sk->sk_ack_backlog = 0; dn_listen() 1289 sk->sk_state = TCP_LISTEN; dn_listen() 1291 dn_rehash_sock(sk); dn_listen() 1294 release_sock(sk); dn_listen() 1302 struct sock *sk = sock->sk; dn_shutdown() local 1303 struct dn_scp *scp = DN_SK(sk); dn_shutdown() 1306 lock_sock(sk); dn_shutdown() 1322 sk->sk_shutdown = SHUTDOWN_MASK; dn_shutdown() 1323 dn_destroy_sock(sk); dn_shutdown() 1327 release_sock(sk); dn_shutdown() 1334 struct sock *sk = sock->sk; dn_setsockopt() local 1337 lock_sock(sk); dn_setsockopt() 1339 release_sock(sk); dn_setsockopt() 1346 struct sock *sk = sock->sk; __dn_setsockopt() local 1347 struct dn_scp *scp = DN_SK(sk); __dn_setsockopt() 1434 timeo = sock_rcvtimeo(sk, 0); __dn_setsockopt() 1435 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation); __dn_setsockopt() 1443 sk->sk_shutdown = SHUTDOWN_MASK; __dn_setsockopt() 1444 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); __dn_setsockopt() 1449 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); __dn_setsockopt() 1510 struct sock *sk = sock->sk; dn_getsockopt() local 1513 lock_sock(sk); dn_getsockopt() 1515 release_sock(sk); dn_getsockopt() 1522 struct sock *sk = sock->sk; __dn_getsockopt() local 1523 struct dn_scp *scp = DN_SK(sk); __dn_getsockopt() 1589 ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len); __dn_getsockopt() 1645 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target) dn_data_ready() argument 1659 if (sk->sk_type == SOCK_SEQPACKET) skb_queue_walk() 1678 struct sock *sk = sock->sk; dn_recvmsg() local 1679 struct dn_scp *scp = DN_SK(sk); dn_recvmsg() 1680 struct sk_buff_head *queue = &sk->sk_receive_queue; dn_recvmsg() 1687 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); dn_recvmsg() 1689 lock_sock(sk); dn_recvmsg() 1691 if (sock_flag(sk, SOCK_ZAPPED)) { dn_recvmsg() 1696 if (sk->sk_shutdown & RCV_SHUTDOWN) { dn_recvmsg() 1701 rv = dn_check_state(sk, NULL, 0, &timeo, flags); dn_recvmsg() 1723 if (sk->sk_err) dn_recvmsg() 1744 if (dn_data_ready(sk, queue, flags, target)) dn_recvmsg() 1752 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dn_recvmsg() 1753 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); dn_recvmsg() 1754 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); dn_recvmsg() 1755 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); dn_recvmsg() 1756 finish_wait(sk_sleep(sk), &wait); dn_recvmsg() 1784 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) { skb_queue_walk_safe() 1786 dn_nsp_send_link(sk, DN_SEND, 0); skb_queue_walk_safe() 1791 if (sk->sk_type == SOCK_SEQPACKET) skb_queue_walk_safe() 1807 if (eor && (sk->sk_type == SOCK_SEQPACKET)) 1812 rv = (flags & MSG_PEEK) ? 
-sk->sk_err : sock_error(sk); 1820 release_sock(sk); 1873 static inline unsigned int dn_current_mss(struct sock *sk, int flags) dn_current_mss() argument 1875 struct dst_entry *dst = __sk_dst_get(sk); dn_current_mss() 1876 struct dn_scp *scp = DN_SK(sk); dn_current_mss() 1898 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk, dn_alloc_send_pskb() argument 1902 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen, dn_alloc_send_pskb() 1913 struct sock *sk = sock->sk; dn_sendmsg() local 1914 struct dn_scp *scp = DN_SK(sk); dn_sendmsg() 1934 lock_sock(sk); dn_sendmsg() 1935 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); dn_sendmsg() 1950 err = dn_check_state(sk, addr, addr_len, &timeo, flags); dn_sendmsg() 1954 if (sk->sk_shutdown & SEND_SHUTDOWN) { dn_sendmsg() 1961 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) dn_sendmsg() 1962 dst_negative_advice(sk); dn_sendmsg() 1967 mss = dn_current_mss(sk, flags); dn_sendmsg() 1980 err = sock_error(sk); dn_sendmsg() 2009 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); dn_sendmsg() 2010 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); dn_sendmsg() 2011 sk_wait_event(sk, &timeo, dn_sendmsg() 2013 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); dn_sendmsg() 2014 finish_wait(sk_sleep(sk), &wait); dn_sendmsg() 2024 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER, dn_sendmsg() 2064 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB); dn_sendmsg() 2067 scp->persist = dn_nsp_persist(sk); dn_sendmsg() 2074 release_sock(sk); dn_sendmsg() 2079 err = sk_stream_error(sk, flags, err); dn_sendmsg() 2080 release_sock(sk); dn_sendmsg() 2155 struct sock *sk = dn_socket_get_first(seq); socket_get_idx() local 2157 if (sk) { socket_get_idx() 2158 while(*pos && (sk = dn_socket_get_next(seq, sk))) socket_get_idx() 2161 return *pos ? NULL : sk; socket_get_idx() 2264 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk) dn_socket_format_entry() argument 2266 struct dn_scp *scp = DN_SK(sk); dn_socket_format_entry()
|
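The send and receive paths in af_decnet.c above block with the sk_wait_event() idiom. An annotated re-assembly of the dn_recvmsg() wait: the macro drops the socket lock while sleeping so the BH receive path can deliver data, then re-takes it before the condition is re-checked.

        DEFINE_WAIT(wait);

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        /* condition is re-evaluated with the lock held on each wakeup */
        sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        finish_wait(sk_sleep(sk), &wait);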
H A D | dn_nsp_in.c | 100 static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack) dn_ack() argument 102 struct dn_scp *scp = DN_SK(sk); dn_ack() 110 wakeup |= dn_nsp_check_xmit_queue(sk, skb, dn_ack() 120 wakeup |= dn_nsp_check_xmit_queue(sk, skb, dn_ack() 129 if (wakeup && !sock_flag(sk, SOCK_DEAD)) dn_ack() 130 sk->sk_state_change(sk); dn_ack() 136 static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth) dn_process_ack() argument 152 dn_ack(sk, skb, ack); dn_process_ack() 165 dn_ack(sk, skb, ack); dn_process_ack() 332 static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb) dn_nsp_conn_init() argument 334 if (sk_acceptq_is_full(sk)) { dn_nsp_conn_init() 339 sk->sk_ack_backlog++; dn_nsp_conn_init() 340 skb_queue_tail(&sk->sk_receive_queue, skb); dn_nsp_conn_init() 341 sk->sk_state_change(sk); dn_nsp_conn_init() 344 static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb) dn_nsp_conn_conf() argument 347 struct dn_scp *scp = DN_SK(sk); dn_nsp_conn_conf() 361 sk->sk_state = TCP_ESTABLISHED; dn_nsp_conn_conf() 378 dn_nsp_send_link(sk, DN_NOCHANGE, 0); dn_nsp_conn_conf() 379 if (!sock_flag(sk, SOCK_DEAD)) dn_nsp_conn_conf() 380 sk->sk_state_change(sk); dn_nsp_conn_conf() 387 static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb) dn_nsp_conn_ack() argument 389 struct dn_scp *scp = DN_SK(sk); dn_nsp_conn_ack() 399 static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb) dn_nsp_disc_init() argument 401 struct dn_scp *scp = DN_SK(sk); dn_nsp_disc_init() 424 sk->sk_state = TCP_CLOSE; dn_nsp_disc_init() 430 sk->sk_err = ECONNREFUSED; dn_nsp_disc_init() 433 sk->sk_shutdown |= SHUTDOWN_MASK; dn_nsp_disc_init() 441 if (!sock_flag(sk, SOCK_DEAD)) { dn_nsp_disc_init() 442 if (sk->sk_socket->state != SS_UNCONNECTED) dn_nsp_disc_init() 443 sk->sk_socket->state = SS_DISCONNECTING; dn_nsp_disc_init() 444 sk->sk_state_change(sk); dn_nsp_disc_init() 454 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC); dn_nsp_disc_init() 457 scp->persist = dn_nsp_persist(sk); dn_nsp_disc_init() 467 static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb) dn_nsp_disc_conf() argument 469 struct dn_scp *scp = DN_SK(sk); dn_nsp_disc_conf() 477 sk->sk_state = TCP_CLOSE; dn_nsp_disc_conf() 493 sk->sk_shutdown |= SHUTDOWN_MASK; dn_nsp_disc_conf() 498 if (!sock_flag(sk, SOCK_DEAD)) { dn_nsp_disc_conf() 499 if (sk->sk_socket->state != SS_UNCONNECTED) dn_nsp_disc_conf() 500 sk->sk_socket->state = SS_DISCONNECTING; dn_nsp_disc_conf() 501 sk->sk_state_change(sk); dn_nsp_disc_conf() 505 scp->persist = dn_nsp_persist(sk); dn_nsp_disc_conf() 511 static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb) dn_nsp_linkservice() argument 513 struct dn_scp *scp = DN_SK(sk); dn_nsp_linkservice() 559 dn_nsp_output(sk); dn_nsp_linkservice() 570 if (wake_up && !sock_flag(sk, SOCK_DEAD)) dn_nsp_linkservice() 571 sk->sk_state_change(sk); dn_nsp_linkservice() 574 dn_nsp_send_oth_ack(sk); dn_nsp_linkservice() 585 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) dn_queue_skb() argument 592 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= dn_queue_skb() 593 (unsigned int)sk->sk_rcvbuf) { dn_queue_skb() 598 err = sk_filter(sk, skb); dn_queue_skb() 602 skb_set_owner_r(skb, sk); dn_queue_skb() 605 if (!sock_flag(sk, SOCK_DEAD)) dn_queue_skb() 606 sk->sk_data_ready(sk); dn_queue_skb() 611 static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb) dn_nsp_otherdata() argument 613 
struct dn_scp *scp = DN_SK(sk); dn_nsp_otherdata() 626 if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) { dn_nsp_otherdata() 633 dn_nsp_send_oth_ack(sk); dn_nsp_otherdata() 639 static void dn_nsp_data(struct sock *sk, struct sk_buff *skb) dn_nsp_data() argument 644 struct dn_scp *scp = DN_SK(sk); dn_nsp_data() 653 if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) { dn_nsp_data() 658 if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) { dn_nsp_data() 660 dn_nsp_send_link(sk, DN_DONTSEND, 0); dn_nsp_data() 664 dn_nsp_send_data_ack(sk); dn_nsp_data() 675 static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb) dn_returned_conn_init() argument 677 struct dn_scp *scp = DN_SK(sk); dn_returned_conn_init() 681 sk->sk_state = TCP_CLOSE; dn_returned_conn_init() 682 if (!sock_flag(sk, SOCK_DEAD)) dn_returned_conn_init() 683 sk->sk_state_change(sk); dn_returned_conn_init() 720 struct sock *sk = NULL; dn_nsp_rx_packet() local 749 sk = dn_find_listener(skb, &reason); dn_nsp_rx_packet() 789 sk = dn_find_by_skb(skb); dn_nsp_rx_packet() 791 if (sk != NULL) { dn_nsp_rx_packet() 792 struct dn_scp *scp = DN_SK(sk); dn_nsp_rx_packet() 805 return sk_receive_skb(sk, skb, 0); dn_nsp_rx_packet() 827 int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb) dn_nsp_backlog_rcv() argument 829 struct dn_scp *scp = DN_SK(sk); dn_nsp_backlog_rcv() 834 dn_returned_conn_init(sk, skb); dn_nsp_backlog_rcv() 847 dn_nsp_conn_init(sk, skb); dn_nsp_backlog_rcv() 850 dn_nsp_conn_conf(sk, skb); dn_nsp_backlog_rcv() 853 dn_nsp_disc_init(sk, skb); dn_nsp_backlog_rcv() 856 dn_nsp_disc_conf(sk, skb); dn_nsp_backlog_rcv() 865 dn_nsp_conn_ack(sk, skb); dn_nsp_backlog_rcv() 870 if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) { dn_nsp_backlog_rcv() 872 sk->sk_state = TCP_ESTABLISHED; dn_nsp_backlog_rcv() 873 sk->sk_state_change(sk); dn_nsp_backlog_rcv() 886 dn_process_ack(sk, skb, other); dn_nsp_backlog_rcv() 900 dn_nsp_linkservice(sk, skb); dn_nsp_backlog_rcv() 903 dn_nsp_otherdata(sk, skb); dn_nsp_backlog_rcv() 906 dn_nsp_data(sk, skb); dn_nsp_backlog_rcv()
|
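dn_queue_skb() above is the admission check for DECnet's receive queues. A commented re-assembly of its core: refuse the skb once sk_rcvbuf is exhausted (DECnet flow control then throttles the peer), otherwise charge it to the socket and wake any reader.

        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned int)sk->sk_rcvbuf)
                return -ENOBUFS;                /* let flow control kick in */

        err = sk_filter(sk, skb);               /* socket filters still apply */
        if (err)
                return err;

        skb_set_owner_r(skb, sk);               /* account truesize to sk_rmem_alloc */
        skb_queue_tail(queue, skb);
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk);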
H A D | dn_nsp_out.c | 76 struct sock *sk = skb->sk; dn_nsp_send() local 77 struct dn_scp *scp = DN_SK(sk); dn_nsp_send() 84 dst = sk_dst_check(sk, 0); dn_nsp_send() 93 fld.flowidn_oif = sk->sk_bound_dev_if; dn_nsp_send() 98 if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) { dn_nsp_send() 99 dst = sk_dst_get(sk); dn_nsp_send() 100 sk->sk_route_caps = dst->dev->features; dn_nsp_send() 104 sk->sk_err = EHOSTUNREACH; dn_nsp_send() 105 if (!sock_flag(sk, SOCK_DEAD)) dn_nsp_send() 106 sk->sk_state_change(sk); dn_nsp_send() 111 * If sk == NULL, then we assume that we are supposed to be making 112 * a routing layer skb. If sk != NULL, then we are supposed to be 116 * for its outgoing packets, and to set hdr from this when sk != NULL. 118 struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri) dn_alloc_skb() argument 129 if (sk) dn_alloc_skb() 130 skb_set_owner_w(skb, sk); dn_alloc_skb() 142 unsigned long dn_nsp_persist(struct sock *sk) dn_nsp_persist() argument 144 struct dn_scp *scp = DN_SK(sk); dn_nsp_persist() 165 static void dn_nsp_rtt(struct sock *sk, long rtt) dn_nsp_rtt() argument 167 struct dn_scp *scp = DN_SK(sk); dn_nsp_rtt() 222 skb2->sk = skb->sk; dn_nsp_clone_and_send() 231 * @sk: The socket whose queues are to be investigated 238 void dn_nsp_output(struct sock *sk) dn_nsp_output() argument 240 struct dn_scp *scp = DN_SK(sk); dn_nsp_output() 274 int dn_nsp_xmit_timeout(struct sock *sk) dn_nsp_xmit_timeout() argument 276 struct dn_scp *scp = DN_SK(sk); dn_nsp_xmit_timeout() 278 dn_nsp_output(sk); dn_nsp_xmit_timeout() 282 scp->persist = dn_nsp_persist(sk); dn_nsp_xmit_timeout() 301 static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other) dn_mk_ack_header() argument 303 struct dn_scp *scp = DN_SK(sk); dn_mk_ack_header() 333 static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth) dn_nsp_mk_data_header() argument 335 struct dn_scp *scp = DN_SK(sk); dn_nsp_mk_data_header() 337 __le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth); dn_nsp_mk_data_header() 351 void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, dn_nsp_queue_xmit() argument 354 struct dn_scp *scp = DN_SK(sk); dn_nsp_queue_xmit() 359 dn_nsp_mk_data_header(sk, skb, oth); dn_nsp_queue_xmit() 380 int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum) dn_nsp_check_xmit_queue() argument 383 struct dn_scp *scp = DN_SK(sk); dn_nsp_check_xmit_queue() 431 dn_nsp_rtt(sk, (long)(pkttime - reftime)); skb_queue_walk_safe() 448 dn_nsp_output(sk); 453 void dn_nsp_send_data_ack(struct sock *sk) dn_nsp_send_data_ack() argument 457 if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL) dn_nsp_send_data_ack() 461 dn_mk_ack_header(sk, skb, 0x04, 9, 0); dn_nsp_send_data_ack() 465 void dn_nsp_send_oth_ack(struct sock *sk) dn_nsp_send_oth_ack() argument 469 if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL) dn_nsp_send_oth_ack() 473 dn_mk_ack_header(sk, skb, 0x14, 9, 1); dn_nsp_send_oth_ack() 478 void dn_send_conn_ack (struct sock *sk) dn_send_conn_ack() argument 480 struct dn_scp *scp = DN_SK(sk); dn_send_conn_ack() 484 if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL) dn_send_conn_ack() 494 void dn_nsp_delayed_ack(struct sock *sk) dn_nsp_delayed_ack() argument 496 struct dn_scp *scp = DN_SK(sk); dn_nsp_delayed_ack() 499 dn_nsp_send_oth_ack(sk); dn_nsp_delayed_ack() 502 dn_nsp_send_data_ack(sk); dn_nsp_delayed_ack() 505 static int 
dn_nsp_retrans_conn_conf(struct sock *sk) dn_nsp_retrans_conn_conf() argument 507 struct dn_scp *scp = DN_SK(sk); dn_nsp_retrans_conn_conf() 510 dn_send_conn_conf(sk, GFP_ATOMIC); dn_nsp_retrans_conn_conf() 515 void dn_send_conn_conf(struct sock *sk, gfp_t gfp) dn_send_conn_conf() argument 517 struct dn_scp *scp = DN_SK(sk); dn_send_conn_conf() 522 if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL) dn_send_conn_conf() 541 scp->persist = dn_nsp_persist(sk); dn_send_conn_conf() 546 static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, dn_nsp_do_disc() argument 561 if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL) dn_nsp_do_disc() 589 void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, dn_nsp_send_disc() argument 592 struct dn_scp *scp = DN_SK(sk); dn_nsp_send_disc() 601 dn_nsp_do_disc(sk, msgflg, reason, gfp, __sk_dst_get(sk), ddl, dn_nsp_send_disc() 618 void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval) dn_nsp_send_link() argument 620 struct dn_scp *scp = DN_SK(sk); dn_nsp_send_link() 625 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) dn_nsp_send_link() 634 dn_nsp_queue_xmit(sk, skb, gfp, 1); dn_nsp_send_link() 636 scp->persist = dn_nsp_persist(sk); dn_nsp_send_link() 640 static int dn_nsp_retrans_conninit(struct sock *sk) dn_nsp_retrans_conninit() argument 642 struct dn_scp *scp = DN_SK(sk); dn_nsp_retrans_conninit() 645 dn_nsp_send_conninit(sk, NSP_RCI); dn_nsp_retrans_conninit() 650 void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg) dn_nsp_send_conninit() argument 652 struct dn_scp *scp = DN_SK(sk); dn_nsp_send_conninit() 658 gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; dn_nsp_send_conninit() 659 struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); dn_nsp_send_conninit() 711 scp->persist = dn_nsp_persist(sk); dn_nsp_send_conninit()
|
/linux-4.1.27/net/vmw_vsock/ |
vmci_transport_notify.h |
     48  void (*socket_init) (struct sock *sk);
     50  int (*poll_in) (struct sock *sk, size_t target,
     52  int (*poll_out) (struct sock *sk, size_t target,
     54  void (*handle_notify_pkt) (struct sock *sk,
     59  int (*recv_init) (struct sock *sk, size_t target,
     61  int (*recv_pre_block) (struct sock *sk, size_t target,
     63  int (*recv_pre_dequeue) (struct sock *sk, size_t target,
     65  int (*recv_post_dequeue) (struct sock *sk, size_t target,
     68  int (*send_init) (struct sock *sk,
     70  int (*send_pre_block) (struct sock *sk,
     72  int (*send_pre_enqueue) (struct sock *sk,
     74  int (*send_post_enqueue) (struct sock *sk, ssize_t written,
     76  void (*process_request) (struct sock *sk);
     77  void (*process_negotiate) (struct sock *sk);
|
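vmci_transport_notify_ops above is a classic ops table: one notify-protocol variant is chosen at socket init and all later calls dispatch through the pointers. A minimal userspace C illustration of the pattern (all names invented for the example):

        #include <stdio.h>

        struct notify_ops {
                void (*socket_init)(int sk);
                int  (*poll_in)(int sk, size_t target);
        };

        static void v1_init(int sk)              { printf("v1 init %d\n", sk); }
        static int  v1_poll_in(int sk, size_t t) { (void)sk; (void)t; return 1; }

        /* One table per protocol variant; the caller never branches. */
        static const struct notify_ops notify_v1 = {
                .socket_init = v1_init,
                .poll_in     = v1_poll_in,
        };

        int main(void)
        {
                const struct notify_ops *ops = &notify_v1;  /* chosen once */

                ops->socket_init(3);
                return ops->poll_in(3, 16) ? 0 : 1;
        }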
H A D | af_vsock.c | 101 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr); 102 static void vsock_sk_destruct(struct sock *sk); 103 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 170 struct sock *sk = sk_vsock(vsk); vsock_auto_bind() local 176 return __vsock_bind(sk, &local_addr); vsock_auto_bind() 193 sock_hold(&vsk->sk); __vsock_insert_bound() 200 sock_hold(&vsk->sk); __vsock_insert_connected() 207 sock_put(&vsk->sk); __vsock_remove_bound() 213 sock_put(&vsk->sk); __vsock_remove_connected() 289 struct sock *sk; vsock_find_bound_socket() local 292 sk = __vsock_find_bound_socket(addr); vsock_find_bound_socket() 293 if (sk) vsock_find_bound_socket() 294 sock_hold(sk); vsock_find_bound_socket() 298 return sk; vsock_find_bound_socket() 305 struct sock *sk; vsock_find_connected_socket() local 308 sk = __vsock_find_connected_socket(src, dst); vsock_find_connected_socket() 309 if (sk) vsock_find_connected_socket() 310 sock_hold(sk); vsock_find_connected_socket() 314 return sk; vsock_find_connected_socket() 340 void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) vsock_for_each_connected_socket() argument 417 static bool vsock_is_accept_queue_empty(struct sock *sk) vsock_is_accept_queue_empty() argument 419 struct vsock_sock *vsk = vsock_sk(sk); vsock_is_accept_queue_empty() 423 static bool vsock_is_pending(struct sock *sk) vsock_is_pending() argument 425 struct vsock_sock *vsk = vsock_sk(sk); vsock_is_pending() 429 static int vsock_send_shutdown(struct sock *sk, int mode) vsock_send_shutdown() argument 431 return transport->shutdown(vsock_sk(sk), mode); vsock_send_shutdown() 436 struct sock *sk; vsock_pending_work() local 442 sk = sk_vsock(vsk); vsock_pending_work() 447 lock_sock(sk); vsock_pending_work() 449 if (vsock_is_pending(sk)) { vsock_pending_work() 450 vsock_remove_pending(listener, sk); vsock_pending_work() 470 sk->sk_state = SS_FREE; vsock_pending_work() 473 release_sock(sk); vsock_pending_work() 476 sock_put(sk); vsock_pending_work() 478 sock_put(sk); vsock_pending_work() 542 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) __vsock_bind() argument 544 struct vsock_sock *vsk = vsock_sk(sk); __vsock_bind() 561 switch (sk->sk_socket->type) { __vsock_bind() 586 struct sock *sk; __vsock_create() local 590 sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto); __vsock_create() 591 if (!sk) __vsock_create() 594 sock_init_data(sock, sk); __vsock_create() 596 /* sk->sk_type is normally set in sock_init_data, but only if sock is __vsock_create() 601 sk->sk_type = type; __vsock_create() 603 vsk = vsock_sk(sk); __vsock_create() 607 sk->sk_destruct = vsock_sk_destruct; __vsock_create() 608 sk->sk_backlog_rcv = vsock_queue_rcv_skb; __vsock_create() 609 sk->sk_state = 0; __vsock_create() 610 sock_reset_flag(sk, SOCK_DONE); __vsock_create() 634 sk_free(sk); __vsock_create() 641 return sk; __vsock_create() 645 static void __vsock_release(struct sock *sk) __vsock_release() argument 647 if (sk) { __vsock_release() 652 vsk = vsock_sk(sk); __vsock_release() 663 lock_sock(sk); __vsock_release() 664 sock_orphan(sk); __vsock_release() 665 sk->sk_shutdown = SHUTDOWN_MASK; __vsock_release() 667 while ((skb = skb_dequeue(&sk->sk_receive_queue))) __vsock_release() 671 while ((pending = vsock_dequeue_accept(sk)) != NULL) { __vsock_release() 676 release_sock(sk); __vsock_release() 677 sock_put(sk); __vsock_release() 681 static void vsock_sk_destruct(struct sock *sk) vsock_sk_destruct() argument 683 struct vsock_sock *vsk = vsock_sk(sk); 
vsock_sk_destruct() 696 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) vsock_queue_rcv_skb() argument 700 err = sock_queue_rcv_skb(sk, skb); vsock_queue_rcv_skb() 721 __vsock_release(sock->sk); vsock_release() 722 sock->sk = NULL; vsock_release() 732 struct sock *sk; vsock_bind() local 735 sk = sock->sk; vsock_bind() 740 lock_sock(sk); vsock_bind() 741 err = __vsock_bind(sk, vm_addr); vsock_bind() 742 release_sock(sk); vsock_bind() 751 struct sock *sk; vsock_getname() local 755 sk = sock->sk; vsock_getname() 756 vsk = vsock_sk(sk); vsock_getname() 759 lock_sock(sk); vsock_getname() 786 release_sock(sk); vsock_getname() 793 struct sock *sk; vsock_shutdown() local 812 sk = sock->sk; vsock_shutdown() 815 if (sk->sk_type == SOCK_STREAM) vsock_shutdown() 825 lock_sock(sk); vsock_shutdown() 826 sk->sk_shutdown |= mode; vsock_shutdown() 827 sk->sk_state_change(sk); vsock_shutdown() 828 release_sock(sk); vsock_shutdown() 830 if (sk->sk_type == SOCK_STREAM) { vsock_shutdown() 831 sock_reset_flag(sk, SOCK_DONE); vsock_shutdown() 832 vsock_send_shutdown(sk, mode); vsock_shutdown() 842 struct sock *sk; vsock_poll() local 846 sk = sock->sk; vsock_poll() 847 vsk = vsock_sk(sk); vsock_poll() 849 poll_wait(file, sk_sleep(sk), wait); vsock_poll() 852 if (sk->sk_err) vsock_poll() 859 if ((sk->sk_shutdown == SHUTDOWN_MASK) || vsock_poll() 860 ((sk->sk_shutdown & SEND_SHUTDOWN) && vsock_poll() 865 if (sk->sk_shutdown & RCV_SHUTDOWN || vsock_poll() 875 if (!skb_queue_empty(&sk->sk_receive_queue) || vsock_poll() 876 (sk->sk_shutdown & RCV_SHUTDOWN)) { vsock_poll() 880 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) vsock_poll() 884 lock_sock(sk); vsock_poll() 889 if (sk->sk_state == SS_LISTEN vsock_poll() 890 && !vsock_is_accept_queue_empty(sk)) vsock_poll() 895 !(sk->sk_shutdown & RCV_SHUTDOWN)) { vsock_poll() 912 if (sk->sk_shutdown & RCV_SHUTDOWN || vsock_poll() 918 if (sk->sk_state == SS_CONNECTED) { vsock_poll() 919 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { vsock_poll() 940 if (sk->sk_state == SS_UNCONNECTED) { vsock_poll() 941 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) vsock_poll() 946 release_sock(sk); vsock_poll() 956 struct sock *sk; vsock_dgram_sendmsg() local 965 sk = sock->sk; vsock_dgram_sendmsg() 966 vsk = vsock_sk(sk); vsock_dgram_sendmsg() 968 lock_sock(sk); vsock_dgram_sendmsg() 1019 release_sock(sk); vsock_dgram_sendmsg() 1027 struct sock *sk; vsock_dgram_connect() local 1031 sk = sock->sk; vsock_dgram_connect() 1032 vsk = vsock_sk(sk); vsock_dgram_connect() 1036 lock_sock(sk); vsock_dgram_connect() 1040 release_sock(sk); vsock_dgram_connect() 1045 lock_sock(sk); vsock_dgram_connect() 1061 release_sock(sk); vsock_dgram_connect() 1068 return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags); vsock_dgram_recvmsg() 1094 struct sock *sk; vsock_connect_timeout() local 1098 sk = sk_vsock(vsk); vsock_connect_timeout() 1100 lock_sock(sk); vsock_connect_timeout() 1101 if (sk->sk_state == SS_CONNECTING && vsock_connect_timeout() 1102 (sk->sk_shutdown != SHUTDOWN_MASK)) { vsock_connect_timeout() 1103 sk->sk_state = SS_UNCONNECTED; vsock_connect_timeout() 1104 sk->sk_err = ETIMEDOUT; vsock_connect_timeout() 1105 sk->sk_error_report(sk); vsock_connect_timeout() 1107 release_sock(sk); vsock_connect_timeout() 1109 sock_put(sk); vsock_connect_timeout() 1116 struct sock *sk; vsock_stream_connect() local 1123 sk = sock->sk; vsock_stream_connect() 1124 vsk = vsock_sk(sk); vsock_stream_connect() 1126 lock_sock(sk); vsock_stream_connect() 1146 if ((sk->sk_state == SS_LISTEN) || 
vsock_stream_connect() 1169 sk->sk_state = SS_CONNECTING; vsock_stream_connect() 1187 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); vsock_stream_connect() 1189 while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) { vsock_stream_connect() 1197 sock_hold(sk); vsock_stream_connect() 1206 release_sock(sk); vsock_stream_connect() 1208 lock_sock(sk); vsock_stream_connect() 1218 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); vsock_stream_connect() 1221 if (sk->sk_err) { vsock_stream_connect() 1222 err = -sk->sk_err; vsock_stream_connect() 1228 finish_wait(sk_sleep(sk), &wait); vsock_stream_connect() 1230 release_sock(sk); vsock_stream_connect() 1234 sk->sk_state = SS_UNCONNECTED; vsock_stream_connect() 1249 listener = sock->sk; vsock_accept() 1325 struct sock *sk; vsock_listen() local 1328 sk = sock->sk; vsock_listen() 1330 lock_sock(sk); vsock_listen() 1342 vsk = vsock_sk(sk); vsock_listen() 1349 sk->sk_max_ack_backlog = backlog; vsock_listen() 1350 sk->sk_state = SS_LISTEN; vsock_listen() 1355 release_sock(sk); vsock_listen() 1366 struct sock *sk; vsock_stream_setsockopt() local 1386 sk = sock->sk; vsock_stream_setsockopt() 1387 vsk = vsock_sk(sk); vsock_stream_setsockopt() 1389 lock_sock(sk); vsock_stream_setsockopt() 1432 release_sock(sk); vsock_stream_setsockopt() 1443 struct sock *sk; vsock_stream_getsockopt() local 1466 sk = sock->sk; vsock_stream_getsockopt() 1467 vsk = vsock_sk(sk); vsock_stream_getsockopt() 1510 struct sock *sk; vsock_stream_sendmsg() local 1519 sk = sock->sk; vsock_stream_sendmsg() 1520 vsk = vsock_sk(sk); vsock_stream_sendmsg() 1527 lock_sock(sk); vsock_stream_sendmsg() 1531 err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP; vsock_stream_sendmsg() 1536 if (sk->sk_shutdown & SEND_SHUTDOWN || vsock_stream_sendmsg() 1542 if (sk->sk_state != SS_CONNECTED || vsock_stream_sendmsg() 1554 timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); vsock_stream_sendmsg() 1560 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); vsock_stream_sendmsg() 1566 sk->sk_err == 0 && vsock_stream_sendmsg() 1567 !(sk->sk_shutdown & SEND_SHUTDOWN) && vsock_stream_sendmsg() 1580 release_sock(sk); vsock_stream_sendmsg() 1582 lock_sock(sk); vsock_stream_sendmsg() 1591 prepare_to_wait(sk_sleep(sk), &wait, vsock_stream_sendmsg() 1599 if (sk->sk_err) { vsock_stream_sendmsg() 1600 err = -sk->sk_err; vsock_stream_sendmsg() 1602 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) || vsock_stream_sendmsg() 1638 finish_wait(sk_sleep(sk), &wait); vsock_stream_sendmsg() 1640 release_sock(sk); vsock_stream_sendmsg() 1649 struct sock *sk; vsock_stream_recvmsg() local 1659 sk = sock->sk; vsock_stream_recvmsg() 1660 vsk = vsock_sk(sk); vsock_stream_recvmsg() 1663 lock_sock(sk); vsock_stream_recvmsg() 1665 if (sk->sk_state != SS_CONNECTED) { vsock_stream_recvmsg() 1671 if (sock_flag(sk, SOCK_DONE)) vsock_stream_recvmsg() 1688 if (sk->sk_shutdown & RCV_SHUTDOWN) { vsock_stream_recvmsg() 1707 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); vsock_stream_recvmsg() 1712 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); vsock_stream_recvmsg() 1719 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); vsock_stream_recvmsg() 1760 if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN) vsock_stream_recvmsg() 1775 release_sock(sk); vsock_stream_recvmsg() 1777 lock_sock(sk); vsock_stream_recvmsg() 1787 prepare_to_wait(sk_sleep(sk), &wait, vsock_stream_recvmsg() 1792 if (sk->sk_err) vsock_stream_recvmsg() 1793 err = -sk->sk_err; vsock_stream_recvmsg() 1794 else if 
(sk->sk_shutdown & RCV_SHUTDOWN) vsock_stream_recvmsg() 1810 sk->sk_state = SS_UNCONNECTED; vsock_stream_recvmsg() 1811 sock_set_flag(sk, SOCK_DONE); vsock_stream_recvmsg() 1812 sk->sk_state_change(sk); vsock_stream_recvmsg() 1820 finish_wait(sk_sleep(sk), &wait); vsock_stream_recvmsg() 1822 release_sock(sk); vsock_stream_recvmsg()
|
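Editor's note: the af_vsock.c excerpt above is the AF_VSOCK address-family core — __vsock_create() allocates the socket, vsock_stream_connect() drives the SS_CONNECTING to SS_CONNECTED transition, and vsock_stream_sendmsg()/vsock_stream_recvmsg() handle the data path. A minimal userspace sketch of the connect path follows; the CID and port values are arbitrary examples, and it assumes <linux/vm_sockets.h> and an AF_VSOCK-aware libc are available.

/* Minimal AF_VSOCK stream client sketch; exercises vsock_stream_connect()
 * above. CID/port are example values. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST;	/* CID 2: the hypervisor host */
	addr.svm_port = 1234;		/* example port */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");	/* may time out: see vsock_connect_timeout() */

	close(fd);
	return 0;
}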
H A D | vmci_transport_notify_qstate.c | 81 vmci_transport_handle_read(struct sock *sk, vmci_transport_handle_read() argument 86 sk->sk_write_space(sk); vmci_transport_handle_read() 90 vmci_transport_handle_wrote(struct sock *sk, vmci_transport_handle_wrote() argument 95 sk->sk_data_ready(sk); vmci_transport_handle_wrote() 98 static void vsock_block_update_write_window(struct sock *sk) vsock_block_update_write_window() argument 100 struct vsock_sock *vsk = vsock_sk(sk); vsock_block_update_write_window() 108 static int vmci_transport_send_read_notification(struct sock *sk) vmci_transport_send_read_notification() argument 115 vsk = vsock_sk(sk); vmci_transport_send_read_notification() 131 err = vmci_transport_send_read(sk); vmci_transport_send_read_notification() 140 sk); vmci_transport_send_read_notification() 148 static void vmci_transport_notify_pkt_socket_init(struct sock *sk) vmci_transport_notify_pkt_socket_init() argument 150 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_socket_init() 167 vmci_transport_notify_pkt_poll_in(struct sock *sk, vmci_transport_notify_pkt_poll_in() argument 170 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_poll_in() 179 if (sk->sk_state == SS_CONNECTED) vmci_transport_notify_pkt_poll_in() 180 vsock_block_update_write_window(sk); vmci_transport_notify_pkt_poll_in() 188 vmci_transport_notify_pkt_poll_out(struct sock *sk, vmci_transport_notify_pkt_poll_out() argument 192 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_poll_out() 210 struct sock *sk, vmci_transport_notify_pkt_recv_init() 214 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_recv_init() 242 struct sock *sk, vmci_transport_notify_pkt_recv_pre_block() 248 vsock_block_update_write_window(sk); vmci_transport_notify_pkt_recv_pre_block() 251 err = vmci_transport_send_read_notification(sk); vmci_transport_notify_pkt_recv_pre_block() 262 struct sock *sk, vmci_transport_notify_pkt_recv_post_dequeue() 273 vsk = vsock_sk(sk); vmci_transport_notify_pkt_recv_post_dequeue() 286 err = vmci_transport_send_read_notification(sk); vmci_transport_notify_pkt_recv_post_dequeue() 293 sk->sk_data_ready(sk); vmci_transport_notify_pkt_recv_post_dequeue() 301 struct sock *sk, vmci_transport_notify_pkt_send_init() 312 struct sock *sk, vmci_transport_notify_pkt_send_post_enqueue() 322 vsk = vsock_sk(sk); vmci_transport_notify_pkt_send_post_enqueue() 332 err = vmci_transport_send_wrote(sk); vmci_transport_notify_pkt_send_post_enqueue() 342 sk); vmci_transport_notify_pkt_send_post_enqueue() 351 struct sock *sk, vmci_transport_notify_pkt_handle_pkt() 361 vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src); vmci_transport_notify_pkt_handle_pkt() 365 vmci_transport_handle_read(sk, pkt, bottom_half, dst, src); vmci_transport_notify_pkt_handle_pkt() 374 static void vmci_transport_notify_pkt_process_request(struct sock *sk) vmci_transport_notify_pkt_process_request() argument 376 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_process_request() 385 static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk) vmci_transport_notify_pkt_process_negotiate() argument 387 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_process_negotiate() 398 struct sock *sk, vmci_transport_notify_pkt_recv_pre_dequeue() 407 struct sock *sk, vmci_transport_notify_pkt_send_pre_block() 415 struct sock *sk, vmci_transport_notify_pkt_send_pre_enqueue() 209 vmci_transport_notify_pkt_recv_init( struct sock *sk, size_t target, struct 
vmci_transport_recv_notify_data *data) vmci_transport_notify_pkt_recv_init() argument 241 vmci_transport_notify_pkt_recv_pre_block( struct sock *sk, size_t target, struct vmci_transport_recv_notify_data *data) vmci_transport_notify_pkt_recv_pre_block() argument 261 vmci_transport_notify_pkt_recv_post_dequeue( struct sock *sk, size_t target, ssize_t copied, bool data_read, struct vmci_transport_recv_notify_data *data) vmci_transport_notify_pkt_recv_post_dequeue() argument 300 vmci_transport_notify_pkt_send_init( struct sock *sk, struct vmci_transport_send_notify_data *data) vmci_transport_notify_pkt_send_init() argument 311 vmci_transport_notify_pkt_send_post_enqueue( struct sock *sk, ssize_t written, struct vmci_transport_send_notify_data *data) vmci_transport_notify_pkt_send_post_enqueue() argument 350 vmci_transport_notify_pkt_handle_pkt( struct sock *sk, struct vmci_transport_packet *pkt, bool bottom_half, struct sockaddr_vm *dst, struct sockaddr_vm *src, bool *pkt_processed) vmci_transport_notify_pkt_handle_pkt() argument 397 vmci_transport_notify_pkt_recv_pre_dequeue( struct sock *sk, size_t target, struct vmci_transport_recv_notify_data *data) vmci_transport_notify_pkt_recv_pre_dequeue() argument 406 vmci_transport_notify_pkt_send_pre_block( struct sock *sk, struct vmci_transport_send_notify_data *data) vmci_transport_notify_pkt_send_pre_block() argument 414 vmci_transport_notify_pkt_send_pre_enqueue( struct sock *sk, struct vmci_transport_send_notify_data *data) vmci_transport_notify_pkt_send_pre_enqueue() argument
|
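Editor's note: vmci_transport_notify_qstate.c is the queue-state variant of the VMCI notification protocol — rather than signalling the peer on every dequeue, vmci_transport_send_read_notification() above fires only once enough data has been consumed. Below is a toy userspace model of that throttling idea; QUEUE_SIZE, NOTIFY_WINDOW and the helper names are invented for illustration, and this is not kernel code.

/* Toy model of read-notification throttling: the consumer signals READ
 * to the producer only when free space crosses a notify window, not on
 * every dequeue. */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SIZE 64
#define NOTIFY_WINDOW 16	/* assumed threshold, cf. write_notify_window */

static unsigned used = QUEUE_SIZE;	/* start with a full queue */
static bool peer_waiting = true;	/* producer blocked on free space */

/* Returns true when a READ notification should be sent to the peer. */
static bool consume(unsigned n)
{
	used -= n;
	if (peer_waiting && QUEUE_SIZE - used >= NOTIFY_WINDOW) {
		peer_waiting = false;	/* one-shot until re-armed by peer */
		return true;
	}
	return false;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("consume 8 -> notify=%d used=%u\n", consume(8), used);
	return 0;
}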
H A D | vmci_transport_notify.c | 113 vmci_transport_handle_waiting_read(struct sock *sk, vmci_transport_handle_waiting_read() argument 122 vsk = vsock_sk(sk); vmci_transport_handle_waiting_read() 134 sent = vmci_transport_send_wrote(sk) > 0; vmci_transport_handle_waiting_read() 143 vmci_transport_handle_waiting_write(struct sock *sk, vmci_transport_handle_waiting_write() argument 152 vsk = vsock_sk(sk); vmci_transport_handle_waiting_write() 164 sent = vmci_transport_send_read(sk) > 0; vmci_transport_handle_waiting_write() 173 vmci_transport_handle_read(struct sock *sk, vmci_transport_handle_read() argument 181 vsk = vsock_sk(sk); vmci_transport_handle_read() 185 sk->sk_write_space(sk); vmci_transport_handle_read() 188 static bool send_waiting_read(struct sock *sk, u64 room_needed) send_waiting_read() argument 198 vsk = vsock_sk(sk); send_waiting_read() 220 ret = vmci_transport_send_waiting_read(sk, &waiting_info) > 0; send_waiting_read() 230 static bool send_waiting_write(struct sock *sk, u64 room_needed) send_waiting_write() argument 240 vsk = vsock_sk(sk); send_waiting_write() 257 ret = vmci_transport_send_waiting_write(sk, &waiting_info) > 0; send_waiting_write() 267 static int vmci_transport_send_read_notification(struct sock *sk) vmci_transport_send_read_notification() argument 274 vsk = vsock_sk(sk); vmci_transport_send_read_notification() 290 err = vmci_transport_send_read(sk); vmci_transport_send_read_notification() 298 pr_err("%p unable to send read notify to peer\n", sk); vmci_transport_send_read_notification() 309 vmci_transport_handle_wrote(struct sock *sk, vmci_transport_handle_wrote() argument 315 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_handle_wrote() 318 sk->sk_data_ready(sk); vmci_transport_handle_wrote() 321 static void vmci_transport_notify_pkt_socket_init(struct sock *sk) vmci_transport_notify_pkt_socket_init() argument 323 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_socket_init() 346 vmci_transport_notify_pkt_poll_in(struct sock *sk, vmci_transport_notify_pkt_poll_in() argument 349 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_poll_in() 358 if (sk->sk_state == SS_CONNECTED) { vmci_transport_notify_pkt_poll_in() 359 if (!send_waiting_read(sk, 1)) vmci_transport_notify_pkt_poll_in() 370 vmci_transport_notify_pkt_poll_out(struct sock *sk, vmci_transport_notify_pkt_poll_out() argument 374 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_poll_out() 389 if (!send_waiting_write(sk, 1)) vmci_transport_notify_pkt_poll_out() 400 struct sock *sk, vmci_transport_notify_pkt_recv_init() 404 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_recv_init() 436 struct sock *sk, vmci_transport_notify_pkt_recv_pre_block() 443 if (!send_waiting_read(sk, target)) { vmci_transport_notify_pkt_recv_pre_block() 449 err = vmci_transport_send_read_notification(sk); vmci_transport_notify_pkt_recv_pre_block() 462 struct sock *sk, vmci_transport_notify_pkt_recv_pre_dequeue() 466 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_recv_pre_dequeue() 482 struct sock *sk, vmci_transport_notify_pkt_recv_post_dequeue() 491 vsk = vsock_sk(sk); vmci_transport_notify_pkt_recv_post_dequeue() 505 err = vmci_transport_send_read_notification(sk); vmci_transport_notify_pkt_recv_post_dequeue() 515 struct sock *sk, vmci_transport_notify_pkt_send_init() 528 struct sock *sk, vmci_transport_notify_pkt_send_pre_block() 532 if (!send_waiting_write(sk, 1)) vmci_transport_notify_pkt_send_pre_block() 540 struct sock *sk, 
vmci_transport_notify_pkt_send_pre_enqueue() 543 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_send_pre_enqueue() 556 struct sock *sk, vmci_transport_notify_pkt_send_post_enqueue() 565 vsk = vsock_sk(sk); vmci_transport_notify_pkt_send_post_enqueue() 586 err = vmci_transport_send_wrote(sk); vmci_transport_notify_pkt_send_post_enqueue() 594 pr_err("%p unable to send wrote notify to peer\n", sk); vmci_transport_notify_pkt_send_post_enqueue() 607 struct sock *sk, vmci_transport_notify_pkt_handle_pkt() 617 vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src); vmci_transport_notify_pkt_handle_pkt() 621 vmci_transport_handle_read(sk, pkt, bottom_half, dst, src); vmci_transport_notify_pkt_handle_pkt() 625 vmci_transport_handle_waiting_write(sk, pkt, bottom_half, vmci_transport_notify_pkt_handle_pkt() 631 vmci_transport_handle_waiting_read(sk, pkt, bottom_half, vmci_transport_notify_pkt_handle_pkt() 641 static void vmci_transport_notify_pkt_process_request(struct sock *sk) vmci_transport_notify_pkt_process_request() argument 643 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_process_request() 652 static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk) vmci_transport_notify_pkt_process_negotiate() argument 654 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_notify_pkt_process_negotiate() 399 vmci_transport_notify_pkt_recv_init( struct sock *sk, size_t target, struct vmci_transport_recv_notify_data *data) vmci_transport_notify_pkt_recv_init() argument 435 vmci_transport_notify_pkt_recv_pre_block( struct sock *sk, size_t target, struct vmci_transport_recv_notify_data *data) vmci_transport_notify_pkt_recv_pre_block() argument 461 vmci_transport_notify_pkt_recv_pre_dequeue( struct sock *sk, size_t target, struct vmci_transport_recv_notify_data *data) vmci_transport_notify_pkt_recv_pre_dequeue() argument 481 vmci_transport_notify_pkt_recv_post_dequeue( struct sock *sk, size_t target, ssize_t copied, bool data_read, struct vmci_transport_recv_notify_data *data) vmci_transport_notify_pkt_recv_post_dequeue() argument 514 vmci_transport_notify_pkt_send_init( struct sock *sk, struct vmci_transport_send_notify_data *data) vmci_transport_notify_pkt_send_init() argument 527 vmci_transport_notify_pkt_send_pre_block( struct sock *sk, struct vmci_transport_send_notify_data *data) vmci_transport_notify_pkt_send_pre_block() argument 539 vmci_transport_notify_pkt_send_pre_enqueue( struct sock *sk, struct vmci_transport_send_notify_data *data) vmci_transport_notify_pkt_send_pre_enqueue() argument 555 vmci_transport_notify_pkt_send_post_enqueue( struct sock *sk, ssize_t written, struct vmci_transport_send_notify_data *data) vmci_transport_notify_pkt_send_post_enqueue() argument 606 vmci_transport_notify_pkt_handle_pkt( struct sock *sk, struct vmci_transport_packet *pkt, bool bottom_half, struct sockaddr_vm *dst, struct sockaddr_vm *src, bool *pkt_processed) vmci_transport_notify_pkt_handle_pkt() argument
|
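Editor's note: vmci_transport_notify.c is the older waiting-offset protocol — each side advertises, via WAITING_READ/WAITING_WRITE packets, the queue offset it is blocked on, and the peer replies with READ/WROTE once its own offset passes that mark (see send_waiting_read() and vmci_transport_handle_waiting_read() above). The sketch below models only that bookkeeping; the struct layout and helper names are invented for illustration.

/* Toy model of the waiting-offset handshake: the reader announces the
 * produce offset it is waiting for; the writer sends WROTE only once
 * its produce offset reaches it. Not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct waiting_info {
	bool valid;
	uint64_t offset;	/* cf. the on-wire waiting info */
};

static struct waiting_info peer_waiting_read;	/* set on WAITING_READ pkt */
static uint64_t produce_offset;

/* Writer side: after enqueueing n bytes, decide whether to notify. */
static bool produced(uint64_t n)
{
	produce_offset += n;
	if (peer_waiting_read.valid &&
	    produce_offset >= peer_waiting_read.offset) {
		peer_waiting_read.valid = false;	/* re-armed by peer */
		return true;	/* cf. vmci_transport_send_wrote() */
	}
	return false;
}

int main(void)
{
	peer_waiting_read = (struct waiting_info){ .valid = true, .offset = 100 };
	printf("wrote 64 -> notify=%d\n", produced(64));
	printf("wrote 64 -> notify=%d\n", produced(64));
	return 0;
}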
H A D | vmci_transport.c | 50 static int vmci_transport_recv_listen(struct sock *sk, 53 struct sock *sk, 57 struct sock *sk, 60 struct sock *sk, 63 struct sock *sk, 65 static int vmci_transport_recv_connected(struct sock *sk, 69 static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto, 74 struct sock *sk; member in struct:vmci_transport_recv_pkt_info 276 vmci_transport_send_control_pkt(struct sock *sk, vmci_transport_send_control_pkt() argument 288 vsk = vsock_sk(sk); vmci_transport_send_control_pkt() 321 static int vmci_transport_send_reset(struct sock *sk, vmci_transport_send_reset() argument 326 return vmci_transport_send_control_pkt(sk, vmci_transport_send_reset() 332 static int vmci_transport_send_negotiate(struct sock *sk, size_t size) vmci_transport_send_negotiate() argument 335 sk, vmci_transport_send_negotiate() 342 static int vmci_transport_send_negotiate2(struct sock *sk, size_t size, vmci_transport_send_negotiate2() argument 346 sk, vmci_transport_send_negotiate2() 352 static int vmci_transport_send_qp_offer(struct sock *sk, vmci_transport_send_qp_offer() argument 356 sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0, vmci_transport_send_qp_offer() 361 static int vmci_transport_send_attach(struct sock *sk, vmci_transport_send_attach() argument 365 sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH, vmci_transport_send_attach() 406 int vmci_transport_send_wrote(struct sock *sk) vmci_transport_send_wrote() argument 409 sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0, vmci_transport_send_wrote() 414 int vmci_transport_send_read(struct sock *sk) vmci_transport_send_read() argument 417 sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0, vmci_transport_send_read() 422 int vmci_transport_send_waiting_write(struct sock *sk, vmci_transport_send_waiting_write() argument 426 sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE, vmci_transport_send_waiting_write() 431 int vmci_transport_send_waiting_read(struct sock *sk, vmci_transport_send_waiting_read() argument 435 sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ, vmci_transport_send_waiting_read() 443 &vsk->sk, vmci_transport_shutdown() 450 static int vmci_transport_send_conn_request(struct sock *sk, size_t size) vmci_transport_send_conn_request() argument 452 return vmci_transport_send_control_pkt(sk, vmci_transport_send_conn_request() 459 static int vmci_transport_send_conn_request2(struct sock *sk, size_t size, vmci_transport_send_conn_request2() argument 463 sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2, vmci_transport_send_conn_request2() 604 struct sock *sk; vmci_transport_recv_dgram_cb() local 609 sk = (struct sock *)data; vmci_transport_recv_dgram_cb() 620 vsk = vsock_sk(sk); vmci_transport_recv_dgram_cb() 632 sock_hold(sk); vmci_transport_recv_dgram_cb() 635 sk_receive_skb(sk, skb, 0); vmci_transport_recv_dgram_cb() 664 struct sock *sk; vmci_transport_recv_stream_cb() local 672 sk = NULL; vmci_transport_recv_stream_cb() 697 sk = vsock_find_connected_socket(&src, &dst); vmci_transport_recv_stream_cb() 698 if (!sk) { vmci_transport_recv_stream_cb() 699 sk = vsock_find_bound_socket(&dst); vmci_transport_recv_stream_cb() 700 if (!sk) { vmci_transport_recv_stream_cb() 737 vsk = vsock_sk(sk); vmci_transport_recv_stream_cb() 748 bh_lock_sock(sk); vmci_transport_recv_stream_cb() 750 if (!sock_owned_by_user(sk)) { vmci_transport_recv_stream_cb() 754 if (sk->sk_state == SS_CONNECTED) vmci_transport_recv_stream_cb() 756 sk, pkt, true, &dst, &src, vmci_transport_recv_stream_cb() 760 bh_unlock_sock(sk); vmci_transport_recv_stream_cb() 774 recv_pkt_info->sk = sk; 
vmci_transport_recv_stream_cb() 779 /* Clear sk so that the reference count incremented by one of vmci_transport_recv_stream_cb() 784 sk = NULL; vmci_transport_recv_stream_cb() 788 if (sk) vmci_transport_recv_stream_cb() 789 sock_put(sk); vmci_transport_recv_stream_cb() 798 struct sock *sk = client_data; vmci_transport_peer_attach_cb() local 804 vsk = vsock_sk(sk); vmci_transport_peer_attach_cb() 813 bh_lock_sock(sk); vmci_transport_peer_attach_cb() 828 bh_unlock_sock(sk); vmci_transport_peer_attach_cb() 832 static void vmci_transport_handle_detach(struct sock *sk) vmci_transport_handle_detach() argument 836 vsk = vsock_sk(sk); vmci_transport_handle_detach() 838 sock_set_flag(sk, SOCK_DONE); vmci_transport_handle_detach() 850 if (sk->sk_state == SS_CONNECTING) { vmci_transport_handle_detach() 859 sk->sk_state = SS_UNCONNECTED; vmci_transport_handle_detach() 860 sk->sk_err = ECONNRESET; vmci_transport_handle_detach() 861 sk->sk_error_report(sk); vmci_transport_handle_detach() 864 sk->sk_state = SS_UNCONNECTED; vmci_transport_handle_detach() 866 sk->sk_state_change(sk); vmci_transport_handle_detach() 874 struct sock *sk = client_data; vmci_transport_peer_detach_cb() local 879 vsk = vsock_sk(sk); vmci_transport_peer_detach_cb() 885 bh_lock_sock(sk); vmci_transport_peer_detach_cb() 892 vmci_transport_handle_detach(sk); vmci_transport_peer_detach_cb() 894 bh_unlock_sock(sk); vmci_transport_peer_detach_cb() 909 struct sock *sk; vmci_transport_recv_pkt_work() local 913 sk = recv_pkt_info->sk; vmci_transport_recv_pkt_work() 916 lock_sock(sk); vmci_transport_recv_pkt_work() 919 vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; vmci_transport_recv_pkt_work() 921 switch (sk->sk_state) { vmci_transport_recv_pkt_work() 923 vmci_transport_recv_listen(sk, pkt); vmci_transport_recv_pkt_work() 930 vmci_transport_recv_connecting_client(sk, pkt); vmci_transport_recv_pkt_work() 933 vmci_transport_recv_connected(sk, pkt); vmci_transport_recv_pkt_work() 942 vmci_transport_send_reset(sk, pkt); vmci_transport_recv_pkt_work() 946 release_sock(sk); vmci_transport_recv_pkt_work() 951 sock_put(sk); vmci_transport_recv_pkt_work() 954 static int vmci_transport_recv_listen(struct sock *sk, vmci_transport_recv_listen() argument 973 pending = vmci_transport_get_pending(sk, pkt); vmci_transport_recv_listen() 982 err = vmci_transport_recv_connecting_server(sk, vmci_transport_recv_listen() 992 vsock_remove_pending(sk, pending); vmci_transport_recv_listen() 1019 if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) { vmci_transport_recv_listen() 1024 pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL, vmci_transport_recv_listen() 1025 sk->sk_type); vmci_transport_recv_listen() 1027 vmci_transport_send_reset(sk, pkt); vmci_transport_recv_listen() 1102 vmci_transport_send_reset(sk, pkt); vmci_transport_recv_listen() 1108 vsock_add_pending(sk, pending); vmci_transport_recv_listen() 1109 sk->sk_ack_backlog++; vmci_transport_recv_listen() 1127 vpending->listener = sk; vmci_transport_recv_listen() 1128 sock_hold(sk); vmci_transport_recv_listen() 1285 vmci_transport_recv_connecting_client(struct sock *sk, vmci_transport_recv_connecting_client() argument 1292 vsk = vsock_sk(sk); vmci_transport_recv_connecting_client() 1309 sk->sk_state = SS_CONNECTED; vmci_transport_recv_connecting_client() 1310 sk->sk_socket->state = SS_CONNECTED; vmci_transport_recv_connecting_client() 1312 sk->sk_state_change(sk); vmci_transport_recv_connecting_client() 1332 err = vmci_transport_recv_connecting_client_negotiate(sk, pkt); 
vmci_transport_recv_connecting_client() 1340 err = vmci_transport_recv_connecting_client_invalid(sk, pkt); vmci_transport_recv_connecting_client() 1376 vmci_transport_send_reset(sk, pkt); vmci_transport_recv_connecting_client() 1378 sk->sk_state = SS_UNCONNECTED; vmci_transport_recv_connecting_client() 1379 sk->sk_err = skerr; vmci_transport_recv_connecting_client() 1380 sk->sk_error_report(sk); vmci_transport_recv_connecting_client() 1385 struct sock *sk, vmci_transport_recv_connecting_client_negotiate() 1400 vsk = vsock_sk(sk); vmci_transport_recv_connecting_client_negotiate() 1442 if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) { vmci_transport_recv_connecting_client_negotiate() 1455 sk, &attach_sub_id); vmci_transport_recv_connecting_client_negotiate() 1463 sk, &detach_sub_id); vmci_transport_recv_connecting_client_negotiate() 1487 err = vmci_transport_send_qp_offer(sk, handle); vmci_transport_recv_connecting_client_negotiate() 1502 vmci_trans(vsk)->notify_ops->process_negotiate(sk); vmci_transport_recv_connecting_client_negotiate() 1520 vmci_transport_recv_connecting_client_invalid(struct sock *sk, vmci_transport_recv_connecting_client_invalid() argument 1524 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_recv_connecting_client_invalid() 1531 sk, vmci_trans(vsk)->queue_pair_size); vmci_transport_recv_connecting_client_invalid() 1542 static int vmci_transport_recv_connected(struct sock *sk, vmci_transport_recv_connected() argument 1559 vsk = vsock_sk(sk); vmci_transport_recv_connected() 1562 sk->sk_state_change(sk); vmci_transport_recv_connected() 1567 vsk = vsock_sk(sk); vmci_transport_recv_connected() 1577 sock_set_flag(sk, SOCK_DONE); vmci_transport_recv_connected() 1580 sk->sk_state = SS_DISCONNECTING; vmci_transport_recv_connected() 1582 sk->sk_state_change(sk); vmci_transport_recv_connected() 1586 vsk = vsock_sk(sk); vmci_transport_recv_connected() 1588 sk, pkt, false, NULL, NULL, vmci_transport_recv_connected() 1687 &vsk->sk, vmci_transport_dgram_bind() 1750 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); vmci_transport_dgram_dequeue() 1788 skb_free_datagram(&vsk->sk, skb); vmci_transport_dgram_dequeue() 1808 struct sock *sk = &vsk->sk; vmci_transport_connect() local 1813 sk, vmci_trans(vsk)->queue_pair_size); vmci_transport_connect() 1815 sk->sk_state = SS_UNCONNECTED; vmci_transport_connect() 1822 sk, vmci_trans(vsk)->queue_pair_size, vmci_transport_connect() 1825 sk->sk_state = SS_UNCONNECTED; vmci_transport_connect() 1921 &vsk->sk, target, data_ready_now); vmci_transport_notify_poll_in() 1930 &vsk->sk, target, space_available_now); vmci_transport_notify_poll_out() 1939 &vsk->sk, target, vmci_transport_notify_recv_init() 1949 &vsk->sk, target, vmci_transport_notify_recv_pre_block() 1959 &vsk->sk, target, vmci_transport_notify_recv_pre_dequeue() 1971 &vsk->sk, target, copied, data_read, vmci_transport_notify_recv_post_dequeue() 1980 &vsk->sk, vmci_transport_notify_send_init() 1989 &vsk->sk, vmci_transport_notify_send_pre_block() 1998 &vsk->sk, vmci_transport_notify_send_pre_enqueue() 2008 &vsk->sk, written, vmci_transport_notify_send_post_enqueue() 2027 static bool vmci_transport_proto_to_notify_struct(struct sock *sk, vmci_transport_proto_to_notify_struct() argument 2031 struct vsock_sock *vsk = vsock_sk(sk); vmci_transport_proto_to_notify_struct() 2053 vmci_trans(vsk)->notify_ops->socket_init(sk); vmci_transport_proto_to_notify_struct() 1384 vmci_transport_recv_connecting_client_negotiate( struct sock *sk, struct vmci_transport_packet 
*pkt) vmci_transport_recv_connecting_client_negotiate() argument
|
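Editor's note: vmci_transport.c carries the control-packet state machine — a connecting client sends REQUEST/REQUEST2, the listener negotiates a queue-pair size, offers the queue pair, and attaches (vmci_transport_recv_listen() and vmci_transport_recv_connecting_client_negotiate() above). From userspace this is all hidden behind listen()/accept(); a minimal listener sketch follows, with an arbitrary example port.

/* Minimal AF_VSOCK listener sketch; the kernel-side handshake it
 * triggers is handled by vmci_transport_recv_listen() above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr, peer;
	socklen_t len = sizeof(peer);
	int c, fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_ANY;	/* accept on any local CID */
	addr.svm_port = 1234;		/* example port */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 1) < 0) {
		perror("bind/listen");
		return 1;
	}

	c = accept(fd, (struct sockaddr *)&peer, &len);	/* pending socket was
							 * created in recv_listen() */
	if (c >= 0) {
		printf("peer cid %u port %u\n", peer.svm_cid, peer.svm_port);
		close(c);
	}
	close(fd);
	return 0;
}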
/linux-4.1.27/net/netlink/ |
H A D | diag.c | 25 static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb) sk_diag_put_rings_cfg() argument 27 struct netlink_sock *nlk = nlk_sk(sk); sk_diag_put_rings_cfg() 40 static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb) sk_diag_put_rings_cfg() argument 46 static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb) sk_diag_dump_groups() argument 48 struct netlink_sock *nlk = nlk_sk(sk); sk_diag_dump_groups() 57 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, sk_diag_fill() argument 63 struct netlink_sock *nlk = nlk_sk(sk); sk_diag_fill() 72 rep->ndiag_type = sk->sk_type; sk_diag_fill() 73 rep->ndiag_protocol = sk->sk_protocol; sk_diag_fill() 74 rep->ndiag_state = sk->sk_state; sk_diag_fill() 80 sock_diag_save_cookie(sk, rep->ndiag_cookie); sk_diag_fill() 83 sk_diag_dump_groups(sk, skb)) sk_diag_fill() 87 sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO)) sk_diag_fill() 91 sk_diag_put_rings_cfg(sk, skb)) sk_diag_fill() 108 struct net *net = sock_net(skb->sk); __netlink_diag_dump() 111 struct sock *sk; __netlink_diag_dump() local 120 sk = (struct sock *)nlsk; rht_for_each_entry_rcu() 122 if (!net_eq(sock_net(sk), net)) rht_for_each_entry_rcu() 129 if (sk_diag_fill(sk, skb, req, rht_for_each_entry_rcu() 133 sock_i_ino(sk)) < 0) { rht_for_each_entry_rcu() 142 sk_for_each_bound(sk, &tbl->mc_list) { 143 if (sk_hashed(sk)) 145 if (!net_eq(sock_net(sk), net)) 152 if (sk_diag_fill(sk, skb, req, 156 sock_i_ino(sk)) < 0) { 206 struct net *net = sock_net(skb->sk); netlink_diag_handler_dump()
|
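Editor's note: netlink/diag.c answers NETLINK_SOCK_DIAG dump requests about netlink sockets themselves; sk_diag_fill() above emits one record per socket. A sketch of the matching userspace query follows, assuming the netlink_diag_req layout from this kernel's <linux/netlink_diag.h>; error handling and reply parsing are trimmed to keep it a sketch.

/* Sketch of a NETLINK_SOCK_DIAG dump request that lands in
 * __netlink_diag_dump() above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/netlink_diag.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	struct {
		struct nlmsghdr nlh;
		struct netlink_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family   = AF_NETLINK,
			.sdiag_protocol = NDIAG_PROTO_ALL,
			.ndiag_show     = NDIAG_SHOW_MEMINFO | NDIAG_SHOW_GROUPS,
		},
	};
	char buf[8192];
	ssize_t n;

	if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0)
		return 1;
	n = recv(fd, buf, sizeof(buf), 0);	/* netlink_diag_msg records */
	printf("received %zd bytes of diag data\n", n);
	close(fd);
	return 0;
}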
H A D | af_netlink.c | 16 * use nlk_sk, as sk->protinfo is on a diet 8) 87 static inline int netlink_is_kernel(struct sock *sk) netlink_is_kernel() argument 89 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; netlink_is_kernel() 97 static int netlink_dump(struct sock *sk); 197 struct sock *sk = skb->sk; netlink_filter_tap() local 202 switch (sk->sk_protocol) { netlink_filter_tap() 221 struct sock *sk = skb->sk; __netlink_deliver_tap_skb() local 232 nskb->protocol = htons((u16) sk->sk_protocol); __netlink_deliver_tap_skb() 233 nskb->pkt_type = netlink_is_kernel(sk) ? __netlink_deliver_tap_skb() 277 static void netlink_overrun(struct sock *sk) netlink_overrun() argument 279 struct netlink_sock *nlk = nlk_sk(sk); netlink_overrun() 282 if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) { netlink_overrun() 283 sk->sk_err = ENOBUFS; netlink_overrun() 284 sk->sk_error_report(sk); netlink_overrun() 287 atomic_inc(&sk->sk_drops); netlink_overrun() 290 static void netlink_rcv_wake(struct sock *sk) netlink_rcv_wake() argument 292 struct netlink_sock *nlk = nlk_sk(sk); netlink_rcv_wake() 294 if (skb_queue_empty(&sk->sk_receive_queue)) netlink_rcv_wake() 301 static bool netlink_rx_is_mmaped(struct sock *sk) netlink_rx_is_mmaped() argument 303 return nlk_sk(sk)->rx_ring.pg_vec != NULL; netlink_rx_is_mmaped() 306 static bool netlink_tx_is_mmaped(struct sock *sk) netlink_tx_is_mmaped() argument 308 return nlk_sk(sk)->tx_ring.pg_vec != NULL; netlink_tx_is_mmaped() 377 __netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, __netlink_set_ring() argument 380 struct netlink_sock *nlk = nlk_sk(sk); __netlink_set_ring() 384 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; __netlink_set_ring() 407 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, netlink_set_ring() argument 410 struct netlink_sock *nlk = nlk_sk(sk); netlink_set_ring() 454 __netlink_set_ring(sk, req, tx_ring, pg_vec, order); netlink_set_ring() 471 struct sock *sk = sock->sk; netlink_mm_open() local 473 if (sk) netlink_mm_open() 474 atomic_inc(&nlk_sk(sk)->mapped); netlink_mm_open() 481 struct sock *sk = sock->sk; netlink_mm_close() local 483 if (sk) netlink_mm_close() 484 atomic_dec(&nlk_sk(sk)->mapped); netlink_mm_close() 495 struct sock *sk = sock->sk; netlink_mmap() local 496 struct netlink_sock *nlk = nlk_sk(sk); netlink_mmap() 663 struct sock *sk = sock->sk; netlink_poll() local 664 struct netlink_sock *nlk = nlk_sk(sk); netlink_poll() 674 err = netlink_dump(sk); netlink_poll() 676 sk->sk_err = -err; netlink_poll() 677 sk->sk_error_report(sk); netlink_poll() 681 netlink_rcv_wake(sk); netlink_poll() 686 spin_lock_bh(&sk->sk_receive_queue.lock); netlink_poll() 692 spin_unlock_bh(&sk->sk_receive_queue.lock); netlink_poll() 694 spin_lock_bh(&sk->sk_write_queue.lock); netlink_poll() 699 spin_unlock_bh(&sk->sk_write_queue.lock); netlink_poll() 709 static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk, netlink_ring_setup_skb() argument 727 NETLINK_CB(skb).sk = sk; netlink_ring_setup_skb() 730 static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg, netlink_mmap_sendmsg() argument 734 struct netlink_sock *nlk = nlk_sk(sk); netlink_mmap_sendmsg() 780 err = security_netlink_send(sk, skb); netlink_mmap_sendmsg() 788 netlink_broadcast(sk, skb, dst_portid, dst_group, netlink_mmap_sendmsg() 791 err = netlink_unicast(sk, skb, dst_portid, netlink_mmap_sendmsg() 808 static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb) 
netlink_queue_mmaped_skb() argument 816 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); netlink_queue_mmaped_skb() 817 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); netlink_queue_mmaped_skb() 825 static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) netlink_ring_set_copied() argument 827 struct netlink_sock *nlk = nlk_sk(sk); netlink_ring_set_copied() 831 spin_lock_bh(&sk->sk_receive_queue.lock); netlink_ring_set_copied() 834 spin_unlock_bh(&sk->sk_receive_queue.lock); netlink_ring_set_copied() 836 netlink_overrun(sk); netlink_ring_set_copied() 840 __skb_queue_tail(&sk->sk_receive_queue, skb); netlink_ring_set_copied() 841 spin_unlock_bh(&sk->sk_receive_queue.lock); netlink_ring_set_copied() 846 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); netlink_ring_set_copied() 847 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); netlink_ring_set_copied() 852 #define netlink_rx_is_mmaped(sk) false 853 #define netlink_tx_is_mmaped(sk) false 856 #define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm) 0 864 struct sock *sk; netlink_skb_destructor() local 873 sk = NETLINK_CB(skb).sk; netlink_skb_destructor() 877 ring = &nlk_sk(sk)->tx_ring; netlink_skb_destructor() 883 ring = &nlk_sk(sk)->rx_ring; netlink_skb_destructor() 888 sock_put(sk); netlink_skb_destructor() 900 if (skb->sk != NULL) netlink_skb_destructor() 904 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) netlink_skb_set_owner_r() argument 906 WARN_ON(skb->sk != NULL); netlink_skb_set_owner_r() 907 skb->sk = sk; netlink_skb_set_owner_r() 909 atomic_add(skb->truesize, &sk->sk_rmem_alloc); netlink_skb_set_owner_r() 910 sk_mem_charge(sk, skb->truesize); netlink_skb_set_owner_r() 913 static void netlink_sock_destruct(struct sock *sk) netlink_sock_destruct() argument 915 struct netlink_sock *nlk = nlk_sk(sk); netlink_sock_destruct() 925 skb_queue_purge(&sk->sk_receive_queue); netlink_sock_destruct() 932 __netlink_set_ring(sk, &req, false, NULL, 0); netlink_sock_destruct() 935 __netlink_set_ring(sk, &req, true, NULL, 0); netlink_sock_destruct() 939 if (!sock_flag(sk, SOCK_DEAD)) { netlink_sock_destruct() 940 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); netlink_sock_destruct() 944 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); netlink_sock_destruct() 945 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); netlink_sock_destruct() 946 WARN_ON(nlk_sk(sk)->groups); netlink_sock_destruct() 1021 !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet)); netlink_compare() 1042 static int __netlink_insert(struct netlink_table *table, struct sock *sk) __netlink_insert() argument 1046 netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid); __netlink_insert() 1048 &nlk_sk(sk)->node, __netlink_insert() 1055 struct sock *sk; netlink_lookup() local 1058 sk = __netlink_lookup(table, portid, net); netlink_lookup() 1059 if (sk) netlink_lookup() 1060 sock_hold(sk); netlink_lookup() 1063 return sk; netlink_lookup() 1069 netlink_update_listeners(struct sock *sk) netlink_update_listeners() argument 1071 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; netlink_update_listeners() 1082 sk_for_each_bound(sk, &tbl->mc_list) { netlink_update_listeners() 1083 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) netlink_update_listeners() 1084 mask |= nlk_sk(sk)->groups[i]; netlink_update_listeners() 1092 static int netlink_insert(struct sock *sk, u32 portid) netlink_insert() argument 1094 struct netlink_table *table = &nl_table[sk->sk_protocol]; 
netlink_insert() 1097 lock_sock(sk); netlink_insert() 1099 err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; netlink_insert() 1100 if (nlk_sk(sk)->bound) netlink_insert() 1108 nlk_sk(sk)->portid = portid; netlink_insert() 1109 sock_hold(sk); netlink_insert() 1111 err = __netlink_insert(table, sk); netlink_insert() 1120 sock_put(sk); netlink_insert() 1126 nlk_sk(sk)->bound = portid; netlink_insert() 1129 release_sock(sk); netlink_insert() 1133 static void netlink_remove(struct sock *sk) netlink_remove() argument 1137 table = &nl_table[sk->sk_protocol]; netlink_remove() 1138 if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node, netlink_remove() 1140 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); netlink_remove() 1141 __sock_put(sk); netlink_remove() 1145 if (nlk_sk(sk)->subscriptions) { netlink_remove() 1146 __sk_del_bind_node(sk); netlink_remove() 1147 netlink_update_listeners(sk); netlink_remove() 1149 if (sk->sk_protocol == NETLINK_GENERIC) netlink_remove() 1163 struct sock *sk; __netlink_create() local 1168 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto); __netlink_create() 1169 if (!sk) __netlink_create() 1172 sock_init_data(sock, sk); __netlink_create() 1174 nlk = nlk_sk(sk); __netlink_create() 1186 sk->sk_destruct = netlink_sock_destruct; __netlink_create() 1187 sk->sk_protocol = protocol; __netlink_create() 1238 nlk = nlk_sk(sock->sk); netlink_create() 1254 sock_put(&nlk->sk); deferred_put_nlk_sk() 1259 struct sock *sk = sock->sk; netlink_release() local 1262 if (!sk) netlink_release() 1265 netlink_remove(sk); netlink_release() 1266 sock_orphan(sk); netlink_release() 1267 nlk = nlk_sk(sk); netlink_release() 1282 nlk->netlink_unbind(sock_net(sk), i + 1); netlink_release() 1284 if (sk->sk_protocol == NETLINK_GENERIC && netlink_release() 1288 sock->sk = NULL; netlink_release() 1291 skb_queue_purge(&sk->sk_write_queue); netlink_release() 1295 .net = sock_net(sk), netlink_release() 1296 .protocol = sk->sk_protocol, netlink_release() 1305 if (netlink_is_kernel(sk)) { netlink_release() 1307 BUG_ON(nl_table[sk->sk_protocol].registered == 0); netlink_release() 1308 if (--nl_table[sk->sk_protocol].registered == 0) { netlink_release() 1311 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners); netlink_release() 1312 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL); netlink_release() 1314 nl_table[sk->sk_protocol].module = NULL; netlink_release() 1315 nl_table[sk->sk_protocol].bind = NULL; netlink_release() 1316 nl_table[sk->sk_protocol].unbind = NULL; netlink_release() 1317 nl_table[sk->sk_protocol].flags = 0; netlink_release() 1318 nl_table[sk->sk_protocol].registered = 0; netlink_release() 1327 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); netlink_release() 1335 struct sock *sk = sock->sk; netlink_autobind() local 1336 struct net *net = sock_net(sk); netlink_autobind() 1337 struct netlink_table *table = &nl_table[sk->sk_protocol]; netlink_autobind() 1355 err = netlink_insert(sk, portid); netlink_autobind() 1380 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) && __netlink_ns_capable() 1429 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap); netlink_net_capable() 1435 return (nl_table[sock->sk->sk_protocol].flags & flag) || netlink_allowed() 1436 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN); netlink_allowed() 1440 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) netlink_update_subscriptions() argument 1442 struct netlink_sock *nlk = nlk_sk(sk); netlink_update_subscriptions() 1445 
__sk_del_bind_node(sk); netlink_update_subscriptions() 1447 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); netlink_update_subscriptions() 1451 static int netlink_realloc_groups(struct sock *sk) netlink_realloc_groups() argument 1453 struct netlink_sock *nlk = nlk_sk(sk); netlink_realloc_groups() 1460 groups = nl_table[sk->sk_protocol].groups; netlink_realloc_groups() 1461 if (!nl_table[sk->sk_protocol].registered) { netlink_realloc_groups() 1485 struct sock *sk) netlink_undo_bind() 1487 struct netlink_sock *nlk = nlk_sk(sk); netlink_undo_bind() 1495 nlk->netlink_unbind(sock_net(sk), undo + 1); netlink_undo_bind() 1501 struct sock *sk = sock->sk; netlink_bind() local 1502 struct net *net = sock_net(sk); netlink_bind() 1503 struct netlink_sock *nlk = nlk_sk(sk); netlink_bind() 1519 err = netlink_realloc_groups(sk); netlink_bind() 1542 netlink_undo_bind(group, groups, sk); netlink_bind() 1552 netlink_insert(sk, nladdr->nl_pid) : netlink_bind() 1555 netlink_undo_bind(nlk->ngroups, groups, sk); netlink_bind() 1564 netlink_update_subscriptions(sk, nlk->subscriptions + netlink_bind() 1568 netlink_update_listeners(sk); netlink_bind() 1578 struct sock *sk = sock->sk; netlink_connect() local 1579 struct netlink_sock *nlk = nlk_sk(sk); netlink_connect() 1586 sk->sk_state = NETLINK_UNCONNECTED; netlink_connect() 1605 sk->sk_state = NETLINK_CONNECTED; netlink_connect() 1616 struct sock *sk = sock->sk; netlink_getname() local 1617 struct netlink_sock *nlk = nlk_sk(sk); netlink_getname() 1661 sock = SOCKET_I(inode)->sk; netlink_getsockbyfilp() 1704 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, netlink_attachskb() argument 1709 nlk = nlk_sk(sk); netlink_attachskb() 1711 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || netlink_attachskb() 1717 netlink_overrun(sk); netlink_attachskb() 1718 sock_put(sk); netlink_attachskb() 1726 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || netlink_attachskb() 1728 !sock_flag(sk, SOCK_DEAD)) netlink_attachskb() 1733 sock_put(sk); netlink_attachskb() 1741 netlink_skb_set_owner_r(skb, sk); netlink_attachskb() 1745 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) __netlink_sendskb() argument 1753 netlink_queue_mmaped_skb(sk, skb); __netlink_sendskb() 1754 else if (netlink_rx_is_mmaped(sk)) __netlink_sendskb() 1755 netlink_ring_set_copied(sk, skb); __netlink_sendskb() 1758 skb_queue_tail(&sk->sk_receive_queue, skb); __netlink_sendskb() 1759 sk->sk_data_ready(sk); __netlink_sendskb() 1763 int netlink_sendskb(struct sock *sk, struct sk_buff *skb) netlink_sendskb() argument 1765 int len = __netlink_sendskb(sk, skb); netlink_sendskb() 1767 sock_put(sk); netlink_sendskb() 1771 void netlink_detachskb(struct sock *sk, struct sk_buff *skb) netlink_detachskb() argument 1774 sock_put(sk); netlink_detachskb() 1781 WARN_ON(skb->sk != NULL); netlink_trim() 1803 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, netlink_unicast_kernel() argument 1807 struct netlink_sock *nlk = nlk_sk(sk); netlink_unicast_kernel() 1812 netlink_skb_set_owner_r(skb, sk); netlink_unicast_kernel() 1813 NETLINK_CB(skb).sk = ssk; netlink_unicast_kernel() 1814 netlink_deliver_tap_kernel(sk, ssk, skb); netlink_unicast_kernel() 1820 sock_put(sk); netlink_unicast_kernel() 1827 struct sock *sk; netlink_unicast() local 1835 sk = netlink_getsockbyportid(ssk, portid); netlink_unicast() 1836 if (IS_ERR(sk)) { netlink_unicast() 1838 return PTR_ERR(sk); netlink_unicast() 1840 if (netlink_is_kernel(sk)) netlink_unicast() 1841 return 
netlink_unicast_kernel(sk, skb, ssk); netlink_unicast() 1843 if (sk_filter(sk, skb)) { netlink_unicast() 1846 sock_put(sk); netlink_unicast() 1850 err = netlink_attachskb(sk, skb, &timeo, ssk); netlink_unicast() 1856 return netlink_sendskb(sk, skb); netlink_unicast() 1864 struct sock *sk = NULL; netlink_alloc_skb() local 1870 sk = netlink_getsockbyportid(ssk, dst_portid); netlink_alloc_skb() 1871 if (IS_ERR(sk)) netlink_alloc_skb() 1874 ring = &nlk_sk(sk)->rx_ring; netlink_alloc_skb() 1886 spin_lock_bh(&sk->sk_receive_queue.lock); netlink_alloc_skb() 1900 netlink_ring_setup_skb(skb, sk, ring, hdr); netlink_alloc_skb() 1905 spin_unlock_bh(&sk->sk_receive_queue.lock); netlink_alloc_skb() 1910 spin_unlock_bh(&sk->sk_receive_queue.lock); netlink_alloc_skb() 1911 netlink_overrun(sk); netlink_alloc_skb() 1913 sock_put(sk); netlink_alloc_skb() 1918 spin_unlock_bh(&sk->sk_receive_queue.lock); netlink_alloc_skb() 1920 sock_put(sk); netlink_alloc_skb() 1927 int netlink_has_listeners(struct sock *sk, unsigned int group) netlink_has_listeners() argument 1932 BUG_ON(!netlink_is_kernel(sk)); netlink_has_listeners() 1935 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); netlink_has_listeners() 1937 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups) netlink_has_listeners() 1946 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) netlink_broadcast_deliver() argument 1948 struct netlink_sock *nlk = nlk_sk(sk); netlink_broadcast_deliver() 1950 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && netlink_broadcast_deliver() 1952 netlink_skb_set_owner_r(skb, sk); netlink_broadcast_deliver() 1953 __netlink_sendskb(sk, skb); netlink_broadcast_deliver() 1954 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); netlink_broadcast_deliver() 1974 static void do_one_broadcast(struct sock *sk, do_one_broadcast() argument 1977 struct netlink_sock *nlk = nlk_sk(sk); do_one_broadcast() 1980 if (p->exclude_sk == sk) do_one_broadcast() 1987 if (!net_eq(sock_net(sk), p->net)) do_one_broadcast() 1991 netlink_overrun(sk); do_one_broadcast() 1995 sock_hold(sk); do_one_broadcast() 2009 netlink_overrun(sk); do_one_broadcast() 2014 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { do_one_broadcast() 2017 } else if (sk_filter(sk, p->skb2)) { do_one_broadcast() 2020 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) { do_one_broadcast() 2021 netlink_overrun(sk); do_one_broadcast() 2029 sock_put(sk); do_one_broadcast() 2039 struct sock *sk; netlink_broadcast_filtered() local 2061 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) netlink_broadcast_filtered() 2062 do_one_broadcast(sk, &info); netlink_broadcast_filtered() 2098 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) do_one_set_err() argument 2100 struct netlink_sock *nlk = nlk_sk(sk); do_one_set_err() 2103 if (sk == p->exclude_sk) do_one_set_err() 2106 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk))) do_one_set_err() 2118 sk->sk_err = p->code; do_one_set_err() 2119 sk->sk_error_report(sk); do_one_set_err() 2137 struct sock *sk; netlink_set_err() local 2143 /* sk->sk_err wants a positive error value */ netlink_set_err() 2148 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) netlink_set_err() 2149 ret += do_one_set_err(sk, &info); netlink_set_err() 2169 netlink_update_subscriptions(&nlk->sk, subscriptions); netlink_update_socket_mc() 2170 netlink_update_listeners(&nlk->sk); netlink_update_socket_mc() 2176 struct sock *sk = sock->sk; 
netlink_setsockopt() local 2177 struct netlink_sock *nlk = nlk_sk(sk); netlink_setsockopt() 2201 err = netlink_realloc_groups(sk); netlink_setsockopt() 2207 err = nlk->netlink_bind(sock_net(sk), val); netlink_setsockopt() 2216 nlk->netlink_unbind(sock_net(sk), val); netlink_setsockopt() 2252 err = netlink_set_ring(sk, &req, netlink_setsockopt() 2266 struct sock *sk = sock->sk; netlink_getsockopt() local 2267 struct netlink_sock *nlk = nlk_sk(sk); netlink_getsockopt() 2325 struct sock *sk = sock->sk; netlink_sendmsg() local 2326 struct netlink_sock *nlk = nlk_sk(sk); netlink_sendmsg() 2370 if (netlink_tx_is_mmaped(sk) && netlink_sendmsg() 2374 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, netlink_sendmsg() 2380 if (len > sk->sk_sndbuf - 32) netlink_sendmsg() 2398 err = security_netlink_send(sk, skb); netlink_sendmsg() 2406 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL); netlink_sendmsg() 2408 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT); netlink_sendmsg() 2419 struct sock *sk = sock->sk; netlink_recvmsg() local 2420 struct netlink_sock *nlk = nlk_sk(sk); netlink_recvmsg() 2431 skb = skb_recv_datagram(sk, flags, noblock, &err); netlink_recvmsg() 2485 skb_free_datagram(sk, skb); netlink_recvmsg() 2488 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { netlink_recvmsg() 2489 ret = netlink_dump(sk); netlink_recvmsg() 2491 sk->sk_err = -ret; netlink_recvmsg() 2492 sk->sk_error_report(sk); netlink_recvmsg() 2498 netlink_rcv_wake(sk); netlink_recvmsg() 2502 static void netlink_data_ready(struct sock *sk) netlink_data_ready() argument 2518 struct sock *sk; __netlink_kernel_create() local 2533 * We have to just have a reference on the net from sk, but don't __netlink_kernel_create() 2541 sk = sock->sk; __netlink_kernel_create() 2542 sk_change_net(sk, net); __netlink_kernel_create() 2553 sk->sk_data_ready = netlink_data_ready; __netlink_kernel_create() 2555 nlk_sk(sk)->netlink_rcv = cfg->input; __netlink_kernel_create() 2557 if (netlink_insert(sk, 0)) __netlink_kernel_create() 2560 nlk = nlk_sk(sk); __netlink_kernel_create() 2582 return sk; __netlink_kernel_create() 2586 netlink_kernel_release(sk); __netlink_kernel_create() 2596 netlink_kernel_release(struct sock *sk) netlink_kernel_release() argument 2598 sk_release_kernel(sk); netlink_kernel_release() 2602 int __netlink_change_ngroups(struct sock *sk, unsigned int groups) __netlink_change_ngroups() argument 2605 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; __netlink_change_ngroups() 2634 * @sk: The kernel netlink socket, as returned by netlink_kernel_create(). 
2637 int netlink_change_ngroups(struct sock *sk, unsigned int groups) netlink_change_ngroups() argument 2642 err = __netlink_change_ngroups(sk, groups); netlink_change_ngroups() 2650 struct sock *sk; __netlink_clear_multicast_users() local 2653 sk_for_each_bound(sk, &tbl->mc_list) __netlink_clear_multicast_users() 2654 netlink_update_socket_mc(nlk_sk(sk), group, 0); __netlink_clear_multicast_users() 2680 static int netlink_dump(struct sock *sk) netlink_dump() argument 2682 struct netlink_sock *nlk = nlk_sk(sk); netlink_dump() 2696 if (!netlink_rx_is_mmaped(sk) && netlink_dump() 2697 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) netlink_dump() 2710 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, netlink_dump() 2717 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, netlink_dump() 2734 netlink_skb_set_owner_r(skb, sk); netlink_dump() 2741 if (sk_filter(sk, skb)) netlink_dump() 2744 __netlink_sendskb(sk, skb); netlink_dump() 2756 if (sk_filter(sk, skb)) netlink_dump() 2759 __netlink_sendskb(sk, skb); netlink_dump() 2781 struct sock *sk; __netlink_dump_start() local 2796 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid); __netlink_dump_start() 2797 if (sk == NULL) { __netlink_dump_start() 2802 nlk = nlk_sk(sk); __netlink_dump_start() 2829 ret = netlink_dump(sk); __netlink_dump_start() 2830 sock_put(sk); __netlink_dump_start() 2841 sock_put(sk); __netlink_dump_start() 2860 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload), netlink_ack() 2863 struct sock *sk; netlink_ack() local 2865 sk = netlink_lookup(sock_net(in_skb->sk), netlink_ack() 2866 in_skb->sk->sk_protocol, netlink_ack() 2868 if (sk) { netlink_ack() 2869 sk->sk_err = ENOBUFS; netlink_ack() 2870 sk->sk_error_report(sk); netlink_ack() 2871 sock_put(sk); netlink_ack() 2881 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT); netlink_ack() 2929 * @sk: netlink socket to use 2936 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, nlmsg_notify() argument 2949 /* errors reported via destination sk->sk_err, but propagate nlmsg_notify() 2951 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags); nlmsg_notify() 2957 err2 = nlmsg_unicast(sk, skb, portid); nlmsg_notify() 3022 } while (sock_net(&nlk->sk) != seq_file_net(seq)); __netlink_seq_next() 3067 "sk Eth Pid Groups " netlink_seq_show() 3199 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid); netlink_hash() 1484 netlink_undo_bind(int group, long unsigned int groups, struct sock *sk) netlink_undo_bind() argument
|
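Editor's note: af_netlink.c is the protocol core — netlink_autobind() assigns a port id on first send, netlink_unicast()/netlink_broadcast() deliver, and netlink_dump() feeds multi-part replies back out through netlink_recvmsg(). The minimal NETLINK_ROUTE round trip below exercises exactly that path with an RTM_GETLINK dump; reply parsing is deliberately simplified (a real consumer iterates messages with NLMSG_OK()).

/* Minimal NETLINK_ROUTE round trip: autobind, send a dump request,
 * read multi-part replies produced by netlink_dump() above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg g;
	} req = {
		.nlh = {
			.nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
			.nlmsg_type  = RTM_GETLINK,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
			.nlmsg_seq   = 1,
		},
		.g = { .rtgen_family = AF_UNSPEC },
	};
	char buf[16384];
	ssize_t n;

	if (fd < 0 || send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		return 1;

	/* The first recv triggers netlink_dump() to refill the queue. */
	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		if (h->nlmsg_type == NLMSG_DONE)
			break;	/* simplified: assumes DONE leads a buffer */
		printf("got %zd bytes of RTM_NEWLINK data\n", n);
	}
	close(fd);
	return 0;
}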
/linux-4.1.27/net/can/ |
H A D | raw.c | 84 struct sock sk; member in struct:raw_sock 113 static inline struct raw_sock *raw_sk(const struct sock *sk) raw_sk() argument 115 return (struct raw_sock *)sk; raw_sk() 120 struct sock *sk = (struct sock *)data; raw_rcv() local 121 struct raw_sock *ro = raw_sk(sk); raw_rcv() 127 if (!ro->recv_own_msgs && oskb->sk == sk) raw_rcv() 175 if (oskb->sk) raw_rcv() 177 if (oskb->sk == sk) raw_rcv() 180 if (sock_queue_rcv_skb(sk, skb) < 0) raw_rcv() 184 static int raw_enable_filters(struct net_device *dev, struct sock *sk, raw_enable_filters() argument 193 raw_rcv, sk, "raw"); raw_enable_filters() 199 raw_rcv, sk); raw_enable_filters() 207 static int raw_enable_errfilter(struct net_device *dev, struct sock *sk, raw_enable_errfilter() argument 214 raw_rcv, sk, "raw"); raw_enable_errfilter() 219 static void raw_disable_filters(struct net_device *dev, struct sock *sk, raw_disable_filters() argument 226 raw_rcv, sk); raw_disable_filters() 230 struct sock *sk, raw_disable_errfilter() 236 raw_rcv, sk); raw_disable_errfilter() 240 struct sock *sk) raw_disable_allfilters() 242 struct raw_sock *ro = raw_sk(sk); raw_disable_allfilters() 244 raw_disable_filters(dev, sk, ro->filter, ro->count); raw_disable_allfilters() 245 raw_disable_errfilter(dev, sk, ro->err_mask); raw_disable_allfilters() 248 static int raw_enable_allfilters(struct net_device *dev, struct sock *sk) raw_enable_allfilters() argument 250 struct raw_sock *ro = raw_sk(sk); raw_enable_allfilters() 253 err = raw_enable_filters(dev, sk, ro->filter, ro->count); raw_enable_allfilters() 255 err = raw_enable_errfilter(dev, sk, ro->err_mask); raw_enable_allfilters() 257 raw_disable_filters(dev, sk, ro->filter, ro->count); raw_enable_allfilters() 268 struct sock *sk = &ro->sk; raw_notifier() local 282 lock_sock(sk); raw_notifier() 285 raw_disable_allfilters(dev, sk); raw_notifier() 293 release_sock(sk); raw_notifier() 295 sk->sk_err = ENODEV; raw_notifier() 296 if (!sock_flag(sk, SOCK_DEAD)) raw_notifier() 297 sk->sk_error_report(sk); raw_notifier() 301 sk->sk_err = ENETDOWN; raw_notifier() 302 if (!sock_flag(sk, SOCK_DEAD)) raw_notifier() 303 sk->sk_error_report(sk); raw_notifier() 310 static int raw_init(struct sock *sk) raw_init() argument 312 struct raw_sock *ro = raw_sk(sk); raw_init() 344 struct sock *sk = sock->sk; raw_release() local 347 if (!sk) raw_release() 350 ro = raw_sk(sk); raw_release() 354 lock_sock(sk); raw_release() 363 raw_disable_allfilters(dev, sk); raw_release() 367 raw_disable_allfilters(NULL, sk); raw_release() 378 sock_orphan(sk); raw_release() 379 sock->sk = NULL; raw_release() 381 release_sock(sk); raw_release() 382 sock_put(sk); raw_release() 390 struct sock *sk = sock->sk; raw_bind() local 391 struct raw_sock *ro = raw_sk(sk); raw_bind() 399 lock_sock(sk); raw_bind() 423 err = raw_enable_allfilters(dev, sk); raw_bind() 429 err = raw_enable_allfilters(NULL, sk); raw_bind() 440 raw_disable_allfilters(dev, sk); raw_bind() 444 raw_disable_allfilters(NULL, sk); raw_bind() 451 release_sock(sk); raw_bind() 454 sk->sk_err = ENETDOWN; raw_bind() 455 if (!sock_flag(sk, SOCK_DEAD)) raw_bind() 456 sk->sk_error_report(sk); raw_bind() 466 struct sock *sk = sock->sk; raw_getname() local 467 struct raw_sock *ro = raw_sk(sk); raw_getname() 484 struct sock *sk = sock->sk; raw_setsockopt() local 485 struct raw_sock *ro = raw_sk(sk); raw_setsockopt() 514 lock_sock(sk); raw_setsockopt() 522 err = raw_enable_filters(dev, sk, &sfilter, 1); raw_setsockopt() 524 err = raw_enable_filters(dev, sk, filter, raw_setsockopt() 533 
raw_disable_filters(dev, sk, ro->filter, ro->count); raw_setsockopt() 553 release_sock(sk); raw_setsockopt() 566 lock_sock(sk); raw_setsockopt() 574 err = raw_enable_errfilter(dev, sk, err_mask); raw_setsockopt() 580 raw_disable_errfilter(dev, sk, ro->err_mask); raw_setsockopt() 590 release_sock(sk); raw_setsockopt() 639 struct sock *sk = sock->sk; raw_getsockopt() local 640 struct raw_sock *ro = raw_sk(sk); raw_getsockopt() 655 lock_sock(sk); raw_getsockopt() 664 release_sock(sk); raw_getsockopt() 713 struct sock *sk = sock->sk; raw_sendmsg() local 714 struct raw_sock *ro = raw_sk(sk); raw_sendmsg() 745 skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv), raw_sendmsg() 758 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); raw_sendmsg() 761 skb->sk = sk; raw_sendmsg() 762 skb->priority = sk->sk_priority; raw_sendmsg() 784 struct sock *sk = sock->sk; raw_recvmsg() local 792 skb = skb_recv_datagram(sk, flags, noblock, &err); raw_recvmsg() 803 skb_free_datagram(sk, skb); raw_recvmsg() 807 sock_recv_ts_and_drops(msg, sk, skb); raw_recvmsg() 818 skb_free_datagram(sk, skb); raw_recvmsg() 229 raw_disable_errfilter(struct net_device *dev, struct sock *sk, can_err_mask_t err_mask) raw_disable_errfilter() argument 239 raw_disable_allfilters(struct net_device *dev, struct sock *sk) raw_disable_allfilters() argument
|
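Editor's note: can/raw.c binds a raw socket to one CAN interface, installs per-socket receive filters (raw_enable_filters() above), and moves struct can_frame buffers through raw_sendmsg()/raw_recvmsg(). A minimal userspace sketch follows; "can0", the 0x123 identifier, and the payload are example values.

/* Minimal CAN_RAW sketch exercising raw_bind(), raw_setsockopt() and
 * raw_sendmsg() above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_filter flt = { .can_id = 0x123, .can_mask = CAN_SFF_MASK };
	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
				   .data = { 0xde, 0xad } };
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* example interface */
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;
	addr.can_ifindex = ifr.ifr_ifindex;

	/* Installed through raw_setsockopt() -> raw_enable_filters(). */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &flt, sizeof(flt));

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	if (write(s, &frame, sizeof(frame)) != sizeof(frame))
		perror("write");	/* reaches raw_sendmsg() */

	close(s);
	return 0;
}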
/linux-4.1.27/crypto/ |
H A D | algif_skcipher.c | 105 struct sock *sk = req->data; skcipher_async_cb() local 106 struct alg_sock *ask = alg_sk(sk); skcipher_async_cb() 117 static inline int skcipher_sndbuf(struct sock *sk) skcipher_sndbuf() argument 119 struct alg_sock *ask = alg_sk(sk); skcipher_sndbuf() 122 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - skcipher_sndbuf() 126 static inline bool skcipher_writable(struct sock *sk) skcipher_writable() argument 128 return PAGE_SIZE <= skcipher_sndbuf(sk); skcipher_writable() 131 static int skcipher_alloc_sgl(struct sock *sk) skcipher_alloc_sgl() argument 133 struct alg_sock *ask = alg_sk(sk); skcipher_alloc_sgl() 143 sgl = sock_kmalloc(sk, sizeof(*sgl) + skcipher_alloc_sgl() 161 static void skcipher_pull_sgl(struct sock *sk, int used, int put) skcipher_pull_sgl() argument 163 struct alg_sock *ask = alg_sk(sk); skcipher_pull_sgl() 194 sock_kfree_s(sk, sgl, skcipher_pull_sgl() 203 static void skcipher_free_sgl(struct sock *sk) skcipher_free_sgl() argument 205 struct alg_sock *ask = alg_sk(sk); skcipher_free_sgl() 208 skcipher_pull_sgl(sk, ctx->used, 1); skcipher_free_sgl() 211 static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) skcipher_wait_for_wmem() argument 220 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); skcipher_wait_for_wmem() 225 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skcipher_wait_for_wmem() 227 if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) { skcipher_wait_for_wmem() 232 finish_wait(sk_sleep(sk), &wait); skcipher_wait_for_wmem() 237 static void skcipher_wmem_wakeup(struct sock *sk) skcipher_wmem_wakeup() argument 241 if (!skcipher_writable(sk)) skcipher_wmem_wakeup() 245 wq = rcu_dereference(sk->sk_wq); skcipher_wmem_wakeup() 250 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); skcipher_wmem_wakeup() 254 static int skcipher_wait_for_data(struct sock *sk, unsigned flags) skcipher_wait_for_data() argument 256 struct alg_sock *ask = alg_sk(sk); skcipher_wait_for_data() 266 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); skcipher_wait_for_data() 271 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skcipher_wait_for_data() 273 if (sk_wait_event(sk, &timeout, ctx->used)) { skcipher_wait_for_data() 278 finish_wait(sk_sleep(sk), &wait); skcipher_wait_for_data() 280 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); skcipher_wait_for_data() 285 static void skcipher_data_wakeup(struct sock *sk) skcipher_data_wakeup() argument 287 struct alg_sock *ask = alg_sk(sk); skcipher_data_wakeup() 295 wq = rcu_dereference(sk->sk_wq); skcipher_data_wakeup() 300 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); skcipher_data_wakeup() 307 struct sock *sk = sock->sk; skcipher_sendmsg() local 308 struct alg_sock *ask = alg_sk(sk); skcipher_sendmsg() 343 lock_sock(sk); skcipher_sendmsg() 381 if (!skcipher_writable(sk)) { skcipher_sendmsg() 382 err = skcipher_wait_for_wmem(sk, msg->msg_flags); skcipher_sendmsg() 387 len = min_t(unsigned long, len, skcipher_sndbuf(sk)); skcipher_sendmsg() 389 err = skcipher_alloc_sgl(sk); skcipher_sendmsg() 433 skcipher_data_wakeup(sk); skcipher_sendmsg() 434 release_sock(sk); skcipher_sendmsg() 442 struct sock *sk = sock->sk; skcipher_sendpage() local 443 struct alg_sock *ask = alg_sk(sk); skcipher_sendpage() 451 lock_sock(sk); skcipher_sendpage() 458 if (!skcipher_writable(sk)) { skcipher_sendpage() 459 err = skcipher_wait_for_wmem(sk, flags); skcipher_sendpage() 464 err = skcipher_alloc_sgl(sk); skcipher_sendpage() 484 skcipher_data_wakeup(sk); skcipher_sendpage() 485 
release_sock(sk); skcipher_sendpage() 510 struct sock *sk = sock->sk; skcipher_recvmsg_async() local 511 struct alg_sock *ask = alg_sk(sk); skcipher_recvmsg_async() 524 lock_sock(sk); skcipher_recvmsg_async() 542 skcipher_async_cb, sk); skcipher_recvmsg_async() 549 err = skcipher_wait_for_data(sk, flags); skcipher_recvmsg_async() 610 skcipher_pull_sgl(sk, used, 0); skcipher_recvmsg_async() 630 skcipher_wmem_wakeup(sk); skcipher_recvmsg_async() 631 release_sock(sk); skcipher_recvmsg_async() 638 struct sock *sk = sock->sk; skcipher_recvmsg_sync() local 639 struct alg_sock *ask = alg_sk(sk); skcipher_recvmsg_sync() 649 lock_sock(sk); skcipher_recvmsg_sync() 659 err = skcipher_wait_for_data(sk, flags); skcipher_recvmsg_sync() 695 skcipher_pull_sgl(sk, used, 1); skcipher_recvmsg_sync() 702 skcipher_wmem_wakeup(sk); skcipher_recvmsg_sync() 703 release_sock(sk); skcipher_recvmsg_sync() 719 struct sock *sk = sock->sk; skcipher_poll() local 720 struct alg_sock *ask = alg_sk(sk); skcipher_poll() 724 sock_poll_wait(file, sk_sleep(sk), wait); skcipher_poll() 730 if (skcipher_writable(sk)) skcipher_poll() 764 struct sock *sk = sock->sk; skcipher_check_key() local 765 struct alg_sock *ask = alg_sk(sk); skcipher_check_key() 767 lock_sock(sk); skcipher_check_key() 791 release_sock(sk); skcipher_check_key() 893 static void skcipher_wait(struct sock *sk) skcipher_wait() argument 895 struct alg_sock *ask = alg_sk(sk); skcipher_wait() 903 static void skcipher_sock_destruct(struct sock *sk) skcipher_sock_destruct() argument 905 struct alg_sock *ask = alg_sk(sk); skcipher_sock_destruct() 910 skcipher_wait(sk); skcipher_sock_destruct() 912 skcipher_free_sgl(sk); skcipher_sock_destruct() 913 sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm)); skcipher_sock_destruct() 914 sock_kfree_s(sk, ctx, ctx->len); skcipher_sock_destruct() 915 af_alg_release_parent(sk); skcipher_sock_destruct() 918 static int skcipher_accept_parent_nokey(void *private, struct sock *sk) skcipher_accept_parent_nokey() argument 921 struct alg_sock *ask = alg_sk(sk); skcipher_accept_parent_nokey() 926 ctx = sock_kmalloc(sk, len, GFP_KERNEL); skcipher_accept_parent_nokey() 930 ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher), skcipher_accept_parent_nokey() 933 sock_kfree_s(sk, ctx, len); skcipher_accept_parent_nokey() 954 sk->sk_destruct = skcipher_sock_destruct; skcipher_accept_parent_nokey() 959 static int skcipher_accept_parent(void *private, struct sock *sk) skcipher_accept_parent() argument 966 return skcipher_accept_parent_nokey(private, sk); skcipher_accept_parent()
|
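The algif_skcipher.c hits above are the kernel half of the AF_ALG skcipher interface: sendmsg() queues plaintext pages, recvmsg() drives the cipher, and the sndbuf/writable helpers gate the producer. A minimal userspace sketch of the matching caller — assuming a kernel exposing cbc(aes), with error handling omitted; everything here is illustrative, not taken from the tree:

    #include <linux/if_alg.h>
    #include <stddef.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        /* AES-128-CBC: 16-byte key, 16-byte IV, one 16-byte block */
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "skcipher",
            .salg_name   = "cbc(aes)",
        };
        unsigned char key[16] = { 0 }, iv[16] = { 0 };
        unsigned char pt[16] = "0123456789abcde", ct[16];
        char cbuf[CMSG_SPACE(sizeof(__u32)) +
                  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + sizeof(iv))] = { 0 };
        struct msghdr msg = { 0 };
        struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
        struct cmsghdr *cmsg;
        struct af_alg_iv *ivm;
        int tfm, op;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        op = accept(tfm, NULL, 0);              /* per-request socket */

        msg.msg_control = cbuf;
        msg.msg_controllen = sizeof(cbuf);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_OP;
        *(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + sizeof(iv));
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_IV;
        ivm = (struct af_alg_iv *)CMSG_DATA(cmsg);
        ivm->ivlen = sizeof(iv);
        memcpy(ivm->iv, iv, sizeof(iv));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;

        sendmsg(op, &msg, 0);           /* enters skcipher_sendmsg() */
        read(op, ct, sizeof(ct));       /* enters skcipher_recvmsg_*() */
        close(op);
        close(tfm);
        return 0;
    }

The two-socket shape (bind the transform, then accept a request socket) is exactly what skcipher_accept_parent()/af_alg_accept() in these listings implement.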
H A D | algif_aead.c | 56 static inline int aead_sndbuf(struct sock *sk) aead_sndbuf() argument 58 struct alg_sock *ask = alg_sk(sk); aead_sndbuf() 61 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - aead_sndbuf() 65 static inline bool aead_writable(struct sock *sk) aead_writable() argument 67 return PAGE_SIZE <= aead_sndbuf(sk); aead_writable() 77 static void aead_put_sgl(struct sock *sk) aead_put_sgl() argument 79 struct alg_sock *ask = alg_sk(sk); aead_put_sgl() 98 static void aead_wmem_wakeup(struct sock *sk) aead_wmem_wakeup() argument 102 if (!aead_writable(sk)) aead_wmem_wakeup() 106 wq = rcu_dereference(sk->sk_wq); aead_wmem_wakeup() 111 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); aead_wmem_wakeup() 115 static int aead_wait_for_data(struct sock *sk, unsigned flags) aead_wait_for_data() argument 117 struct alg_sock *ask = alg_sk(sk); aead_wait_for_data() 126 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); aead_wait_for_data() 131 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); aead_wait_for_data() 133 if (sk_wait_event(sk, &timeout, !ctx->more)) { aead_wait_for_data() 138 finish_wait(sk_sleep(sk), &wait); aead_wait_for_data() 140 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); aead_wait_for_data() 145 static void aead_data_wakeup(struct sock *sk) aead_data_wakeup() argument 147 struct alg_sock *ask = alg_sk(sk); aead_data_wakeup() 157 wq = rcu_dereference(sk->sk_wq); aead_data_wakeup() 162 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); aead_data_wakeup() 168 struct sock *sk = sock->sk; aead_sendmsg() local 169 struct alg_sock *ask = alg_sk(sk); aead_sendmsg() 201 lock_sock(sk); aead_sendmsg() 238 if (!aead_writable(sk)) { aead_sendmsg() 240 aead_put_sgl(sk); aead_sendmsg() 246 len = min_t(unsigned long, size, aead_sndbuf(sk)); aead_sendmsg() 251 aead_put_sgl(sk); aead_sendmsg() 287 aead_put_sgl(sk); aead_sendmsg() 292 aead_data_wakeup(sk); aead_sendmsg() 293 release_sock(sk); aead_sendmsg() 301 struct sock *sk = sock->sk; aead_sendpage() local 302 struct alg_sock *ask = alg_sk(sk); aead_sendpage() 313 lock_sock(sk); aead_sendpage() 320 if (!aead_writable(sk)) { aead_sendpage() 322 aead_put_sgl(sk); aead_sendpage() 339 aead_put_sgl(sk); aead_sendpage() 344 aead_data_wakeup(sk); aead_sendpage() 345 release_sock(sk); aead_sendpage() 352 struct sock *sk = sock->sk; aead_recvmsg() local 353 struct alg_sock *ask = alg_sk(sk); aead_recvmsg() 372 lock_sock(sk); aead_recvmsg() 390 err = aead_wait_for_data(sk, flags); aead_recvmsg() 504 aead_put_sgl(sk); aead_recvmsg() 508 aead_put_sgl(sk); aead_recvmsg() 516 aead_wmem_wakeup(sk); aead_recvmsg() 517 release_sock(sk); aead_recvmsg() 525 struct sock *sk = sock->sk; aead_poll() local 526 struct alg_sock *ask = alg_sk(sk); aead_poll() 530 sock_poll_wait(file, sk_sleep(sk), wait); aead_poll() 536 if (aead_writable(sk)) aead_poll() 584 static void aead_sock_destruct(struct sock *sk) aead_sock_destruct() argument 586 struct alg_sock *ask = alg_sk(sk); aead_sock_destruct() 591 aead_put_sgl(sk); aead_sock_destruct() 592 sock_kzfree_s(sk, ctx->iv, ivlen); aead_sock_destruct() 593 sock_kfree_s(sk, ctx, ctx->len); aead_sock_destruct() 594 af_alg_release_parent(sk); aead_sock_destruct() 597 static int aead_accept_parent(void *private, struct sock *sk) aead_accept_parent() argument 600 struct alg_sock *ask = alg_sk(sk); aead_accept_parent() 604 ctx = sock_kmalloc(sk, len, GFP_KERNEL); aead_accept_parent() 609 ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL); aead_accept_parent() 611 sock_kfree_s(sk, ctx, len); aead_accept_parent() 632 sk->sk_destruct = aead_sock_destruct; aead_accept_parent()
|
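aead_sndbuf() here, like skcipher_sndbuf() before it, computes send-buffer headroom as max(max(sk_sndbuf & PAGE_MASK, PAGE_SIZE) - used, 0). A standalone sketch of that arithmetic with illustrative numbers (the PAGE_SIZE definition and sample values below are assumptions for the example, not kernel code):

    #include <stdio.h>

    #define PAGE_SIZE 4096L
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* mirrors: max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK,
     *                           PAGE_SIZE) - ctx->used, 0) */
    static long alg_sndbuf(long sk_sndbuf, long used)
    {
        long limit = sk_sndbuf & PAGE_MASK;     /* whole pages only */

        if (limit < PAGE_SIZE)
            limit = PAGE_SIZE;                  /* floor of one page */
        return limit - used > 0 ? limit - used : 0;
    }

    int main(void)
    {
        /* e.g. a 212992-byte sndbuf with 8000 bytes queued -> 204992 */
        printf("%ld\n", alg_sndbuf(212992, 8000));
        return 0;
    }

aead_writable()/skcipher_writable() then simply ask whether at least one page of headroom remains.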
H A D | af_alg.c | 124 if (sock->sk) af_alg_release() 125 sock_put(sock->sk); af_alg_release() 130 void af_alg_release_parent(struct sock *sk) af_alg_release_parent() argument 132 struct alg_sock *ask = alg_sk(sk); af_alg_release_parent() 136 sk = ask->parent; af_alg_release_parent() 137 ask = alg_sk(sk); af_alg_release_parent() 139 lock_sock(sk); af_alg_release_parent() 143 release_sock(sk); af_alg_release_parent() 146 sock_put(sk); af_alg_release_parent() 152 struct sock *sk = sock->sk; alg_bind() local 153 struct alg_sock *ask = alg_sk(sk); alg_bind() 184 lock_sock(sk); alg_bind() 194 release_sock(sk); alg_bind() 201 static int alg_setkey(struct sock *sk, char __user *ukey, alg_setkey() argument 204 struct alg_sock *ask = alg_sk(sk); alg_setkey() 209 key = sock_kmalloc(sk, keylen, GFP_KERNEL); alg_setkey() 220 sock_kzfree_s(sk, key, keylen); alg_setkey() 228 struct sock *sk = sock->sk; alg_setsockopt() local 229 struct alg_sock *ask = alg_sk(sk); alg_setsockopt() 233 lock_sock(sk); alg_setsockopt() 250 err = alg_setkey(sk, optval, optlen); alg_setsockopt() 261 release_sock(sk); alg_setsockopt() 266 int af_alg_accept(struct sock *sk, struct socket *newsock) af_alg_accept() argument 268 struct alg_sock *ask = alg_sk(sk); af_alg_accept() 274 lock_sock(sk); af_alg_accept() 281 sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto); af_alg_accept() 288 security_sk_clone(sk, sk2); af_alg_accept() 302 sock_hold(sk); af_alg_accept() 304 alg_sk(sk2)->parent = sk; af_alg_accept() 317 release_sock(sk); af_alg_accept() 325 return af_alg_accept(sock->sk, newsock); alg_accept() 351 static void alg_sock_destruct(struct sock *sk) alg_sock_destruct() argument 353 struct alg_sock *ask = alg_sk(sk); alg_sock_destruct() 361 struct sock *sk; alg_create() local 370 sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto); alg_create() 371 if (!sk) alg_create() 375 sock_init_data(sock, sk); alg_create() 377 sk->sk_family = PF_ALG; alg_create() 378 sk->sk_destruct = alg_sock_destruct; alg_create()
|
H A D | algif_hash.c | 46 struct sock *sk = sock->sk; hash_sendmsg() local 47 struct alg_sock *ask = alg_sk(sk); hash_sendmsg() 52 if (limit > sk->sk_sndbuf) hash_sendmsg() 53 limit = sk->sk_sndbuf; hash_sendmsg() 55 lock_sock(sk); hash_sendmsg() 99 release_sock(sk); hash_sendmsg() 107 struct sock *sk = sock->sk; hash_sendpage() local 108 struct alg_sock *ask = alg_sk(sk); hash_sendpage() 115 lock_sock(sk); hash_sendpage() 144 release_sock(sk); hash_sendpage() 152 struct sock *sk = sock->sk; hash_recvmsg() local 153 struct alg_sock *ask = alg_sk(sk); hash_recvmsg() 163 lock_sock(sk); hash_recvmsg() 176 release_sock(sk); hash_recvmsg() 183 struct sock *sk = sock->sk; hash_accept() local 184 struct alg_sock *ask = alg_sk(sk); hash_accept() 201 sk2 = newsock->sk; hash_accept() 243 struct sock *sk = sock->sk; hash_check_key() local 244 struct alg_sock *ask = alg_sk(sk); hash_check_key() 246 lock_sock(sk); hash_check_key() 270 release_sock(sk); hash_check_key() 384 static void hash_sock_destruct(struct sock *sk) hash_sock_destruct() argument 386 struct alg_sock *ask = alg_sk(sk); hash_sock_destruct() 389 sock_kzfree_s(sk, ctx->result, hash_sock_destruct() 391 sock_kfree_s(sk, ctx, ctx->len); hash_sock_destruct() 392 af_alg_release_parent(sk); hash_sock_destruct() 395 static int hash_accept_parent_nokey(void *private, struct sock *sk) hash_accept_parent_nokey() argument 398 struct alg_sock *ask = alg_sk(sk); hash_accept_parent_nokey() 404 ctx = sock_kmalloc(sk, len, GFP_KERNEL); hash_accept_parent_nokey() 408 ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL); hash_accept_parent_nokey() 410 sock_kfree_s(sk, ctx, len); hash_accept_parent_nokey() 426 sk->sk_destruct = hash_sock_destruct; hash_accept_parent_nokey() 431 static int hash_accept_parent(void *private, struct sock *sk) hash_accept_parent() argument 438 return hash_accept_parent_nokey(private, sk); hash_accept_parent()
|
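The algif_hash.c entries follow the same AF_ALG shape but for digests: hash_sendmsg() feeds data, hash_recvmsg() copies out ctx->result. A minimal userspace companion sketch, assuming sha256 is available and omitting error handling:

    #include <linux/if_alg.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "sha256",
        };
        unsigned char digest[32];
        int i, tfm, op;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        op = accept(tfm, NULL, 0);

        write(op, "abc", 3);                    /* hash_sendmsg() */
        read(op, digest, sizeof(digest));       /* hash_recvmsg() */
        for (i = 0; i < 32; i++)
            printf("%02x", digest[i]);
        putchar('\n');
        close(op);
        close(tfm);
        return 0;
    }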
/linux-4.1.27/net/rxrpc/ |
H A D | af_rxrpc.c | 55 static inline int rxrpc_writable(struct sock *sk) rxrpc_writable() argument 57 return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf; rxrpc_writable() 63 static void rxrpc_write_space(struct sock *sk) rxrpc_write_space() argument 65 _enter("%p", sk); rxrpc_write_space() 67 if (rxrpc_writable(sk)) { rxrpc_write_space() 68 struct socket_wq *wq = rcu_dereference(sk->sk_wq); rxrpc_write_space() 72 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); rxrpc_write_space() 125 struct sock *sk = sock->sk; rxrpc_bind() local 127 struct rxrpc_sock *rx = rxrpc_sk(sk), *prx; rxrpc_bind() 137 lock_sock(&rx->sk); rxrpc_bind() 139 if (rx->sk.sk_state != RXRPC_UNCONNECTED) { rxrpc_bind() 166 rx->sk.sk_state = RXRPC_SERVER_BOUND; rxrpc_bind() 168 rx->sk.sk_state = RXRPC_CLIENT_BOUND; rxrpc_bind() 171 release_sock(&rx->sk); rxrpc_bind() 179 release_sock(&rx->sk); rxrpc_bind() 190 struct sock *sk = sock->sk; rxrpc_listen() local 191 struct rxrpc_sock *rx = rxrpc_sk(sk); rxrpc_listen() 196 lock_sock(&rx->sk); rxrpc_listen() 198 switch (rx->sk.sk_state) { rxrpc_listen() 209 sk->sk_max_ack_backlog = backlog; rxrpc_listen() 210 rx->sk.sk_state = RXRPC_SERVER_LISTENING; rxrpc_listen() 215 release_sock(&rx->sk); rxrpc_listen() 230 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); rxrpc_name_to_transport() 236 ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED); rxrpc_name_to_transport() 278 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); rxrpc_kernel_begin_call() 283 lock_sock(&rx->sk); rxrpc_kernel_begin_call() 323 release_sock(&rx->sk); rxrpc_kernel_begin_call() 360 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); rxrpc_kernel_intercept_rx_messages() 377 struct sock *sk = sock->sk; rxrpc_connect() local 380 struct rxrpc_sock *rx = rxrpc_sk(sk); rxrpc_connect() 391 lock_sock(&rx->sk); rxrpc_connect() 393 switch (rx->sk.sk_state) { rxrpc_connect() 404 release_sock(&rx->sk); rxrpc_connect() 408 rx->sk.sk_state = RXRPC_CLIENT_BOUND; rxrpc_connect() 412 release_sock(&rx->sk); rxrpc_connect() 415 release_sock(&rx->sk); rxrpc_connect() 422 release_sock(&rx->sk); rxrpc_connect() 429 rx->sk.sk_state = RXRPC_CLIENT_CONNECTED; rxrpc_connect() 431 release_sock(&rx->sk); rxrpc_connect() 447 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); rxrpc_sendmsg() 450 _enter(",{%d},,%zu", rx->sk.sk_state, len); rxrpc_sendmsg() 464 lock_sock(&rx->sk); rxrpc_sendmsg() 481 switch (rx->sk.sk_state) { rxrpc_sendmsg() 502 release_sock(&rx->sk); rxrpc_sendmsg() 515 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); rxrpc_setsockopt() 521 lock_sock(&rx->sk); rxrpc_setsockopt() 531 if (rx->sk.sk_state != RXRPC_UNCONNECTED) rxrpc_setsockopt() 541 if (rx->sk.sk_state != RXRPC_UNCONNECTED) rxrpc_setsockopt() 551 if (rx->sk.sk_state != RXRPC_UNCONNECTED) rxrpc_setsockopt() 561 if (rx->sk.sk_state != RXRPC_UNCONNECTED) rxrpc_setsockopt() 581 release_sock(&rx->sk); rxrpc_setsockopt() 592 struct sock *sk = sock->sk; rxrpc_poll() local 594 sock_poll_wait(file, sk_sleep(sk), wait); rxrpc_poll() 599 if (!skb_queue_empty(&sk->sk_receive_queue)) rxrpc_poll() 605 if (rxrpc_writable(sk)) rxrpc_poll() 618 struct sock *sk; rxrpc_create() local 635 sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto); rxrpc_create() 636 if (!sk) rxrpc_create() 639 sock_init_data(sock, sk); rxrpc_create() 640 sk->sk_state = RXRPC_UNCONNECTED; rxrpc_create() 641 sk->sk_write_space = rxrpc_write_space; rxrpc_create() 642 sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen; rxrpc_create() 643 sk->sk_destruct = rxrpc_sock_destructor; rxrpc_create() 645 rx = rxrpc_sk(sk); rxrpc_create() 662 
static void rxrpc_sock_destructor(struct sock *sk) rxrpc_sock_destructor() argument 664 _enter("%p", sk); rxrpc_sock_destructor() 666 rxrpc_purge_queue(&sk->sk_receive_queue); rxrpc_sock_destructor() 668 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); rxrpc_sock_destructor() 669 WARN_ON(!sk_unhashed(sk)); rxrpc_sock_destructor() 670 WARN_ON(sk->sk_socket); rxrpc_sock_destructor() 672 if (!sock_flag(sk, SOCK_DEAD)) { rxrpc_sock_destructor() 673 printk("Attempt to release alive rxrpc socket: %p\n", sk); rxrpc_sock_destructor() 681 static int rxrpc_release_sock(struct sock *sk) rxrpc_release_sock() argument 683 struct rxrpc_sock *rx = rxrpc_sk(sk); rxrpc_release_sock() 685 _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt)); rxrpc_release_sock() 688 sock_orphan(sk); rxrpc_release_sock() 689 sk->sk_shutdown = SHUTDOWN_MASK; rxrpc_release_sock() 691 spin_lock_bh(&sk->sk_receive_queue.lock); rxrpc_release_sock() 692 sk->sk_state = RXRPC_CLOSE; rxrpc_release_sock() 693 spin_unlock_bh(&sk->sk_receive_queue.lock); rxrpc_release_sock() 706 rxrpc_purge_queue(&sk->sk_receive_queue); rxrpc_release_sock() 730 sock_put(sk); rxrpc_release_sock() 741 struct sock *sk = sock->sk; rxrpc_release() local 743 _enter("%p{%p}", sock, sk); rxrpc_release() 745 if (!sk) rxrpc_release() 748 sock->sk = NULL; rxrpc_release() 750 return rxrpc_release_sock(sk); rxrpc_release()
|
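rxrpc_writable()/rxrpc_write_space() above are an instance of the stock send-buffer wakeup pattern: compare sk_wmem_alloc against sk_sndbuf, and on transition wake sleepers and SIGIO listeners. A condensed kernel-style sketch of that pattern (proto_* names are invented; this is an illustration, not the verbatim 4.1 source):

    #include <net/sock.h>

    static inline int proto_writable(struct sock *sk)
    {
        /* writable while queued-but-unsent bytes stay under the budget */
        return atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf;
    }

    static void proto_write_space(struct sock *sk)
    {
        rcu_read_lock();
        if (proto_writable(sk)) {
            struct socket_wq *wq = rcu_dereference(sk->sk_wq);

            if (wq && waitqueue_active(&wq->wait))
                wake_up_interruptible(&wq->wait);
            /* notify async (SIGIO) waiters that there is room to send */
            sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
    }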
H A D | ar-recvmsg.c | 51 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); rxrpc_recvmsg() 66 timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT); rxrpc_recvmsg() 69 lock_sock(&rx->sk); rxrpc_recvmsg() 77 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) { rxrpc_recvmsg() 78 release_sock(&rx->sk); rxrpc_recvmsg() 86 skb = skb_peek(&rx->sk.sk_receive_queue); rxrpc_recvmsg() 94 release_sock(&rx->sk); rxrpc_recvmsg() 95 prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait, rxrpc_recvmsg() 97 ret = sock_error(&rx->sk); rxrpc_recvmsg() 101 if (skb_queue_empty(&rx->sk.sk_receive_queue)) { rxrpc_recvmsg() 106 finish_wait(sk_sleep(&rx->sk), &wait); rxrpc_recvmsg() 107 lock_sock(&rx->sk); rxrpc_recvmsg() 124 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) rxrpc_recvmsg() 135 release_sock(&rx->sk); rxrpc_recvmsg() 153 sock_recv_timestamp(msg, &rx->sk, skb); rxrpc_recvmsg() 215 if (skb_dequeue(&rx->sk.sk_receive_queue) != rxrpc_recvmsg() 235 if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue) rxrpc_recvmsg() 241 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) rxrpc_recvmsg() 249 release_sock(&rx->sk); rxrpc_recvmsg() 268 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) rxrpc_recvmsg() 320 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) rxrpc_recvmsg() 326 release_sock(&rx->sk); rxrpc_recvmsg() 335 release_sock(&rx->sk); rxrpc_recvmsg() 345 finish_wait(sk_sleep(&rx->sk), &wait); rxrpc_recvmsg()
|
/linux-4.1.27/include/linux/ |
H A D | sock_diag.h | 22 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); 23 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); 25 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); 26 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
|
H A D | udp.h | 45 #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0] 46 #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1] 47 #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node 72 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 73 void (*encap_destroy)(struct sock *sk); 76 static inline struct udp_sock *udp_sk(const struct sock *sk) udp_sk() argument 78 return (struct udp_sock *)sk; udp_sk() 81 static inline void udp_set_no_check6_tx(struct sock *sk, bool val) udp_set_no_check6_tx() argument 83 udp_sk(sk)->no_check6_tx = val; udp_set_no_check6_tx() 86 static inline void udp_set_no_check6_rx(struct sock *sk, bool val) udp_set_no_check6_rx() argument 88 udp_sk(sk)->no_check6_rx = val; udp_set_no_check6_rx() 91 static inline bool udp_get_no_check6_tx(struct sock *sk) udp_get_no_check6_tx() argument 93 return udp_sk(sk)->no_check6_tx; udp_get_no_check6_tx() 96 static inline bool udp_get_no_check6_rx(struct sock *sk) udp_get_no_check6_rx() argument 98 return udp_sk(sk)->no_check6_rx; udp_get_no_check6_rx()
|
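The no_check6_tx/no_check6_rx bits above are driven from userspace by the UDP_NO_CHECK6_TX/UDP_NO_CHECK6_RX socket options. A sketch (the fallback defines mirror the uapi linux/udp.h values in case the libc headers predate them; needs a v3.16+ kernel):

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef UDP_NO_CHECK6_TX
    #define UDP_NO_CHECK6_TX 101    /* uapi linux/udp.h values */
    #define UDP_NO_CHECK6_RX 102
    #endif

    int main(void)
    {
        int one = 1;
        int s = socket(AF_INET6, SOCK_DGRAM, 0);

        /* lands in udp_set_no_check6_tx()/_rx() shown above */
        if (setsockopt(s, IPPROTO_UDP, UDP_NO_CHECK6_TX, &one, sizeof(one)))
            perror("UDP_NO_CHECK6_TX");
        if (setsockopt(s, IPPROTO_UDP, UDP_NO_CHECK6_RX, &one, sizeof(one)))
            perror("UDP_NO_CHECK6_RX");
        return 0;
    }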
H A D | netfilter.h | 56 struct sock *sk; member in struct:nf_hook_state 65 struct sock *sk, nf_hook_state_init() 73 p->sk = sk; nf_hook_state_init() 102 int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); 104 int (*compat_set)(struct sock *sk, int optval, 109 int (*get)(struct sock *sk, int optval, void __user *user, int *len); 111 int (*compat_get)(struct sock *sk, int optval, 159 struct sock *sk, nf_hook_thresh() 170 indev, outdev, sk, okfn); nf_hook_thresh() 176 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, nf_hook() argument 181 return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN); nf_hook() 202 NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk, NF_HOOK_THRESH() argument 207 int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh); NF_HOOK_THRESH() 209 ret = okfn(sk, skb); NF_HOOK_THRESH() 214 NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk, NF_HOOK_COND() argument 221 ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1)) NF_HOOK_COND() 222 ret = okfn(sk, skb); NF_HOOK_COND() 227 NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, NF_HOOK() argument 231 return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN); NF_HOOK() 235 int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, 237 int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, 240 int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, 242 int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, 331 #define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb) 332 #define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb) nf_hook_thresh() 334 struct sock *sk, nf_hook_thresh() 338 int (*okfn)(struct sock *sk, struct sk_buff *), int thresh) nf_hook_thresh() 340 return okfn(sk, skb); nf_hook_thresh() 342 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, nf_hook() argument 60 nf_hook_state_init(struct nf_hook_state *p, unsigned int hook, int thresh, u_int8_t pf, struct net_device *indev, struct net_device *outdev, struct sock *sk, int (*okfn)(struct sock *, struct sk_buff *)) nf_hook_state_init() argument 158 nf_hook_thresh(u_int8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sock *, struct sk_buff *), int thresh) nf_hook_thresh() argument 333 nf_hook_thresh(u_int8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sock *sk, struct sk_buff *), int thresh) nf_hook_thresh() argument
|
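The netfilter.h plumbing above (nf_hook_state carrying state->sk, NF_HOOK_THRESH invoking okfn(sk, skb)) is consumed by modules that register an nf_hook_ops. A sketch for the 4.1-era API shown here (demo_* names are invented; later kernels changed the hook signature again):

    #include <linux/module.h>
    #include <linux/netfilter.h>
    #include <linux/netfilter_ipv4.h>
    #include <linux/skbuff.h>

    /* 4.1-era hook signature: ops + skb + nf_hook_state */
    static unsigned int demo_hook(const struct nf_hook_ops *ops,
                                  struct sk_buff *skb,
                                  const struct nf_hook_state *state)
    {
        /* state->sk is the sending socket on LOCAL_OUT, or NULL */
        return NF_ACCEPT;
    }

    static struct nf_hook_ops demo_ops = {
        .hook     = demo_hook,
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_LOCAL_OUT,
        .priority = NF_IP_PRI_FIRST,
    };

    static int __init demo_init(void)
    {
        return nf_register_hook(&demo_ops);
    }

    static void __exit demo_exit(void)
    {
        nf_unregister_hook(&demo_ops);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");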
H A D | ipv6.h | 258 extern int inet6_sk_rebuild_header(struct sock *sk); 270 static inline struct raw6_sock *raw6_sk(const struct sock *sk) raw6_sk() argument 272 return (struct raw6_sock *)sk; raw6_sk() 286 #define __ipv6_only_sock(sk) (sk->sk_ipv6only) 287 #define ipv6_only_sock(sk) (__ipv6_only_sock(sk)) 288 #define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \ 289 inet6_sk(sk)->rxopt.bits.rxinfo) 291 static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk) inet6_rcv_saddr() argument 293 if (sk->sk_family == AF_INET6) inet6_rcv_saddr() 294 return &sk->sk_v6_rcv_saddr; inet6_rcv_saddr() 298 static inline int inet_v6_ipv6only(const struct sock *sk) inet_v6_ipv6only() argument 301 return ipv6_only_sock(sk); inet_v6_ipv6only() 304 #define __ipv6_only_sock(sk) 0 305 #define ipv6_only_sock(sk) 0 306 #define ipv6_sk_rxinfo(sk) 0 319 static inline struct raw6_sock *raw6_sk(const struct sock *sk) raw6_sk() argument
|
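__ipv6_only_sock() above reads sk->sk_ipv6only, which userspace toggles with the standard IPV6_V6ONLY option. A minimal sketch:

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        int on = 1;
        int s = socket(AF_INET6, SOCK_STREAM, 0);

        /* sets sk->sk_ipv6only, which __ipv6_only_sock() reads */
        if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)))
            perror("IPV6_V6ONLY");
        return 0;
    }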
H A D | inet_diag.h | 23 void (*idiag_get_info)(struct sock *sk, 30 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, 43 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
|
/linux-4.1.27/net/atm/ |
H A D | raw.c | 25 struct sock *sk = sk_atm(vcc); atm_push_raw() local 27 skb_queue_tail(&sk->sk_receive_queue, skb); atm_push_raw() 28 sk->sk_data_ready(sk); atm_push_raw() 34 struct sock *sk = sk_atm(vcc); atm_pop_raw() local 37 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize); atm_pop_raw() 38 atomic_sub(skb->truesize, &sk->sk_wmem_alloc); atm_pop_raw() 40 sk->sk_write_space(sk); atm_pop_raw()
|
H A D | common.c | 42 static void __vcc_insert_socket(struct sock *sk) __vcc_insert_socket() argument 44 struct atm_vcc *vcc = atm_sk(sk); __vcc_insert_socket() 46 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); __vcc_insert_socket() 47 sk_add_node(sk, head); __vcc_insert_socket() 50 void vcc_insert_socket(struct sock *sk) vcc_insert_socket() argument 53 __vcc_insert_socket(sk); vcc_insert_socket() 58 static void vcc_remove_socket(struct sock *sk) vcc_remove_socket() argument 61 sk_del_node_init(sk); vcc_remove_socket() 68 struct sock *sk = sk_atm(vcc); alloc_tx() local 70 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { alloc_tx() 72 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf); alloc_tx() 77 pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); alloc_tx() 78 atomic_add(skb->truesize, &sk->sk_wmem_alloc); alloc_tx() 82 static void vcc_sock_destruct(struct sock *sk) vcc_sock_destruct() argument 84 if (atomic_read(&sk->sk_rmem_alloc)) vcc_sock_destruct() 86 __func__, atomic_read(&sk->sk_rmem_alloc)); vcc_sock_destruct() 88 if (atomic_read(&sk->sk_wmem_alloc)) vcc_sock_destruct() 90 __func__, atomic_read(&sk->sk_wmem_alloc)); vcc_sock_destruct() 93 static void vcc_def_wakeup(struct sock *sk) vcc_def_wakeup() argument 98 wq = rcu_dereference(sk->sk_wq); vcc_def_wakeup() 104 static inline int vcc_writable(struct sock *sk) vcc_writable() argument 106 struct atm_vcc *vcc = atm_sk(sk); vcc_writable() 109 atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf; vcc_writable() 112 static void vcc_write_space(struct sock *sk) vcc_write_space() argument 118 if (vcc_writable(sk)) { vcc_write_space() 119 wq = rcu_dereference(sk->sk_wq); vcc_write_space() 123 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); vcc_write_space() 129 static void vcc_release_cb(struct sock *sk) vcc_release_cb() argument 131 struct atm_vcc *vcc = atm_sk(sk); vcc_release_cb() 146 struct sock *sk; vcc_create() local 149 sock->sk = NULL; vcc_create() 152 sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto); vcc_create() 153 if (!sk) vcc_create() 155 sock_init_data(sock, sk); vcc_create() 156 sk->sk_state_change = vcc_def_wakeup; vcc_create() 157 sk->sk_write_space = vcc_write_space; vcc_create() 159 vcc = atm_sk(sk); vcc_create() 164 atomic_set(&sk->sk_wmem_alloc, 1); vcc_create() 165 atomic_set(&sk->sk_rmem_alloc, 0); vcc_create() 173 sk->sk_destruct = vcc_sock_destruct; vcc_create() 177 static void vcc_destroy_socket(struct sock *sk) vcc_destroy_socket() argument 179 struct atm_vcc *vcc = atm_sk(sk); vcc_destroy_socket() 191 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { vcc_destroy_socket() 200 vcc_remove_socket(sk); vcc_destroy_socket() 205 struct sock *sk = sock->sk; vcc_release() local 207 if (sk) { vcc_release() 208 lock_sock(sk); vcc_release() 209 vcc_destroy_socket(sock->sk); vcc_release() 210 release_sock(sk); vcc_release() 211 sock_put(sk); vcc_release() 219 struct sock *sk = sk_atm(vcc); vcc_release_async() local 222 sk->sk_shutdown |= RCV_SHUTDOWN; vcc_release_async() 223 sk->sk_err = -reply; vcc_release_async() 225 sk->sk_state_change(sk); vcc_release_async() 389 struct sock *sk = sk_atm(vcc); __vcc_connect() local 410 __vcc_insert_socket(sk); __vcc_connect() 458 vcc_remove_socket(sk); __vcc_connect() 529 struct sock *sk = sock->sk; vcc_recvmsg() local 547 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error); vcc_recvmsg() 560 sock_recv_ts_and_drops(msg, sk, skb); vcc_recvmsg() 563 pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), vcc_recvmsg() 568 skb_free_datagram(sk, skb); 
vcc_recvmsg() 574 struct sock *sk = sock->sk; vcc_sendmsg() local 580 lock_sock(sk); vcc_sendmsg() 607 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); vcc_sendmsg() 626 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); vcc_sendmsg() 628 finish_wait(sk_sleep(sk), &wait); vcc_sendmsg() 643 release_sock(sk); vcc_sendmsg() 649 struct sock *sk = sock->sk; vcc_poll() local 653 sock_poll_wait(file, sk_sleep(sk), wait); vcc_poll() 659 if (sk->sk_err) vcc_poll() 667 if (!skb_queue_empty(&sk->sk_receive_queue)) vcc_poll() 676 vcc_writable(sk)) vcc_poll()
|
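alloc_tx() in common.c charges skb->truesize to sk_wmem_alloc, and atm_pop_raw()/vcc_write_space() release the charge and wake writers. A condensed sketch of that charge/release pairing (charge_tx/release_tx are invented names; illustrative only, not the verbatim 4.1 source):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static struct sk_buff *charge_tx(struct sock *sk, unsigned int size)
    {
        struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);

        if (skb)
            /* charge the real memory cost, not just the payload */
            atomic_add(skb->truesize, &sk->sk_wmem_alloc);
        return skb;
    }

    static void release_tx(struct sock *sk, struct sk_buff *skb)
    {
        atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
        sk->sk_write_space(sk);         /* may wake POLLOUT waiters */
    }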
H A D | svc.c | 49 struct sock *sk = sk_atm(vcc); svc_disconnect() local 55 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); svc_disconnect() 60 finish_wait(sk_sleep(sk), &wait); svc_disconnect() 64 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { svc_disconnect() 76 struct sock *sk = sock->sk; svc_release() local 79 if (sk) { svc_release() 98 struct sock *sk = sock->sk; svc_bind() local 105 lock_sock(sk); svc_bind() 131 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); svc_bind() 136 finish_wait(sk_sleep(sk), &wait); svc_bind() 142 if (!sk->sk_err) svc_bind() 144 error = -sk->sk_err; svc_bind() 146 release_sock(sk); svc_bind() 154 struct sock *sk = sock->sk; svc_connect() local 160 lock_sock(sk); svc_connect() 179 if (sk->sk_err) { svc_connect() 180 error = -sk->sk_err; svc_connect() 213 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); svc_connect() 217 prepare_to_wait(sk_sleep(sk), &wait, svc_connect() 236 prepare_to_wait(sk_sleep(sk), &wait, svc_connect() 240 if (!sk->sk_err) svc_connect() 243 prepare_to_wait(sk_sleep(sk), &wait, svc_connect() 254 finish_wait(sk_sleep(sk), &wait); svc_connect() 261 if (sk->sk_err) { svc_connect() 262 error = -sk->sk_err; svc_connect() 277 release_sock(sk); svc_connect() 284 struct sock *sk = sock->sk; svc_listen() local 289 lock_sock(sk); svc_listen() 302 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); svc_listen() 307 finish_wait(sk_sleep(sk), &wait); svc_listen() 313 vcc_insert_socket(sk); svc_listen() 314 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; svc_listen() 315 error = -sk->sk_err; svc_listen() 317 release_sock(sk); svc_listen() 323 struct sock *sk = sock->sk; svc_accept() local 330 lock_sock(sk); svc_accept() 332 error = svc_create(sock_net(sk), newsock, 0, 0); svc_accept() 342 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); svc_accept() 343 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && svc_accept() 348 error = -sk->sk_err; svc_accept() 355 release_sock(sk); svc_accept() 357 lock_sock(sk); svc_accept() 362 prepare_to_wait(sk_sleep(sk), &wait, svc_accept() 365 finish_wait(sk_sleep(sk), &wait); svc_accept() 382 sk->sk_ack_backlog--; svc_accept() 397 release_sock(sk); svc_accept() 399 lock_sock(sk); svc_accept() 415 release_sock(sk); svc_accept() 433 struct sock *sk = sk_atm(vcc); svc_change_qos() local 439 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); svc_change_qos() 446 finish_wait(sk_sleep(sk), &wait); svc_change_qos() 449 return -sk->sk_err; svc_change_qos() 455 struct sock *sk = sock->sk; svc_setsockopt() local 459 lock_sock(sk); svc_setsockopt() 493 release_sock(sk); svc_setsockopt() 500 struct sock *sk = sock->sk; svc_getsockopt() local 503 lock_sock(sk); svc_getsockopt() 521 release_sock(sk); svc_getsockopt() 529 struct sock *sk = sock->sk; svc_addparty() local 533 lock_sock(sk); svc_addparty() 543 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); svc_addparty() 548 finish_wait(sk_sleep(sk), &wait); svc_addparty() 549 error = xchg(&sk->sk_err_soft, 0); svc_addparty() 551 release_sock(sk); svc_addparty() 558 struct sock *sk = sock->sk; svc_dropparty() local 562 lock_sock(sk); svc_dropparty() 566 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); svc_dropparty() 571 finish_wait(sk_sleep(sk), &wait); svc_dropparty() 576 error = xchg(&sk->sk_err_soft, 0); svc_dropparty() 578 release_sock(sk); svc_dropparty()
|
H A D | pvc.c | 29 struct sock *sk = sock->sk; pvc_bind() local 39 lock_sock(sk); pvc_bind() 54 release_sock(sk); pvc_bind() 67 struct sock *sk = sock->sk; pvc_setsockopt() local 70 lock_sock(sk); pvc_setsockopt() 72 release_sock(sk); pvc_setsockopt() 79 struct sock *sk = sock->sk; pvc_getsockopt() local 82 lock_sock(sk); pvc_getsockopt() 84 release_sock(sk); pvc_getsockopt()
|
H A D | proc.c | 70 struct sock *sk; member in struct:vcc_state 74 static inline int compare_family(struct sock *sk, int family) compare_family() argument 76 return !family || (sk->sk_family == family); compare_family() 81 struct sock *sk = *sock; __vcc_walk() local 83 if (sk == SEQ_START_TOKEN) { __vcc_walk() 87 sk = hlist_empty(head) ? NULL : __sk_head(head); __vcc_walk() 88 if (sk) __vcc_walk() 94 for (; sk; sk = sk_next(sk)) { __vcc_walk() 95 l -= compare_family(sk, family); __vcc_walk() 99 if (!sk && ++*bucket < VCC_HTABLE_SIZE) { __vcc_walk() 100 sk = sk_head(&vcc_hash[*bucket]); __vcc_walk() 103 sk = SEQ_START_TOKEN; __vcc_walk() 105 *sock = sk; __vcc_walk() 111 return __vcc_walk(&state->sk, state->family, &state->bucket, l) ? vcc_walk() 135 state->sk = SEQ_START_TOKEN; __acquires() 192 struct sock *sk = sk_atm(vcc); vcc_info() local 200 switch (sk->sk_family) { vcc_info() 208 seq_printf(seq, "%3d", sk->sk_family); vcc_info() 211 vcc->flags, sk->sk_err, vcc_info() 212 sk_wmem_alloc_get(sk), sk->sk_sndbuf, vcc_info() 213 sk_rmem_alloc_get(sk), sk->sk_rcvbuf, vcc_info() 214 atomic_read(&sk->sk_refcnt)); vcc_info() 281 struct atm_vcc *vcc = atm_sk(state->sk); pvc_seq_show() 315 struct atm_vcc *vcc = atm_sk(state->sk); vcc_seq_show() 350 struct atm_vcc *vcc = atm_sk(state->sk); svc_seq_show()
|
H A D | signaling.c | 67 struct sock *sk; sigd_send() local 73 sk = sk_atm(vcc); sigd_send() 77 sk->sk_err = -msg->reply; sigd_send() 98 sk->sk_err = -msg->reply; sigd_send() 103 sk = sk_atm(vcc); sigd_send() 105 lock_sock(sk); sigd_send() 106 if (sk_acceptq_is_full(sk)) { sigd_send() 111 sk->sk_ack_backlog++; sigd_send() 112 skb_queue_tail(&sk->sk_receive_queue, skb); sigd_send() 113 pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk)); sigd_send() 114 sk->sk_state_change(sk); sigd_send() 116 release_sock(sk); sigd_send() 127 sk->sk_err_soft = msg->reply; sigd_send() 135 sk->sk_state_change(sk); sigd_send()
|
/linux-4.1.27/include/net/phonet/ |
H A D | gprs.h | 31 int pep_writeable(struct sock *sk); 32 int pep_write(struct sock *sk, struct sk_buff *skb); 33 struct sk_buff *pep_read(struct sock *sk); 35 int gprs_attach(struct sock *sk); 36 void gprs_detach(struct sock *sk);
|
H A D | phonet.h | 37 struct sock sk; member in struct:pn_sock 43 static inline struct pn_sock *pn_sk(struct sock *sk) pn_sk() argument 45 return (struct pn_sock *)sk; pn_sk() 54 void pn_sock_hash(struct sock *sk); 55 void pn_sock_unhash(struct sock *sk); 56 int pn_sock_get_port(struct sock *sk, unsigned short sport); 60 int pn_sock_unbind_res(struct sock *sk, u8 res); 61 void pn_sock_unbind_all_res(struct sock *sk); 63 int pn_skb_send(struct sock *sk, struct sk_buff *skb,
|
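pn_sk() above converts with a plain cast, which is valid only because struct sock is the first member of struct pn_sock; the tipc listing further down does the same conversion with container_of(). A generic sketch of the cast idiom (my_sock/my_sk are invented example names):

    #include <net/sock.h>

    struct my_sock {
        struct sock sk;         /* must remain the first member */
        int private_state;
    };

    static inline struct my_sock *my_sk(struct sock *sk)
    {
        return (struct my_sock *)sk;
    }

The container_of() form is the safer choice when the embedded sock is not guaranteed to stay first.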
/linux-4.1.27/net/rds/ |
H A D | tcp_listen.c | 88 ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, rds_tcp_accept_one() 89 sock->sk->sk_protocol, &new_sock); rds_tcp_accept_one() 105 inet = inet_sk(new_sock->sk); rds_tcp_accept_one() 157 void rds_tcp_listen_data_ready(struct sock *sk) rds_tcp_listen_data_ready() argument 159 void (*ready)(struct sock *sk); rds_tcp_listen_data_ready() 161 rdsdebug("listen data ready sk %p\n", sk); rds_tcp_listen_data_ready() 163 read_lock(&sk->sk_callback_lock); rds_tcp_listen_data_ready() 164 ready = sk->sk_user_data; rds_tcp_listen_data_ready() 166 ready = sk->sk_data_ready; rds_tcp_listen_data_ready() 176 if (sk->sk_state == TCP_LISTEN) rds_tcp_listen_data_ready() 180 read_unlock(&sk->sk_callback_lock); rds_tcp_listen_data_ready() 181 ready(sk); rds_tcp_listen_data_ready() 194 sock->sk->sk_reuse = SK_CAN_REUSE; rds_tcp_listen_init() 197 write_lock_bh(&sock->sk->sk_callback_lock); rds_tcp_listen_init() 198 sock->sk->sk_user_data = sock->sk->sk_data_ready; rds_tcp_listen_init() 199 sock->sk->sk_data_ready = rds_tcp_listen_data_ready; rds_tcp_listen_init() 200 write_unlock_bh(&sock->sk->sk_callback_lock); rds_tcp_listen_init() 225 struct sock *sk; rds_tcp_listen_stop() local 230 sk = sock->sk; rds_tcp_listen_stop() 233 lock_sock(sk); rds_tcp_listen_stop() 234 write_lock_bh(&sk->sk_callback_lock); rds_tcp_listen_stop() 235 if (sk->sk_user_data) { rds_tcp_listen_stop() 236 sk->sk_data_ready = sk->sk_user_data; rds_tcp_listen_stop() 237 sk->sk_user_data = NULL; rds_tcp_listen_stop() 239 write_unlock_bh(&sk->sk_callback_lock); rds_tcp_listen_stop() 240 release_sock(sk); rds_tcp_listen_stop()
|
H A D | tcp.c | 69 struct sock *sk = sock->sk; rds_tcp_tune() local 77 lock_sock(sk); rds_tcp_tune() 78 sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE; rds_tcp_tune() 79 sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE; rds_tcp_tune() 80 sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; rds_tcp_tune() 81 release_sock(sk); rds_tcp_tune() 86 return tcp_sk(tc->t_sock->sk)->snd_nxt; rds_tcp_snd_nxt() 91 return tcp_sk(tc->t_sock->sk)->snd_una; rds_tcp_snd_una() 98 write_lock_bh(&sock->sk->sk_callback_lock); rds_tcp_restore_callbacks() 108 sock->sk->sk_write_space = tc->t_orig_write_space; rds_tcp_restore_callbacks() 109 sock->sk->sk_data_ready = tc->t_orig_data_ready; rds_tcp_restore_callbacks() 110 sock->sk->sk_state_change = tc->t_orig_state_change; rds_tcp_restore_callbacks() 111 sock->sk->sk_user_data = NULL; rds_tcp_restore_callbacks() 113 write_unlock_bh(&sock->sk->sk_callback_lock); rds_tcp_restore_callbacks() 126 write_lock_bh(&sock->sk->sk_callback_lock); rds_tcp_set_callbacks() 135 if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready) rds_tcp_set_callbacks() 136 sock->sk->sk_data_ready = sock->sk->sk_user_data; rds_tcp_set_callbacks() 140 tc->t_orig_data_ready = sock->sk->sk_data_ready; rds_tcp_set_callbacks() 141 tc->t_orig_write_space = sock->sk->sk_write_space; rds_tcp_set_callbacks() 142 tc->t_orig_state_change = sock->sk->sk_state_change; rds_tcp_set_callbacks() 144 sock->sk->sk_user_data = conn; rds_tcp_set_callbacks() 145 sock->sk->sk_data_ready = rds_tcp_data_ready; rds_tcp_set_callbacks() 146 sock->sk->sk_write_space = rds_tcp_write_space; rds_tcp_set_callbacks() 147 sock->sk->sk_state_change = rds_tcp_state_change; rds_tcp_set_callbacks() 149 write_unlock_bh(&sock->sk->sk_callback_lock); rds_tcp_set_callbacks()
|
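rds_tcp_set_callbacks()/rds_tcp_restore_callbacks() above borrow a TCP socket's callbacks, stashing the original sk_data_ready in sk_user_data under sk_callback_lock (tcp_listen.c uses the stash to chain to the original handler). A condensed sketch of that pattern (hijack_/restore_ names are invented):

    #include <net/sock.h>

    static void hijack_data_ready(struct socket *sock,
                                  void (*mine)(struct sock *))
    {
        struct sock *sk = sock->sk;

        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_user_data = sk->sk_data_ready;   /* save the original */
        sk->sk_data_ready = mine;               /* divert wakeups to us */
        write_unlock_bh(&sk->sk_callback_lock);
    }

    static void restore_data_ready(struct socket *sock)
    {
        struct sock *sk = sock->sk;

        write_lock_bh(&sk->sk_callback_lock);
        if (sk->sk_user_data) {
            sk->sk_data_ready = sk->sk_user_data;
            sk->sk_user_data = NULL;
        }
        write_unlock_bh(&sk->sk_callback_lock);
    }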
H A D | tcp_connect.c | 40 void rds_tcp_state_change(struct sock *sk) rds_tcp_state_change() argument 42 void (*state_change)(struct sock *sk); rds_tcp_state_change() 46 read_lock(&sk->sk_callback_lock); rds_tcp_state_change() 47 conn = sk->sk_user_data; rds_tcp_state_change() 49 state_change = sk->sk_state_change; rds_tcp_state_change() 55 rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state); rds_tcp_state_change() 57 switch(sk->sk_state) { rds_tcp_state_change() 72 read_unlock(&sk->sk_callback_lock); rds_tcp_state_change() 73 state_change(sk); rds_tcp_state_change() 143 lock_sock(sock->sk); rds_tcp_conn_shutdown() 146 release_sock(sock->sk); rds_tcp_conn_shutdown()
|
/linux-4.1.27/net/netfilter/ |
H A D | nf_sockopt.c | 61 static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf, nf_sockopt_find() argument 92 static int nf_sockopt(struct sock *sk, u_int8_t pf, int val, nf_sockopt() argument 98 ops = nf_sockopt_find(sk, pf, val, get); nf_sockopt() 103 ret = ops->get(sk, val, opt, len); nf_sockopt() 105 ret = ops->set(sk, val, opt, *len); nf_sockopt() 111 int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, nf_setsockopt() argument 114 return nf_sockopt(sk, pf, val, opt, &len, 0); nf_setsockopt() 118 int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, nf_getsockopt() argument 121 return nf_sockopt(sk, pf, val, opt, len, 1); nf_getsockopt() 126 static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val, compat_nf_sockopt() argument 132 ops = nf_sockopt_find(sk, pf, val, get); compat_nf_sockopt() 138 ret = ops->compat_get(sk, val, opt, len); compat_nf_sockopt() 140 ret = ops->get(sk, val, opt, len); compat_nf_sockopt() 143 ret = ops->compat_set(sk, val, opt, *len); compat_nf_sockopt() 145 ret = ops->set(sk, val, opt, *len); compat_nf_sockopt() 152 int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, compat_nf_setsockopt() argument 155 return compat_nf_sockopt(sk, pf, val, opt, &len, 0); compat_nf_setsockopt() 159 int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, compat_nf_getsockopt() argument 162 return compat_nf_sockopt(sk, pf, val, opt, len, 1); compat_nf_getsockopt()
|
H A D | xt_TPROXY.c | 43 static bool tproxy_sk_is_transparent(struct sock *sk) tproxy_sk_is_transparent() argument 45 switch (sk->sk_state) { tproxy_sk_is_transparent() 47 if (inet_twsk(sk)->tw_transparent) tproxy_sk_is_transparent() 51 if (inet_rsk(inet_reqsk(sk))->no_srccheck) tproxy_sk_is_transparent() 55 if (inet_sk(sk)->transparent) tproxy_sk_is_transparent() 59 sock_gen_put(sk); tproxy_sk_is_transparent() 114 struct sock *sk; nf_tproxy_get_sock_v4() local 120 sk = inet_lookup_listener(net, &tcp_hashinfo, nf_tproxy_get_sock_v4() 132 sk = inet_lookup_established(net, &tcp_hashinfo, nf_tproxy_get_sock_v4() 141 sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, nf_tproxy_get_sock_v4() 143 if (sk) { nf_tproxy_get_sock_v4() 144 int connected = (sk->sk_state == TCP_ESTABLISHED); nf_tproxy_get_sock_v4() 145 int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0); nf_tproxy_get_sock_v4() 154 sock_put(sk); nf_tproxy_get_sock_v4() 155 sk = NULL; nf_tproxy_get_sock_v4() 161 sk = NULL; nf_tproxy_get_sock_v4() 165 protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk); nf_tproxy_get_sock_v4() 167 return sk; nf_tproxy_get_sock_v4() 178 struct sock *sk; nf_tproxy_get_sock_v6() local 184 sk = inet6_lookup_listener(net, &tcp_hashinfo, nf_tproxy_get_sock_v6() 196 sk = __inet6_lookup_established(net, &tcp_hashinfo, nf_tproxy_get_sock_v6() 205 sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, nf_tproxy_get_sock_v6() 207 if (sk) { nf_tproxy_get_sock_v6() 208 int connected = (sk->sk_state == TCP_ESTABLISHED); nf_tproxy_get_sock_v6() 209 int wildcard = ipv6_addr_any(&sk->sk_v6_rcv_saddr); nf_tproxy_get_sock_v6() 218 sock_put(sk); nf_tproxy_get_sock_v6() 219 sk = NULL; nf_tproxy_get_sock_v6() 225 sk = NULL; nf_tproxy_get_sock_v6() 229 protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk); nf_tproxy_get_sock_v6() 231 return sk; nf_tproxy_get_sock_v6() 240 * @sk: The TIME_WAIT TCP socket found by the lookup. 254 struct sock *sk) tproxy_handle_time_wait4() 261 inet_twsk_put(inet_twsk(sk)); tproxy_handle_time_wait4() 275 inet_twsk_deschedule(inet_twsk(sk)); tproxy_handle_time_wait4() 276 inet_twsk_put(inet_twsk(sk)); tproxy_handle_time_wait4() 277 sk = sk2; tproxy_handle_time_wait4() 281 return sk; tproxy_handle_time_wait4() 284 /* assign a socket to the skb -- consumes sk */ 286 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) nf_tproxy_assign_sock() argument 289 skb->sk = sk; nf_tproxy_assign_sock() 299 struct sock *sk; tproxy_tg4() local 309 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, tproxy_tg4() 319 if (sk && sk->sk_state == TCP_TIME_WAIT) tproxy_tg4() 321 sk = tproxy_handle_time_wait4(skb, laddr, lport, sk); tproxy_tg4() 322 else if (!sk) tproxy_tg4() 325 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, tproxy_tg4() 330 /* NOTE: assign_sock consumes our sk reference */ tproxy_tg4() 331 if (sk && tproxy_sk_is_transparent(sk)) { tproxy_tg4() 340 nf_tproxy_assign_sock(skb, sk); tproxy_tg4() 401 * @sk: The TIME_WAIT TCP socket found by the lookup. 
416 struct sock *sk) tproxy_handle_time_wait6() 424 inet_twsk_put(inet_twsk(sk)); tproxy_handle_time_wait6() 440 inet_twsk_deschedule(inet_twsk(sk)); tproxy_handle_time_wait6() 441 inet_twsk_put(inet_twsk(sk)); tproxy_handle_time_wait6() 442 sk = sk2; tproxy_handle_time_wait6() 446 return sk; tproxy_handle_time_wait6() 455 struct sock *sk; tproxy_tg6_v1() local 477 sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, tproxy_tg6_v1() 486 if (sk && sk->sk_state == TCP_TIME_WAIT) tproxy_tg6_v1() 488 sk = tproxy_handle_time_wait6(skb, tproto, thoff, par, sk); tproxy_tg6_v1() 489 else if (!sk) tproxy_tg6_v1() 492 sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, tproxy_tg6_v1() 497 /* NOTE: assign_sock consumes our sk reference */ tproxy_tg6_v1() 498 if (sk && tproxy_sk_is_transparent(sk)) { tproxy_tg6_v1() 507 nf_tproxy_assign_sock(skb, sk); tproxy_tg6_v1() 253 tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport, struct sock *sk) tproxy_handle_time_wait4() argument 414 tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, const struct xt_action_param *par, struct sock *sk) tproxy_handle_time_wait6() argument
|
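tproxy_sk_is_transparent() above checks the per-state transparent flags (tw_transparent, no_srccheck, inet_sk(sk)->transparent) that a TPROXY listener sets with IP_TRANSPARENT before binding. A userspace sketch (needs CAP_NET_ADMIN plus matching iptables TPROXY and policy-routing rules; the port is illustrative):

    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    #ifndef IP_TRANSPARENT
    #define IP_TRANSPARENT 19       /* uapi linux/in.h value */
    #endif

    int main(void)
    {
        int one = 1;
        int s = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in a;

        memset(&a, 0, sizeof(a));
        a.sin_family = AF_INET;
        a.sin_port = htons(3128);
        a.sin_addr.s_addr = htonl(INADDR_ANY);

        /* sets inet_sk(sk)->transparent, which
         * tproxy_sk_is_transparent() later tests */
        if (setsockopt(s, IPPROTO_IP, IP_TRANSPARENT, &one, sizeof(one)))
            perror("IP_TRANSPARENT (needs CAP_NET_ADMIN)");
        bind(s, (struct sockaddr *)&a, sizeof(a));
        listen(s, 64);
        return 0;
    }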
H A D | xt_socket.c | 132 static bool xt_socket_sk_is_transparent(struct sock *sk) xt_socket_sk_is_transparent() argument 134 switch (sk->sk_state) { xt_socket_sk_is_transparent() 136 return inet_twsk(sk)->tw_transparent; xt_socket_sk_is_transparent() 139 return inet_rsk(inet_reqsk(sk))->no_srccheck; xt_socket_sk_is_transparent() 142 return inet_sk(sk)->transparent; xt_socket_sk_is_transparent() 208 struct sock *sk = skb->sk; socket_match() local 210 if (!sk) socket_match() 211 sk = xt_socket_lookup_slow_v4(skb, par->in); socket_match() 212 if (sk) { socket_match() 220 sk_fullsock(sk) && socket_match() 221 inet_sk(sk)->inet_rcv_saddr == 0); socket_match() 227 transparent = xt_socket_sk_is_transparent(sk); socket_match() 229 if (sk != skb->sk) socket_match() 230 sock_gen_put(sk); socket_match() 233 sk = NULL; socket_match() 236 return sk != NULL; socket_match() 377 struct sock *sk = skb->sk; socket_mt6_v1_v2() local 379 if (!sk) socket_mt6_v1_v2() 380 sk = xt_socket_lookup_slow_v6(skb, par->in); socket_mt6_v1_v2() 381 if (sk) { socket_mt6_v1_v2() 389 sk_fullsock(sk) && socket_mt6_v1_v2() 390 ipv6_addr_any(&sk->sk_v6_rcv_saddr)); socket_mt6_v1_v2() 396 transparent = xt_socket_sk_is_transparent(sk); socket_mt6_v1_v2() 398 if (sk != skb->sk) socket_mt6_v1_v2() 399 sock_gen_put(sk); socket_mt6_v1_v2() 402 sk = NULL; socket_mt6_v1_v2() 405 return sk != NULL; socket_mt6_v1_v2()
|
H A D | nft_meta.c | 89 if (skb->sk == NULL || !sk_fullsock(skb->sk)) nft_meta_get_eval() 92 read_lock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval() 93 if (skb->sk->sk_socket == NULL || nft_meta_get_eval() 94 skb->sk->sk_socket->file == NULL) { nft_meta_get_eval() 95 read_unlock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval() 100 skb->sk->sk_socket->file->f_cred->fsuid); nft_meta_get_eval() 101 read_unlock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval() 104 if (skb->sk == NULL || !sk_fullsock(skb->sk)) nft_meta_get_eval() 107 read_lock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval() 108 if (skb->sk->sk_socket == NULL || nft_meta_get_eval() 109 skb->sk->sk_socket->file == NULL) { nft_meta_get_eval() 110 read_unlock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval() 114 skb->sk->sk_socket->file->f_cred->fsgid); nft_meta_get_eval() 115 read_unlock_bh(&skb->sk->sk_callback_lock); nft_meta_get_eval() 170 if (skb->sk == NULL || !sk_fullsock(skb->sk)) nft_meta_get_eval() 172 *dest = skb->sk->sk_classid; nft_meta_get_eval()
|
/linux-4.1.27/drivers/net/ppp/ |
H A D | pppox.c | 57 void pppox_unbind_sock(struct sock *sk) pppox_unbind_sock() argument 61 if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) { pppox_unbind_sock() 62 ppp_unregister_channel(&pppox_sk(sk)->chan); pppox_unbind_sock() 63 sk->sk_state = PPPOX_DEAD; pppox_unbind_sock() 73 struct sock *sk = sock->sk; pppox_ioctl() local 74 struct pppox_sock *po = pppox_sk(sk); pppox_ioctl() 77 lock_sock(sk); pppox_ioctl() 83 if (!(sk->sk_state & PPPOX_CONNECTED)) pppox_ioctl() 92 sk->sk_state |= PPPOX_BOUND; pppox_ioctl() 96 rc = pppox_protos[sk->sk_protocol]->ioctl ? pppox_ioctl() 97 pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, arg) : -ENOTTY; pppox_ioctl() 100 release_sock(sk); pppox_ioctl()
|
H A D | pppoe.c | 24 * guards against sock_put not actually freeing the sk 92 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 289 struct sock *sk; pppoe_flush_dev() local 299 sk = sk_pppox(po); pppoe_flush_dev() 309 sock_hold(sk); pppoe_flush_dev() 311 lock_sock(sk); pppoe_flush_dev() 314 sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { pppoe_flush_dev() 315 pppox_unbind_sock(sk); pppoe_flush_dev() 316 sk->sk_state_change(sk); pppoe_flush_dev() 321 release_sock(sk); pppoe_flush_dev() 322 sock_put(sk); pppoe_flush_dev() 372 static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) pppoe_rcv_core() argument 374 struct pppox_sock *po = pppox_sk(sk); pppoe_rcv_core() 378 * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state pppoe_rcv_core() 385 if (sk->sk_state & PPPOX_BOUND) { pppoe_rcv_core() 387 } else if (sk->sk_state & PPPOX_RELAY) { pppoe_rcv_core() 388 relay_po = get_item_by_addr(sock_net(sk), pppoe_rcv_core() 401 if (sock_queue_rcv_skb(sk, skb)) pppoe_rcv_core() 466 struct sock *sk = sk_pppox(po); pppoe_unbind_sock_work() local 468 lock_sock(sk); pppoe_unbind_sock_work() 473 pppox_unbind_sock(sk); pppoe_unbind_sock_work() 474 release_sock(sk); pppoe_unbind_sock_work() 475 sock_put(sk); pppoe_unbind_sock_work() 506 struct sock *sk = sk_pppox(po); pppoe_disc_rcv() local 508 bh_lock_sock(sk); pppoe_disc_rcv() 515 if (sock_owned_by_user(sk) == 0) { pppoe_disc_rcv() 519 sk->sk_state = PPPOX_ZOMBIE; pppoe_disc_rcv() 522 bh_unlock_sock(sk); pppoe_disc_rcv() 524 sock_put(sk); pppoe_disc_rcv() 556 struct sock *sk; pppoe_create() local 558 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto); pppoe_create() 559 if (!sk) pppoe_create() 562 sock_init_data(sock, sk); pppoe_create() 567 sk->sk_backlog_rcv = pppoe_rcv_core; pppoe_create() 568 sk->sk_state = PPPOX_NONE; pppoe_create() 569 sk->sk_type = SOCK_STREAM; pppoe_create() 570 sk->sk_family = PF_PPPOX; pppoe_create() 571 sk->sk_protocol = PX_PROTO_OE; pppoe_create() 573 INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work, pppoe_create() 581 struct sock *sk = sock->sk; pppoe_release() local 586 if (!sk) pppoe_release() 589 lock_sock(sk); pppoe_release() 590 if (sock_flag(sk, SOCK_DEAD)) { pppoe_release() 591 release_sock(sk); pppoe_release() 595 po = pppox_sk(sk); pppoe_release() 602 pppox_unbind_sock(sk); pppoe_release() 605 sk->sk_state = PPPOX_DEAD; pppoe_release() 607 net = sock_net(sk); pppoe_release() 617 sock_orphan(sk); pppoe_release() 618 sock->sk = NULL; pppoe_release() 620 skb_queue_purge(&sk->sk_receive_queue); pppoe_release() 621 release_sock(sk); pppoe_release() 622 sock_put(sk); pppoe_release() 630 struct sock *sk = sock->sk; pppoe_connect() local 632 struct pppox_sock *po = pppox_sk(sk); pppoe_connect() 638 lock_sock(sk); pppoe_connect() 646 if ((sk->sk_state & PPPOX_CONNECTED) && pppoe_connect() 652 if ((sk->sk_state & PPPOX_DEAD) && pppoe_connect() 660 pppox_unbind_sock(sk); pppoe_connect() 661 pn = pppoe_pernet(sock_net(sk)); pppoe_connect() 676 sk->sk_state = PPPOX_NONE; pppoe_connect() 682 net = sock_net(sk); pppoe_connect() 708 po->chan.private = sk; pppoe_connect() 718 sk->sk_state = PPPOX_CONNECTED; pppoe_connect() 724 release_sock(sk); pppoe_connect() 742 memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa, pppoe_getname() 755 struct sock *sk = sock->sk; pppoe_ioctl() local 756 struct pppox_sock *po = pppox_sk(sk); pppoe_ioctl() 763 if (!(sk->sk_state & PPPOX_CONNECTED)) pppoe_ioctl() 777 if (!(sk->sk_state & PPPOX_CONNECTED)) pppoe_ioctl() 804 if (sk->sk_state & 
(PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD)) pppoe_ioctl() 808 if (!(sk->sk_state & PPPOX_CONNECTED)) pppoe_ioctl() 826 relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay); pppoe_ioctl() 831 sk->sk_state |= PPPOX_RELAY; pppoe_ioctl() 838 if (!(sk->sk_state & PPPOX_RELAY)) pppoe_ioctl() 841 sk->sk_state &= ~PPPOX_RELAY; pppoe_ioctl() 856 struct sock *sk = sock->sk; pppoe_sendmsg() local 857 struct pppox_sock *po = pppox_sk(sk); pppoe_sendmsg() 864 lock_sock(sk); pppoe_sendmsg() 865 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { pppoe_sendmsg() 882 skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, pppoe_sendmsg() 895 skb->priority = sk->sk_priority; pppoe_sendmsg() 918 release_sock(sk); pppoe_sendmsg() 927 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) __pppoe_xmit() argument 929 struct pppox_sock *po = pppox_sk(sk); __pppoe_xmit() 936 * sk->sk_state cannot change, so we don't need to do lock_sock(). __pppoe_xmit() 942 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) __pppoe_xmit() 986 struct sock *sk = (struct sock *)chan->private; pppoe_xmit() local 987 return __pppoe_xmit(sk, skb); pppoe_xmit() 997 struct sock *sk = sock->sk; pppoe_recvmsg() local 1001 if (sk->sk_state & PPPOX_BOUND) { pppoe_recvmsg() 1006 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, pppoe_recvmsg()
|
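pppoe_create()/pppoe_connect() above back the sequence a PPPoE daemon performs once discovery has yielded a session. A userspace sketch (the session id, MAC address and "eth0" are placeholders standing in for values learned from the PADS exchange):

    #include <linux/if_pppox.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
        int s = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OE);
        struct sockaddr_pppox sp;

        memset(&sp, 0, sizeof(sp));
        sp.sa_family   = AF_PPPOX;
        sp.sa_protocol = PX_PROTO_OE;
        sp.sa_addr.pppoe.sid = htons(0x1234);   /* session id from PADS */
        memcpy(sp.sa_addr.pppoe.remote,
               "\x00\x11\x22\x33\x44\x55", 6);  /* access concentrator MAC */
        strcpy(sp.sa_addr.pppoe.dev, "eth0");

        /* drives pppoe_connect(); on success sk_state gains
         * PPPOX_CONNECTED and the ppp channel is registered */
        if (connect(s, (struct sockaddr *)&sp, sizeof(sp)))
            perror("connect");
        return 0;
    }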
H A D | pptp.c | 175 struct sock *sk = (struct sock *) chan->private; pptp_xmit() local 176 struct pppox_sock *po = pppox_sk(sk); pptp_xmit() 195 rt = ip_route_output_ports(sock_net(sk), &fl4, NULL, pptp_xmit() 213 if (skb->sk) pptp_xmit() 214 skb_set_owner_w(new_skb, skb->sk); pptp_xmit() 270 if (ip_dont_fragment(sk, &rt->dst)) pptp_xmit() 287 ip_select_ident(sock_net(sk), skb, NULL); pptp_xmit() 298 static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) pptp_rcv_core() argument 300 struct pppox_sock *po = pppox_sk(sk); pptp_rcv_core() 306 if (!(sk->sk_state & PPPOX_CONNECTED)) { pptp_rcv_core() 307 if (sock_queue_rcv_skb(sk, skb)) pptp_rcv_core() 420 struct sock *sk = sock->sk; pptp_bind() local 422 struct pppox_sock *po = pppox_sk(sk); pptp_bind() 428 lock_sock(sk); pptp_bind() 430 if (sk->sk_state & PPPOX_DEAD) { pptp_bind() 435 if (sk->sk_state & PPPOX_BOUND) { pptp_bind() 443 sk->sk_state |= PPPOX_BOUND; pptp_bind() 446 release_sock(sk); pptp_bind() 453 struct sock *sk = sock->sk; pptp_connect() local 455 struct pppox_sock *po = pppox_sk(sk); pptp_connect() 470 lock_sock(sk); pptp_connect() 472 if (sk->sk_state & PPPOX_CONNECTED) { pptp_connect() 478 if (sk->sk_state & PPPOX_DEAD) { pptp_connect() 488 po->chan.private = sk; pptp_connect() 491 rt = ip_route_output_ports(sock_net(sk), &fl4, sk, pptp_connect() 495 IPPROTO_GRE, RT_CONN_FLAGS(sk), 0); pptp_connect() 500 sk_setup_caps(sk, &rt->dst); pptp_connect() 516 sk->sk_state |= PPPOX_CONNECTED; pptp_connect() 519 release_sock(sk); pptp_connect() 533 sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; pptp_getname() 544 struct sock *sk = sock->sk; pptp_release() local 549 if (!sk) pptp_release() 552 lock_sock(sk); pptp_release() 554 if (sock_flag(sk, SOCK_DEAD)) { pptp_release() 555 release_sock(sk); pptp_release() 559 po = pppox_sk(sk); pptp_release() 563 pppox_unbind_sock(sk); pptp_release() 564 sk->sk_state = PPPOX_DEAD; pptp_release() 566 sock_orphan(sk); pptp_release() 567 sock->sk = NULL; pptp_release() 569 release_sock(sk); pptp_release() 570 sock_put(sk); pptp_release() 575 static void pptp_sock_destruct(struct sock *sk) pptp_sock_destruct() argument 577 if (!(sk->sk_state & PPPOX_DEAD)) { pptp_sock_destruct() 578 del_chan(pppox_sk(sk)); pptp_sock_destruct() 579 pppox_unbind_sock(sk); pptp_sock_destruct() 581 skb_queue_purge(&sk->sk_receive_queue); pptp_sock_destruct() 587 struct sock *sk; pptp_create() local 591 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto); pptp_create() 592 if (!sk) pptp_create() 595 sock_init_data(sock, sk); pptp_create() 600 sk->sk_backlog_rcv = pptp_rcv_core; pptp_create() 601 sk->sk_state = PPPOX_NONE; pptp_create() 602 sk->sk_type = SOCK_STREAM; pptp_create() 603 sk->sk_family = PF_PPPOX; pptp_create() 604 sk->sk_protocol = PX_PROTO_PPTP; pptp_create() 605 sk->sk_destruct = pptp_sock_destruct; pptp_create() 607 po = pppox_sk(sk); pptp_create() 621 struct sock *sk = (struct sock *) chan->private; pptp_ppp_ioctl() local 622 struct pppox_sock *po = pppox_sk(sk); pptp_ppp_ioctl()
|
/linux-4.1.27/net/tipc/ |
H A D | socket.c | 58 * @sk: socket - interacts with 'port' and with user via the socket API 81 struct sock sk; member in struct:tipc_sock 104 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); 105 static void tipc_data_ready(struct sock *sk); 106 static void tipc_write_space(struct sock *sk); 177 * - pointer to socket sk structure (aka tipc_sock structure) 230 static struct tipc_sock *tipc_sk(const struct sock *sk) tipc_sk() argument 232 return container_of(sk, struct tipc_sock, sk); tipc_sk() 245 static void tsk_advance_rx_queue(struct sock *sk) tsk_advance_rx_queue() argument 247 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); tsk_advance_rx_queue() 255 static void tsk_rej_rx_queue(struct sock *sk) tsk_rej_rx_queue() argument 259 u32 own_node = tsk_own_node(tipc_sk(sk)); tsk_rej_rx_queue() 261 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { tsk_rej_rx_queue() 263 tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0); tsk_rej_rx_queue() 274 struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id); tsk_peer_msg() 318 struct sock *sk; tipc_sk_create() local 345 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); tipc_sk_create() 346 if (sk == NULL) tipc_sk_create() 349 tsk = tipc_sk(sk); tipc_sk_create() 353 tn = net_generic(sock_net(sk), tipc_net_id); tipc_sk_create() 360 sock_init_data(sock, sk); tipc_sk_create() 366 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); tipc_sk_create() 367 sk->sk_backlog_rcv = tipc_backlog_rcv; tipc_sk_create() 368 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; tipc_sk_create() 369 sk->sk_data_ready = tipc_data_ready; tipc_sk_create() 370 sk->sk_write_space = tipc_write_space; tipc_sk_create() 387 sock_put(&tsk->sk); tipc_sk_callback() 408 struct sock *sk = sock->sk; tipc_release() local 418 if (sk == NULL) tipc_release() 421 net = sock_net(sk); tipc_release() 422 tsk = tipc_sk(sk); tipc_release() 423 lock_sock(sk); tipc_release() 431 skb = __skb_dequeue(&sk->sk_receive_queue); tipc_release() 451 if (del_timer_sync(&sk->sk_timer) && tipc_release() 453 sock_put(sk); tipc_release() 466 __skb_queue_purge(&sk->sk_receive_queue); tipc_release() 470 release_sock(sk); tipc_release() 473 sock->sk = NULL; tipc_release() 496 struct sock *sk = sock->sk; tipc_bind() local 498 struct tipc_sock *tsk = tipc_sk(sk); tipc_bind() 501 lock_sock(sk); tipc_bind() 534 release_sock(sk); tipc_bind() 555 struct tipc_sock *tsk = tipc_sk(sock->sk); tipc_getname() 556 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id); tipc_getname() 622 struct sock *sk = sock->sk; tipc_poll() local 623 struct tipc_sock *tsk = tipc_sk(sk); tipc_poll() 626 sock_poll_wait(file, sk_sleep(sk), wait); tipc_poll() 640 if (!skb_queue_empty(&sk->sk_receive_queue)) tipc_poll() 665 struct sock *sk = sock->sk; tipc_sendmcast() local 666 struct tipc_sock *tsk = tipc_sk(sk); tipc_sendmcast() 667 struct net *net = sock_net(sk); tipc_sendmcast() 669 struct sk_buff_head *pktchain = &sk->sk_write_queue; tipc_sendmcast() 701 tipc_sk(sk)->link_cong = 1; tipc_sendmcast() 786 tsk->sk.sk_write_space(&tsk->sk); tipc_sk_proto_rcv() 801 struct sock *sk = sock->sk; tipc_wait_for_sndmsg() local 802 struct tipc_sock *tsk = tipc_sk(sk); tipc_wait_for_sndmsg() 807 int err = sock_error(sk); tipc_wait_for_sndmsg() 817 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); tipc_wait_for_sndmsg() 818 done = sk_wait_event(sk, timeo_p, !tsk->link_cong); tipc_wait_for_sndmsg() 819 finish_wait(sk_sleep(sk), &wait); tipc_wait_for_sndmsg() 840 struct sock *sk = sock->sk; tipc_sendmsg() 
local 843 lock_sock(sk); tipc_sendmsg() 845 release_sock(sk); tipc_sendmsg() 853 struct sock *sk = sock->sk; __tipc_sendmsg() local 854 struct tipc_sock *tsk = tipc_sk(sk); __tipc_sendmsg() 855 struct net *net = sock_net(sk); __tipc_sendmsg() 858 struct sk_buff_head *pktchain = &sk->sk_write_queue; __tipc_sendmsg() 890 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); __tipc_sendmsg() 953 struct sock *sk = sock->sk; tipc_wait_for_sndpkt() local 954 struct tipc_sock *tsk = tipc_sk(sk); tipc_wait_for_sndpkt() 959 int err = sock_error(sk); tipc_wait_for_sndpkt() 971 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); tipc_wait_for_sndpkt() 972 done = sk_wait_event(sk, timeo_p, tipc_wait_for_sndpkt() 976 finish_wait(sk_sleep(sk), &wait); tipc_wait_for_sndpkt() 994 struct sock *sk = sock->sk; tipc_send_stream() local 997 lock_sock(sk); tipc_send_stream() 999 release_sock(sk); tipc_send_stream() 1006 struct sock *sk = sock->sk; __tipc_send_stream() local 1007 struct net *net = sock_net(sk); __tipc_send_stream() 1008 struct tipc_sock *tsk = tipc_sk(sk); __tipc_send_stream() 1010 struct sk_buff_head *pktchain = &sk->sk_write_queue; __tipc_send_stream() 1036 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); __tipc_send_stream() 1097 struct sock *sk = &tsk->sk; tipc_sk_finish_conn() local 1098 struct net *net = sock_net(sk); tipc_sk_finish_conn() 1110 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); tipc_sk_finish_conn() 1211 struct net *net = sock_net(&tsk->sk); tipc_sk_send_ack() 1231 struct sock *sk = sock->sk; tipc_wait_for_rcvmsg() local 1237 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); tipc_wait_for_rcvmsg() 1238 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { tipc_wait_for_rcvmsg() 1243 release_sock(sk); tipc_wait_for_rcvmsg() 1245 lock_sock(sk); tipc_wait_for_rcvmsg() 1248 if (!skb_queue_empty(&sk->sk_receive_queue)) tipc_wait_for_rcvmsg() 1257 finish_wait(sk_sleep(sk), &wait); tipc_wait_for_rcvmsg() 1276 struct sock *sk = sock->sk; tipc_recvmsg() local 1277 struct tipc_sock *tsk = tipc_sk(sk); tipc_recvmsg() 1289 lock_sock(sk); tipc_recvmsg() 1296 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); tipc_recvmsg() 1305 buf = skb_peek(&sk->sk_receive_queue); tipc_recvmsg() 1312 tsk_advance_rx_queue(sk); tipc_recvmsg() 1349 tsk_advance_rx_queue(sk); tipc_recvmsg() 1352 release_sock(sk); tipc_recvmsg() 1370 struct sock *sk = sock->sk; tipc_recv_stream() local 1371 struct tipc_sock *tsk = tipc_sk(sk); tipc_recv_stream() 1385 lock_sock(sk); tipc_recv_stream() 1392 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); tipc_recv_stream() 1393 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); tipc_recv_stream() 1402 buf = skb_peek(&sk->sk_receive_queue); tipc_recv_stream() 1409 tsk_advance_rx_queue(sk); tipc_recv_stream() 1458 tsk_advance_rx_queue(sk); tipc_recv_stream() 1463 (!skb_queue_empty(&sk->sk_receive_queue) || tipc_recv_stream() 1470 release_sock(sk); tipc_recv_stream() 1476 * @sk: socket 1478 static void tipc_write_space(struct sock *sk) tipc_write_space() argument 1483 wq = rcu_dereference(sk->sk_wq); tipc_write_space() 1492 * @sk: socket 1495 static void tipc_data_ready(struct sock *sk) tipc_data_ready() argument 1500 wq = rcu_dereference(sk->sk_wq); tipc_data_ready() 1516 struct sock *sk = &tsk->sk; filter_connect() local 1517 struct net *net = sock_net(sk); filter_connect() 1518 struct socket *sock = sk->sk_socket; filter_connect() 1547 sk->sk_err = ECONNREFUSED; filter_connect() 1554 sk->sk_err = EINVAL; filter_connect() 1571 if 
(waitqueue_active(sk_sleep(sk))) filter_connect() 1572 wake_up_interruptible(sk_sleep(sk)); filter_connect() 1592 * @sk: socket 1608 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf) rcvbuf_limit() argument 1615 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE << rcvbuf_limit() 1621 * @sk: socket 1631 static int filter_rcv(struct sock *sk, struct sk_buff **skb) filter_rcv() argument 1633 struct socket *sock = sk->sk_socket; filter_rcv() 1634 struct tipc_sock *tsk = tipc_sk(sk); filter_rcv() 1636 unsigned int limit = rcvbuf_limit(sk, *skb); filter_rcv() 1647 sk->sk_write_space(sk); filter_rcv() 1666 if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit) filter_rcv() 1671 __skb_queue_tail(&sk->sk_receive_queue, *skb); filter_rcv() 1672 skb_set_owner_r(*skb, sk); filter_rcv() 1674 sk->sk_data_ready(sk); filter_rcv() 1681 * @sk: socket 1688 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) tipc_backlog_rcv() argument 1693 struct tipc_sock *tsk = tipc_sk(sk); tipc_backlog_rcv() 1694 struct net *net = sock_net(sk); tipc_backlog_rcv() 1697 err = filter_rcv(sk, &skb); tipc_backlog_rcv() 1713 * @sk: socket where the buffers should be enqueued 1722 static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, tipc_sk_enqueue() argument 1737 if (!sock_owned_by_user(sk)) { tipc_sk_enqueue() 1738 err = filter_rcv(sk, &skb); tipc_sk_enqueue() 1744 dcnt = &tipc_sk(sk)->dupl_rcvcnt; tipc_sk_enqueue() 1745 if (sk->sk_backlog.len) tipc_sk_enqueue() 1747 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); tipc_sk_enqueue() 1748 if (likely(!sk_add_backlog(sk, skb, lim))) tipc_sk_enqueue() 1771 struct sock *sk; tipc_sk_rcv() local 1779 sk = &tsk->sk; tipc_sk_rcv() 1780 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { tipc_sk_rcv() 1781 err = tipc_sk_enqueue(inputq, sk, dport, &skb); tipc_sk_rcv() 1782 spin_unlock_bh(&sk->sk_lock.slock); tipc_sk_rcv() 1785 sock_put(sk); tipc_sk_rcv() 1808 struct sock *sk = sock->sk; tipc_wait_for_connect() local 1813 int err = sock_error(sk); tipc_wait_for_connect() 1821 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); tipc_wait_for_connect() 1822 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING); tipc_wait_for_connect() 1823 finish_wait(sk_sleep(sk), &wait); tipc_wait_for_connect() 1840 struct sock *sk = sock->sk; tipc_connect() local 1841 struct tipc_sock *tsk = tipc_sk(sk); tipc_connect() 1848 lock_sock(sk); tipc_connect() 1914 release_sock(sk); tipc_connect() 1927 struct sock *sk = sock->sk; tipc_listen() local 1930 lock_sock(sk); tipc_listen() 1939 release_sock(sk); tipc_listen() 1945 struct sock *sk = sock->sk; tipc_wait_for_accept() local 1955 prepare_to_wait_exclusive(sk_sleep(sk), &wait, tipc_wait_for_accept() 1957 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { tipc_wait_for_accept() 1958 release_sock(sk); tipc_wait_for_accept() 1960 lock_sock(sk); tipc_wait_for_accept() 1963 if (!skb_queue_empty(&sk->sk_receive_queue)) tipc_wait_for_accept() 1975 finish_wait(sk_sleep(sk), &wait); tipc_wait_for_accept() 1989 struct sock *new_sk, *sk = sock->sk; tipc_accept() local 1996 lock_sock(sk); tipc_accept() 2002 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); tipc_accept() 2007 buf = skb_peek(&sk->sk_receive_queue); tipc_accept() 2009 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); tipc_accept() 2012 security_sk_clone(sock->sk, new_sock->sk); tipc_accept() 2014 new_sk = new_sock->sk; tipc_accept() 2018 /* we lock on new_sk; but lockdep sees the lock on sk */ tipc_accept() 2044 
tsk_advance_rx_queue(sk); tipc_accept() 2047 __skb_dequeue(&sk->sk_receive_queue); tipc_accept() 2053 release_sock(sk); tipc_accept() 2068 struct sock *sk = sock->sk; tipc_shutdown() local 2069 struct net *net = sock_net(sk); tipc_shutdown() 2070 struct tipc_sock *tsk = tipc_sk(sk); tipc_shutdown() 2078 lock_sock(sk); tipc_shutdown() 2086 skb = __skb_dequeue(&sk->sk_receive_queue); tipc_shutdown() 2114 __skb_queue_purge(&sk->sk_receive_queue); tipc_shutdown() 2117 sk->sk_state_change(sk); tipc_shutdown() 2125 release_sock(sk); tipc_shutdown() 2132 struct sock *sk = &tsk->sk; tipc_sk_timeout() local 2137 bh_lock_sock(sk); tipc_sk_timeout() 2139 bh_unlock_sock(sk); tipc_sk_timeout() 2146 if (!sock_owned_by_user(sk)) { tipc_sk_timeout() 2147 sk->sk_socket->state = SS_DISCONNECTING; tipc_sk_timeout() 2149 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), tipc_sk_timeout() 2151 sk->sk_state_change(sk); tipc_sk_timeout() 2154 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20)); tipc_sk_timeout() 2162 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); tipc_sk_timeout() 2164 bh_unlock_sock(sk); tipc_sk_timeout() 2166 tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid); tipc_sk_timeout() 2168 sock_put(sk); tipc_sk_timeout() 2174 struct net *net = sock_net(&tsk->sk); tipc_sk_publish() 2198 struct net *net = sock_net(&tsk->sk); tipc_sk_withdraw() 2243 spin_lock_bh(&tsk->sk.sk_lock.slock); rht_for_each_entry_rcu() 2247 spin_unlock_bh(&tsk->sk.sk_lock.slock); rht_for_each_entry_rcu() 2261 sock_hold(&tsk->sk); tipc_sk_lookup() 2269 struct sock *sk = &tsk->sk; tipc_sk_insert() local 2270 struct net *net = sock_net(sk); tipc_sk_insert() 2280 sock_hold(&tsk->sk); tipc_sk_insert() 2284 sock_put(&tsk->sk); tipc_sk_insert() 2292 struct sock *sk = &tsk->sk; tipc_sk_remove() local 2293 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); tipc_sk_remove() 2296 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); tipc_sk_remove() 2297 __sock_put(sk); tipc_sk_remove() 2344 struct sock *sk = sock->sk; tipc_setsockopt() local 2345 struct tipc_sock *tsk = tipc_sk(sk); tipc_setsockopt() 2359 lock_sock(sk); tipc_setsockopt() 2375 tipc_sk(sk)->conn_timeout = value; tipc_setsockopt() 2382 release_sock(sk); tipc_setsockopt() 2403 struct sock *sk = sock->sk; tipc_getsockopt() local 2404 struct tipc_sock *tsk = tipc_sk(sk); tipc_getsockopt() 2417 lock_sock(sk); tipc_getsockopt() 2437 value = skb_queue_len(&sk->sk_receive_queue); tipc_getsockopt() 2443 release_sock(sk); tipc_getsockopt() 2459 struct sock *sk = sock->sk; tipc_ioctl() local 2467 if (!tipc_node_get_linkname(sock_net(sk), tipc_ioctl() 2634 struct net *net = sock_net(skb->sk); __tipc_nl_add_sk() 2677 struct net *net = sock_net(skb->sk); tipc_nl_sk_dump() 2686 spin_lock_bh(&tsk->sk.sk_lock.slock); rht_for_each_entry_rcu() 2688 spin_unlock_bh(&tsk->sk.sk_lock.slock); rht_for_each_entry_rcu() 2695 spin_unlock_bh(&tsk->sk.sk_lock.slock); rht_for_each_entry_rcu() 2699 spin_unlock_bh(&tsk->sk.sk_lock.slock); rht_for_each_entry_rcu() 2796 struct net *net = sock_net(skb->sk); tipc_nl_publ_dump() 2826 lock_sock(&tsk->sk); tipc_nl_publ_dump() 2830 release_sock(&tsk->sk); tipc_nl_publ_dump() 2831 sock_put(&tsk->sk); tipc_nl_publ_dump()
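The socket.c entries above all orbit one delivery pattern: tipc_sk_rcv() takes spin_trylock_bh() on the socket lock, tipc_sk_enqueue() then either runs filter_rcv() directly or parks the buffer on the backlog, and tipc_backlog_rcv() replays parked buffers once the lock holder releases the socket. A minimal sketch of that shape, assuming hypothetical my_filter_rcv()/my_proto_deliver() helpers; bh_lock_sock(), sock_owned_by_user(), sk_add_backlog(), skb_set_owner_r() and sk_data_ready are the real core APIs, but the rcvbuf_limit()/dupl_rcvcnt accounting from the listing is simplified here to a plain sk_rcvbuf cap:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    /* Queue the buffer and wake readers, as filter_rcv() does above. */
    static int my_filter_rcv(struct sock *sk, struct sk_buff *skb)
    {
            unsigned int limit = sk->sk_rcvbuf;    /* simplified cap */

            if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
                    kfree_skb(skb);
                    return -ENOBUFS;
            }
            __skb_queue_tail(&sk->sk_receive_queue, skb);
            skb_set_owner_r(skb, sk);
            sk->sk_data_ready(sk);
            return 0;
    }

    /* Softirq-side delivery: run the filter when the socket lock is
     * free, otherwise park the buffer so that release_sock() replays
     * it through sk->sk_backlog_rcv (installed at init time, the way
     * tipc_sk_create() sets tipc_backlog_rcv). */
    static void my_proto_deliver(struct sock *sk, struct sk_buff *skb)
    {
            bh_lock_sock(sk);
            if (!sock_owned_by_user(sk))
                    my_filter_rcv(sk, skb);
            else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
                    kfree_skb(skb);    /* backlog over limit: drop */
            bh_unlock_sock(sk);
    }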
|
H A D | server.c | 95 struct sock *sk; tipc_conn_kref_release() local 98 sk = sock->sk; tipc_conn_kref_release() 101 __module_get(sk->sk_prot_creator->owner); tipc_conn_kref_release() 135 static void sock_data_ready(struct sock *sk) sock_data_ready() argument 139 read_lock(&sk->sk_callback_lock); sock_data_ready() 140 con = sock2con(sk); sock_data_ready() 146 read_unlock(&sk->sk_callback_lock); sock_data_ready() 149 static void sock_write_space(struct sock *sk) sock_write_space() argument 153 read_lock(&sk->sk_callback_lock); sock_write_space() 154 con = sock2con(sk); sock_write_space() 160 read_unlock(&sk->sk_callback_lock); sock_write_space() 165 struct sock *sk = sock->sk; tipc_register_callbacks() local 167 write_lock_bh(&sk->sk_callback_lock); tipc_register_callbacks() 169 sk->sk_data_ready = sock_data_ready; tipc_register_callbacks() 170 sk->sk_write_space = sock_write_space; tipc_register_callbacks() 171 sk->sk_user_data = con; tipc_register_callbacks() 175 write_unlock_bh(&sk->sk_callback_lock); tipc_register_callbacks() 180 struct sock *sk = con->sock->sk; tipc_unregister_callbacks() local 182 write_lock_bh(&sk->sk_callback_lock); tipc_unregister_callbacks() 183 sk->sk_user_data = NULL; tipc_unregister_callbacks() 184 write_unlock_bh(&sk->sk_callback_lock); tipc_unregister_callbacks() 206 * sk->sk_user_data to 0 before releasing connection object. tipc_close_conn() 271 s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr, tipc_receive_from_sock() 314 newsock->sk->sk_data_ready(newsock->sk); tipc_accept_from_sock() 369 module_put(sock->sk->sk_prot_creator->owner); tipc_create_listen_sock()
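server.c shows the standard way to hook an in-kernel socket: tipc_register_callbacks() stashes the connection object in sk->sk_user_data and swaps in sock_data_ready()/sock_write_space() under write_lock_bh(&sk->sk_callback_lock), while the callbacks take the same lock for reading before dereferencing sk_user_data, so tipc_unregister_callbacks() can clear the pointer without racing them. A minimal sketch under those assumptions (struct my_conn and the my_* names are hypothetical; the locking discipline is the real kernel idiom):

    #include <net/sock.h>

    struct my_conn {
            int conid;                      /* per-connection state */
    };

    /* Replacement sk_data_ready: hold sk_callback_lock for reading so
     * the teardown path cannot clear sk_user_data underneath us. */
    static void my_data_ready(struct sock *sk)
    {
            struct my_conn *con;

            read_lock(&sk->sk_callback_lock);
            con = sk->sk_user_data;
            if (con)
                    pr_debug("data ready on connection %d\n", con->conid);
            read_unlock(&sk->sk_callback_lock);
    }

    static void my_register_callbacks(struct sock *sk, struct my_conn *con)
    {
            write_lock_bh(&sk->sk_callback_lock);
            sk->sk_data_ready = my_data_ready;
            sk->sk_user_data = con;
            write_unlock_bh(&sk->sk_callback_lock);
    }

    /* Once sk_user_data is NULL under the write lock, a concurrent
     * my_data_ready() sees NULL and backs off, so the connection
     * object can be freed safely afterwards. */
    static void my_unregister_callbacks(struct sock *sk)
    {
            write_lock_bh(&sk->sk_callback_lock);
            sk->sk_user_data = NULL;
            write_unlock_bh(&sk->sk_callback_lock);
    }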
|
/linux-4.1.27/net/sctp/ |
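Most of the socket.c listing below is the long tail of sctp_setsockopt()/sctp_getsockopt(): one helper per option, all dispatched from a switch that runs under lock_sock(), with foreign levels handed off to the address family (af->setsockopt). A condensed sketch of that dispatch shape, with hypothetical SOL_MY_PROTO/MY_OPT_NODELAY constants and a module-global standing in for the per-socket field that sctp_setsockopt_nodelay() writes:

    #include <net/sock.h>
    #include <linux/uaccess.h>

    #define SOL_MY_PROTO   255              /* hypothetical level */
    #define MY_OPT_NODELAY 1                /* hypothetical option */

    static int my_nodelay;                  /* stand-in for sp->nodelay */

    static int my_set_nodelay(struct sock *sk, char __user *optval,
                              unsigned int optlen)
    {
            int val;

            if (optlen < sizeof(int))
                    return -EINVAL;
            if (get_user(val, (int __user *)optval))
                    return -EFAULT;
            my_nodelay = (val == 0) ? 0 : 1;   /* same idiom as sctp */
            return 0;
    }

    static int my_setsockopt(struct sock *sk, int level, int optname,
                             char __user *optval, unsigned int optlen)
    {
            int retval;

            if (level != SOL_MY_PROTO)  /* sctp instead forwards these
                                         * to af->setsockopt */
                    return -ENOPROTOOPT;

            lock_sock(sk);
            switch (optname) {
            case MY_OPT_NODELAY:
                    retval = my_set_nodelay(sk, optval, optlen);
                    break;
            default:
                    retval = -ENOPROTOOPT;
                    break;
            }
            release_sock(sk);
            return retval;
    }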
H A D | socket.c | 83 static int sctp_writeable(struct sock *sk); 87 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); 89 static int sctp_wait_for_accept(struct sock *sk, long timeo); 90 static void sctp_wait_for_close(struct sock *sk, long timeo); 91 static void sctp_destruct_sock(struct sock *sk); 101 static int sctp_autobind(struct sock *sk); 109 static void sctp_enter_memory_pressure(struct sock *sk) sctp_enter_memory_pressure() argument 123 amt = sk_wmem_alloc_get(asoc->base.sk); sctp_wspace() 125 if (amt >= asoc->base.sk->sk_sndbuf) { sctp_wspace() 126 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) sctp_wspace() 129 amt = sk_stream_wspace(asoc->base.sk); sctp_wspace() 134 amt = asoc->base.sk->sk_sndbuf - amt; sctp_wspace() 151 struct sock *sk = asoc->base.sk; sctp_set_owner_w() local 156 skb_set_owner_w(chunk->skb, sk); sctp_set_owner_w() 166 atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); sctp_set_owner_w() 167 sk->sk_wmem_queued += chunk->skb->truesize; sctp_set_owner_w() 168 sk_mem_charge(sk, chunk->skb->truesize); sctp_set_owner_w() 172 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, sctp_verify_addr() argument 178 af = sctp_sockaddr_af(sctp_sk(sk), addr, len); sctp_verify_addr() 183 if (!af->addr_valid(addr, sctp_sk(sk), NULL)) sctp_verify_addr() 186 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) sctp_verify_addr() 195 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) sctp_id2assoc() argument 200 if (!sctp_style(sk, UDP)) { sctp_id2assoc() 205 if (!sctp_sstate(sk, ESTABLISHED)) sctp_id2assoc() 209 if (!list_empty(&sctp_sk(sk)->ep->asocs)) sctp_id2assoc() 210 asoc = list_entry(sctp_sk(sk)->ep->asocs.next, sctp_id2assoc() 223 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) sctp_id2assoc() 233 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, sctp_addr_id2transport() argument 241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, sctp_addr_id2transport() 248 id_asoc = sctp_id2assoc(sk, id); sctp_addr_id2transport() 252 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), sctp_addr_id2transport() 268 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) sctp_bind() argument 272 lock_sock(sk); sctp_bind() 274 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, sctp_bind() 278 if (!sctp_sk(sk)->ep->base.bind_addr.port) sctp_bind() 279 retval = sctp_do_bind(sk, (union sctp_addr *)addr, sctp_bind() 284 release_sock(sk); sctp_bind() 322 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) sctp_do_bind() argument 324 struct net *net = sock_net(sk); sctp_do_bind() 325 struct sctp_sock *sp = sctp_sk(sk); sctp_do_bind() 335 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", sctp_do_bind() 336 __func__, sk, addr, len); sctp_do_bind() 342 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", sctp_do_bind() 343 __func__, sk, &addr->sa, bp->port, snum, len); sctp_do_bind() 378 if ((ret = sctp_get_port_local(sk, addr))) { sctp_do_bind() 384 bp->port = inet_sk(sk)->inet_num; sctp_do_bind() 393 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); sctp_do_bind() 394 sp->pf->to_sk_saddr(addr, sk); sctp_do_bind() 413 struct net *net = sock_net(asoc->base.sk); sctp_send_asconf() 448 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) sctp_bindx_add() argument 456 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, sctp_bindx_add() 471 retval = sctp_do_bind(sk, 
(union sctp_addr *)sa_addr, sctp_bindx_add() 480 sctp_bindx_rem(sk, addrs, cnt); sctp_bindx_add() 498 static int sctp_send_asconf_add_ip(struct sock *sk, sctp_send_asconf_add_ip() argument 502 struct net *net = sock_net(sk); sctp_send_asconf_add_ip() 520 sp = sctp_sk(sk); sctp_send_asconf_add_ip() 523 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", sctp_send_asconf_add_ip() 524 __func__, sk, addrs, addrcnt); sctp_send_asconf_add_ip() 597 sctp_sk(asoc->base.sk)); sctp_send_asconf_add_ip() 622 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) sctp_bindx_rem() argument 624 struct sctp_sock *sp = sctp_sk(sk); sctp_bindx_rem() 633 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", sctp_bindx_rem() 634 __func__, sk, addrs, addrcnt); sctp_bindx_rem() 669 /* FIXME - There is probably a need to check if sk->sk_saddr and sctp_bindx_rem() 670 * sk->sk_rcv_addr are currently set to one of the addresses to sctp_bindx_rem() 683 sctp_bindx_add(sk, addrs, cnt); sctp_bindx_rem() 701 static int sctp_send_asconf_del_ip(struct sock *sk, sctp_send_asconf_del_ip() argument 705 struct net *net = sock_net(sk); sctp_send_asconf_del_ip() 724 sp = sctp_sk(sk); sctp_send_asconf_del_ip() 727 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", sctp_send_asconf_del_ip() 728 __func__, sk, addrs, addrcnt); sctp_send_asconf_del_ip() 842 sctp_sk(asoc->base.sk)); sctp_send_asconf_del_ip() 857 struct sock *sk = sctp_opt2sk(sp); sctp_asconf_mgmt() local 867 if (sctp_verify_addr(sk, addr, af->sockaddr_len)) sctp_asconf_mgmt() 871 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1); sctp_asconf_mgmt() 873 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1); sctp_asconf_mgmt() 930 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk. 
944 * sk The sk of the socket 952 static int sctp_setsockopt_bindx(struct sock *sk, sctp_setsockopt_bindx() argument 964 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", sctp_setsockopt_bindx() 965 __func__, sk, addrs, addrs_size, op); sctp_setsockopt_bindx() 1010 err = sctp_bindx_add(sk, kaddrs, addrcnt); sctp_setsockopt_bindx() 1013 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); sctp_setsockopt_bindx() 1017 err = sctp_bindx_rem(sk, kaddrs, addrcnt); sctp_setsockopt_bindx() 1020 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); sctp_setsockopt_bindx() 1034 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) 1039 static int __sctp_connect(struct sock *sk, __sctp_connect() argument 1044 struct net *net = sock_net(sk); __sctp_connect() 1061 sp = sctp_sk(sk); __sctp_connect() 1069 if (sctp_sstate(sk, ESTABLISHED) || __sctp_connect() 1070 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { __sctp_connect() 1101 err = sctp_verify_addr(sk, &to, af->sockaddr_len); __sctp_connect() 1141 if (sctp_autobind(sk)) { __sctp_connect() 1161 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); __sctp_connect() 1202 /* Initialize sk's dport and daddr for getpeername() */ __sctp_connect() 1203 inet_sk(sk)->inet_dport = htons(asoc->peer.port); __sctp_connect() 1204 sp->pf->to_sk_daddr(sa_addr, sk); __sctp_connect() 1205 sk->sk_err = 0; __sctp_connect() 1210 if (sk->sk_socket->file) __sctp_connect() 1211 f_flags = sk->sk_socket->file->f_flags; __sctp_connect() 1213 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); __sctp_connect() 1293 * sk The sk of the socket 1299 static int __sctp_setsockopt_connectx(struct sock *sk, __sctp_setsockopt_connectx() argument 1307 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", __sctp_setsockopt_connectx() 1308 __func__, sk, addrs, addrs_size); __sctp_setsockopt_connectx() 1325 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); __sctp_setsockopt_connectx() 1337 static int sctp_setsockopt_connectx_old(struct sock *sk, sctp_setsockopt_connectx_old() argument 1341 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL); sctp_setsockopt_connectx_old() 1350 static int sctp_setsockopt_connectx(struct sock *sk, sctp_setsockopt_connectx() argument 1357 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id); sctp_setsockopt_connectx() 1381 static int sctp_getsockopt_connectx3(struct sock *sk, int len, sctp_getsockopt_connectx3() argument 1410 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) sctp_getsockopt_connectx3() 1473 static void sctp_close(struct sock *sk, long timeout) sctp_close() argument 1475 struct net *net = sock_net(sk); sctp_close() 1481 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); sctp_close() 1483 lock_sock(sk); sctp_close() 1484 sk->sk_shutdown = SHUTDOWN_MASK; sctp_close() 1485 sk->sk_state = SCTP_SS_CLOSING; sctp_close() 1487 ep = sctp_sk(sk)->ep; sctp_close() 1490 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); sctp_close() 1491 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); sctp_close() 1497 if (sctp_style(sk, TCP)) { sctp_close() 1512 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { sctp_close() 1522 if (sctp_style(sk, TCP) && timeout) sctp_close() 1523 sctp_wait_for_close(sk, timeout); sctp_close() 1526 release_sock(sk); sctp_close() 1534 bh_lock_sock(sk); sctp_close() 1539 sock_hold(sk); sctp_close() 1540 sk_common_release(sk); sctp_close() 1542 bh_unlock_sock(sk); sctp_close() 1545 sock_put(sk); sctp_close() 1551 static 
int sctp_error(struct sock *sk, int flags, int err) sctp_error() argument 1554 err = sock_error(sk) ? : -EPIPE; sctp_error() 1585 static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) sctp_sendmsg() argument 1587 struct net *net = sock_net(sk); sctp_sendmsg() 1609 sp = sctp_sk(sk); sctp_sendmsg() 1612 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, sctp_sendmsg() 1616 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { sctp_sendmsg() 1633 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { sctp_sendmsg() 1636 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, sctp_sendmsg() 1671 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { sctp_sendmsg() 1699 lock_sock(sk); sctp_sendmsg() 1711 if ((sctp_style(sk, TCP) && sctp_sendmsg() 1712 sctp_sstate(sk, ESTABLISHED)) || sctp_sendmsg() 1719 asoc = sctp_id2assoc(sk, associd); sctp_sendmsg() 1734 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { sctp_sendmsg() 1802 if (sctp_autobind(sk)) { sctp_sendmsg() 1822 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); sctp_sendmsg() 1890 if (msg_len > sk->sk_sndbuf) { sctp_sendmsg() 1896 sctp_assoc_pending_pmtu(sk, asoc); sctp_sendmsg() 1903 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { sctp_sendmsg() 1914 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); sctp_sendmsg() 1925 if ((sctp_style(sk, TCP) && msg_name) || sctp_sendmsg() 1980 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); sctp_sendmsg() 1995 release_sock(sk); sctp_sendmsg() 1998 return sctp_error(sk, msg_flags, err); sctp_sendmsg() 2005 err = sock_error(sk); sctp_sendmsg() 2064 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, sctp_recvmsg() argument 2068 struct sctp_sock *sp = sctp_sk(sk); sctp_recvmsg() 2074 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " sctp_recvmsg() 2075 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, sctp_recvmsg() 2078 lock_sock(sk); sctp_recvmsg() 2080 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { sctp_recvmsg() 2085 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); sctp_recvmsg() 2105 sock_recv_ts_and_drops(msg, sk, skb); sctp_recvmsg() 2115 sctp_ulpevent_read_nxtinfo(event, msg, sk); sctp_recvmsg() 2125 if (sk->sk_protinfo.af_inet.cmsg_flags) sctp_recvmsg() 2140 skb_queue_head(&sk->sk_receive_queue, skb); sctp_recvmsg() 2169 release_sock(sk); sctp_recvmsg() 2180 static int sctp_setsockopt_disable_fragments(struct sock *sk, sctp_setsockopt_disable_fragments() argument 2192 sctp_sk(sk)->disable_fragments = (val == 0) ? 
0 : 1; sctp_setsockopt_disable_fragments() 2197 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, sctp_setsockopt_events() argument 2205 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) sctp_setsockopt_events() 2208 if (sctp_sk(sk)->subscribe.sctp_data_io_event) sctp_setsockopt_events() 2219 &sctp_sk(sk)->subscribe)) { sctp_setsockopt_events() 2220 asoc = sctp_id2assoc(sk, 0); sctp_setsockopt_events() 2246 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, sctp_setsockopt_autoclose() argument 2249 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_autoclose() 2250 struct net *net = sock_net(sk); sctp_setsockopt_autoclose() 2253 if (sctp_style(sk, TCP)) sctp_setsockopt_autoclose() 2374 struct net *net = sock_net(trans->asoc->base.sk); sctp_apply_peer_addr_params() 2505 static int sctp_setsockopt_peer_addr_params(struct sock *sk, sctp_setsockopt_peer_addr_params() argument 2512 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_peer_addr_params() 2538 if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { sctp_setsockopt_peer_addr_params() 2539 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, sctp_setsockopt_peer_addr_params() 2549 asoc = sctp_id2assoc(sk, params.spp_assoc_id); sctp_setsockopt_peer_addr_params() 2550 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_peer_addr_params() 2628 static int sctp_setsockopt_delayed_ack(struct sock *sk, sctp_setsockopt_delayed_ack() argument 2634 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_delayed_ack() 2666 asoc = sctp_id2assoc(sk, params.sack_assoc_id); sctp_setsockopt_delayed_ack() 2667 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_delayed_ack() 2738 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) sctp_setsockopt_initmsg() argument 2741 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_initmsg() 2774 static int sctp_setsockopt_default_send_param(struct sock *sk, sctp_setsockopt_default_send_param() argument 2778 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_default_send_param() 2791 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); sctp_setsockopt_default_send_param() 2792 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_default_send_param() 2814 static int sctp_setsockopt_default_sndinfo(struct sock *sk, sctp_setsockopt_default_sndinfo() argument 2818 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_default_sndinfo() 2831 asoc = sctp_id2assoc(sk, info.snd_assoc_id); sctp_setsockopt_default_sndinfo() 2832 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_default_sndinfo() 2855 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, sctp_setsockopt_primary_addr() argument 2867 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); sctp_setsockopt_primary_addr() 2884 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, sctp_setsockopt_nodelay() argument 2894 sctp_sk(sk)->nodelay = (val == 0) ? 
0 : 1; sctp_setsockopt_nodelay() 2910 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) sctp_setsockopt_rtoinfo() argument 2915 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_rtoinfo() 2923 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); sctp_setsockopt_rtoinfo() 2926 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_rtoinfo() 2975 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) sctp_setsockopt_associnfo() argument 2986 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); sctp_setsockopt_associnfo() 2988 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_associnfo() 3020 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_associnfo() 3042 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) sctp_setsockopt_mappedv4() argument 3045 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_mappedv4() 3086 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) sctp_setsockopt_maxseg() argument 3090 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_maxseg() 3112 asoc = sctp_id2assoc(sk, params.assoc_id); sctp_setsockopt_maxseg() 3113 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_maxseg() 3141 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, sctp_setsockopt_peer_primary_addr() argument 3144 struct net *net = sock_net(sk); sctp_setsockopt_peer_primary_addr() 3152 sp = sctp_sk(sk); sctp_setsockopt_peer_primary_addr() 3163 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); sctp_setsockopt_peer_primary_addr() 3199 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, sctp_setsockopt_adaptation_layer() argument 3209 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; sctp_setsockopt_adaptation_layer() 3228 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, sctp_setsockopt_context() argument 3240 sp = sctp_sk(sk); sctp_setsockopt_context() 3243 asoc = sctp_id2assoc(sk, params.assoc_id); sctp_setsockopt_context() 3278 static int sctp_setsockopt_fragment_interleave(struct sock *sk, sctp_setsockopt_fragment_interleave() argument 3289 sctp_sk(sk)->frag_interleave = (val == 0) ? 
0 : 1; sctp_setsockopt_fragment_interleave() 3311 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, sctp_setsockopt_partial_delivery_point() argument 3325 if (val > (sk->sk_rcvbuf >> 1)) sctp_setsockopt_partial_delivery_point() 3328 sctp_sk(sk)->pd_point = val; sctp_setsockopt_partial_delivery_point() 3344 static int sctp_setsockopt_maxburst(struct sock *sk, sctp_setsockopt_maxburst() argument 3370 sp = sctp_sk(sk); sctp_setsockopt_maxburst() 3373 asoc = sctp_id2assoc(sk, assoc_id); sctp_setsockopt_maxburst() 3390 static int sctp_setsockopt_auth_chunk(struct sock *sk, sctp_setsockopt_auth_chunk() argument 3394 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_auth_chunk() 3423 static int sctp_setsockopt_hmac_ident(struct sock *sk, sctp_setsockopt_hmac_ident() argument 3427 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_hmac_ident() 3461 static int sctp_setsockopt_auth_key(struct sock *sk, sctp_setsockopt_auth_key() argument 3465 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_auth_key() 3485 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); sctp_setsockopt_auth_key() 3486 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { sctp_setsockopt_auth_key() 3503 static int sctp_setsockopt_active_key(struct sock *sk, sctp_setsockopt_active_key() argument 3507 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_active_key() 3519 asoc = sctp_id2assoc(sk, val.scact_assoc_id); sctp_setsockopt_active_key() 3520 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_active_key() 3531 static int sctp_setsockopt_del_key(struct sock *sk, sctp_setsockopt_del_key() argument 3535 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_del_key() 3547 asoc = sctp_id2assoc(sk, val.scact_assoc_id); sctp_setsockopt_del_key() 3548 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) sctp_setsockopt_del_key() 3569 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, sctp_setsockopt_auto_asconf() argument 3573 struct sctp_sock *sp = sctp_sk(sk); sctp_setsockopt_auto_asconf() 3579 if (!sctp_is_ep_boundall(sk) && val) sctp_setsockopt_auto_asconf() 3584 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); sctp_setsockopt_auto_asconf() 3590 &sock_net(sk)->sctp.auto_asconf_splist); sctp_setsockopt_auto_asconf() 3593 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); sctp_setsockopt_auto_asconf() 3604 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, sctp_setsockopt_paddr_thresholds() argument 3619 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { sctp_setsockopt_paddr_thresholds() 3620 asoc = sctp_id2assoc(sk, val.spt_assoc_id); sctp_setsockopt_paddr_thresholds() 3634 trans = sctp_addr_id2transport(sk, &val.spt_address, sctp_setsockopt_paddr_thresholds() 3647 static int sctp_setsockopt_recvrcvinfo(struct sock *sk, sctp_setsockopt_recvrcvinfo() argument 3658 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; sctp_setsockopt_recvrcvinfo() 3663 static int sctp_setsockopt_recvnxtinfo(struct sock *sk, sctp_setsockopt_recvnxtinfo() argument 3674 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 
0 : 1; sctp_setsockopt_recvnxtinfo() 3698 static int sctp_setsockopt(struct sock *sk, int level, int optname, sctp_setsockopt() argument 3703 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); sctp_setsockopt() 3712 struct sctp_af *af = sctp_sk(sk)->pf->af; sctp_setsockopt() 3713 retval = af->setsockopt(sk, level, optname, optval, optlen); sctp_setsockopt() 3717 lock_sock(sk); sctp_setsockopt() 3722 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, sctp_setsockopt() 3728 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, sctp_setsockopt() 3734 retval = sctp_setsockopt_connectx_old(sk, sctp_setsockopt() 3741 retval = sctp_setsockopt_connectx(sk, sctp_setsockopt() 3747 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); sctp_setsockopt() 3751 retval = sctp_setsockopt_events(sk, optval, optlen); sctp_setsockopt() 3755 retval = sctp_setsockopt_autoclose(sk, optval, optlen); sctp_setsockopt() 3759 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); sctp_setsockopt() 3763 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); sctp_setsockopt() 3766 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); sctp_setsockopt() 3770 retval = sctp_setsockopt_initmsg(sk, optval, optlen); sctp_setsockopt() 3773 retval = sctp_setsockopt_default_send_param(sk, optval, sctp_setsockopt() 3777 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); sctp_setsockopt() 3780 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); sctp_setsockopt() 3783 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); sctp_setsockopt() 3786 retval = sctp_setsockopt_nodelay(sk, optval, optlen); sctp_setsockopt() 3789 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); sctp_setsockopt() 3792 retval = sctp_setsockopt_associnfo(sk, optval, optlen); sctp_setsockopt() 3795 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); sctp_setsockopt() 3798 retval = sctp_setsockopt_maxseg(sk, optval, optlen); sctp_setsockopt() 3801 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); sctp_setsockopt() 3804 retval = sctp_setsockopt_context(sk, optval, optlen); sctp_setsockopt() 3807 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); sctp_setsockopt() 3810 retval = sctp_setsockopt_maxburst(sk, optval, optlen); sctp_setsockopt() 3813 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); sctp_setsockopt() 3816 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); sctp_setsockopt() 3819 retval = sctp_setsockopt_auth_key(sk, optval, optlen); sctp_setsockopt() 3822 retval = sctp_setsockopt_active_key(sk, optval, optlen); sctp_setsockopt() 3825 retval = sctp_setsockopt_del_key(sk, optval, optlen); sctp_setsockopt() 3828 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); sctp_setsockopt() 3831 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); sctp_setsockopt() 3834 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); sctp_setsockopt() 3837 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); sctp_setsockopt() 3844 release_sock(sk); sctp_setsockopt() 3866 static int sctp_connect(struct sock *sk, struct sockaddr *addr, sctp_connect() argument 3872 lock_sock(sk); sctp_connect() 3874 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, sctp_connect() 3885 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); sctp_connect() 3888 release_sock(sk); sctp_connect() 3893 static int sctp_disconnect(struct sock *sk, int flags) sctp_disconnect() argument 3905 static struct sock 
*sctp_accept(struct sock *sk, int flags, int *err) sctp_accept() argument 3914 lock_sock(sk); sctp_accept() 3916 sp = sctp_sk(sk); sctp_accept() 3919 if (!sctp_style(sk, TCP)) { sctp_accept() 3924 if (!sctp_sstate(sk, LISTENING)) { sctp_accept() 3929 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); sctp_accept() 3931 error = sctp_wait_for_accept(sk, timeo); sctp_accept() 3940 newsk = sp->pf->create_accept_sk(sk, asoc); sctp_accept() 3949 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); sctp_accept() 3952 release_sock(sk); sctp_accept() 3958 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) sctp_ioctl() argument 3962 lock_sock(sk); sctp_ioctl() 3968 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) sctp_ioctl() 3976 skb = skb_peek(&sk->sk_receive_queue); sctp_ioctl() 3992 release_sock(sk); sctp_ioctl() 4000 static int sctp_init_sock(struct sock *sk) sctp_init_sock() argument 4002 struct net *net = sock_net(sk); sctp_init_sock() 4005 pr_debug("%s: sk:%p\n", __func__, sk); sctp_init_sock() 4007 sp = sctp_sk(sk); sctp_init_sock() 4010 switch (sk->sk_type) { sctp_init_sock() 4103 sp->pf = sctp_get_pf_specific(sk->sk_family); sctp_init_sock() 4114 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); sctp_init_sock() 4120 sk->sk_destruct = sctp_destruct_sock; sctp_init_sock() 4126 sock_prot_inuse_add(net, sk->sk_prot, 1); sctp_init_sock() 4132 spin_lock(&sock_net(sk)->sctp.addr_wq_lock); sctp_init_sock() 4136 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); sctp_init_sock() 4147 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true 4149 static void sctp_destroy_sock(struct sock *sk) sctp_destroy_sock() argument 4153 pr_debug("%s: sk:%p\n", __func__, sk); sctp_destroy_sock() 4156 sp = sctp_sk(sk); sctp_destroy_sock() 4170 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); sctp_destroy_sock() 4175 static void sctp_destruct_sock(struct sock *sk) sctp_destruct_sock() argument 4177 struct sctp_sock *sp = sctp_sk(sk); sctp_destruct_sock() 4182 inet_sock_destruct(sk); sctp_destruct_sock() 4201 static void sctp_shutdown(struct sock *sk, int how) sctp_shutdown() argument 4203 struct net *net = sock_net(sk); sctp_shutdown() 4207 if (!sctp_style(sk, TCP)) sctp_shutdown() 4211 ep = sctp_sk(sk)->ep; sctp_shutdown() 4227 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, sctp_getsockopt_sctp_status() argument 4249 asoc = sctp_id2assoc(sk, associd); sctp_getsockopt_sctp_status() 4270 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), sctp_getsockopt_sctp_status() 4307 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, sctp_getsockopt_peer_addr_info() argument 4326 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, sctp_getsockopt_peer_addr_info() 4362 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, sctp_getsockopt_disable_fragments() argument 4371 val = (sctp_sk(sk)->disable_fragments == 1); sctp_getsockopt_disable_fragments() 4384 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, sctp_getsockopt_events() argument 4393 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) sctp_getsockopt_events() 4409 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) sctp_getsockopt_autoclose() argument 4412 if (sctp_style(sk, TCP)) sctp_getsockopt_autoclose() 4419 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) sctp_getsockopt_autoclose() 4425 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 
sctp_do_peeloff() argument 4427 struct sctp_association *asoc = sctp_id2assoc(sk, id); sctp_do_peeloff() 4428 struct sctp_sock *sp = sctp_sk(sk); sctp_do_peeloff() 4438 if (!sctp_style(sk, UDP)) sctp_do_peeloff() 4442 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); sctp_do_peeloff() 4446 sctp_copy_sock(sock->sk, sk, asoc); sctp_do_peeloff() 4451 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); sctp_do_peeloff() 4456 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); sctp_do_peeloff() 4464 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) sctp_getsockopt_peeloff() argument 4477 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); sctp_getsockopt_peeloff() 4495 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, sctp_getsockopt_peeloff() 4608 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, sctp_getsockopt_peer_addr_params() argument 4614 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_peer_addr_params() 4625 if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { sctp_getsockopt_peer_addr_params() 4626 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, sctp_getsockopt_peer_addr_params() 4638 asoc = sctp_id2assoc(sk, params.spp_assoc_id); sctp_getsockopt_peer_addr_params() 4639 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { sctp_getsockopt_peer_addr_params() 4717 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, sctp_getsockopt_delayed_ack() argument 4723 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_delayed_ack() 4745 asoc = sctp_id2assoc(sk, params.sack_assoc_id); sctp_getsockopt_delayed_ack() 4746 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) sctp_getsockopt_delayed_ack() 4791 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) sctp_getsockopt_initmsg() argument 4798 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) sctp_getsockopt_initmsg() 4804 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, sctp_getsockopt_peer_addrs() argument 4813 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_peer_addrs() 4825 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); sctp_getsockopt_peer_addrs() 4835 addrlen = sctp_get_pf_specific(sk->sk_family) sctp_getsockopt_peer_addrs() 4855 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, sctp_copy_laddrs() argument 4862 struct net *net = sock_net(sk); sctp_copy_laddrs() 4869 if ((PF_INET == sk->sk_family) && sctp_copy_laddrs() 4872 if ((PF_INET6 == sk->sk_family) && sctp_copy_laddrs() 4873 inet_v6_ipv6only(sk) && sctp_copy_laddrs() 4880 addrlen = sctp_get_pf_specific(sk->sk_family) sctp_copy_laddrs() 4881 ->addr_to_user(sctp_sk(sk), &temp); sctp_copy_laddrs() 4900 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, sctp_getsockopt_local_addrs() argument 4910 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_local_addrs() 4931 bp = &sctp_sk(sk)->ep->base.bind_addr; sctp_getsockopt_local_addrs() 4933 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); sctp_getsockopt_local_addrs() 4952 if (sctp_is_any(sk, &addr->a)) { sctp_getsockopt_local_addrs() 4953 cnt = sctp_copy_laddrs(sk, bp->port, addrs, sctp_getsockopt_local_addrs() 4970 addrlen = sctp_get_pf_specific(sk->sk_family) sctp_getsockopt_local_addrs() 5005 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, sctp_getsockopt_primary_addr() argument 5010 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_primary_addr() 
5020 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); sctp_getsockopt_primary_addr() 5030 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, sctp_getsockopt_primary_addr() 5047 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, sctp_getsockopt_adaptation_layer() argument 5057 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; sctp_getsockopt_adaptation_layer() 5086 static int sctp_getsockopt_default_send_param(struct sock *sk, sctp_getsockopt_default_send_param() argument 5090 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_default_send_param() 5102 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); sctp_getsockopt_default_send_param() 5103 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) sctp_getsockopt_default_send_param() 5130 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, sctp_getsockopt_default_sndinfo() argument 5134 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_default_sndinfo() 5146 asoc = sctp_id2assoc(sk, info.snd_assoc_id); sctp_getsockopt_default_sndinfo() 5147 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) sctp_getsockopt_default_sndinfo() 5179 static int sctp_getsockopt_nodelay(struct sock *sk, int len, sctp_getsockopt_nodelay() argument 5188 val = (sctp_sk(sk)->nodelay == 1); sctp_getsockopt_nodelay() 5208 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, sctp_getsockopt_rtoinfo() argument 5222 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); sctp_getsockopt_rtoinfo() 5224 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) sctp_getsockopt_rtoinfo() 5234 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_rtoinfo() 5261 static int sctp_getsockopt_associnfo(struct sock *sk, int len, sctp_getsockopt_associnfo() argument 5279 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); sctp_getsockopt_associnfo() 5281 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) sctp_getsockopt_associnfo() 5298 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_associnfo() 5329 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, sctp_getsockopt_mappedv4() argument 5333 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_mappedv4() 5352 static int sctp_getsockopt_context(struct sock *sk, int len, sctp_getsockopt_context() argument 5367 sp = sctp_sk(sk); sctp_getsockopt_context() 5370 asoc = sctp_id2assoc(sk, params.assoc_id); sctp_getsockopt_context() 5413 static int sctp_getsockopt_maxseg(struct sock *sk, int len, sctp_getsockopt_maxseg() argument 5433 asoc = sctp_id2assoc(sk, params.assoc_id); sctp_getsockopt_maxseg() 5434 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) sctp_getsockopt_maxseg() 5440 params.assoc_value = sctp_sk(sk)->user_frag; sctp_getsockopt_maxseg() 5459 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, sctp_getsockopt_fragment_interleave() argument 5469 val = sctp_sk(sk)->frag_interleave; sctp_getsockopt_fragment_interleave() 5482 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, sctp_getsockopt_partial_delivery_point() argument 5493 val = sctp_sk(sk)->pd_point; sctp_getsockopt_partial_delivery_point() 5506 static int sctp_getsockopt_maxburst(struct sock *sk, int len, sctp_getsockopt_maxburst() argument 5528 sp = sctp_sk(sk); sctp_getsockopt_maxburst() 5531 asoc = sctp_id2assoc(sk, params.assoc_id); sctp_getsockopt_maxburst() 5550 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, sctp_getsockopt_hmac_ident() argument 5553 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_getsockopt_hmac_ident() 
5585 static int sctp_getsockopt_active_key(struct sock *sk, int len, sctp_getsockopt_active_key() argument 5588 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_getsockopt_active_key() 5600 asoc = sctp_id2assoc(sk, val.scact_assoc_id); sctp_getsockopt_active_key() 5601 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) sctp_getsockopt_active_key() 5618 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, sctp_getsockopt_peer_auth_chunks() argument 5621 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_getsockopt_peer_auth_chunks() 5639 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); sctp_getsockopt_peer_auth_chunks() 5663 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, sctp_getsockopt_local_auth_chunks() argument 5666 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_getsockopt_local_auth_chunks() 5684 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); sctp_getsockopt_local_auth_chunks() 5685 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) sctp_getsockopt_local_auth_chunks() 5717 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, sctp_getsockopt_assoc_number() argument 5720 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_assoc_number() 5724 if (sctp_style(sk, TCP)) sctp_getsockopt_assoc_number() 5748 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, sctp_getsockopt_auto_asconf() argument 5757 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) sctp_getsockopt_auto_asconf() 5773 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, sctp_getsockopt_assoc_ids() argument 5776 struct sctp_sock *sp = sctp_sk(sk); sctp_getsockopt_assoc_ids() 5781 if (sctp_style(sk, TCP)) sctp_getsockopt_assoc_ids() 5822 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, sctp_getsockopt_paddr_thresholds() argument 5837 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { sctp_getsockopt_paddr_thresholds() 5838 asoc = sctp_id2assoc(sk, val.spt_assoc_id); sctp_getsockopt_paddr_thresholds() 5845 trans = sctp_addr_id2transport(sk, &val.spt_address, sctp_getsockopt_paddr_thresholds() 5866 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, sctp_getsockopt_assoc_stats() argument 5883 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); sctp_getsockopt_assoc_stats() 5924 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, sctp_getsockopt_recvrcvinfo() argument 5934 if (sctp_sk(sk)->recvrcvinfo) sctp_getsockopt_recvrcvinfo() 5944 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, sctp_getsockopt_recvnxtinfo() argument 5954 if (sctp_sk(sk)->recvnxtinfo) sctp_getsockopt_recvnxtinfo() 5964 static int sctp_getsockopt(struct sock *sk, int level, int optname, sctp_getsockopt() argument 5970 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); sctp_getsockopt() 5979 struct sctp_af *af = sctp_sk(sk)->pf->af; sctp_getsockopt() 5981 retval = af->getsockopt(sk, level, optname, optval, optlen); sctp_getsockopt() 5988 lock_sock(sk); sctp_getsockopt() 5992 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); sctp_getsockopt() 5995 retval = sctp_getsockopt_disable_fragments(sk, len, optval, sctp_getsockopt() 5999 retval = sctp_getsockopt_events(sk, len, optval, optlen); sctp_getsockopt() 6002 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); sctp_getsockopt() 6005 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); sctp_getsockopt() 6008 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, sctp_getsockopt() 6012 retval = 
sctp_getsockopt_delayed_ack(sk, len, optval, sctp_getsockopt() 6016 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); sctp_getsockopt() 6019 retval = sctp_getsockopt_peer_addrs(sk, len, optval, sctp_getsockopt() 6023 retval = sctp_getsockopt_local_addrs(sk, len, optval, sctp_getsockopt() 6027 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); sctp_getsockopt() 6030 retval = sctp_getsockopt_default_send_param(sk, len, sctp_getsockopt() 6034 retval = sctp_getsockopt_default_sndinfo(sk, len, sctp_getsockopt() 6038 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); sctp_getsockopt() 6041 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); sctp_getsockopt() 6044 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); sctp_getsockopt() 6047 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); sctp_getsockopt() 6050 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); sctp_getsockopt() 6053 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); sctp_getsockopt() 6056 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, sctp_getsockopt() 6060 retval = sctp_getsockopt_adaptation_layer(sk, len, optval, sctp_getsockopt() 6064 retval = sctp_getsockopt_context(sk, len, optval, optlen); sctp_getsockopt() 6067 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, sctp_getsockopt() 6071 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, sctp_getsockopt() 6075 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); sctp_getsockopt() 6083 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); sctp_getsockopt() 6086 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); sctp_getsockopt() 6089 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, sctp_getsockopt() 6093 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, sctp_getsockopt() 6097 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); sctp_getsockopt() 6100 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); sctp_getsockopt() 6103 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); sctp_getsockopt() 6106 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); sctp_getsockopt() 6109 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); sctp_getsockopt() 6112 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); sctp_getsockopt() 6115 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); sctp_getsockopt() 6122 release_sock(sk); sctp_getsockopt() 6126 static void sctp_hash(struct sock *sk) sctp_hash() argument 6131 static void sctp_unhash(struct sock *sk) sctp_unhash() argument 6151 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) sctp_get_port_local() argument 6168 struct net *net = sock_net(sk); sctp_get_port_local() 6180 index = sctp_phashfn(sock_net(sk), rover); sctp_get_port_local() 6185 net_eq(sock_net(sk), pp->net)) sctp_get_port_local() 6209 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; sctp_get_port_local() 6212 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) sctp_get_port_local() 6225 int reuse = sk->sk_reuse; sctp_get_port_local() 6230 if (pp->fastreuse && sk->sk_reuse && sctp_get_port_local() 6231 sk->sk_state != SCTP_SS_LISTENING) sctp_get_port_local() 6236 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, sctp_get_port_local() 6240 * the socket sk. 
If we find a match, then that means sctp_get_port_local() 6241 * that this port/socket (sk) combination are already sctp_get_port_local() 6248 if (sk == sk2 || sctp_get_port_local() 6254 sctp_sk(sk2), sctp_sk(sk))) { sctp_get_port_local() 6265 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) sctp_get_port_local() 6269 * if sk->sk_reuse is too (that is, if the caller requested sctp_get_port_local() 6270 * SO_REUSEADDR on this socket -sk-). sctp_get_port_local() 6273 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) sctp_get_port_local() 6278 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) sctp_get_port_local() 6286 if (!sctp_sk(sk)->bind_hash) { sctp_get_port_local() 6287 inet_sk(sk)->inet_num = snum; sctp_get_port_local() 6288 sk_add_bind_node(sk, &pp->owner); sctp_get_port_local() 6289 sctp_sk(sk)->bind_hash = pp; sctp_get_port_local() 6304 static int sctp_get_port(struct sock *sk, unsigned short snum) sctp_get_port() argument 6307 struct sctp_af *af = sctp_sk(sk)->pf->af; sctp_get_port() 6309 /* Set up a dummy address struct from the sk. */ sctp_get_port() 6310 af->from_sk(&addr, sk); sctp_get_port() 6313 /* Note: sk->sk_num gets filled in if ephemeral port request. */ sctp_get_port() 6314 return !!sctp_get_port_local(sk, &addr); sctp_get_port() 6320 static int sctp_listen_start(struct sock *sk, int backlog) sctp_listen_start() argument 6322 struct sctp_sock *sp = sctp_sk(sk); sctp_listen_start() 6336 sctp_sk(sk)->hmac = tfm; sctp_listen_start() 6350 sk->sk_state = SCTP_SS_LISTENING; sctp_listen_start() 6352 if (sctp_autobind(sk)) sctp_listen_start() 6355 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { sctp_listen_start() 6356 sk->sk_state = SCTP_SS_CLOSED; sctp_listen_start() 6361 sk->sk_max_ack_backlog = backlog; sctp_listen_start() 6382 struct sock *sk = sock->sk; sctp_inet_listen() local 6383 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_inet_listen() 6389 lock_sock(sk); sctp_inet_listen() 6392 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) sctp_inet_listen() 6400 if (sctp_sstate(sk, CLOSED)) sctp_inet_listen() 6405 sk->sk_state = SCTP_SS_CLOSED; sctp_inet_listen() 6406 if (sk->sk_reuse) sctp_inet_listen() 6407 sctp_sk(sk)->bind_hash->fastreuse = 1; sctp_inet_listen() 6412 if (sctp_sstate(sk, LISTENING)) sctp_inet_listen() 6413 sk->sk_max_ack_backlog = backlog; sctp_inet_listen() 6415 err = sctp_listen_start(sk, backlog); sctp_inet_listen() 6422 release_sock(sk); sctp_inet_listen() 6441 struct sock *sk = sock->sk; sctp_poll() local 6442 struct sctp_sock *sp = sctp_sk(sk); sctp_poll() 6445 poll_wait(file, sk_sleep(sk), wait); sctp_poll() 6450 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) sctp_poll() 6457 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) sctp_poll() 6459 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
POLLPRI : 0); sctp_poll() 6460 if (sk->sk_shutdown & RCV_SHUTDOWN) sctp_poll() 6462 if (sk->sk_shutdown == SHUTDOWN_MASK) sctp_poll() 6466 if (!skb_queue_empty(&sk->sk_receive_queue)) sctp_poll() 6470 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) sctp_poll() 6474 if (sctp_writeable(sk)) { sctp_poll() 6477 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); sctp_poll() 6486 if (sctp_writeable(sk)) sctp_poll() 6524 static inline void __sctp_put_port(struct sock *sk) __sctp_put_port() argument 6527 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), __sctp_put_port() 6528 inet_sk(sk)->inet_num)]; __sctp_put_port() 6532 pp = sctp_sk(sk)->bind_hash; __sctp_put_port() 6533 __sk_del_bind_node(sk); __sctp_put_port() 6534 sctp_sk(sk)->bind_hash = NULL; __sctp_put_port() 6535 inet_sk(sk)->inet_num = 0; __sctp_put_port() 6540 void sctp_put_port(struct sock *sk) sctp_put_port() argument 6543 __sctp_put_port(sk); sctp_put_port() 6553 static int sctp_autobind(struct sock *sk) sctp_autobind() argument 6560 af = sctp_sk(sk)->pf->af; sctp_autobind() 6562 port = htons(inet_sk(sk)->inet_num); sctp_autobind() 6565 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); sctp_autobind() 6702 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) sctp_wait_for_packet() argument 6707 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sctp_wait_for_packet() 6710 error = sock_error(sk); sctp_wait_for_packet() 6714 if (!skb_queue_empty(&sk->sk_receive_queue)) sctp_wait_for_packet() 6718 if (sk->sk_shutdown & RCV_SHUTDOWN) sctp_wait_for_packet() 6727 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) sctp_wait_for_packet() 6739 release_sock(sk); sctp_wait_for_packet() 6741 lock_sock(sk); sctp_wait_for_packet() 6744 finish_wait(sk_sleep(sk), &wait); sctp_wait_for_packet() 6751 finish_wait(sk_sleep(sk), &wait); sctp_wait_for_packet() 6760 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, sctp_skb_recv_datagram() argument 6767 timeo = sock_rcvtimeo(sk, noblock); sctp_skb_recv_datagram() 6781 spin_lock_bh(&sk->sk_receive_queue.lock); sctp_skb_recv_datagram() 6782 skb = skb_peek(&sk->sk_receive_queue); sctp_skb_recv_datagram() 6785 spin_unlock_bh(&sk->sk_receive_queue.lock); sctp_skb_recv_datagram() 6787 skb = skb_dequeue(&sk->sk_receive_queue); sctp_skb_recv_datagram() 6793 /* Caller is allowed not to check sk->sk_err before calling. 
*/ sctp_skb_recv_datagram() 6794 error = sock_error(sk); sctp_skb_recv_datagram() 6798 if (sk->sk_shutdown & RCV_SHUTDOWN) sctp_skb_recv_datagram() 6801 if (sk_can_busy_loop(sk) && sctp_skb_recv_datagram() 6802 sk_busy_loop(sk, noblock)) sctp_skb_recv_datagram() 6809 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); sctp_skb_recv_datagram() 6821 struct sock *sk = asoc->base.sk; __sctp_write_space() local 6822 struct socket *sock = sk->sk_socket; __sctp_write_space() 6828 if (sctp_writeable(sk)) { __sctp_write_space() 6829 wait_queue_head_t *wq = sk_sleep(sk); __sctp_write_space() 6838 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) __sctp_write_space() 6845 static void sctp_wake_up_waiters(struct sock *sk, sctp_wake_up_waiters() argument 6860 return sctp_write_space(sk); sctp_wake_up_waiters() 6875 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) sctp_wake_up_waiters() 6893 struct sock *sk = asoc->base.sk; sctp_wfree() local 6899 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); sctp_wfree() 6904 sk->sk_wmem_queued -= skb->truesize; sctp_wfree() 6905 sk_mem_uncharge(sk, skb->truesize); sctp_wfree() 6908 sctp_wake_up_waiters(sk, asoc); sctp_wfree() 6920 struct sock *sk = skb->sk; sctp_sock_rfree() local 6923 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); sctp_sock_rfree() 6928 sk_mem_uncharge(sk, event->rmem_len); sctp_sock_rfree() 6936 struct sock *sk = asoc->base.sk; sctp_wait_for_sndbuf() local 6953 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || sctp_wait_for_sndbuf() 6964 release_sock(sk); sctp_wait_for_sndbuf() 6966 BUG_ON(sk != asoc->base.sk); sctp_wait_for_sndbuf() 6967 lock_sock(sk); sctp_wait_for_sndbuf() 6993 void sctp_data_ready(struct sock *sk) sctp_data_ready() argument 6998 wq = rcu_dereference(sk->sk_wq); sctp_data_ready() 7002 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); sctp_data_ready() 7007 void sctp_write_space(struct sock *sk) sctp_write_space() argument 7012 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { sctp_write_space() 7028 static int sctp_writeable(struct sock *sk) sctp_writeable() argument 7032 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); sctp_writeable() 7043 struct sock *sk = asoc->base.sk; sctp_wait_for_connect() local 7058 if (sk->sk_shutdown & RCV_SHUTDOWN) sctp_wait_for_connect() 7060 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || sctp_wait_for_connect() 7072 release_sock(sk); sctp_wait_for_connect() 7074 lock_sock(sk); sctp_wait_for_connect() 7103 static int sctp_wait_for_accept(struct sock *sk, long timeo) sctp_wait_for_accept() argument 7109 ep = sctp_sk(sk)->ep; sctp_wait_for_accept() 7113 prepare_to_wait_exclusive(sk_sleep(sk), &wait, sctp_wait_for_accept() 7117 release_sock(sk); sctp_wait_for_accept() 7119 lock_sock(sk); sctp_wait_for_accept() 7123 if (!sctp_sstate(sk, LISTENING)) sctp_wait_for_accept() 7139 finish_wait(sk_sleep(sk), &wait); sctp_wait_for_accept() 7144 static void sctp_wait_for_close(struct sock *sk, long timeout) sctp_wait_for_close() argument 7149 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sctp_wait_for_close() 7150 if (list_empty(&sctp_sk(sk)->ep->asocs)) sctp_wait_for_close() 7152 release_sock(sk); sctp_wait_for_close() 7154 lock_sock(sk); sctp_wait_for_close() 7157 finish_wait(sk_sleep(sk), &wait); sctp_wait_for_close() 7160 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) sctp_skb_set_owner_r_frag() argument 7169 sctp_skb_set_owner_r_frag(frag, sk); sctp_skb_set_owner_r_frag() 7172 sctp_skb_set_owner_r(skb, sk); 
sctp_skb_set_owner_r_frag() 7175 void sctp_copy_sock(struct sock *newsk, struct sock *sk, sctp_copy_sock() argument 7178 struct inet_sock *inet = inet_sk(sk); sctp_copy_sock() 7181 newsk->sk_type = sk->sk_type; sctp_copy_sock() 7182 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; sctp_copy_sock() 7183 newsk->sk_flags = sk->sk_flags; sctp_copy_sock() 7184 newsk->sk_tsflags = sk->sk_tsflags; sctp_copy_sock() 7185 newsk->sk_no_check_tx = sk->sk_no_check_tx; sctp_copy_sock() 7186 newsk->sk_no_check_rx = sk->sk_no_check_rx; sctp_copy_sock() 7187 newsk->sk_reuse = sk->sk_reuse; sctp_copy_sock() 7189 newsk->sk_shutdown = sk->sk_shutdown; sctp_copy_sock() 7191 newsk->sk_family = sk->sk_family; sctp_copy_sock() 7193 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; sctp_copy_sock() 7194 newsk->sk_sndbuf = sk->sk_sndbuf; sctp_copy_sock() 7195 newsk->sk_rcvbuf = sk->sk_rcvbuf; sctp_copy_sock() 7196 newsk->sk_lingertime = sk->sk_lingertime; sctp_copy_sock() 7197 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; sctp_copy_sock() 7198 newsk->sk_sndtimeo = sk->sk_sndtimeo; sctp_copy_sock() 7202 /* Initialize sk's sport, dport, rcv_saddr and daddr for sctp_copy_sock() 7401 static void sctp_v6_destroy_sock(struct sock *sk) sctp_v6_destroy_sock() argument 7403 sctp_destroy_sock(sk); sctp_v6_destroy_sock() 7404 inet6_destroy_sock(sk); sctp_v6_destroy_sock()
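Note: sctp_wait_for_packet(), sctp_wait_for_sndbuf(), sctp_wait_for_connect() and sctp_wait_for_accept() above all use the same sleep idiom: register on sk_sleep(sk), re-check the condition and error state under the socket lock, and drop the lock only around the actual schedule so the softirq receive path can make progress meanwhile. A minimal sketch of that idiom follows; my_condition() is a hypothetical predicate, not an SCTP function.

    #include <net/sock.h>

    bool my_condition(struct sock *sk);     /* hypothetical, supplied elsewhere */

    /* Sleep until my_condition() holds, an error is raised, the timeout
     * expires or a signal arrives.  Called and returns with the socket
     * lock held, like the sctp_wait_for_*() helpers.
     */
    static int my_wait_for(struct sock *sk, long *timeo)
    {
            DEFINE_WAIT(wait);
            int err = 0;

            for (;;) {
                    prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                    if (my_condition(sk))
                            break;
                    err = sock_error(sk);
                    if (err)
                            break;
                    if (!*timeo) {
                            err = -EAGAIN;
                            break;
                    }
                    if (signal_pending(current)) {
                            err = sock_intr_errno(*timeo);
                            break;
                    }
                    /* Drop the lock while sleeping; BH input runs meanwhile. */
                    release_sock(sk);
                    *timeo = schedule_timeout(*timeo);
                    lock_sock(sk);
            }
            finish_wait(sk_sleep(sk), &wait);
            return err;
    }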
|
H A D | endpointola.c | 58 struct sock *sk, sctp_endpoint_init() 61 struct net *net = sock_net(sk); sctp_endpoint_init() 130 ep->base.sk = sk; sctp_endpoint_init() 131 sock_hold(ep->base.sk); sctp_endpoint_init() 139 sk->sk_data_ready = sctp_data_ready; sctp_endpoint_init() 140 sk->sk_write_space = sctp_write_space; sctp_endpoint_init() 141 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); sctp_endpoint_init() 184 struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) sctp_endpoint_new() argument 193 if (!sctp_endpoint_init(ep, sk, gfp)) sctp_endpoint_new() 209 struct sock *sk = ep->base.sk; sctp_endpoint_add_asoc() local 222 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) sctp_endpoint_add_asoc() 223 sk->sk_ack_backlog++; sctp_endpoint_add_asoc() 233 ep->base.sk->sk_state = SCTP_SS_CLOSED; sctp_endpoint_free() 244 struct sock *sk; sctp_endpoint_destroy() local 271 sk = ep->base.sk; sctp_endpoint_destroy() 272 if (sk != NULL) { sctp_endpoint_destroy() 274 if (sctp_sk(sk)->bind_hash) sctp_endpoint_destroy() 275 sctp_put_port(sk); sctp_endpoint_destroy() 277 sock_put(sk); sctp_endpoint_destroy() 307 net_eq(sock_net(ep->base.sk), net)) { sctp_endpoint_is_match() 309 sctp_sk(ep->base.sk))) sctp_endpoint_is_match() 343 hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port, __sctp_endpoint_lookup_assoc() 387 struct net *net = sock_net(ep->base.sk); sctp_endpoint_is_peeled_off() 410 struct sock *sk; sctp_endpoint_bh_rcv() local 425 sk = ep->base.sk; sctp_endpoint_bh_rcv() 426 net = sock_net(sk); sctp_endpoint_bh_rcv() 478 SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS); sctp_endpoint_bh_rcv() 495 if (!sctp_sk(sk)->ep) sctp_endpoint_bh_rcv() 57 sctp_endpoint_init(struct sctp_endpoint *ep, struct sock *sk, gfp_t gfp) sctp_endpoint_init() argument
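Note: the endpoint pins its socket for its whole lifetime — sctp_endpoint_init() takes a reference with sock_hold(ep->base.sk) and sctp_endpoint_destroy() drops it with sock_put(). A hedged miniature of that ownership pattern, with struct my_obj as an illustrative stand-in rather than an SCTP type:

    #include <net/sock.h>

    struct my_obj {                 /* hypothetical object owning a sock ref */
            struct sock *sk;
    };

    static void my_obj_init(struct my_obj *obj, struct sock *sk)
    {
            obj->sk = sk;
            sock_hold(sk);          /* sk cannot be freed while obj lives */
    }

    static void my_obj_destroy(struct my_obj *obj)
    {
            sock_put(obj->sk);      /* frees sk if this was the last ref */
            obj->sk = NULL;
    }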
|
H A D | input.c | 75 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb); 109 struct sock *sk; sctp_rcv() local 181 sk = rcvr->sk; sctp_rcv() 187 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { sctp_rcv() 195 sk = net->sctp.ctl_sock; sctp_rcv() 196 ep = sctp_sk(sk)->ep; sctp_rcv() 216 if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family)) sctp_rcv() 220 if (sk_filter(sk, skb)) sctp_rcv() 224 chunk = sctp_chunkify(skb, asoc, sk); sctp_rcv() 245 bh_lock_sock(sk); sctp_rcv() 247 if (sk != rcvr->sk) { sctp_rcv() 248 /* Our cached sk is different from the rcvr->sk. This is sctp_rcv() 253 * of the current sk. sctp_rcv() 255 bh_unlock_sock(sk); sctp_rcv() 256 sk = rcvr->sk; sctp_rcv() 257 bh_lock_sock(sk); sctp_rcv() 260 if (sock_owned_by_user(sk)) { sctp_rcv() 261 if (sctp_add_backlog(sk, skb)) { sctp_rcv() 262 bh_unlock_sock(sk); sctp_rcv() 273 bh_unlock_sock(sk); sctp_rcv() 303 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) sctp_backlog_rcv() argument 321 if (unlikely(rcvr->sk != sk)) { sctp_backlog_rcv() 333 sk = rcvr->sk; sctp_backlog_rcv() 334 bh_lock_sock(sk); sctp_backlog_rcv() 336 if (sock_owned_by_user(sk)) { sctp_backlog_rcv() 337 if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) sctp_backlog_rcv() 344 bh_unlock_sock(sk); sctp_backlog_rcv() 365 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) sctp_add_backlog() argument 371 ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf); sctp_add_backlog() 389 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, sctp_icmp_frag_needed() argument 395 if (sock_owned_by_user(sk)) { sctp_icmp_frag_needed() 403 sctp_transport_update_pmtu(sk, t, pmtu); sctp_icmp_frag_needed() 406 sctp_assoc_sync_pmtu(sk, asoc); sctp_icmp_frag_needed() 418 void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t, sctp_icmp_redirect() argument 427 dst->ops->redirect(dst, sk, skb); sctp_icmp_redirect() 441 void sctp_icmp_proto_unreachable(struct sock *sk, sctp_icmp_proto_unreachable() argument 445 if (sock_owned_by_user(sk)) { sctp_icmp_proto_unreachable() 454 struct net *net = sock_net(sk); sctp_icmp_proto_unreachable() 478 struct sock *sk = NULL; sctp_err_lookup() local 503 sk = asoc->base.sk; sctp_err_lookup() 529 bh_lock_sock(sk); sctp_err_lookup() 534 if (sock_owned_by_user(sk)) sctp_err_lookup() 539 return sk; sctp_err_lookup() 547 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) sctp_err_finish() argument 549 bh_unlock_sock(sk); sctp_err_finish() 574 struct sock *sk; sctp_v4_err() local 587 sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport); sctp_v4_err() 591 if (!sk) { sctp_v4_err() 609 sctp_icmp_frag_needed(sk, asoc, transport, info); sctp_v4_err() 613 sctp_icmp_proto_unreachable(sk, asoc, sctp_v4_err() 630 sctp_icmp_redirect(sk, transport, skb); sctp_v4_err() 636 inet = inet_sk(sk); sctp_v4_err() 637 if (!sock_owned_by_user(sk) && inet->recverr) { sctp_v4_err() 638 sk->sk_err = err; sctp_v4_err() 639 sk->sk_error_report(sk); sctp_v4_err() 641 sk->sk_err_soft = err; sctp_v4_err() 645 sctp_err_finish(sk, asoc); sctp_v4_err() 711 struct net *net = sock_net(ep->base.sk); __sctp_hash_endpoint() 736 struct net *net = sock_net(ep->base.sk); __sctp_unhash_endpoint() 788 struct net *net = sock_net(asoc->base.sk); __sctp_hash_established() 819 struct net *net = sock_net(asoc->base.sk); __sctp_unhash_established()
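Note: sctp_rcv() and sctp_backlog_rcv() above demonstrate the standard owned-by-user dance for softirq input: take bh_lock_sock(), and if a process currently owns the socket, park the skb on the backlog (bounded by sk_rcvbuf) to be replayed via sk_backlog_rcv when the owner calls release_sock(). A condensed sketch; my_do_rcv() is a hypothetical worker, not the SCTP entry point:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    int my_do_rcv(struct sock *sk, struct sk_buff *skb);    /* hypothetical */

    static int my_rcv(struct sock *sk, struct sk_buff *skb)
    {
            int ret;

            bh_lock_sock(sk);
            if (sock_owned_by_user(sk))
                    /* A process holds the lock: queue for later replay,
                     * failing with -ENOBUFS once sk_rcvbuf is exceeded.
                     */
                    ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
            else
                    ret = my_do_rcv(sk, skb);
            bh_unlock_sock(sk);
            return ret;
    }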
|
/linux-4.1.27/security/smack/ |
H A D | smack_netfilter.c | 31 if (skb && skb->sk && skb->sk->sk_security) { smack_ipv6_output() 32 ssp = skb->sk->sk_security; smack_ipv6_output() 48 if (skb && skb->sk && skb->sk->sk_security) { smack_ipv4_output() 49 ssp = skb->sk->sk_security; smack_ipv4_output()
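Note: both Smack output hooks guard every dereference with skb && skb->sk && skb->sk->sk_security, since netfilter output hooks also see forwarded packets with no local socket, and sockets whose LSM blob was never allocated. A minimal sketch of the same defensive chain (my_label_skb() is hypothetical; sk_security exists only under CONFIG_SECURITY):

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static void my_label_skb(struct sk_buff *skb)
    {
            if (skb && skb->sk && skb->sk->sk_security) {
                    void *ssp = skb->sk->sk_security;

                    pr_debug("labeling skb via blob %p\n", ssp);
            }
    }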
|
/linux-4.1.27/include/linux/can/ |
H A D | skb.h | 49 static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) can_skb_set_owner() argument 51 if (sk) { can_skb_set_owner() 52 sock_hold(sk); can_skb_set_owner() 54 skb->sk = sk; can_skb_set_owner() 67 can_skb_set_owner(nskb, skb->sk); can_create_echo_skb()
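Note: can_skb_set_owner() keeps the sending socket alive for as long as the echo skb exists: take a reference with sock_hold(), attach the sock, and let the skb destructor return the reference. The elided line in the listing is presumably the destructor assignment; the sketch below assumes the generic sock_efree() (which just does sock_put(skb->sk)) as that destructor.

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static void my_skb_set_owner(struct sk_buff *skb, struct sock *sk)
    {
            if (sk) {
                    sock_hold(sk);                /* dropped by destructor */
                    skb->destructor = sock_efree; /* does sock_put(skb->sk) */
                    skb->sk = sk;
            }
    }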
|
/linux-4.1.27/net/dccp/ccids/ |
H A D | ccid3.c | 63 static void ccid3_hc_tx_set_state(struct sock *sk, ccid3_hc_tx_set_state() argument 66 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_set_state() 70 dccp_role(sk), sk, ccid3_tx_state_name(oldstate), ccid3_hc_tx_set_state() 85 static inline u64 rfc3390_initial_rate(struct sock *sk) rfc3390_initial_rate() argument 87 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); rfc3390_initial_rate() 124 static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) ccid3_hc_tx_update_x() argument 126 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_update_x() 138 min_rate = rfc3390_initial_rate(sk); ccid3_hc_tx_update_x() 200 struct sock *sk = (struct sock *)data; ccid3_hc_tx_no_feedback_timer() local 201 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_no_feedback_timer() 204 bh_lock_sock(sk); ccid3_hc_tx_no_feedback_timer() 205 if (sock_owned_by_user(sk)) { ccid3_hc_tx_no_feedback_timer() 211 ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk, ccid3_hc_tx_no_feedback_timer() 215 if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN)) ccid3_hc_tx_no_feedback_timer() 220 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); ccid3_hc_tx_no_feedback_timer() 251 ccid3_hc_tx_update_x(sk, NULL); ccid3_hc_tx_no_feedback_timer() 266 sk_reset_timer(sk, &hc->tx_no_feedback_timer, ccid3_hc_tx_no_feedback_timer() 269 bh_unlock_sock(sk); ccid3_hc_tx_no_feedback_timer() 270 sock_put(sk); ccid3_hc_tx_no_feedback_timer() 275 * @skb: next packet candidate to send on @sk 280 static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) ccid3_hc_tx_send_packet() argument 282 struct dccp_sock *dp = dccp_sk(sk); ccid3_hc_tx_send_packet() 283 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_send_packet() 296 sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies + ccid3_hc_tx_send_packet() 314 hc->tx_x = rfc3390_initial_rate(sk); ccid3_hc_tx_send_packet() 329 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); ccid3_hc_tx_send_packet() 357 static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len) ccid3_hc_tx_packet_sent() argument 359 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_packet_sent() 363 if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss)) ccid3_hc_tx_packet_sent() 367 static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) ccid3_hc_tx_packet_recv() argument 369 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_packet_recv() 395 r_sample = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp)); ccid3_hc_tx_packet_recv() 402 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); ccid3_hc_tx_packet_recv() 408 hc->tx_x = rfc3390_initial_rate(sk); ccid3_hc_tx_packet_recv() 425 ccid3_hc_tx_update_x(sk, &now); ccid3_hc_tx_packet_recv() 430 dccp_role(sk), sk, hc->tx_rtt, r_sample, ccid3_hc_tx_packet_recv() 436 sk_stop_timer(sk, &hc->tx_no_feedback_timer); ccid3_hc_tx_packet_recv() 442 sk->sk_write_space(sk); ccid3_hc_tx_packet_recv() 450 USEC_PER_SEC/HZ * tcp_rto_min(sk)); ccid3_hc_tx_packet_recv() 459 dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb); ccid3_hc_tx_packet_recv() 461 sk_reset_timer(sk, &hc->tx_no_feedback_timer, ccid3_hc_tx_packet_recv() 465 static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type, ccid3_hc_tx_parse_options() argument 468 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_parse_options() 479 dccp_role(sk), sk, optlen, option); ccid3_hc_tx_parse_options() 490 dccp_role(sk), sk, opt_val); 
ccid3_hc_tx_parse_options() 496 dccp_role(sk), sk, opt_val); ccid3_hc_tx_parse_options() 502 static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) ccid3_hc_tx_init() argument 509 ccid3_hc_tx_no_feedback_timer, (unsigned long)sk); ccid3_hc_tx_init() 513 static void ccid3_hc_tx_exit(struct sock *sk) ccid3_hc_tx_exit() argument 515 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_exit() 517 sk_stop_timer(sk, &hc->tx_no_feedback_timer); ccid3_hc_tx_exit() 521 static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) ccid3_hc_tx_get_info() argument 523 info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto; ccid3_hc_tx_get_info() 524 info->tcpi_rtt = ccid3_hc_tx_sk(sk)->tx_rtt; ccid3_hc_tx_get_info() 527 static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, ccid3_hc_tx_getsockopt() argument 530 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_getsockopt() 583 static void ccid3_hc_rx_set_state(struct sock *sk, ccid3_hc_rx_set_state() argument 586 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); ccid3_hc_rx_set_state() 590 dccp_role(sk), sk, ccid3_rx_state_name(oldstate), ccid3_hc_rx_set_state() 596 static void ccid3_hc_rx_send_feedback(struct sock *sk, ccid3_hc_rx_send_feedback() argument 600 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); ccid3_hc_rx_send_feedback() 601 struct dccp_sock *dp = dccp_sk(sk); ccid3_hc_rx_send_feedback() 643 dccp_send_ack(sk); ccid3_hc_rx_send_feedback() 646 static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) ccid3_hc_rx_insert_options() argument 648 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); ccid3_hc_rx_insert_options() 651 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) ccid3_hc_rx_insert_options() 679 static u32 ccid3_first_li(struct sock *sk) ccid3_first_li() argument 681 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); ccid3_first_li() 706 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p); ccid3_first_li() 711 static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) ccid3_hc_rx_packet_recv() argument 713 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); ccid3_hc_rx_packet_recv() 715 const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; ccid3_hc_rx_packet_recv() 722 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); ccid3_hc_rx_packet_recv() 749 skb, ndp, ccid3_first_li, sk)) { ccid3_hc_rx_packet_recv() 792 ccid3_hc_rx_send_feedback(sk, skb, do_feedback); ccid3_hc_rx_packet_recv() 795 static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) ccid3_hc_rx_init() argument 804 static void ccid3_hc_rx_exit(struct sock *sk) ccid3_hc_rx_exit() argument 806 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); ccid3_hc_rx_exit() 812 static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) ccid3_hc_rx_get_info() argument 814 info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state; ccid3_hc_rx_get_info() 816 info->tcpi_rcv_rtt = ccid3_hc_rx_sk(sk)->rx_rtt; ccid3_hc_rx_get_info() 819 static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, ccid3_hc_rx_getsockopt() argument 822 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); ccid3_hc_rx_getsockopt()
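Note: ccid3_hc_tx_no_feedback_timer() is the canonical pre-timer_setup() socket timer body: cast the unsigned long argument back to the sock, take bh_lock_sock(), and if a process owns the lock merely re-arm a short retry instead of touching state; always finish with bh_unlock_sock() and the sock_put() that balances the reference held while the timer was armed. A minimal sketch with a hypothetical my_timer_work() payload:

    #include <net/sock.h>
    #include <linux/timer.h>

    void my_timer_work(struct sock *sk);    /* hypothetical payload */

    /* Assumes the timer was armed via setup_timer(..., (unsigned long)sk)
     * with a socket reference taken by sk_reset_timer().
     */
    static void my_timer_expiry(unsigned long data)
    {
            struct sock *sk = (struct sock *)data;

            bh_lock_sock(sk);
            if (sock_owned_by_user(sk)) {
                    /* Can't race the lock holder; retry shortly. */
                    sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 5);
                    goto out;
            }
            my_timer_work(sk);
    out:
            bh_unlock_sock(sk);
            sock_put(sk);   /* balances the hold taken when armed */
    }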
|
H A D | ccid2.c | 79 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) ccid2_hc_tx_send_packet() argument 81 if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk))) ccid2_hc_tx_send_packet() 86 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) ccid2_change_l_ack_ratio() argument 88 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2); ccid2_change_l_ack_ratio() 100 dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO, ccid2_change_l_ack_ratio() 104 static void ccid2_check_l_ack_ratio(struct sock *sk) ccid2_check_l_ack_ratio() argument 106 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_check_l_ack_ratio() 118 if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd) ccid2_check_l_ack_ratio() 119 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U); ccid2_check_l_ack_ratio() 122 static void ccid2_change_l_seq_window(struct sock *sk, u64 val) ccid2_change_l_seq_window() argument 124 dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW, ccid2_change_l_seq_window() 131 struct sock *sk = (struct sock *)data; ccid2_hc_tx_rto_expire() local 132 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_hc_tx_rto_expire() 135 bh_lock_sock(sk); ccid2_hc_tx_rto_expire() 136 if (sock_owned_by_user(sk)) { ccid2_hc_tx_rto_expire() 137 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5); ccid2_hc_tx_rto_expire() 162 ccid2_change_l_ack_ratio(sk, 1); ccid2_hc_tx_rto_expire() 166 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); ccid2_hc_tx_rto_expire() 168 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); ccid2_hc_tx_rto_expire() 170 bh_unlock_sock(sk); ccid2_hc_tx_rto_expire() 171 sock_put(sk); ccid2_hc_tx_rto_expire() 194 static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now) ccid2_cwnd_application_limited() argument 196 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_cwnd_application_limited() 198 u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache), ccid2_cwnd_application_limited() 209 ccid2_check_l_ack_ratio(sk); ccid2_cwnd_application_limited() 213 static void ccid2_cwnd_restart(struct sock *sk, const u32 now) ccid2_cwnd_restart() argument 215 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_cwnd_restart() 217 iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache); ccid2_cwnd_restart() 229 ccid2_check_l_ack_ratio(sk); ccid2_cwnd_restart() 232 static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len) ccid2_hc_tx_packet_sent() argument 234 struct dccp_sock *dp = dccp_sk(sk); ccid2_hc_tx_packet_sent() 235 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_hc_tx_packet_sent() 242 ccid2_cwnd_restart(sk, now); ccid2_hc_tx_packet_sent() 259 ccid2_cwnd_application_limited(sk, now); ccid2_hc_tx_packet_sent() 320 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); ccid2_hc_tx_packet_sent() 330 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); ccid2_hc_tx_packet_sent() 356 static void ccid2_rtt_estimator(struct sock *sk, const long mrtt) ccid2_rtt_estimator() argument 358 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_rtt_estimator() 366 hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk)); ccid2_rtt_estimator() 369 hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss; ccid2_rtt_estimator() 404 if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) { ccid2_rtt_estimator() 408 hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss; ccid2_rtt_estimator() 409 hc->tx_mdev_max = tcp_rto_min(sk); ccid2_rtt_estimator() 426 static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp, 
ccid2_new_ack() argument 429 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_new_ack() 430 struct dccp_sock *dp = dccp_sk(sk); ccid2_new_ack() 452 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2); ccid2_new_ack() 454 ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U); ccid2_new_ack() 457 ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2); ccid2_new_ack() 459 ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2); ccid2_new_ack() 469 ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent); ccid2_new_ack() 472 static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) ccid2_congestion_event() argument 474 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_congestion_event() 486 ccid2_check_l_ack_ratio(sk); ccid2_congestion_event() 489 static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type, ccid2_hc_tx_parse_options() argument 492 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_hc_tx_parse_options() 503 static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) ccid2_hc_tx_packet_recv() argument 505 struct dccp_sock *dp = dccp_sk(sk); ccid2_hc_tx_packet_recv() 506 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_hc_tx_packet_recv() 545 ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); ccid2_hc_tx_packet_recv() 615 ccid2_congestion_event(sk, ccid2_hc_tx_packet_recv() 618 ccid2_new_ack(sk, seqp, ccid2_hc_tx_packet_recv() 679 ccid2_congestion_event(sk, seqp); ccid2_hc_tx_packet_recv() 700 sk_stop_timer(sk, &hc->tx_rtotimer); ccid2_hc_tx_packet_recv() 702 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); ccid2_hc_tx_packet_recv() 706 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); ccid2_hc_tx_packet_recv() 710 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) ccid2_hc_tx_init() argument 713 struct dccp_sock *dp = dccp_sk(sk); ccid2_hc_tx_init() 737 (unsigned long)sk); ccid2_hc_tx_init() 742 static void ccid2_hc_tx_exit(struct sock *sk) ccid2_hc_tx_exit() argument 744 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); ccid2_hc_tx_exit() 747 sk_stop_timer(sk, &hc->tx_rtotimer); ccid2_hc_tx_exit() 754 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) ccid2_hc_rx_packet_recv() argument 756 struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk); ccid2_hc_rx_packet_recv() 761 if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) { ccid2_hc_rx_packet_recv() 762 dccp_send_ack(sk); ccid2_hc_rx_packet_recv()
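Note: ccid2_rtt_estimator() above transcribes the TCP/RFC 6298 estimator: srtt is a gain-1/8 EWMA of the samples, mdev a gain-1/4 EWMA of the absolute error, and RTO = srtt + 4*mdev with a lower bound. A hedged plain-C sketch of just that arithmetic (microsecond integers; the per-RTT mdev_max/rttvar windowing that ccid2 also performs is omitted, and rto_min stands in for tcp_rto_min()):

    struct rtt_state {
            long srtt;      /* smoothed RTT, 0 = no sample yet */
            long mdev;      /* smoothed mean deviation */
            long rto;
    };

    static void rtt_sample(struct rtt_state *s, long m, long rto_min)
    {
            if (m <= 0)
                    m = 1;
            if (s->srtt == 0) {             /* first sample */
                    s->srtt = m;
                    s->mdev = m / 2;
            } else {
                    long err = m - s->srtt;

                    s->srtt += err / 8;     /* srtt := 7/8*srtt + 1/8*m */
                    if (err < 0)
                            err = -err;
                    s->mdev += (err - s->mdev) / 4;
            }
            s->rto = s->srtt + 4 * s->mdev;
            if (s->rto < rto_min)
                    s->rto = rto_min;
    }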
|
/linux-4.1.27/include/crypto/ |
H A D | if_alg.h | 29 struct sock sk; member in struct:alg_sock 55 int (*accept)(void *private, struct sock *sk); 56 int (*accept_nokey)(void *private, struct sock *sk); 75 void af_alg_release_parent(struct sock *sk); 76 int af_alg_accept(struct sock *sk, struct socket *newsock); 87 static inline struct alg_sock *alg_sk(struct sock *sk) alg_sk() argument 89 return (struct alg_sock *)sk; alg_sk()
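Note: alg_sk() is safe only because struct alg_sock embeds struct sock as its first member, so the protocol-private type and the generic sock share an address. A minimal sketch of the embed-and-cast idiom (struct my_sock is illustrative):

    #include <net/sock.h>

    struct my_sock {
            struct sock sk;         /* must remain the first member */
            int my_state;
    };

    static inline struct my_sock *my_sk(struct sock *sk)
    {
            /* Equivalent to container_of(sk, struct my_sock, sk), given
             * that sk is the first member.
             */
            return (struct my_sock *)sk;
    }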
|
/linux-4.1.27/net/irda/ |
H A D | af_irda.c | 84 struct sock *sk; irda_data_indication() local 88 sk = instance; irda_data_indication() 90 err = sock_queue_rcv_skb(sk, skb); irda_data_indication() 112 struct sock *sk; irda_disconnect_indication() local 122 sk = instance; irda_disconnect_indication() 123 if (sk == NULL) { irda_disconnect_indication() 124 pr_debug("%s(%p) : BUG : sk is NULL\n", irda_disconnect_indication() 130 bh_lock_sock(sk); irda_disconnect_indication() 131 if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) { irda_disconnect_indication() 132 sk->sk_state = TCP_CLOSE; irda_disconnect_indication() 133 sk->sk_shutdown |= SEND_SHUTDOWN; irda_disconnect_indication() 135 sk->sk_state_change(sk); irda_disconnect_indication() 146 * Note : all socket function do check sk->sk_state, so we are irda_disconnect_indication() 155 bh_unlock_sock(sk); irda_disconnect_indication() 159 * For example, bind() and connect() won't reset sk->sk_err, irda_disconnect_indication() 160 * sk->sk_shutdown and sk->sk_flags to valid values... irda_disconnect_indication() 177 struct sock *sk; irda_connect_confirm() local 183 sk = instance; irda_connect_confirm() 184 if (sk == NULL) { irda_connect_confirm() 190 // Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb); irda_connect_confirm() 199 switch (sk->sk_type) { irda_connect_confirm() 226 sk->sk_state = TCP_ESTABLISHED; irda_connect_confirm() 227 sk->sk_state_change(sk); irda_connect_confirm() 241 struct sock *sk; irda_connect_indication() local 247 sk = instance; irda_connect_indication() 248 if (sk == NULL) { irda_connect_indication() 260 switch (sk->sk_type) { irda_connect_indication() 288 skb_queue_tail(&sk->sk_receive_queue, skb); irda_connect_indication() 289 sk->sk_state_change(sk); irda_connect_indication() 324 struct sock *sk; irda_flow_indication() local 327 sk = instance; irda_flow_indication() 328 BUG_ON(sk == NULL); irda_flow_indication() 340 wake_up_interruptible(sk_sleep(sk)); irda_flow_indication() 702 struct sock *sk = sock->sk; irda_getname() local 703 struct irda_sock *self = irda_sk(sk); irda_getname() 707 if (sk->sk_state != TCP_ESTABLISHED) irda_getname() 737 struct sock *sk = sock->sk; irda_listen() local 740 lock_sock(sk); irda_listen() 742 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && irda_listen() 743 (sk->sk_type != SOCK_DGRAM)) irda_listen() 746 if (sk->sk_state != TCP_LISTEN) { irda_listen() 747 sk->sk_max_ack_backlog = backlog; irda_listen() 748 sk->sk_state = TCP_LISTEN; irda_listen() 753 release_sock(sk); irda_listen() 766 struct sock *sk = sock->sk; irda_bind() local 768 struct irda_sock *self = irda_sk(sk); irda_bind() 776 lock_sock(sk); irda_bind() 779 if ((sk->sk_type == SOCK_DGRAM) && irda_bind() 780 (sk->sk_protocol == IRDAPROTO_ULTRA)) { irda_bind() 794 sk->sk_state = TCP_ESTABLISHED; irda_bind() 820 release_sock(sk); irda_bind() 832 struct sock *sk = sock->sk; irda_accept() local 833 struct irda_sock *new, *self = irda_sk(sk); irda_accept() 838 err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); irda_accept() 844 lock_sock(sk); irda_accept() 848 if ((sk = sock->sk) == NULL) irda_accept() 852 if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && irda_accept() 853 (sk->sk_type != SOCK_DGRAM)) irda_accept() 857 if (sk->sk_state != TCP_LISTEN) irda_accept() 874 skb = skb_dequeue(&sk->sk_receive_queue); irda_accept() 883 err = wait_event_interruptible(*(sk_sleep(sk)), irda_accept() 884 skb_peek(&sk->sk_receive_queue)); irda_accept() 889 newsk = newsock->sk; irda_accept() 923 
sk->sk_ack_backlog--; irda_accept() 930 release_sock(sk); irda_accept() 957 struct sock *sk = sock->sk; irda_connect() local 959 struct irda_sock *self = irda_sk(sk); irda_connect() 964 lock_sock(sk); irda_connect() 967 if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) irda_connect() 970 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { irda_connect() 976 if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { irda_connect() 983 if (sk->sk_state == TCP_ESTABLISHED) irda_connect() 986 sk->sk_state = TCP_CLOSE; irda_connect() 1032 sk->sk_state = TCP_SYN_SENT; irda_connect() 1045 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) irda_connect() 1049 if (wait_event_interruptible(*(sk_sleep(sk)), irda_connect() 1050 (sk->sk_state != TCP_SYN_SENT))) irda_connect() 1053 if (sk->sk_state != TCP_ESTABLISHED) { irda_connect() 1055 err = sock_error(sk); irda_connect() 1067 release_sock(sk); irda_connect() 1086 struct sock *sk; irda_create() local 1106 sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto); irda_create() 1107 if (sk == NULL) irda_create() 1110 self = irda_sk(sk); irda_create() 1141 sk_free(sk); irda_create() 1146 sk_free(sk); irda_create() 1151 sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */ irda_create() 1152 sk->sk_family = PF_IRDA; irda_create() 1153 sk->sk_protocol = protocol; irda_create() 1208 struct sock *sk = sock->sk; irda_release() local 1210 if (sk == NULL) irda_release() 1213 lock_sock(sk); irda_release() 1214 sk->sk_state = TCP_CLOSE; irda_release() 1215 sk->sk_shutdown |= SEND_SHUTDOWN; irda_release() 1216 sk->sk_state_change(sk); irda_release() 1219 irda_destroy_socket(irda_sk(sk)); irda_release() 1221 sock_orphan(sk); irda_release() 1222 sock->sk = NULL; irda_release() 1223 release_sock(sk); irda_release() 1226 skb_queue_purge(&sk->sk_receive_queue); irda_release() 1229 * i.e. 
if(sk->sk_refcnt == 0) -> sk_free(sk) */ irda_release() 1230 sock_put(sk); irda_release() 1270 struct sock *sk = sock->sk; irda_sendmsg() local 1283 lock_sock(sk); irda_sendmsg() 1285 if (sk->sk_shutdown & SEND_SHUTDOWN) irda_sendmsg() 1288 if (sk->sk_state != TCP_ESTABLISHED) { irda_sendmsg() 1293 self = irda_sk(sk); irda_sendmsg() 1297 if (wait_event_interruptible(*(sk_sleep(sk)), irda_sendmsg() 1298 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { irda_sendmsg() 1304 if (sk->sk_state != TCP_ESTABLISHED) { irda_sendmsg() 1316 skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16, irda_sendmsg() 1340 release_sock(sk); irda_sendmsg() 1345 err = sk_stream_error(sk, msg->msg_flags, err); irda_sendmsg() 1347 release_sock(sk); irda_sendmsg() 1361 struct sock *sk = sock->sk; irda_recvmsg_dgram() local 1362 struct irda_sock *self = irda_sk(sk); irda_recvmsg_dgram() 1367 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, irda_recvmsg_dgram() 1383 skb_free_datagram(sk, skb); irda_recvmsg_dgram() 1392 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { irda_recvmsg_dgram() 1408 struct sock *sk = sock->sk; irda_recvmsg_stream() local 1409 struct irda_sock *self = irda_sk(sk); irda_recvmsg_stream() 1415 if ((err = sock_error(sk)) < 0) irda_recvmsg_stream() 1426 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); irda_recvmsg_stream() 1427 timeo = sock_rcvtimeo(sk, noblock); irda_recvmsg_stream() 1431 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); irda_recvmsg_stream() 1440 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); irda_recvmsg_stream() 1445 err = sock_error(sk); irda_recvmsg_stream() 1448 else if (sk->sk_shutdown & RCV_SHUTDOWN) irda_recvmsg_stream() 1454 else if (sk->sk_state != TCP_ESTABLISHED) irda_recvmsg_stream() 1456 else if (skb_peek(&sk->sk_receive_queue) == NULL) irda_recvmsg_stream() 1460 finish_wait(sk_sleep(sk), &wait); irda_recvmsg_stream() 1464 if (sk->sk_shutdown & RCV_SHUTDOWN) irda_recvmsg_stream() 1472 skb_queue_head(&sk->sk_receive_queue, skb); irda_recvmsg_stream() 1488 skb_queue_head(&sk->sk_receive_queue, skb); irda_recvmsg_stream() 1497 skb_queue_head(&sk->sk_receive_queue, skb); irda_recvmsg_stream() 1509 if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { irda_recvmsg_stream() 1529 struct sock *sk = sock->sk; irda_sendmsg_dgram() local 1539 lock_sock(sk); irda_sendmsg_dgram() 1541 if (sk->sk_shutdown & SEND_SHUTDOWN) { irda_sendmsg_dgram() 1548 if (sk->sk_state != TCP_ESTABLISHED) irda_sendmsg_dgram() 1551 self = irda_sk(sk); irda_sendmsg_dgram() 1563 skb = sock_alloc_send_skb(sk, len + self->max_header_size, irda_sendmsg_dgram() 1590 release_sock(sk); irda_sendmsg_dgram() 1594 release_sock(sk); irda_sendmsg_dgram() 1608 struct sock *sk = sock->sk; irda_sendmsg_ultra() local 1621 lock_sock(sk); irda_sendmsg_ultra() 1624 if (sk->sk_shutdown & SEND_SHUTDOWN) { irda_sendmsg_ultra() 1629 self = irda_sk(sk); irda_sendmsg_ultra() 1652 (sk->sk_state != TCP_ESTABLISHED)) { irda_sendmsg_ultra() 1672 skb = sock_alloc_send_skb(sk, len + self->max_header_size, irda_sendmsg_ultra() 1694 release_sock(sk); irda_sendmsg_ultra() 1700 * Function irda_shutdown (sk, how) 1704 struct sock *sk = sock->sk; irda_shutdown() local 1705 struct irda_sock *self = irda_sk(sk); irda_shutdown() 1709 lock_sock(sk); irda_shutdown() 1711 sk->sk_state = TCP_CLOSE; irda_shutdown() 1712 sk->sk_shutdown |= SEND_SHUTDOWN; irda_shutdown() 1713 sk->sk_state_change(sk); irda_shutdown() 1731 release_sock(sk); irda_shutdown() 1742 
struct sock *sk = sock->sk; irda_poll() local 1743 struct irda_sock *self = irda_sk(sk); irda_poll() 1746 poll_wait(file, sk_sleep(sk), wait); irda_poll() 1750 if (sk->sk_err) irda_poll() 1752 if (sk->sk_shutdown & RCV_SHUTDOWN) { irda_poll() 1758 if (!skb_queue_empty(&sk->sk_receive_queue)) { irda_poll() 1764 switch (sk->sk_type) { irda_poll() 1766 if (sk->sk_state == TCP_CLOSE) { irda_poll() 1771 if (sk->sk_state == TCP_ESTABLISHED) { irda_poll() 1773 sock_writeable(sk)) irda_poll() 1781 sock_writeable(sk)) irda_poll() 1787 if (sock_writeable(sk)) irda_poll() 1802 struct sock *sk = sock->sk; irda_ioctl() local 1812 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); irda_ioctl() 1823 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) irda_ioctl() 1830 if (sk != NULL) irda_ioctl() 1831 err = sock_get_timestamp(sk, (struct timeval __user *)arg); irda_ioctl() 1875 struct sock *sk = sock->sk; irda_setsockopt() local 1876 struct irda_sock *self = irda_sk(sk); irda_setsockopt() 1887 lock_sock(sk); irda_setsockopt() 2105 if (sk->sk_type != SOCK_SEQPACKET) { irda_setsockopt() 2166 release_sock(sk); irda_setsockopt() 2226 struct sock *sk = sock->sk; irda_getsockopt() local 2227 struct irda_sock *self = irda_sk(sk); irda_getsockopt() 2250 lock_sock(sk); irda_getsockopt() 2584 release_sock(sk); irda_getsockopt()
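Note: irda_accept() above blocks with wait_event_interruptible() on sk_sleep(sk) until the connect-indication handler queues an skb on sk_receive_queue and kicks sk_state_change(). A condensed sketch of that wait-then-dequeue loop, minus irda_accept()'s socket-state checks:

    #include <net/sock.h>

    static struct sk_buff *my_wait_for_connect(struct sock *sk)
    {
            struct sk_buff *skb;

            for (;;) {
                    skb = skb_dequeue(&sk->sk_receive_queue);
                    if (skb)
                            return skb;     /* one queued indication */
                    /* Sleep until the indication path wakes sk_sleep(sk). */
                    if (wait_event_interruptible(*sk_sleep(sk),
                                    skb_peek(&sk->sk_receive_queue)))
                            return NULL;    /* interrupted by a signal */
            }
    }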
|
/linux-4.1.27/security/selinux/ |
H A D | netlabel.c | 73 * @sk: the socket 81 static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk) selinux_netlbl_sock_genattr() argument 84 struct sk_security_struct *sksec = sk->sk_security; selinux_netlbl_sock_genattr() 105 * @sk: the socket 113 const struct sock *sk, selinux_netlbl_sock_getattr() 116 struct sk_security_struct *sksec = sk->sk_security; selinux_netlbl_sock_getattr() 244 struct sock *sk; selinux_netlbl_skbuff_setsid() local 248 sk = skb->sk; selinux_netlbl_skbuff_setsid() 249 if (sk != NULL) { selinux_netlbl_skbuff_setsid() 250 struct sk_security_struct *sksec = sk->sk_security; selinux_netlbl_skbuff_setsid() 253 secattr = selinux_netlbl_sock_getattr(sk, sid); selinux_netlbl_skbuff_setsid() 302 * @sk: the new sock 305 * A new connection has been established using @sk, we've already labeled the 310 void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family) selinux_netlbl_inet_csk_clone() argument 312 struct sk_security_struct *sksec = sk->sk_security; selinux_netlbl_inet_csk_clone() 330 int selinux_netlbl_socket_post_create(struct sock *sk, u16 family) selinux_netlbl_socket_post_create() argument 333 struct sk_security_struct *sksec = sk->sk_security; selinux_netlbl_socket_post_create() 339 secattr = selinux_netlbl_sock_genattr(sk); selinux_netlbl_socket_post_create() 342 rc = netlbl_sock_setattr(sk, family, secattr); selinux_netlbl_socket_post_create() 430 struct sock *sk = sock->sk; selinux_netlbl_socket_setsockopt() local 431 struct sk_security_struct *sksec = sk->sk_security; selinux_netlbl_socket_setsockopt() 438 lock_sock(sk); selinux_netlbl_socket_setsockopt() 442 rc = netlbl_sock_getattr(sk, &secattr); selinux_netlbl_socket_setsockopt() 443 release_sock(sk); selinux_netlbl_socket_setsockopt() 456 * @sk: the socket to label 464 int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr) selinux_netlbl_socket_connect() argument 467 struct sk_security_struct *sksec = sk->sk_security; selinux_netlbl_socket_connect() 474 lock_sock(sk); selinux_netlbl_socket_connect() 480 netlbl_sock_delattr(sk); selinux_netlbl_socket_connect() 485 secattr = selinux_netlbl_sock_genattr(sk); selinux_netlbl_socket_connect() 490 rc = netlbl_conn_setattr(sk, addr, secattr); selinux_netlbl_socket_connect() 495 release_sock(sk); selinux_netlbl_socket_connect() 112 selinux_netlbl_sock_getattr( const struct sock *sk, u32 sid) selinux_netlbl_sock_getattr() argument
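Note: selinux_netlbl_socket_connect() wraps the netlbl_sock_delattr()/netlbl_conn_setattr() pair in lock_sock()/release_sock() so the on-the-wire label is never observed half-swapped by concurrent socket users. A minimal sketch of that serialization; my_apply_label() is a hypothetical stand-in for the NetLabel calls:

    #include <net/sock.h>

    int my_apply_label(struct sock *sk);    /* hypothetical */

    static int my_relabel(struct sock *sk)
    {
            int rc;

            lock_sock(sk);
            rc = my_apply_label(sk);        /* e.g. delete + set attrs */
            release_sock(sk);
            return rc;
    }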
|
/linux-4.1.27/drivers/target/iscsi/ |
H A D | iscsi_target_nego.c | 411 static void iscsi_target_sk_data_ready(struct sock *sk) iscsi_target_sk_data_ready() argument 413 struct iscsi_conn *conn = sk->sk_user_data; iscsi_target_sk_data_ready() 418 write_lock_bh(&sk->sk_callback_lock); iscsi_target_sk_data_ready() 419 if (!sk->sk_user_data) { iscsi_target_sk_data_ready() 420 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_data_ready() 424 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_data_ready() 429 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_data_ready() 434 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_data_ready() 444 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_data_ready() 451 struct sock *sk; iscsi_target_set_sock_callbacks() local 456 sk = conn->sock->sk; iscsi_target_set_sock_callbacks() 459 write_lock_bh(&sk->sk_callback_lock); iscsi_target_set_sock_callbacks() 460 sk->sk_user_data = conn; iscsi_target_set_sock_callbacks() 461 conn->orig_data_ready = sk->sk_data_ready; iscsi_target_set_sock_callbacks() 462 conn->orig_state_change = sk->sk_state_change; iscsi_target_set_sock_callbacks() 463 sk->sk_data_ready = iscsi_target_sk_data_ready; iscsi_target_set_sock_callbacks() 464 sk->sk_state_change = iscsi_target_sk_state_change; iscsi_target_set_sock_callbacks() 465 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_set_sock_callbacks() 467 sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ; iscsi_target_set_sock_callbacks() 468 sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ; iscsi_target_set_sock_callbacks() 473 struct sock *sk; iscsi_target_restore_sock_callbacks() local 478 sk = conn->sock->sk; iscsi_target_restore_sock_callbacks() 481 write_lock_bh(&sk->sk_callback_lock); iscsi_target_restore_sock_callbacks() 482 if (!sk->sk_user_data) { iscsi_target_restore_sock_callbacks() 483 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_restore_sock_callbacks() 486 sk->sk_user_data = NULL; iscsi_target_restore_sock_callbacks() 487 sk->sk_data_ready = conn->orig_data_ready; iscsi_target_restore_sock_callbacks() 488 sk->sk_state_change = conn->orig_state_change; iscsi_target_restore_sock_callbacks() 489 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_restore_sock_callbacks() 491 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; iscsi_target_restore_sock_callbacks() 492 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; iscsi_target_restore_sock_callbacks() 497 static bool iscsi_target_sk_state_check(struct sock *sk) iscsi_target_sk_state_check() argument 499 if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { iscsi_target_sk_state_check() 558 struct sock *sk = conn->sock->sk; iscsi_target_do_login_rx() local 560 read_lock_bh(&sk->sk_callback_lock); iscsi_target_do_login_rx() 561 state = iscsi_target_sk_state_check(sk); iscsi_target_do_login_rx() 562 read_unlock_bh(&sk->sk_callback_lock); iscsi_target_do_login_rx() 605 struct sock *sk = conn->sock->sk; iscsi_target_do_login_rx() local 607 write_lock_bh(&sk->sk_callback_lock); iscsi_target_do_login_rx() 609 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_do_login_rx() 622 struct sock *sk = conn->sock->sk; iscsi_target_do_cleanup() local 631 conn->orig_state_change(sk); iscsi_target_do_cleanup() 640 static void iscsi_target_sk_state_change(struct sock *sk) iscsi_target_sk_state_change() argument 648 write_lock_bh(&sk->sk_callback_lock); iscsi_target_sk_state_change() 649 conn = sk->sk_user_data; iscsi_target_sk_state_change() 651 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_state_change() 659 
write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_state_change() 660 orig_state_change(sk); iscsi_target_sk_state_change() 666 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_state_change() 667 orig_state_change(sk); iscsi_target_sk_state_change() 673 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_state_change() 674 orig_state_change(sk); iscsi_target_sk_state_change() 678 state = iscsi_target_sk_state_check(sk); iscsi_target_sk_state_change() 679 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_sk_state_change() 688 orig_state_change(sk); iscsi_target_sk_state_change() 977 struct sock *sk = conn->sock->sk; iscsi_target_do_login() local 980 read_lock_bh(&sk->sk_callback_lock); iscsi_target_do_login() 981 state = iscsi_target_sk_state_check(sk); iscsi_target_do_login() 982 read_unlock_bh(&sk->sk_callback_lock); iscsi_target_do_login() 1257 struct sock *sk = conn->sock->sk; iscsi_target_start_negotiation() local 1259 write_lock_bh(&sk->sk_callback_lock); iscsi_target_start_negotiation() 1261 write_unlock_bh(&sk->sk_callback_lock); iscsi_target_start_negotiation()
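Note: iscsi_target_set_sock_callbacks()/iscsi_target_restore_sock_callbacks() show the standard recipe for hijacking a socket's callbacks: under write_lock_bh(&sk->sk_callback_lock), point sk_user_data at your context, stash the original hooks, install replacements, then undo everything in reverse, treating a NULL sk_user_data as "already unhooked". A condensed sketch with a hypothetical struct my_conn:

    #include <net/sock.h>

    struct my_conn {
            struct socket *sock;
            void (*orig_data_ready)(struct sock *sk);
    };

    void my_data_ready(struct sock *sk);    /* replacement hook */

    static void my_hook(struct my_conn *conn)
    {
            struct sock *sk = conn->sock->sk;

            write_lock_bh(&sk->sk_callback_lock);
            sk->sk_user_data = conn;        /* found again inside the hook */
            conn->orig_data_ready = sk->sk_data_ready;
            sk->sk_data_ready = my_data_ready;
            write_unlock_bh(&sk->sk_callback_lock);
    }

    static void my_unhook(struct my_conn *conn)
    {
            struct sock *sk = conn->sock->sk;

            write_lock_bh(&sk->sk_callback_lock);
            if (sk->sk_user_data) {         /* not already restored */
                    sk->sk_user_data = NULL;
                    sk->sk_data_ready = conn->orig_data_ready;
            }
            write_unlock_bh(&sk->sk_callback_lock);
    }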
|
/linux-4.1.27/net/bluetooth/bnep/ |
H A D | sock.c | 38 struct sock *sk = sock->sk; bnep_sock_release() local 40 BT_DBG("sock %p sk %p", sock, sk); bnep_sock_release() 42 if (!sk) bnep_sock_release() 45 bt_sock_unlink(&bnep_sk_list, sk); bnep_sock_release() 47 sock_orphan(sk); bnep_sock_release() 48 sock_put(sk); bnep_sock_release() 77 if (nsock->sk->sk_state != BT_CONNECTED) { bnep_sock_ioctl() 198 struct sock *sk; bnep_sock_create() local 205 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto); bnep_sock_create() 206 if (!sk) bnep_sock_create() 209 sock_init_data(sock, sk); bnep_sock_create() 215 sock_reset_flag(sk, SOCK_ZAPPED); bnep_sock_create() 217 sk->sk_protocol = protocol; bnep_sock_create() 218 sk->sk_state = BT_OPEN; bnep_sock_create() 220 bt_sock_link(&bnep_sk_list, sk); bnep_sock_create()
|
/linux-4.1.27/net/bluetooth/cmtp/ |
H A D | sock.c | 51 struct sock *sk = sock->sk; cmtp_sock_release() local 53 BT_DBG("sock %p sk %p", sock, sk); cmtp_sock_release() 55 if (!sk) cmtp_sock_release() 58 bt_sock_unlink(&cmtp_sk_list, sk); cmtp_sock_release() 60 sock_orphan(sk); cmtp_sock_release() 61 sock_put(sk); cmtp_sock_release() 90 if (nsock->sk->sk_state != BT_CONNECTED) { cmtp_sock_ioctl() 201 struct sock *sk; cmtp_sock_create() local 208 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto); cmtp_sock_create() 209 if (!sk) cmtp_sock_create() 212 sock_init_data(sock, sk); cmtp_sock_create() 218 sock_reset_flag(sk, SOCK_ZAPPED); cmtp_sock_create() 220 sk->sk_protocol = protocol; cmtp_sock_create() 221 sk->sk_state = BT_OPEN; cmtp_sock_create() 223 bt_sock_link(&cmtp_sk_list, sk); cmtp_sock_create()
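Note: bnep_sock_create() and cmtp_sock_create() above are near-identical boilerplate: sk_alloc() against the module's proto, sock_init_data() to tie sock and sk together, clear SOCK_ZAPPED, record the protocol and BT_OPEN state, and link into the module's bt_sock_list. A hedged sketch of that sequence; my_proto and my_sk_list are assumed to be set up elsewhere:

    #include <net/sock.h>
    #include <net/bluetooth/bluetooth.h>

    static struct proto my_proto;           /* assumed registered */
    static struct bt_sock_list my_sk_list;  /* assumed initialized */

    static int my_sock_create(struct net *net, struct socket *sock,
                              int protocol)
    {
            struct sock *sk;

            sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &my_proto);
            if (!sk)
                    return -ENOMEM;

            sock_init_data(sock, sk);       /* refcount 1, sock <-> sk */
            sock_reset_flag(sk, SOCK_ZAPPED);
            sk->sk_protocol = protocol;
            sk->sk_state = BT_OPEN;

            bt_sock_link(&my_sk_list, sk);  /* module-wide accounting */
            return 0;
    }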
|
/linux-4.1.27/net/packet/ |
H A D | af_packet.c | 29 * Alan Cox : New buffers. Use sk->mac.raw. 168 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 216 static void packet_flush_mclist(struct sock *sk); 243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 244 static void __fanout_link(struct sock *sk, struct packet_sock *po); 337 static void register_prot_hook(struct sock *sk) register_prot_hook() argument 339 struct packet_sock *po = pkt_sk(sk); register_prot_hook() 343 __fanout_link(sk, po); register_prot_hook() 347 sock_hold(sk); register_prot_hook() 359 static void __unregister_prot_hook(struct sock *sk, bool sync) __unregister_prot_hook() argument 361 struct packet_sock *po = pkt_sk(sk); __unregister_prot_hook() 366 __fanout_unlink(sk, po); __unregister_prot_hook() 370 __sock_put(sk); __unregister_prot_hook() 379 static void unregister_prot_hook(struct sock *sk, bool sync) unregister_prot_hook() argument 381 struct packet_sock *po = pkt_sk(sk); unregister_prot_hook() 384 __unregister_prot_hook(sk, sync); unregister_prot_hook() 568 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); prb_calc_retire_blk_tmo() 681 spin_lock(&po->sk.sk_receive_queue.lock); prb_retire_rx_blk_timer_expired() 744 spin_unlock(&po->sk.sk_receive_queue.lock); prb_retire_rx_blk_timer_expired() 798 struct sock *sk = &po->sk; prb_close_block() local 827 sk->sk_data_ready(sk); prb_close_block() 1043 /* Assumes caller has the sk->rx_queue.lock */ __packet_lookup_frame_in_block() 1239 struct sock *sk = &po->sk; packet_rcv_has_room() local 1243 return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize) packet_rcv_has_room() 1244 <= sk->sk_rcvbuf; packet_rcv_has_room() 1246 spin_lock(&sk->sk_receive_queue.lock); packet_rcv_has_room() 1255 spin_unlock(&sk->sk_receive_queue.lock); packet_rcv_has_room() 1260 static void packet_sock_destruct(struct sock *sk) packet_sock_destruct() argument 1262 skb_queue_purge(&sk->sk_error_queue); packet_sock_destruct() 1264 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); packet_sock_destruct() 1265 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); packet_sock_destruct() 1267 if (!sock_flag(sk, SOCK_DEAD)) { packet_sock_destruct() 1268 pr_err("Attempt to release alive packet socket: %p\n", sk); packet_sock_destruct() 1272 sk_refcnt_debug_dec(sk); packet_sock_destruct() 1393 static void __fanout_link(struct sock *sk, struct packet_sock *po) __fanout_link() argument 1398 f->arr[f->num_members] = sk; __fanout_link() 1404 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) __fanout_unlink() argument 1411 if (f->arr[i] == sk) __fanout_unlink() 1420 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) match_fanout_group() argument 1422 if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout) match_fanout_group() 1428 static int fanout_add(struct sock *sk, u16 id, u16 type_flags) fanout_add() argument 1430 struct packet_sock *po = pkt_sk(sk); fanout_add() 1460 read_pnet(&f->net) == sock_net(sk)) { fanout_add() 1473 write_pnet(&match->net, sock_net(sk)); fanout_add() 1498 __fanout_link(sk, po); fanout_add() 1507 static void fanout_release(struct sock *sk) fanout_release() argument 1509 struct packet_sock *po = pkt_sk(sk); fanout_release() 1548 struct sock *sk; packet_rcv_spkt() local 1556 sk = pt->af_packet_priv; packet_rcv_spkt() 1572 if (!net_eq(dev_net(dev), sock_net(sk))) packet_rcv_spkt() 1602 if (sock_queue_rcv_skb(sk, skb) == 0) packet_rcv_spkt() 1620 struct sock *sk = sock->sk; packet_sendmsg_spkt() local 1647 dev = 
dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); packet_sendmsg_spkt() 1661 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { packet_sendmsg_spkt() 1679 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); packet_sendmsg_spkt() 1710 skb->priority = sk->sk_priority; packet_sendmsg_spkt() 1711 skb->mark = sk->sk_mark; packet_sendmsg_spkt() 1713 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); packet_sendmsg_spkt() 1732 const struct sock *sk, run_filter() 1738 filter = rcu_dereference(sk->sk_filter); run_filter() 1761 struct sock *sk; packet_rcv() local 1771 sk = pt->af_packet_priv; packet_rcv() 1772 po = pkt_sk(sk); packet_rcv() 1774 if (!net_eq(dev_net(dev), sock_net(sk))) packet_rcv() 1787 if (sk->sk_type != SOCK_DGRAM) packet_rcv() 1797 res = run_filter(skb, sk, snaplen); packet_rcv() 1803 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) packet_rcv() 1839 skb_set_owner_r(skb, sk); packet_rcv() 1846 spin_lock(&sk->sk_receive_queue.lock); packet_rcv() 1848 sock_skb_set_dropcount(sk, skb); packet_rcv() 1849 __skb_queue_tail(&sk->sk_receive_queue, skb); packet_rcv() 1850 spin_unlock(&sk->sk_receive_queue.lock); packet_rcv() 1851 sk->sk_data_ready(sk); packet_rcv() 1855 spin_lock(&sk->sk_receive_queue.lock); packet_rcv() 1857 atomic_inc(&sk->sk_drops); packet_rcv() 1858 spin_unlock(&sk->sk_receive_queue.lock); packet_rcv() 1873 struct sock *sk; tpacket_rcv() local 1896 sk = pt->af_packet_priv; tpacket_rcv() 1897 po = pkt_sk(sk); tpacket_rcv() 1899 if (!net_eq(dev_net(dev), sock_net(sk))) tpacket_rcv() 1903 if (sk->sk_type != SOCK_DGRAM) tpacket_rcv() 1913 res = run_filter(skb, sk, snaplen); tpacket_rcv() 1927 if (sk->sk_type == SOCK_DGRAM) { tpacket_rcv() 1940 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { tpacket_rcv() 1948 skb_set_owner_r(copy_skb, sk); tpacket_rcv() 1967 spin_lock(&sk->sk_receive_queue.lock); tpacket_rcv() 1986 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); tpacket_rcv() 1988 spin_unlock(&sk->sk_receive_queue.lock); tpacket_rcv() 2071 sk->sk_data_ready(sk); tpacket_rcv() 2087 spin_unlock(&sk->sk_receive_queue.lock); tpacket_rcv() 2089 sk->sk_data_ready(sk); tpacket_rcv() 2096 struct packet_sock *po = pkt_sk(skb->sk); tpacket_destruct_skb() 2139 struct socket *sock = po->sk.sk_socket; tpacket_fill_skb() 2148 skb->priority = po->sk.sk_priority; tpacket_fill_skb() 2149 skb->mark = po->sk.sk_mark; tpacket_fill_skb() 2150 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags); tpacket_fill_skb() 2228 atomic_add(to_write, &po->sk.sk_wmem_alloc); tpacket_fill_skb() 2286 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); tpacket_snd() 2296 if (po->sk.sk_socket->type == SOCK_RAW) tpacket_snd() 2316 skb = sock_alloc_send_skb(&po->sk, tpacket_snd() 2393 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, packet_alloc_skb() argument 2404 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, packet_alloc_skb() 2419 struct sock *sk = sock->sk; packet_snd() local 2429 struct packet_sock *po = pkt_sk(sk); packet_snd() 2451 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); packet_snd() 2513 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { packet_snd() 2528 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, packet_snd() 2551 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); packet_snd() 2561 skb->priority = sk->sk_priority; packet_snd() 2562 skb->mark = sk->sk_mark; packet_snd() 2611 struct sock *sk = sock->sk; packet_sendmsg() local 2612 struct packet_sock *po = pkt_sk(sk); packet_sendmsg() 2627 struct sock *sk = 
sock->sk; packet_release() local 2632 if (!sk) packet_release() 2635 net = sock_net(sk); packet_release() 2636 po = pkt_sk(sk); packet_release() 2639 sk_del_node_init_rcu(sk); packet_release() 2643 sock_prot_inuse_add(net, sk->sk_prot, -1); packet_release() 2647 unregister_prot_hook(sk, false); packet_release() 2656 packet_flush_mclist(sk); packet_release() 2660 packet_set_ring(sk, &req_u, 1, 0); packet_release() 2665 packet_set_ring(sk, &req_u, 1, 1); packet_release() 2668 fanout_release(sk); packet_release() 2674 sock_orphan(sk); packet_release() 2675 sock->sk = NULL; packet_release() 2679 skb_queue_purge(&sk->sk_receive_queue); packet_release() 2681 sk_refcnt_debug_release(sk); packet_release() 2683 sock_put(sk); packet_release() 2691 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, packet_do_bind() argument 2694 struct packet_sock *po = pkt_sk(sk); packet_do_bind() 2705 lock_sock(sk); packet_do_bind() 2710 dev = dev_get_by_name_rcu(sock_net(sk), name); packet_do_bind() 2716 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); packet_do_bind() 2734 __unregister_prot_hook(sk, true); packet_do_bind() 2738 unlisted = !dev_get_by_index_rcu(sock_net(sk), packet_do_bind() 2763 register_prot_hook(sk); packet_do_bind() 2765 sk->sk_err = ENETDOWN; packet_do_bind() 2766 if (!sock_flag(sk, SOCK_DEAD)) packet_do_bind() 2767 sk->sk_error_report(sk); packet_do_bind() 2773 release_sock(sk); packet_do_bind() 2784 struct sock *sk = sock->sk; packet_bind_spkt() local 2795 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); packet_bind_spkt() 2801 struct sock *sk = sock->sk; packet_bind() local 2812 return packet_do_bind(sk, NULL, sll->sll_ifindex, packet_bind() 2813 sll->sll_protocol ? : pkt_sk(sk)->num); packet_bind() 2829 struct sock *sk; packet_create() local 2843 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto); packet_create() 2844 if (sk == NULL) packet_create() 2851 sock_init_data(sock, sk); packet_create() 2853 po = pkt_sk(sk); packet_create() 2854 sk->sk_family = PF_PACKET; packet_create() 2864 sk->sk_destruct = packet_sock_destruct; packet_create() 2865 sk_refcnt_debug_inc(sk); packet_create() 2878 po->prot_hook.af_packet_priv = sk; packet_create() 2882 register_prot_hook(sk); packet_create() 2886 sk_add_node_rcu(sk, &net->packet.sklist); packet_create() 2895 sk_free(sk); packet_create() 2908 struct sock *sk = sock->sk; packet_recvmsg() local 2920 if (pkt_sk(sk)->ifindex < 0) packet_recvmsg() 2925 err = sock_recv_errqueue(sk, msg, len, packet_recvmsg() 2939 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); packet_recvmsg() 2950 if (pkt_sk(sk)->has_vnet_hdr) { packet_recvmsg() 3021 sock_recv_ts_and_drops(msg, sk, skb); packet_recvmsg() 3040 if (pkt_sk(sk)->auxdata) { packet_recvmsg() 3073 skb_free_datagram(sk, skb); packet_recvmsg() 3082 struct sock *sk = sock->sk; packet_getname_spkt() local 3090 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); packet_getname_spkt() 3103 struct sock *sk = sock->sk; packet_getname() local 3104 struct packet_sock *po = pkt_sk(sk); packet_getname() 3115 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); packet_getname() 3175 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) packet_mc_add() argument 3177 struct packet_sock *po = pkt_sk(sk); packet_mc_add() 3185 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); packet_mc_add() 3229 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) packet_mc_drop() argument 3235 for (mlp = &pkt_sk(sk)->mclist; (ml = 
*mlp) != NULL; mlp = &ml->next) { packet_mc_drop() 3243 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); packet_mc_drop() 3255 static void packet_flush_mclist(struct sock *sk) packet_flush_mclist() argument 3257 struct packet_sock *po = pkt_sk(sk); packet_flush_mclist() 3268 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); packet_flush_mclist() 3279 struct sock *sk = sock->sk; packet_setsockopt() local 3280 struct packet_sock *po = pkt_sk(sk); packet_setsockopt() 3302 ret = packet_mc_add(sk, &mreq); packet_setsockopt() 3304 ret = packet_mc_drop(sk, &mreq); packet_setsockopt() 3326 if (pkt_sk(sk)->has_vnet_hdr) packet_setsockopt() 3330 return packet_set_ring(sk, &req_u, 0, packet_setsockopt() 3342 pkt_sk(sk)->copy_thresh = val; packet_setsockopt() 3452 return fanout_add(sk, val & 0xffff, val >> 16); packet_setsockopt() 3489 struct sock *sk = sock->sk; packet_getsockopt() local 3490 struct packet_sock *po = pkt_sk(sk); packet_getsockopt() 3505 spin_lock_bh(&sk->sk_receive_queue.lock); packet_getsockopt() 3508 spin_unlock_bh(&sk->sk_receive_queue.lock); packet_getsockopt() 3591 struct sock *sk; packet_notifier() local 3596 sk_for_each_rcu(sk, &net->packet.sklist) { packet_notifier() 3597 struct packet_sock *po = pkt_sk(sk); packet_notifier() 3609 __unregister_prot_hook(sk, false); packet_notifier() 3610 sk->sk_err = ENETDOWN; packet_notifier() 3611 if (!sock_flag(sk, SOCK_DEAD)) packet_notifier() 3612 sk->sk_error_report(sk); packet_notifier() 3628 register_prot_hook(sk); packet_notifier() 3642 struct sock *sk = sock->sk; packet_ioctl() local 3647 int amount = sk_wmem_alloc_get(sk); packet_ioctl() 3656 spin_lock_bh(&sk->sk_receive_queue.lock); packet_ioctl() 3657 skb = skb_peek(&sk->sk_receive_queue); packet_ioctl() 3660 spin_unlock_bh(&sk->sk_receive_queue.lock); packet_ioctl() 3664 return sock_get_timestamp(sk, (struct timeval __user *)arg); packet_ioctl() 3666 return sock_get_timestampns(sk, (struct timespec __user *)arg); packet_ioctl() 3695 struct sock *sk = sock->sk; packet_poll() local 3696 struct packet_sock *po = pkt_sk(sk); packet_poll() 3699 spin_lock_bh(&sk->sk_receive_queue.lock); packet_poll() 3705 spin_unlock_bh(&sk->sk_receive_queue.lock); packet_poll() 3706 spin_lock_bh(&sk->sk_write_queue.lock); packet_poll() 3711 spin_unlock_bh(&sk->sk_write_queue.lock); packet_poll() 3724 struct sock *sk = sock->sk; packet_mm_open() local 3726 if (sk) packet_mm_open() 3727 atomic_inc(&pkt_sk(sk)->mapped); packet_mm_open() 3734 struct sock *sk = sock->sk; packet_mm_close() local 3736 if (sk) packet_mm_close() 3737 atomic_dec(&pkt_sk(sk)->mapped); packet_mm_close() 3813 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, packet_set_ring() argument 3817 struct packet_sock *po = pkt_sk(sk); packet_set_ring() 3833 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; packet_set_ring() 3907 lock_sock(sk); packet_set_ring() 3915 __unregister_prot_hook(sk, false); packet_set_ring() 3948 register_prot_hook(sk); packet_set_ring() 3956 release_sock(sk); packet_set_ring() 3967 struct sock *sk = sock->sk; packet_mmap() local 3968 struct packet_sock *po = pkt_sk(sk); packet_mmap() 4104 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); packet_seq_show() 1731 run_filter(const struct sk_buff *skb, const struct sock *sk, unsigned int res) run_filter() argument
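Note: register_prot_hook()/__unregister_prot_hook() above tie a dev_add_pack() registration to the socket's lifetime: while the hook can still deliver packets, a sock_hold() reference pins the sock, and __sock_put() releases it once the hook is gone. A condensed sketch (fanout linking and the synchronize_net() in the synchronous teardown path are omitted; the caller is assumed to hold the bind lock, as in af_packet):

    #include <linux/netdevice.h>
    #include <net/sock.h>

    struct my_psock {                       /* condensed packet_sock */
            struct sock sk;                 /* must be first */
            struct packet_type prot_hook;
            unsigned int running:1;
    };

    static void my_register_hook(struct my_psock *po)
    {
            if (!po->running) {
                    dev_add_pack(&po->prot_hook);
                    sock_hold(&po->sk);     /* delivery pins the sock */
                    po->running = 1;
            }
    }

    static void my_unregister_hook(struct my_psock *po)
    {
            if (po->running) {
                    po->running = 0;
                    __dev_remove_pack(&po->prot_hook);
                    __sock_put(&po->sk);    /* never the final reference */
            }
    }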
|
/linux-4.1.27/net/appletalk/ |
H A D | ddp.c |
80    static inline void __atalk_insert_socket(struct sock *sk)  __atalk_insert_socket() argument
82    sk_add_node(sk, &atalk_sockets);  __atalk_insert_socket()
85    static inline void atalk_remove_socket(struct sock *sk)  atalk_remove_socket() argument
88    sk_del_node_init(sk);  atalk_remove_socket()
131   * @sk: socket to insert in the list if it is not there already
139   static struct sock *atalk_find_or_insert_socket(struct sock *sk,  atalk_find_or_insert_socket() argument
155   __atalk_insert_socket(sk); /* Wheee, it's free, assign and insert. */  atalk_find_or_insert_socket()
163   struct sock *sk = (struct sock *)data;  atalk_destroy_timer() local
165   if (sk_has_allocations(sk)) {  atalk_destroy_timer()
166   sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;  atalk_destroy_timer()
167   add_timer(&sk->sk_timer);  atalk_destroy_timer()
169   sock_put(sk);  atalk_destroy_timer()
172   static inline void atalk_destroy_socket(struct sock *sk)  atalk_destroy_socket() argument
174   atalk_remove_socket(sk);  atalk_destroy_socket()
175   skb_queue_purge(&sk->sk_receive_queue);  atalk_destroy_socket()
177   if (sk_has_allocations(sk)) {  atalk_destroy_socket()
178   setup_timer(&sk->sk_timer, atalk_destroy_timer,  atalk_destroy_socket()
179   (unsigned long)sk);  atalk_destroy_socket()
180   sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;  atalk_destroy_socket()
181   add_timer(&sk->sk_timer);  atalk_destroy_socket()
183   sock_put(sk);  atalk_destroy_socket()
1020  struct sock *sk;  atalk_create() local
1033  sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);  atalk_create()
1034  if (!sk)  atalk_create()
1038  sock_init_data(sock, sk);  atalk_create()
1041  sock_set_flag(sk, SOCK_ZAPPED);  atalk_create()
1049  struct sock *sk = sock->sk;  atalk_release() local
1051  if (sk) {  atalk_release()
1052  sock_hold(sk);  atalk_release()
1053  lock_sock(sk);  atalk_release()
1055  sock_orphan(sk);  atalk_release()
1056  sock->sk = NULL;  atalk_release()
1057  atalk_destroy_socket(sk);  atalk_release()
1059  release_sock(sk);  atalk_release()
1060  sock_put(sk);  atalk_release()
1067  * @sk: socket to insert into the tables
1075  static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)  atalk_pick_and_bind_port() argument
1096  __atalk_insert_socket(sk);  atalk_pick_and_bind_port()
1097  at_sk(sk)->src_port = sat->sat_port;  atalk_pick_and_bind_port()
1110  static int atalk_autobind(struct sock *sk)  atalk_autobind() argument
1112  struct atalk_sock *at = at_sk(sk);  atalk_autobind()
1123  n = atalk_pick_and_bind_port(sk, &sat);  atalk_autobind()
1125  sock_reset_flag(sk, SOCK_ZAPPED);  atalk_autobind()
1134  struct sock *sk = sock->sk;  atalk_bind() local
1135  struct atalk_sock *at = at_sk(sk);  atalk_bind()
1138  if (!sock_flag(sk, SOCK_ZAPPED) ||  atalk_bind()
1145  lock_sock(sk);  atalk_bind()
1166  err = atalk_pick_and_bind_port(sk, addr);  atalk_bind()
1174  if (atalk_find_or_insert_socket(sk, addr))  atalk_bind()
1178  sock_reset_flag(sk, SOCK_ZAPPED);  atalk_bind()
1181  release_sock(sk);  atalk_bind()
1189  struct sock *sk = sock->sk;  atalk_connect() local
1190  struct atalk_sock *at = at_sk(sk);  atalk_connect()
1194  sk->sk_state = TCP_CLOSE;  atalk_connect()
1206  !sock_flag(sk, SOCK_BROADCAST)) {  atalk_connect()
1215  lock_sock(sk);  atalk_connect()
1217  if (sock_flag(sk, SOCK_ZAPPED))  atalk_connect()
1218  if (atalk_autobind(sk) < 0)  atalk_connect()
1230  sk->sk_state = TCP_ESTABLISHED;  atalk_connect()
1233  release_sock(sk);  atalk_connect()
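The atalk_bind() and atalk_autobind() hits above both funnel into atalk_pick_and_bind_port(), and a successful bind clears SOCK_ZAPPED. A hedged userspace sketch of that path, assuming a kernel built with AppleTalk support (CONFIG_ATALK); without a configured AppleTalk interface the bind is expected to fail:

    /*
     * Hedged sketch of the DDP bind path: ATADDR_ANY* values ask the
     * kernel to fill in the local net/node and pick a free port, the
     * same work atalk_autobind() does via atalk_pick_and_bind_port().
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/atalk.h>   /* struct sockaddr_at, ATADDR_* */
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_APPLETALK, SOCK_DGRAM, 0);
        if (fd < 0) {
            perror("socket");   /* fails unless CONFIG_ATALK is enabled */
            return 1;
        }

        struct sockaddr_at sat;
        memset(&sat, 0, sizeof(sat));
        sat.sat_family      = AF_APPLETALK;
        sat.sat_addr.s_net  = ATADDR_ANYNET;   /* "this network" */
        sat.sat_addr.s_node = ATADDR_ANYNODE;  /* "this node" */
        sat.sat_port        = ATADDR_ANYPORT;  /* let the kernel choose */

        /* Clears SOCK_ZAPPED on success, as in atalk_bind(). */
        if (bind(fd, (struct sockaddr *)&sat, sizeof(sat)) < 0)
            perror("bind");

        close(fd);
        return 0;
    }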
1245  struct sock *sk = sock->sk;  atalk_getname() local
1246  struct atalk_sock *at = at_sk(sk);  atalk_getname()
1249  lock_sock(sk);  atalk_getname()
1251  if (sock_flag(sk, SOCK_ZAPPED))  atalk_getname()
1252  if (atalk_autobind(sk) < 0)  atalk_getname()
1260  if (sk->sk_state != TCP_ESTABLISHED)  atalk_getname()
1277  release_sock(sk);  atalk_getname()
1564  struct sock *sk = sock->sk;  atalk_sendmsg() local
1565  struct atalk_sock *at = at_sk(sk);  atalk_sendmsg()
1583  lock_sock(sk);  atalk_sendmsg()
1586  if (sock_flag(sk, SOCK_ZAPPED))  atalk_sendmsg()
1587  if (atalk_autobind(sk) < 0)  atalk_sendmsg()
1598  !sock_flag(sk, SOCK_BROADCAST)) {  atalk_sendmsg()
1603  if (sk->sk_state != TCP_ESTABLISHED)  atalk_sendmsg()
1613  SOCK_DEBUG(sk, "SK %p: Got address.\n", sk);  atalk_sendmsg()
1634  SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",  atalk_sendmsg()
1635  sk, size, dev->name);  atalk_sendmsg()
1638  release_sock(sk);  atalk_sendmsg()
1639  skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);  atalk_sendmsg()
1640  lock_sock(sk);  atalk_sendmsg()
1648  SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);  atalk_sendmsg()
1659  SOCK_DEBUG(sk, "SK %p: Copy user data (%Zd bytes).\n", sk, len);  atalk_sendmsg()
1668  if (sk->sk_no_check_tx)  atalk_sendmsg()
1683  SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk);  atalk_sendmsg()
1692  SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk);  atalk_sendmsg()
1712  SOCK_DEBUG(sk, "SK %p: send out.\n", sk);  atalk_sendmsg()
1723  SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len);  atalk_sendmsg()
1726  release_sock(sk);  atalk_sendmsg()
1733  struct sock *sk = sock->sk;  atalk_recvmsg() local
1740  skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,  atalk_recvmsg()
1742  lock_sock(sk);  atalk_recvmsg()
1751  if (sk->sk_type != SOCK_RAW) {  atalk_recvmsg()
1771  skb_free_datagram(sk, skb); /* Free the datagram. */  atalk_recvmsg()
1774  release_sock(sk);  atalk_recvmsg()
1785  struct sock *sk = sock->sk;  atalk_ioctl() local
1791  long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);  atalk_ioctl()
1803  struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);  atalk_ioctl()
1812  rc = sock_get_timestamp(sk, argp);  atalk_ioctl()
1815  rc = sock_get_timestampns(sk, argp);  atalk_ioctl()
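The atalk_sendmsg() hits above show the datagram transmit path: a still-zapped socket is autobound, the DDP header is built, and the frame is either looped back or sent out; atalk_recvmsg() is the usual skb_recv_datagram()/skb_free_datagram() pair. A hedged userspace sketch of both directions; the destination net/node/port values are invented for illustration:

    /*
     * Hedged sketch of the DDP send/receive paths serviced by
     * atalk_sendmsg() and atalk_recvmsg().
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/atalk.h>   /* struct sockaddr_at, DDP_MAXSZ */
    #include <arpa/inet.h>     /* htons() */
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_APPLETALK, SOCK_DGRAM, 0);
        if (fd < 0) {
            perror("socket");
            return 1;
        }

        struct sockaddr_at dst;
        memset(&dst, 0, sizeof(dst));
        dst.sat_family      = AF_APPLETALK;
        dst.sat_addr.s_net  = htons(1);  /* assumed destination network */
        dst.sat_addr.s_node = 42;        /* assumed destination node */
        dst.sat_port        = 129;       /* assumed destination port */

        /* An unbound socket is autobound first, as atalk_sendmsg()
         * does for SOCK_ZAPPED sockets. */
        const char payload[] = "ping";
        if (sendto(fd, payload, sizeof(payload), 0,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
            perror("sendto");

        char buf[DDP_MAXSZ];   /* DDP datagram upper bound, per linux/atalk.h */
        struct sockaddr_at src;
        socklen_t slen = sizeof(src);
        if (recvfrom(fd, buf, sizeof(buf), MSG_DONTWAIT,
                     (struct sockaddr *)&src, &slen) < 0)
            perror("recvfrom");   /* EAGAIN if nothing has arrived */

        close(fd);
        return 0;
    }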
|
/linux-4.1.27/net/bluetooth/hidp/ |
H A D | sock.c |
34   struct sock *sk = sock->sk;  hidp_sock_release() local
36   BT_DBG("sock %p sk %p", sock, sk);  hidp_sock_release()
38   if (!sk)  hidp_sock_release()
41   bt_sock_unlink(&hidp_sk_list, sk);  hidp_sock_release()
43   sock_orphan(sk);  hidp_sock_release()
44   sock_put(sk);  hidp_sock_release()
231  struct sock *sk;  hidp_sock_create() local
238  sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);  hidp_sock_create()
239  if (!sk)  hidp_sock_create()
242  sock_init_data(sock, sk);  hidp_sock_create()
248  sock_reset_flag(sk, SOCK_ZAPPED);  hidp_sock_create()
250  sk->sk_protocol = protocol;  hidp_sock_create()
251  sk->sk_state = BT_OPEN;  hidp_sock_create()
253  bt_sock_link(&hidp_sk_list, sk);  hidp_sock_create()
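hidp_sock_create() only admits SOCK_RAW sockets, and the resulting control socket is driven by ioctls such as HIDPCONNADD that require CAP_NET_RAW. A hedged userspace sketch of just the socket creation; BTPROTO_HIDP is defined inline (value taken from the kernel's bluetooth.h) so the sketch does not depend on the BlueZ development headers:

    /*
     * Hedged sketch of opening the raw HIDP control socket that
     * hidp_sock_create() services.
     */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef BTPROTO_HIDP
    #define BTPROTO_HIDP 6   /* matches include/net/bluetooth/bluetooth.h */
    #endif

    int main(void)
    {
        /* Any type other than SOCK_RAW gets -ESOCKTNOSUPPORT from
         * hidp_sock_create(). */
        int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HIDP);
        if (fd < 0) {
            perror("socket");
            return 1;
        }
        close(fd);
        return 0;
    }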
|