
Searched refs:sk (Results 1 – 200 of 588) sorted by relevance


/linux-4.4.14/include/net/
sock.h
97 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ argument
102 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) in SOCK_DEBUG() argument
453 void (*sk_state_change)(struct sock *sk);
454 void (*sk_data_ready)(struct sock *sk);
455 void (*sk_write_space)(struct sock *sk);
456 void (*sk_error_report)(struct sock *sk);
457 int (*sk_backlog_rcv)(struct sock *sk,
459 void (*sk_destruct)(struct sock *sk);
462 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) argument
464 #define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) argument
[all …]
llc_c_ev.h
128 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
129 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
131 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
132 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
133 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
134 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
135 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
136 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
137 int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
138 int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
[all …]
llc_c_ac.h
90 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
92 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
93 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
94 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
95 int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
96 int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
97 int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
98 int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
99 int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
101 int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
[all …]
inet_connection_sock.h
39 int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
40 void (*send_check)(struct sock *sk, struct sk_buff *skb);
41 int (*rebuild_header)(struct sock *sk);
42 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
43 int (*conn_request)(struct sock *sk, struct sk_buff *skb);
44 struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
52 int (*setsockopt)(struct sock *sk, int level, int optname,
54 int (*getsockopt)(struct sock *sk, int level, int optname,
57 int (*compat_setsockopt)(struct sock *sk,
60 int (*compat_getsockopt)(struct sock *sk,
[all …]
tcp.h
53 void tcp_time_wait(struct sock *sk, int state, int timeo);
293 static inline bool tcp_under_memory_pressure(const struct sock *sk) in tcp_under_memory_pressure() argument
295 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) in tcp_under_memory_pressure()
296 return !!sk->sk_cgrp->memory_pressure; in tcp_under_memory_pressure()
317 static inline bool tcp_out_of_memory(struct sock *sk) in tcp_out_of_memory() argument
319 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && in tcp_out_of_memory()
320 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) in tcp_out_of_memory()
325 void sk_forced_mem_schedule(struct sock *sk, int size);
327 static inline bool tcp_too_many_orphans(struct sock *sk, int shift) in tcp_too_many_orphans() argument
329 struct percpu_counter *ocp = sk->sk_prot->orphan_count; in tcp_too_many_orphans()
[all …]
inet_sock.h
103 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) in inet_rsk() argument
105 return (struct inet_request_sock *)sk; in inet_rsk()
108 static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) in inet_request_mark() argument
110 if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) in inet_request_mark()
113 return sk->sk_mark; in inet_request_mark()
160 struct sock sk; member
165 #define inet_daddr sk.__sk_common.skc_daddr
166 #define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
167 #define inet_dport sk.__sk_common.skc_dport
168 #define inet_num sk.__sk_common.skc_num
[all …]
ip.h
73 struct sock *sk; member
103 int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
110 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
111 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
112 int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
115 int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
116 int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
118 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
120 int ip_append_data(struct sock *sk, struct flowi4 *fl4,
129 ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
[all …]
busy_poll.h
50 static inline unsigned long sk_busy_loop_end_time(struct sock *sk) in sk_busy_loop_end_time() argument
52 return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec); in sk_busy_loop_end_time()
61 static inline bool sk_can_busy_loop(struct sock *sk) in sk_can_busy_loop() argument
63 return sk->sk_ll_usec && sk->sk_napi_id && in sk_can_busy_loop()
78 static inline bool sk_busy_loop(struct sock *sk, int nonblock) in sk_busy_loop() argument
80 unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0; in sk_busy_loop()
91 napi = napi_by_id(sk->sk_napi_id); in sk_busy_loop()
107 NET_ADD_STATS_BH(sock_net(sk), in sk_busy_loop()
111 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && in sk_busy_loop()
114 rc = !skb_queue_empty(&sk->sk_receive_queue); in sk_busy_loop()
[all …]
dn_nsp.h
18 void dn_nsp_send_data_ack(struct sock *sk);
19 void dn_nsp_send_oth_ack(struct sock *sk);
20 void dn_nsp_delayed_ack(struct sock *sk);
21 void dn_send_conn_ack(struct sock *sk);
22 void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
23 void dn_nsp_send_disc(struct sock *sk, unsigned char type,
27 void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
28 void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
30 void dn_nsp_output(struct sock *sk);
31 int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
[all …]
timewait_sock.h
22 int (*twsk_unique)(struct sock *sk,
24 void (*twsk_destructor)(struct sock *sk);
27 static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) in twsk_unique() argument
29 if (sk->sk_prot->twsk_prot->twsk_unique != NULL) in twsk_unique()
30 return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp); in twsk_unique()
34 static inline void twsk_destructor(struct sock *sk) in twsk_destructor() argument
36 if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) in twsk_destructor()
37 sk->sk_prot->twsk_prot->twsk_destructor(sk); in twsk_destructor()
route.h
46 #define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE)) argument
47 #define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE)) argument
127 const struct sock *sk);
149 struct sock *sk, in ip_route_output_ports() argument
154 flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos, in ip_route_output_ports()
156 sk ? inet_sk_flowi_flags(sk) : 0, in ip_route_output_ports()
158 if (sk) in ip_route_output_ports()
159 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); in ip_route_output_ports()
160 return ip_route_output_flow(net, fl4, sk); in ip_route_output_ports()
196 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
[all …]
inet_hashtables.h
187 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
196 static inline int inet_sk_listen_hashfn(const struct sock *sk) in inet_sk_listen_hashfn() argument
198 return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num); in inet_sk_listen_hashfn()
202 int __inet_inherit_port(const struct sock *sk, struct sock *child);
204 void inet_put_port(struct sock *sk);
208 bool inet_ehash_insert(struct sock *sk, struct sock *osk);
209 bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
210 void __inet_hash(struct sock *sk, struct sock *osk);
211 void inet_hash(struct sock *sk);
212 void inet_unhash(struct sock *sk);
[all …]
ping.h
34 int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
36 void (*ip6_datagram_recv_common_ctl)(struct sock *sk,
39 void (*ip6_datagram_recv_specific_ctl)(struct sock *sk,
43 void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err,
67 int ping_get_port(struct sock *sk, unsigned short ident);
68 void ping_hash(struct sock *sk);
69 void ping_unhash(struct sock *sk);
71 int ping_init_sock(struct sock *sk);
72 void ping_close(struct sock *sk, long timeout);
73 int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len);
[all …]
llc_conn.h
34 struct sock sk; member
83 static inline struct llc_sock *llc_sk(const struct sock *sk) in llc_sk() argument
85 return (struct llc_sock *)sk; in llc_sk()
100 void llc_sk_free(struct sock *sk);
102 void llc_sk_reset(struct sock *sk);
105 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
106 void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
107 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
108 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
109 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
[all …]
ip6_route.h
67 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
71 const struct sock *sk, in ip6_route_output() argument
74 return ip6_route_output_flags(net, sk, fl6, 0); in ip6_route_output()
120 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
124 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
144 static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, in ip6_dst_store() argument
148 struct ipv6_pinfo *np = inet6_sk(sk); in ip6_dst_store()
151 sk_setup_caps(sk, dst); in ip6_dst_store()
175 int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
180 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? in ip6_skb_dst_mtu()
[all …]
udp.h
131 static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb) in udp_csum_outgoing() argument
135 skb_queue_walk(&sk->sk_write_queue, skb) { in udp_csum_outgoing()
180 static inline void udp_lib_hash(struct sock *sk) in udp_lib_hash() argument
185 void udp_lib_unhash(struct sock *sk);
186 void udp_lib_rehash(struct sock *sk, u16 new_hash);
188 static inline void udp_lib_close(struct sock *sk, long timeout) in udp_lib_close() argument
190 sk_common_release(sk); in udp_lib_close()
193 int udp_lib_get_port(struct sock *sk, unsigned short snum,
237 int udp_get_port(struct sock *sk, unsigned short snum,
241 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
[all …]
ipv6.h
194 struct sock *sk; member
274 struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
278 void fl6_free_socklist(struct sock *sk);
279 int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
280 int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
293 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
296 int ip6_ra_control(struct sock *sk, int sel);
300 struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
302 struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
310 bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
[all …]
/linux-4.4.14/net/netrom/
nr_timer.c
38 void nr_init_timers(struct sock *sk) in nr_init_timers() argument
40 struct nr_sock *nr = nr_sk(sk); in nr_init_timers()
42 setup_timer(&nr->t1timer, nr_t1timer_expiry, (unsigned long)sk); in nr_init_timers()
43 setup_timer(&nr->t2timer, nr_t2timer_expiry, (unsigned long)sk); in nr_init_timers()
44 setup_timer(&nr->t4timer, nr_t4timer_expiry, (unsigned long)sk); in nr_init_timers()
45 setup_timer(&nr->idletimer, nr_idletimer_expiry, (unsigned long)sk); in nr_init_timers()
48 sk->sk_timer.data = (unsigned long)sk; in nr_init_timers()
49 sk->sk_timer.function = &nr_heartbeat_expiry; in nr_init_timers()
52 void nr_start_t1timer(struct sock *sk) in nr_start_t1timer() argument
54 struct nr_sock *nr = nr_sk(sk); in nr_start_t1timer()
[all …]
af_netrom.c
94 static void nr_remove_socket(struct sock *sk) in nr_remove_socket() argument
97 sk_del_node_init(sk); in nr_remove_socket()
137 static void nr_insert_socket(struct sock *sk) in nr_insert_socket() argument
140 sk_add_node(sk, &nr_list); in nr_insert_socket()
218 struct sock *sk; in nr_find_next_circuit() local
225 if ((sk=nr_find_socket(i, j)) == NULL) in nr_find_next_circuit()
227 bh_unlock_sock(sk); in nr_find_next_circuit()
246 struct sock *sk=(struct sock *)data; in nr_destroy_timer() local
247 bh_lock_sock(sk); in nr_destroy_timer()
248 sock_hold(sk); in nr_destroy_timer()
[all …]
nr_in.c
32 static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) in nr_queue_rx_frame() argument
35 struct nr_sock *nr = nr_sk(sk); in nr_queue_rx_frame()
39 nr_start_idletimer(sk); in nr_queue_rx_frame()
66 return sock_queue_rcv_skb(sk, skbn); in nr_queue_rx_frame()
74 static int nr_state1_machine(struct sock *sk, struct sk_buff *skb, in nr_state1_machine() argument
79 struct nr_sock *nr = nr_sk(sk); in nr_state1_machine()
81 nr_stop_t1timer(sk); in nr_state1_machine()
82 nr_start_idletimer(sk); in nr_state1_machine()
92 sk->sk_state = TCP_ESTABLISHED; in nr_state1_machine()
93 if (!sock_flag(sk, SOCK_DEAD)) in nr_state1_machine()
[all …]
nr_out.c
35 void nr_output(struct sock *sk, struct sk_buff *skb) in nr_output() argument
49 if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL) in nr_output()
67 skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ in nr_output()
72 skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ in nr_output()
75 nr_kick(sk); in nr_output()
82 static void nr_send_iframe(struct sock *sk, struct sk_buff *skb) in nr_send_iframe() argument
84 struct nr_sock *nr = nr_sk(sk); in nr_send_iframe()
95 nr_start_idletimer(sk); in nr_send_iframe()
97 nr_transmit_buffer(sk, skb); in nr_send_iframe()
100 void nr_send_nak_frame(struct sock *sk) in nr_send_nak_frame() argument
[all …]
nr_subr.c
34 void nr_clear_queues(struct sock *sk) in nr_clear_queues() argument
36 struct nr_sock *nr = nr_sk(sk); in nr_clear_queues()
38 skb_queue_purge(&sk->sk_write_queue); in nr_clear_queues()
49 void nr_frames_acked(struct sock *sk, unsigned short nr) in nr_frames_acked() argument
51 struct nr_sock *nrom = nr_sk(sk); in nr_frames_acked()
71 void nr_requeue_frames(struct sock *sk) in nr_requeue_frames() argument
75 while ((skb = skb_dequeue(&nr_sk(sk)->ack_queue)) != NULL) { in nr_requeue_frames()
77 skb_queue_head(&sk->sk_write_queue, skb); in nr_requeue_frames()
79 skb_append(skb_prev, skb, &sk->sk_write_queue); in nr_requeue_frames()
88 int nr_validate_nr(struct sock *sk, unsigned short nr) in nr_validate_nr() argument
[all …]
/linux-4.4.14/net/bluetooth/
sco.c
48 struct sock *sk; member
56 static void sco_sock_close(struct sock *sk);
57 static void sco_sock_kill(struct sock *sk);
60 #define sco_pi(sk) ((struct sco_pinfo *) sk) argument
77 struct sock *sk = (struct sock *)arg; in sco_sock_timeout() local
79 BT_DBG("sock %p state %d", sk, sk->sk_state); in sco_sock_timeout()
81 bh_lock_sock(sk); in sco_sock_timeout()
82 sk->sk_err = ETIMEDOUT; in sco_sock_timeout()
83 sk->sk_state_change(sk); in sco_sock_timeout()
84 bh_unlock_sock(sk); in sco_sock_timeout()
[all …]
l2cap_sock.c
44 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
82 struct sock *sk = sock->sk; in l2cap_sock_bind() local
83 struct l2cap_chan *chan = l2cap_pi(sk)->chan; in l2cap_sock_bind()
87 BT_DBG("sk %p", sk); in l2cap_sock_bind()
109 lock_sock(sk); in l2cap_sock_bind()
111 if (sk->sk_state != BT_OPEN) { in l2cap_sock_bind()
166 sk->sk_state = BT_BOUND; in l2cap_sock_bind()
169 release_sock(sk); in l2cap_sock_bind()
176 struct sock *sk = sock->sk; in l2cap_sock_connect() local
177 struct l2cap_chan *chan = l2cap_pi(sk)->chan; in l2cap_sock_connect()
[all …]
af_bluetooth.c
67 void bt_sock_reclassify_lock(struct sock *sk, int proto) in bt_sock_reclassify_lock() argument
69 BUG_ON(!sk); in bt_sock_reclassify_lock()
70 BUG_ON(sock_owned_by_user(sk)); in bt_sock_reclassify_lock()
72 sock_lock_init_class_and_name(sk, in bt_sock_reclassify_lock()
130 bt_sock_reclassify_lock(sock->sk, proto); in bt_sock_create()
139 void bt_sock_link(struct bt_sock_list *l, struct sock *sk) in bt_sock_link() argument
142 sk_add_node(sk, &l->head); in bt_sock_link()
147 void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) in bt_sock_unlink() argument
150 sk_del_node_init(sk); in bt_sock_unlink()
155 void bt_accept_enqueue(struct sock *parent, struct sock *sk) in bt_accept_enqueue() argument
[all …]
hci_sock.c
45 #define hci_pi(sk) ((struct hci_pinfo *) sk) argument
56 void hci_sock_set_flag(struct sock *sk, int nr) in hci_sock_set_flag() argument
58 set_bit(nr, &hci_pi(sk)->flags); in hci_sock_set_flag()
61 void hci_sock_clear_flag(struct sock *sk, int nr) in hci_sock_clear_flag() argument
63 clear_bit(nr, &hci_pi(sk)->flags); in hci_sock_clear_flag()
66 int hci_sock_test_flag(struct sock *sk, int nr) in hci_sock_test_flag() argument
68 return test_bit(nr, &hci_pi(sk)->flags); in hci_sock_test_flag()
71 unsigned short hci_sock_get_channel(struct sock *sk) in hci_sock_get_channel() argument
73 return hci_pi(sk)->channel; in hci_sock_get_channel()
115 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb) in is_filtered_packet() argument
[all …]
mgmt.c
279 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, in read_version() argument
284 BT_DBG("sock %p", sk); in read_version()
289 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, in read_version()
293 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, in read_commands() argument
301 BT_DBG("sock %p", sk); in read_commands()
303 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) { in read_commands()
320 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) { in read_commands()
338 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, in read_commands()
345 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, in read_index_list() argument
354 BT_DBG("sock %p", sk); in read_index_list()
[all …]
/linux-4.4.14/net/ipv4/
tcp_timer.c
35 static void tcp_write_err(struct sock *sk) in tcp_write_err() argument
37 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; in tcp_write_err()
38 sk->sk_error_report(sk); in tcp_write_err()
40 tcp_done(sk); in tcp_write_err()
41 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); in tcp_write_err()
55 static int tcp_out_of_resources(struct sock *sk, bool do_reset) in tcp_out_of_resources() argument
57 struct tcp_sock *tp = tcp_sk(sk); in tcp_out_of_resources()
66 if (sk->sk_err_soft) in tcp_out_of_resources()
69 if (tcp_check_oom(sk, shift)) { in tcp_out_of_resources()
77 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_out_of_resources()
[all …]
inet_hashtables.c
43 u32 sk_ehashfn(const struct sock *sk) in sk_ehashfn() argument
46 if (sk->sk_family == AF_INET6 && in sk_ehashfn()
47 !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) in sk_ehashfn()
48 return inet6_ehashfn(sock_net(sk), in sk_ehashfn()
49 &sk->sk_v6_rcv_saddr, sk->sk_num, in sk_ehashfn()
50 &sk->sk_v6_daddr, sk->sk_dport); in sk_ehashfn()
52 return inet_ehashfn(sock_net(sk), in sk_ehashfn()
53 sk->sk_rcv_saddr, sk->sk_num, in sk_ehashfn()
54 sk->sk_daddr, sk->sk_dport); in sk_ehashfn()
91 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, in inet_bind_hash() argument
[all …]
tcp.c
329 void tcp_enter_memory_pressure(struct sock *sk) in tcp_enter_memory_pressure() argument
332 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); in tcp_enter_memory_pressure()
380 void tcp_init_sock(struct sock *sk) in tcp_init_sock() argument
382 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_sock()
383 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock()
386 tcp_init_xmit_timers(sk); in tcp_init_sock()
411 tcp_assign_congestion_control(sk); in tcp_init_sock()
415 sk->sk_state = TCP_CLOSE; in tcp_init_sock()
417 sk->sk_write_space = sk_stream_write_space; in tcp_init_sock()
418 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); in tcp_init_sock()
[all …]
inet_connection_sock.c
46 int inet_csk_bind_conflict(const struct sock *sk, in inet_csk_bind_conflict() argument
50 int reuse = sk->sk_reuse; in inet_csk_bind_conflict()
51 int reuseport = sk->sk_reuseport; in inet_csk_bind_conflict()
52 kuid_t uid = sock_i_uid((struct sock *)sk); in inet_csk_bind_conflict()
62 if (sk != sk2 && in inet_csk_bind_conflict()
64 (!sk->sk_bound_dev_if || in inet_csk_bind_conflict()
66 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { in inet_csk_bind_conflict()
73 if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr || in inet_csk_bind_conflict()
74 sk2->sk_rcv_saddr == sk->sk_rcv_saddr) in inet_csk_bind_conflict()
80 if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr || in inet_csk_bind_conflict()
[all …]
tcp_input.c
132 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) in tcp_measure_rcv_mss() argument
134 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
165 len -= tcp_sk(sk)->tcp_header_len; in tcp_measure_rcv_mss()
178 static void tcp_incr_quickack(struct sock *sk) in tcp_incr_quickack() argument
180 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
181 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
189 static void tcp_enter_quickack_mode(struct sock *sk) in tcp_enter_quickack_mode() argument
191 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
192 tcp_incr_quickack(sk); in tcp_enter_quickack_mode()
201 static bool tcp_in_quickack_mode(struct sock *sk) in tcp_in_quickack_mode() argument
[all …]
tcp_output.c
68 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
72 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) in tcp_event_new_data_sent() argument
74 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent()
75 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent()
78 tcp_advance_send_head(sk, skb); in tcp_event_new_data_sent()
84 tcp_rearm_rto(sk); in tcp_event_new_data_sent()
87 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, in tcp_event_new_data_sent()
97 static inline __u32 tcp_acceptable_seq(const struct sock *sk) in tcp_acceptable_seq() argument
99 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq()
121 static __u16 tcp_advertise_mss(struct sock *sk) in tcp_advertise_mss() argument
[all …]
udp.c
138 struct sock *sk, in udp_lib_lport_inuse() argument
145 kuid_t uid = sock_i_uid(sk); in udp_lib_lport_inuse()
149 sk2 != sk && in udp_lib_lport_inuse()
151 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse()
152 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse()
153 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse()
154 (!sk2->sk_reuseport || !sk->sk_reuseport || in udp_lib_lport_inuse()
156 saddr_comp(sk, sk2)) { in udp_lib_lport_inuse()
171 struct sock *sk, in udp_lib_lport_inuse2() argument
177 kuid_t uid = sock_i_uid(sk); in udp_lib_lport_inuse2()
[all …]
ping.c
82 int ping_get_port(struct sock *sk, unsigned short ident) in ping_get_port() argument
89 isk = inet_sk(sk); in ping_get_port()
98 hlist = ping_hashslot(&ping_table, sock_net(sk), in ping_get_port()
116 hlist = ping_hashslot(&ping_table, sock_net(sk), ident); in ping_get_port()
125 (sk2 != sk) && in ping_get_port()
126 (!sk2->sk_reuse || !sk->sk_reuse)) in ping_get_port()
133 if (sk_unhashed(sk)) { in ping_get_port()
135 sock_hold(sk); in ping_get_port()
136 hlist_nulls_add_head(&sk->sk_nulls_node, hlist); in ping_get_port()
137 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in ping_get_port()
[all …]
raw.c
96 void raw_hash_sk(struct sock *sk) in raw_hash_sk() argument
98 struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; in raw_hash_sk()
101 head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; in raw_hash_sk()
104 sk_add_node(sk, head); in raw_hash_sk()
105 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in raw_hash_sk()
110 void raw_unhash_sk(struct sock *sk) in raw_unhash_sk() argument
112 struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; in raw_unhash_sk()
115 if (sk_del_node_init(sk)) in raw_unhash_sk()
116 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); in raw_unhash_sk()
121 static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, in __raw_v4_lookup() argument
[all …]
af_inet.c
133 void inet_sock_destruct(struct sock *sk) in inet_sock_destruct() argument
135 struct inet_sock *inet = inet_sk(sk); in inet_sock_destruct()
137 __skb_queue_purge(&sk->sk_receive_queue); in inet_sock_destruct()
138 __skb_queue_purge(&sk->sk_error_queue); in inet_sock_destruct()
140 sk_mem_reclaim(sk); in inet_sock_destruct()
142 if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { in inet_sock_destruct()
144 sk->sk_state, sk); in inet_sock_destruct()
147 if (!sock_flag(sk, SOCK_DEAD)) { in inet_sock_destruct()
148 pr_err("Attempt to release alive inet socket %p\n", sk); in inet_sock_destruct()
152 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in inet_sock_destruct()
[all …]
tcp_ipv4.c
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) in tcp_twsk_unique() argument
111 struct tcp_sock *tp = tcp_sk(sk); in tcp_twsk_unique()
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) in tcp_v4_connect() argument
144 struct inet_sock *inet = inet_sk(sk); in tcp_v4_connect()
145 struct tcp_sock *tp = tcp_sk(sk); in tcp_v4_connect()
161 sock_owned_by_user(sk)); in tcp_v4_connect()
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, in tcp_v4_connect()
174 orig_sport, orig_dport, sk); in tcp_v4_connect()
178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); in tcp_v4_connect()
192 sk_rcv_saddr_set(sk, inet->inet_saddr); in tcp_v4_connect()
[all …]
datagram.c
23 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) in __ip4_datagram_connect() argument
25 struct inet_sock *inet = inet_sk(sk); in __ip4_datagram_connect()
40 sk_dst_reset(sk); in __ip4_datagram_connect()
42 oif = sk->sk_bound_dev_if; in __ip4_datagram_connect()
52 RT_CONN_FLAGS(sk), oif, in __ip4_datagram_connect()
53 sk->sk_protocol, in __ip4_datagram_connect()
54 inet->inet_sport, usin->sin_port, sk); in __ip4_datagram_connect()
58 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); in __ip4_datagram_connect()
62 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) { in __ip4_datagram_connect()
71 if (sk->sk_prot->rehash) in __ip4_datagram_connect()
[all …]
tcp_dctcp.c
84 static void dctcp_init(struct sock *sk) in dctcp_init() argument
86 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_init()
89 (sk->sk_state == TCP_LISTEN || in dctcp_init()
90 sk->sk_state == TCP_CLOSE)) { in dctcp_init()
91 struct dctcp *ca = inet_csk_ca(sk); in dctcp_init()
108 inet_csk(sk)->icsk_ca_ops = &dctcp_reno; in dctcp_init()
109 INET_ECN_dontxmit(sk); in dctcp_init()
112 static u32 dctcp_ssthresh(struct sock *sk) in dctcp_ssthresh() argument
114 const struct dctcp *ca = inet_csk_ca(sk); in dctcp_ssthresh()
115 struct tcp_sock *tp = tcp_sk(sk); in dctcp_ssthresh()
[all …]
tcp_westwood.c
60 static void tcp_westwood_init(struct sock *sk) in tcp_westwood_init() argument
62 struct westwood *w = inet_csk_ca(sk); in tcp_westwood_init()
72 w->snd_una = tcp_sk(sk)->snd_una; in tcp_westwood_init()
102 static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt) in tcp_westwood_pkts_acked() argument
104 struct westwood *w = inet_csk_ca(sk); in tcp_westwood_pkts_acked()
115 static void westwood_update_window(struct sock *sk) in westwood_update_window() argument
117 struct westwood *w = inet_csk_ca(sk); in westwood_update_window()
125 w->snd_una = tcp_sk(sk)->snd_una; in westwood_update_window()
161 static inline void westwood_fast_bw(struct sock *sk) in westwood_fast_bw() argument
163 const struct tcp_sock *tp = tcp_sk(sk); in westwood_fast_bw()
[all …]
tcp_veno.c
44 static inline void veno_enable(struct sock *sk) in veno_enable() argument
46 struct veno *veno = inet_csk_ca(sk); in veno_enable()
54 static inline void veno_disable(struct sock *sk) in veno_disable() argument
56 struct veno *veno = inet_csk_ca(sk); in veno_disable()
62 static void tcp_veno_init(struct sock *sk) in tcp_veno_init() argument
64 struct veno *veno = inet_csk_ca(sk); in tcp_veno_init()
68 veno_enable(sk); in tcp_veno_init()
72 static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us) in tcp_veno_pkts_acked() argument
74 struct veno *veno = inet_csk_ca(sk); in tcp_veno_pkts_acked()
94 static void tcp_veno_state(struct sock *sk, u8 ca_state) in tcp_veno_state() argument
[all …]
ip_output.c
87 ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
99 int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) in __ip_local_out() argument
106 net, sk, skb, NULL, skb_dst(skb)->dev, in __ip_local_out()
110 int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_local_out() argument
114 err = __ip_local_out(net, sk, skb); in ip_local_out()
116 err = dst_output(net, sk, skb); in ip_local_out()
135 int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, in ip_build_and_send_pkt() argument
138 struct inet_sock *inet = inet_sk(sk); in ip_build_and_send_pkt()
140 struct net *net = sock_net(sk); in ip_build_and_send_pkt()
153 iph->protocol = sk->sk_protocol; in ip_build_and_send_pkt()
[all …]
inet_diag.c
69 static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk) in inet_diag_msg_common_fill() argument
71 r->idiag_family = sk->sk_family; in inet_diag_msg_common_fill()
73 r->id.idiag_sport = htons(sk->sk_num); in inet_diag_msg_common_fill()
74 r->id.idiag_dport = sk->sk_dport; in inet_diag_msg_common_fill()
75 r->id.idiag_if = sk->sk_bound_dev_if; in inet_diag_msg_common_fill()
76 sock_diag_save_cookie(sk, r->id.idiag_cookie); in inet_diag_msg_common_fill()
79 if (sk->sk_family == AF_INET6) { in inet_diag_msg_common_fill()
80 *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr; in inet_diag_msg_common_fill()
81 *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr; in inet_diag_msg_common_fill()
88 r->id.idiag_src[0] = sk->sk_rcv_saddr; in inet_diag_msg_common_fill()
[all …]
tcp_vegas.c
70 static void vegas_enable(struct sock *sk) in vegas_enable() argument
72 const struct tcp_sock *tp = tcp_sk(sk); in vegas_enable()
73 struct vegas *vegas = inet_csk_ca(sk); in vegas_enable()
86 static inline void vegas_disable(struct sock *sk) in vegas_disable() argument
88 struct vegas *vegas = inet_csk_ca(sk); in vegas_disable()
93 void tcp_vegas_init(struct sock *sk) in tcp_vegas_init() argument
95 struct vegas *vegas = inet_csk_ca(sk); in tcp_vegas_init()
98 vegas_enable(sk); in tcp_vegas_init()
110 void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us) in tcp_vegas_pkts_acked() argument
112 struct vegas *vegas = inet_csk_ca(sk); in tcp_vegas_pkts_acked()
[all …]
ip_sockglue.c
157 struct inet_sock *inet = inet_sk(skb->sk); in ip_cmsg_recv_offset()
317 int ip_ra_control(struct sock *sk, unsigned char on, in ip_ra_control() argument
323 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW) in ip_ra_control()
333 if (ra->sk == sk) { in ip_ra_control()
340 ra->sk = NULL; in ip_ra_control()
345 ra->destructor(sk); in ip_ra_control()
351 ra->saved_sk = sk; in ip_ra_control()
360 new_ra->sk = sk; in ip_ra_control()
365 sock_hold(sk); in ip_ra_control()
371 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, in ip_icmp_error() argument
[all …]
tcp_cdg.c
138 static void tcp_cdg_hystart_update(struct sock *sk) in tcp_cdg_hystart_update() argument
140 struct cdg *ca = inet_csk_ca(sk); in tcp_cdg_hystart_update()
141 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_hystart_update()
150 if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) { in tcp_cdg_hystart_update()
158 NET_INC_STATS_BH(sock_net(sk), in tcp_cdg_hystart_update()
160 NET_ADD_STATS_BH(sock_net(sk), in tcp_cdg_hystart_update()
177 NET_INC_STATS_BH(sock_net(sk), in tcp_cdg_hystart_update()
179 NET_ADD_STATS_BH(sock_net(sk), in tcp_cdg_hystart_update()
239 static bool tcp_cdg_backoff(struct sock *sk, u32 grad) in tcp_cdg_backoff() argument
241 struct cdg *ca = inet_csk_ca(sk); in tcp_cdg_backoff()
[all …]
udp_diag.c
20 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, in sk_diag_dump() argument
25 if (!inet_diag_bc_sk(bc, sk)) in sk_diag_dump()
28 return inet_sk_diag_fill(sk, NULL, skb, req, in sk_diag_dump()
29 sk_user_ns(NETLINK_CB(cb->skb).sk), in sk_diag_dump()
39 struct sock *sk; in udp_dump_one() local
41 struct net *net = sock_net(in_skb->sk); in udp_dump_one()
44 sk = __udp4_lib_lookup(net, in udp_dump_one()
50 sk = __udp6_lib_lookup(net, in udp_dump_one()
61 if (!sk) in udp_dump_one()
64 err = sock_diag_check_cookie(sk, req->id.idiag_cookie); in udp_dump_one()
[all …]
tcp_cubic.c
129 static inline void bictcp_hystart_reset(struct sock *sk) in bictcp_hystart_reset() argument
131 struct tcp_sock *tp = tcp_sk(sk); in bictcp_hystart_reset()
132 struct bictcp *ca = inet_csk_ca(sk); in bictcp_hystart_reset()
140 static void bictcp_init(struct sock *sk) in bictcp_init() argument
142 struct bictcp *ca = inet_csk_ca(sk); in bictcp_init()
148 bictcp_hystart_reset(sk); in bictcp_init()
151 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; in bictcp_init()
154 static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) in bictcp_cwnd_event() argument
157 struct bictcp *ca = inet_csk_ca(sk); in bictcp_cwnd_event()
161 delta = now - tcp_sk(sk)->lsndtime; in bictcp_cwnd_event()
[all …]
tcp_illinois.c
56 static void rtt_reset(struct sock *sk) in rtt_reset() argument
58 struct tcp_sock *tp = tcp_sk(sk); in rtt_reset()
59 struct illinois *ca = inet_csk_ca(sk); in rtt_reset()
68 static void tcp_illinois_init(struct sock *sk) in tcp_illinois_init() argument
70 struct illinois *ca = inet_csk_ca(sk); in tcp_illinois_init()
81 rtt_reset(sk); in tcp_illinois_init()
85 static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt) in tcp_illinois_acked() argument
87 struct illinois *ca = inet_csk_ca(sk); in tcp_illinois_acked()
220 static void update_params(struct sock *sk) in update_params() argument
222 struct tcp_sock *tp = tcp_sk(sk); in update_params()
[all …]
tcp_cong.c
153 void tcp_assign_congestion_control(struct sock *sk) in tcp_assign_congestion_control() argument
155 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_assign_congestion_control()
177 INET_ECN_xmit(sk); in tcp_assign_congestion_control()
179 INET_ECN_dontxmit(sk); in tcp_assign_congestion_control()
182 void tcp_init_congestion_control(struct sock *sk) in tcp_init_congestion_control() argument
184 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_congestion_control()
187 icsk->icsk_ca_ops->init(sk); in tcp_init_congestion_control()
188 if (tcp_ca_needs_ecn(sk)) in tcp_init_congestion_control()
189 INET_ECN_xmit(sk); in tcp_init_congestion_control()
191 INET_ECN_dontxmit(sk); in tcp_init_congestion_control()
[all …]
tcp_hybla.c
33 static inline void hybla_recalc_param (struct sock *sk) in hybla_recalc_param() argument
35 struct hybla *ca = inet_csk_ca(sk); in hybla_recalc_param()
38 tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC), in hybla_recalc_param()
45 static void hybla_init(struct sock *sk) in hybla_init() argument
47 struct tcp_sock *tp = tcp_sk(sk); in hybla_init()
48 struct hybla *ca = inet_csk_ca(sk); in hybla_init()
60 hybla_recalc_param(sk); in hybla_init()
67 static void hybla_state(struct sock *sk, u8 ca_state) in hybla_state() argument
69 struct hybla *ca = inet_csk_ca(sk); in hybla_state()
89 static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked) in hybla_cong_avoid() argument
[all …]
tcp_htcp.c
67 static u32 htcp_cwnd_undo(struct sock *sk) in htcp_cwnd_undo() argument
69 const struct tcp_sock *tp = tcp_sk(sk); in htcp_cwnd_undo()
70 struct htcp *ca = inet_csk_ca(sk); in htcp_cwnd_undo()
82 static inline void measure_rtt(struct sock *sk, u32 srtt) in measure_rtt() argument
84 const struct inet_connection_sock *icsk = inet_csk(sk); in measure_rtt()
85 struct htcp *ca = inet_csk_ca(sk); in measure_rtt()
101 static void measure_achieved_throughput(struct sock *sk, in measure_achieved_throughput() argument
104 const struct inet_connection_sock *icsk = inet_csk(sk); in measure_achieved_throughput()
105 const struct tcp_sock *tp = tcp_sk(sk); in measure_achieved_throughput()
106 struct htcp *ca = inet_csk_ca(sk); in measure_achieved_throughput()
[all …]
tcp_minisocks.c
267 void tcp_time_wait(struct sock *sk, int state, int timeo) in tcp_time_wait() argument
269 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_time_wait()
270 const struct tcp_sock *tp = tcp_sk(sk); in tcp_time_wait()
275 recycle_ok = tcp_remember_stamp(sk); in tcp_time_wait()
277 tw = inet_twsk_alloc(sk, &tcp_death_row, state); in tcp_time_wait()
282 struct inet_sock *inet = inet_sk(sk); in tcp_time_wait()
296 struct ipv6_pinfo *np = inet6_sk(sk); in tcp_time_wait()
298 tw->tw_v6_daddr = sk->sk_v6_daddr; in tcp_time_wait()
299 tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr; in tcp_time_wait()
302 tw->tw_ipv6only = sk->sk_ipv6only; in tcp_time_wait()
[all …]
tcp_bic.c
70 static void bictcp_init(struct sock *sk) in bictcp_init() argument
72 struct bictcp *ca = inet_csk_ca(sk); in bictcp_init()
78 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; in bictcp_init()
141 static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in bictcp_cong_avoid() argument
143 struct tcp_sock *tp = tcp_sk(sk); in bictcp_cong_avoid()
144 struct bictcp *ca = inet_csk_ca(sk); in bictcp_cong_avoid()
146 if (!tcp_is_cwnd_limited(sk)) in bictcp_cong_avoid()
161 static u32 bictcp_recalc_ssthresh(struct sock *sk) in bictcp_recalc_ssthresh() argument
163 const struct tcp_sock *tp = tcp_sk(sk); in bictcp_recalc_ssthresh()
164 struct bictcp *ca = inet_csk_ca(sk); in bictcp_recalc_ssthresh()
[all …]
inet_timewait_sock.c
101 void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, in __inet_twsk_hashdance() argument
104 const struct inet_sock *inet = inet_sk(sk); in __inet_twsk_hashdance()
105 const struct inet_connection_sock *icsk = inet_csk(sk); in __inet_twsk_hashdance()
106 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash); in __inet_twsk_hashdance()
107 spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); in __inet_twsk_hashdance()
138 if (__sk_nulls_del_node_init_rcu(sk)) in __inet_twsk_hashdance()
139 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); in __inet_twsk_hashdance()
156 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, in inet_twsk_alloc() argument
165 tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, in inet_twsk_alloc()
168 const struct inet_sock *inet = inet_sk(sk); in inet_twsk_alloc()
[all …]
tcp_lp.c
95 static void tcp_lp_init(struct sock *sk) in tcp_lp_init() argument
97 struct lp *lp = inet_csk_ca(sk); in tcp_lp_init()
118 static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_lp_cong_avoid() argument
120 struct lp *lp = inet_csk_ca(sk); in tcp_lp_cong_avoid()
123 tcp_reno_cong_avoid(sk, ack, acked); in tcp_lp_cong_avoid()
133 static u32 tcp_lp_remote_hz_estimator(struct sock *sk) in tcp_lp_remote_hz_estimator() argument
135 struct tcp_sock *tp = tcp_sk(sk); in tcp_lp_remote_hz_estimator()
136 struct lp *lp = inet_csk_ca(sk); in tcp_lp_remote_hz_estimator()
186 static u32 tcp_lp_owd_calculator(struct sock *sk) in tcp_lp_owd_calculator() argument
188 struct tcp_sock *tp = tcp_sk(sk); in tcp_lp_owd_calculator()
[all …]
udp_impl.h
11 int udp_v4_get_port(struct sock *sk, unsigned short snum);
13 int udp_setsockopt(struct sock *sk, int level, int optname,
15 int udp_getsockopt(struct sock *sk, int level, int optname,
19 int compat_udp_setsockopt(struct sock *sk, int level, int optname,
21 int compat_udp_getsockopt(struct sock *sk, int level, int optname,
24 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
26 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
28 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
29 void udp_destroy_sock(struct sock *sk);
tcp_yeah.c
42 static void tcp_yeah_init(struct sock *sk) in tcp_yeah_init() argument
44 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_init()
45 struct yeah *yeah = inet_csk_ca(sk); in tcp_yeah_init()
47 tcp_vegas_init(sk); in tcp_yeah_init()
59 static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us) in tcp_yeah_pkts_acked() argument
61 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_yeah_pkts_acked()
62 struct yeah *yeah = inet_csk_ca(sk); in tcp_yeah_pkts_acked()
67 tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us); in tcp_yeah_pkts_acked()
70 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_yeah_cong_avoid() argument
72 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_cong_avoid()
[all …]
tcp_highspeed.c
99 static void hstcp_init(struct sock *sk) in hstcp_init() argument
101 struct tcp_sock *tp = tcp_sk(sk); in hstcp_init()
102 struct hstcp *ca = inet_csk_ca(sk); in hstcp_init()
111 static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in hstcp_cong_avoid() argument
113 struct tcp_sock *tp = tcp_sk(sk); in hstcp_cong_avoid()
114 struct hstcp *ca = inet_csk_ca(sk); in hstcp_cong_avoid()
116 if (!tcp_is_cwnd_limited(sk)) in hstcp_cong_avoid()
150 static u32 hstcp_ssthresh(struct sock *sk) in hstcp_ssthresh() argument
152 const struct tcp_sock *tp = tcp_sk(sk); in hstcp_ssthresh()
153 const struct hstcp *ca = inet_csk_ca(sk); in hstcp_ssthresh()
xfrm4_output.c
35 if (skb->sk) in xfrm4_tunnel_check_size()
74 int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb) in xfrm4_output_finish() argument
82 return xfrm_output(sk, skb); in xfrm4_output_finish()
85 static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb) in __xfrm4_output() argument
92 return dst_output(net, sk, skb); in __xfrm4_output()
96 return x->outer_mode->afinfo->output_finish(sk, skb); in __xfrm4_output()
99 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb) in xfrm4_output() argument
102 net, sk, skb, NULL, skb_dst(skb)->dev, in xfrm4_output()
112 ip_local_error(skb->sk, EMSGSIZE, hdr->daddr, in xfrm4_local_error()
113 inet_sk(skb->sk)->inet_dport, mtu); in xfrm4_local_error()
udp_tunnel.c
41 sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; in udp_sock_create4()
59 struct sock *sk = sock->sk; in setup_udp_tunnel_sock() local
62 inet_sk(sk)->mc_loop = 0; in setup_udp_tunnel_sock()
65 inet_inc_convert_csum(sk); in setup_udp_tunnel_sock()
67 rcu_assign_sk_user_data(sk, cfg->sk_user_data); in setup_udp_tunnel_sock()
69 udp_sk(sk)->encap_type = cfg->encap_type; in setup_udp_tunnel_sock()
70 udp_sk(sk)->encap_rcv = cfg->encap_rcv; in setup_udp_tunnel_sock()
71 udp_sk(sk)->encap_destroy = cfg->encap_destroy; in setup_udp_tunnel_sock()
77 int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, in udp_tunnel_xmit_skb() argument
96 return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, in udp_tunnel_xmit_skb()
[all …]
syncookies.c
218 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, in tcp_get_cookie_sock() argument
222 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_get_cookie_sock()
226 child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst, in tcp_get_cookie_sock()
231 inet_csk_reqsk_queue_add(sk, req, child); in tcp_get_cookie_sock()
294 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) in cookie_v4_check() argument
300 struct tcp_sock *tp = tcp_sk(sk); in cookie_v4_check()
303 struct sock *ret = sk; in cookie_v4_check()
313 if (tcp_synq_no_recent_overflow(sk)) in cookie_v4_check()
318 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); in cookie_v4_check()
322 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); in cookie_v4_check()
[all …]
/linux-4.4.14/net/bluetooth/rfcomm/
sock.c
42 static void rfcomm_sock_close(struct sock *sk);
43 static void rfcomm_sock_kill(struct sock *sk);
51 struct sock *sk = d->owner; in rfcomm_sk_data_ready() local
52 if (!sk) in rfcomm_sk_data_ready()
55 atomic_add(skb->len, &sk->sk_rmem_alloc); in rfcomm_sk_data_ready()
56 skb_queue_tail(&sk->sk_receive_queue, skb); in rfcomm_sk_data_ready()
57 sk->sk_data_ready(sk); in rfcomm_sk_data_ready()
59 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in rfcomm_sk_data_ready()
65 struct sock *sk = d->owner, *parent; in rfcomm_sk_state_change() local
68 if (!sk) in rfcomm_sk_state_change()
[all …]
/linux-4.4.14/net/core/
sock.c
159 bool sk_ns_capable(const struct sock *sk, in sk_ns_capable() argument
162 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && in sk_ns_capable()
176 bool sk_capable(const struct sock *sk, int cap) in sk_capable() argument
178 return sk_ns_capable(sk, &init_user_ns, cap); in sk_capable()
191 bool sk_net_capable(const struct sock *sk, int cap) in sk_net_capable() argument
193 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); in sk_net_capable()
342 void sk_set_memalloc(struct sock *sk) in sk_set_memalloc() argument
344 sock_set_flag(sk, SOCK_MEMALLOC); in sk_set_memalloc()
345 sk->sk_allocation |= __GFP_MEMALLOC; in sk_set_memalloc()
350 void sk_clear_memalloc(struct sock *sk) in sk_clear_memalloc() argument
[all …]
stream.c
28 void sk_stream_write_space(struct sock *sk) in sk_stream_write_space() argument
30 struct socket *sock = sk->sk_socket; in sk_stream_write_space()
33 if (sk_stream_is_writeable(sk) && sock) { in sk_stream_write_space()
37 wq = rcu_dereference(sk->sk_wq); in sk_stream_write_space()
41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) in sk_stream_write_space()
55 int sk_stream_wait_connect(struct sock *sk, long *timeo_p) in sk_stream_wait_connect() argument
62 int err = sock_error(sk); in sk_stream_wait_connect()
65 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) in sk_stream_wait_connect()
72 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in sk_stream_wait_connect()
73 sk->sk_write_pending++; in sk_stream_wait_connect()
[all …]
sock_diag.c
22 static u64 sock_gen_cookie(struct sock *sk) in sock_gen_cookie() argument
25 u64 res = atomic64_read(&sk->sk_cookie); in sock_gen_cookie()
29 res = atomic64_inc_return(&sock_net(sk)->cookie_gen); in sock_gen_cookie()
30 atomic64_cmpxchg(&sk->sk_cookie, 0, res); in sock_gen_cookie()
34 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie) in sock_diag_check_cookie() argument
41 res = sock_gen_cookie(sk); in sock_diag_check_cookie()
49 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie) in sock_diag_save_cookie() argument
51 u64 res = sock_gen_cookie(sk); in sock_diag_save_cookie()
58 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) in sock_diag_put_meminfo() argument
62 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); in sock_diag_put_meminfo()
[all …]
datagram.c
66 static inline int connection_based(struct sock *sk) in connection_based() argument
68 return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; in connection_based()
86 static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, in wait_for_more_packets() argument
92 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in wait_for_more_packets()
95 error = sock_error(sk); in wait_for_more_packets()
99 if (sk->sk_receive_queue.prev != skb) in wait_for_more_packets()
103 if (sk->sk_shutdown & RCV_SHUTDOWN) in wait_for_more_packets()
110 if (connection_based(sk) && in wait_for_more_packets()
111 !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN)) in wait_for_more_packets()
121 finish_wait(sk_sleep(sk), &wait); in wait_for_more_packets()
[all …]
/linux-4.4.14/net/rose/
rose_timer.c
35 void rose_start_heartbeat(struct sock *sk) in rose_start_heartbeat() argument
37 del_timer(&sk->sk_timer); in rose_start_heartbeat()
39 sk->sk_timer.data = (unsigned long)sk; in rose_start_heartbeat()
40 sk->sk_timer.function = &rose_heartbeat_expiry; in rose_start_heartbeat()
41 sk->sk_timer.expires = jiffies + 5 * HZ; in rose_start_heartbeat()
43 add_timer(&sk->sk_timer); in rose_start_heartbeat()
46 void rose_start_t1timer(struct sock *sk) in rose_start_t1timer() argument
48 struct rose_sock *rose = rose_sk(sk); in rose_start_t1timer()
52 rose->timer.data = (unsigned long)sk; in rose_start_t1timer()
59 void rose_start_t2timer(struct sock *sk) in rose_start_t2timer() argument
[all …]
rose_in.c
39 static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) in rose_state1_machine() argument
41 struct rose_sock *rose = rose_sk(sk); in rose_state1_machine()
45 rose_stop_timer(sk); in rose_state1_machine()
46 rose_start_idletimer(sk); in rose_state1_machine()
53 sk->sk_state = TCP_ESTABLISHED; in rose_state1_machine()
54 if (!sock_flag(sk, SOCK_DEAD)) in rose_state1_machine()
55 sk->sk_state_change(sk); in rose_state1_machine()
59 rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION); in rose_state1_machine()
60 rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); in rose_state1_machine()
76 static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) in rose_state2_machine() argument
[all …]
af_rose.c
154 static void rose_remove_socket(struct sock *sk) in rose_remove_socket() argument
157 sk_del_node_init(sk); in rose_remove_socket()
233 static void rose_insert_socket(struct sock *sk) in rose_insert_socket() argument
237 sk_add_node(sk, &rose_list); in rose_insert_socket()
332 void rose_destroy_socket(struct sock *sk) in rose_destroy_socket() argument
336 rose_remove_socket(sk); in rose_destroy_socket()
337 rose_stop_heartbeat(sk); in rose_destroy_socket()
338 rose_stop_idletimer(sk); in rose_destroy_socket()
339 rose_stop_timer(sk); in rose_destroy_socket()
341 rose_clear_queues(sk); /* Flush the queues */ in rose_destroy_socket()
[all …]
rose_out.c
33 static void rose_send_iframe(struct sock *sk, struct sk_buff *skb) in rose_send_iframe() argument
35 struct rose_sock *rose = rose_sk(sk); in rose_send_iframe()
43 rose_start_idletimer(sk); in rose_send_iframe()
48 void rose_kick(struct sock *sk) in rose_kick() argument
50 struct rose_sock *rose = rose_sk(sk); in rose_kick()
60 if (!skb_peek(&sk->sk_write_queue)) in rose_kick()
76 skb = skb_dequeue(&sk->sk_write_queue); in rose_kick()
80 skb_queue_head(&sk->sk_write_queue, skb); in rose_kick()
84 skb_set_owner_w(skbn, sk); in rose_kick()
89 rose_send_iframe(sk, skbn); in rose_kick()
[all …]
rose_subr.c
35 void rose_clear_queues(struct sock *sk) in rose_clear_queues() argument
37 skb_queue_purge(&sk->sk_write_queue); in rose_clear_queues()
38 skb_queue_purge(&rose_sk(sk)->ack_queue); in rose_clear_queues()
46 void rose_frames_acked(struct sock *sk, unsigned short nr) in rose_frames_acked() argument
49 struct rose_sock *rose = rose_sk(sk); in rose_frames_acked()
63 void rose_requeue_frames(struct sock *sk) in rose_requeue_frames() argument
72 while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) { in rose_requeue_frames()
74 skb_queue_head(&sk->sk_write_queue, skb); in rose_requeue_frames()
76 skb_append(skb_prev, skb, &sk->sk_write_queue); in rose_requeue_frames()
85 int rose_validate_nr(struct sock *sk, unsigned short nr) in rose_validate_nr() argument
[all …]
/linux-4.4.14/net/x25/
x25_timer.c
32 void x25_init_timers(struct sock *sk) in x25_init_timers() argument
34 struct x25_sock *x25 = x25_sk(sk); in x25_init_timers()
36 setup_timer(&x25->timer, x25_timer_expiry, (unsigned long)sk); in x25_init_timers()
39 sk->sk_timer.data = (unsigned long)sk; in x25_init_timers()
40 sk->sk_timer.function = &x25_heartbeat_expiry; in x25_init_timers()
43 void x25_start_heartbeat(struct sock *sk) in x25_start_heartbeat() argument
45 mod_timer(&sk->sk_timer, jiffies + 5 * HZ); in x25_start_heartbeat()
48 void x25_stop_heartbeat(struct sock *sk) in x25_stop_heartbeat() argument
50 del_timer(&sk->sk_timer); in x25_stop_heartbeat()
53 void x25_start_t2timer(struct sock *sk) in x25_start_t2timer() argument
[all …]
x25_in.c
37 static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) in x25_queue_rx_frame() argument
40 struct x25_sock *x25 = x25_sk(sk); in x25_queue_rx_frame()
45 skb_set_owner_r(skb, sk); in x25_queue_rx_frame()
79 skb_set_owner_r(skbn, sk); in x25_queue_rx_frame()
80 skb_queue_tail(&sk->sk_receive_queue, skbn); in x25_queue_rx_frame()
81 if (!sock_flag(sk, SOCK_DEAD)) in x25_queue_rx_frame()
82 sk->sk_data_ready(sk); in x25_queue_rx_frame()
92 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) in x25_state1_machine() argument
96 struct x25_sock *x25 = x25_sk(sk); in x25_state1_machine()
101 x25_stop_timer(sk); in x25_state1_machine()
[all …]
af_x25.c
200 static void x25_remove_socket(struct sock *sk) in x25_remove_socket() argument
203 sk_del_node_init(sk); in x25_remove_socket()
265 static void x25_insert_socket(struct sock *sk) in x25_insert_socket() argument
268 sk_add_node(sk, &x25_list); in x25_insert_socket()
353 struct sock *sk; in x25_new_lci() local
357 while ((sk = __x25_find_socket(lci, nb)) != NULL) { in x25_new_lci()
358 sock_put(sk); in x25_new_lci()
389 static void __x25_destroy_socket(struct sock *sk) in __x25_destroy_socket() argument
393 x25_stop_heartbeat(sk); in __x25_destroy_socket()
394 x25_stop_timer(sk); in __x25_destroy_socket()
[all …]
x25_out.c
52 int x25_output(struct sock *sk, struct sk_buff *skb) in x25_output() argument
58 struct x25_sock *x25 = x25_sk(sk); in x25_output()
71 release_sock(sk); in x25_output()
72 skbn = sock_alloc_send_skb(sk, frontlen + max_len, in x25_output()
74 lock_sock(sk); in x25_output()
80 SOCK_DEBUG(sk, "x25_output: fragment alloc" in x25_output()
105 skb_queue_tail(&sk->sk_write_queue, skbn); in x25_output()
111 skb_queue_tail(&sk->sk_write_queue, skb); in x25_output()
121 static void x25_send_iframe(struct sock *sk, struct sk_buff *skb) in x25_send_iframe() argument
123 struct x25_sock *x25 = x25_sk(sk); in x25_send_iframe()
[all …]
x25_subr.c
39 void x25_clear_queues(struct sock *sk) in x25_clear_queues() argument
41 struct x25_sock *x25 = x25_sk(sk); in x25_clear_queues()
43 skb_queue_purge(&sk->sk_write_queue); in x25_clear_queues()
56 void x25_frames_acked(struct sock *sk, unsigned short nr) in x25_frames_acked() argument
59 struct x25_sock *x25 = x25_sk(sk); in x25_frames_acked()
73 void x25_requeue_frames(struct sock *sk) in x25_requeue_frames() argument
82 while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) { in x25_requeue_frames()
84 skb_queue_head(&sk->sk_write_queue, skb); in x25_requeue_frames()
86 skb_append(skb_prev, skb, &sk->sk_write_queue); in x25_requeue_frames()
95 int x25_validate_nr(struct sock *sk, unsigned short nr) in x25_validate_nr() argument
[all …]
/linux-4.4.14/net/caif/
caif_socket.c
47 struct sock sk; /* must be first member */ member
92 static void caif_read_lock(struct sock *sk) in caif_read_lock() argument
95 cf_sk = container_of(sk, struct caifsock, sk); in caif_read_lock()
99 static void caif_read_unlock(struct sock *sk) in caif_read_unlock() argument
102 cf_sk = container_of(sk, struct caifsock, sk); in caif_read_unlock()
109 return cf_sk->sk.sk_rcvbuf / 4; in sk_rcvbuf_lowwater()
112 static void caif_flow_ctrl(struct sock *sk, int mode) in caif_flow_ctrl() argument
115 cf_sk = container_of(sk, struct caifsock, sk); in caif_flow_ctrl()
124 static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in caif_queue_rcv_skb() argument
128 struct sk_buff_head *list = &sk->sk_receive_queue; in caif_queue_rcv_skb()
[all …]
/linux-4.4.14/net/dccp/
input.c
26 static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb) in dccp_enqueue_skb() argument
29 __skb_queue_tail(&sk->sk_receive_queue, skb); in dccp_enqueue_skb()
30 skb_set_owner_r(skb, sk); in dccp_enqueue_skb()
31 sk->sk_data_ready(sk); in dccp_enqueue_skb()
34 static void dccp_fin(struct sock *sk, struct sk_buff *skb) in dccp_fin() argument
42 sk->sk_shutdown = SHUTDOWN_MASK; in dccp_fin()
43 sock_set_flag(sk, SOCK_DONE); in dccp_fin()
44 dccp_enqueue_skb(sk, skb); in dccp_fin()
47 static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb) in dccp_rcv_close() argument
51 switch (sk->sk_state) { in dccp_rcv_close()
[all …]
Dproto.c77 void dccp_set_state(struct sock *sk, const int state) in dccp_set_state() argument
79 const int oldstate = sk->sk_state; in dccp_set_state()
81 dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk, in dccp_set_state()
91 dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg); in dccp_set_state()
99 sk->sk_prot->unhash(sk); in dccp_set_state()
100 if (inet_csk(sk)->icsk_bind_hash != NULL && in dccp_set_state()
101 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) in dccp_set_state()
102 inet_put_port(sk); in dccp_set_state()
112 sk->sk_state = state; in dccp_set_state()
117 static void dccp_finish_passive_close(struct sock *sk) in dccp_finish_passive_close() argument
[all …]
Dtimer.c24 static void dccp_write_err(struct sock *sk) in dccp_write_err() argument
26 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; in dccp_write_err()
27 sk->sk_error_report(sk); in dccp_write_err()
29 dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); in dccp_write_err()
30 dccp_done(sk); in dccp_write_err()
35 static int dccp_write_timeout(struct sock *sk) in dccp_write_timeout() argument
37 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_write_timeout()
40 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { in dccp_write_timeout()
42 dst_negative_advice(sk); in dccp_write_timeout()
67 dst_negative_advice(sk); in dccp_write_timeout()
[all …]
Doutput.c25 static inline void dccp_event_ack_sent(struct sock *sk) in dccp_event_ack_sent() argument
27 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); in dccp_event_ack_sent()
31 static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb) in dccp_skb_entail() argument
33 skb_set_owner_w(skb, sk); in dccp_skb_entail()
34 WARN_ON(sk->sk_send_head); in dccp_skb_entail()
35 sk->sk_send_head = skb; in dccp_skb_entail()
36 return skb_clone(sk->sk_send_head, gfp_any()); in dccp_skb_entail()
45 static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) in dccp_transmit_skb() argument
48 struct inet_sock *inet = inet_sk(sk); in dccp_transmit_skb()
49 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_transmit_skb()
[all …]
Dqpolicy.c18 static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb) in qpolicy_simple_push() argument
20 skb_queue_tail(&sk->sk_write_queue, skb); in qpolicy_simple_push()
23 static bool qpolicy_simple_full(struct sock *sk) in qpolicy_simple_full() argument
25 return dccp_sk(sk)->dccps_tx_qlen && in qpolicy_simple_full()
26 sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen; in qpolicy_simple_full()
29 static struct sk_buff *qpolicy_simple_top(struct sock *sk) in qpolicy_simple_top() argument
31 return skb_peek(&sk->sk_write_queue); in qpolicy_simple_top()
39 static struct sk_buff *qpolicy_prio_best_skb(struct sock *sk) in qpolicy_prio_best_skb() argument
43 skb_queue_walk(&sk->sk_write_queue, skb) in qpolicy_prio_best_skb()
49 static struct sk_buff *qpolicy_prio_worst_skb(struct sock *sk) in qpolicy_prio_worst_skb() argument
[all …]
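
The qpolicy_simple_* and qpolicy_prio_* hits above are two implementations of one small interface: a push/full/top table that decides how packets enter and leave the DCCP write queue. Here is a sketch of that pluggable-policy shape with the simple FIFO variant filled in; struct pkt, struct queue and the qpolicy_ops names are illustrative, not the kernel's types.

#include <stdbool.h>
#include <stddef.h>

struct pkt {
    struct pkt *next;
    int priority;               /* only a priority policy would use this */
};

struct queue {
    struct pkt *head, *tail;
    size_t len;
    size_t max_len;             /* 0 means unlimited, like dccps_tx_qlen */
};

struct qpolicy_ops {
    void        (*push)(struct queue *q, struct pkt *p);
    bool        (*full)(const struct queue *q);
    struct pkt *(*top)(const struct queue *q);
};

static void simple_push(struct queue *q, struct pkt *p)
{
    p->next = NULL;
    if (q->tail)
        q->tail->next = p;
    else
        q->head = p;
    q->tail = p;
    q->len++;
}

static bool simple_full(const struct queue *q)
{
    /* full only when a limit is set and reached */
    return q->max_len && q->len >= q->max_len;
}

static struct pkt *simple_top(const struct queue *q)
{
    return q->head;             /* peek without dequeueing */
}

static const struct qpolicy_ops qpolicy_simple = {
    .push = simple_push,
    .full = simple_full,
    .top  = simple_top,
};

A priority policy keeps the same table but lets top() scan the queue for the best-priority packet, which is the role of qpolicy_prio_best_skb() in the hits above.
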
Dipv6.c51 static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb) in dccp_v6_send_check() argument
53 struct ipv6_pinfo *np = inet6_sk(sk); in dccp_v6_send_check()
57 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr); in dccp_v6_send_check()
76 struct sock *sk; in dccp_v6_err() local
88 sk = __inet6_lookup_established(net, &dccp_hashinfo, in dccp_v6_err()
93 if (!sk) { in dccp_v6_err()
99 if (sk->sk_state == DCCP_TIME_WAIT) { in dccp_v6_err()
100 inet_twsk_put(inet_twsk(sk)); in dccp_v6_err()
104 if (sk->sk_state == DCCP_NEW_SYN_RECV) in dccp_v6_err()
105 return dccp_req_err(sk, seq); in dccp_v6_err()
[all …]
Dipv4.c42 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) in dccp_v4_connect() argument
45 struct inet_sock *inet = inet_sk(sk); in dccp_v4_connect()
46 struct dccp_sock *dp = dccp_sk(sk); in dccp_v4_connect()
65 sock_owned_by_user(sk)); in dccp_v4_connect()
76 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, in dccp_v4_connect()
78 orig_sport, orig_dport, sk); in dccp_v4_connect()
92 sk_rcv_saddr_set(sk, inet->inet_saddr); in dccp_v4_connect()
94 sk_daddr_set(sk, daddr); in dccp_v4_connect()
96 inet_csk(sk)->icsk_ext_hdr_len = 0; in dccp_v4_connect()
98 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in dccp_v4_connect()
[all …]
Dccid.h59 int (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
60 int (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
61 void (*ccid_hc_rx_exit)(struct sock *sk);
62 void (*ccid_hc_tx_exit)(struct sock *sk);
63 void (*ccid_hc_rx_packet_recv)(struct sock *sk,
65 int (*ccid_hc_rx_parse_options)(struct sock *sk, u8 pkt,
67 int (*ccid_hc_rx_insert_options)(struct sock *sk,
69 void (*ccid_hc_tx_packet_recv)(struct sock *sk,
71 int (*ccid_hc_tx_parse_options)(struct sock *sk, u8 pkt,
73 int (*ccid_hc_tx_send_packet)(struct sock *sk,
[all …]
Ddccp.h56 void dccp_time_wait(struct sock *sk, int state, int timeo);
227 void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
229 int dccp_retransmit_skb(struct sock *sk);
231 void dccp_send_ack(struct sock *sk);
232 void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
235 void dccp_send_sync(struct sock *sk, const u64 seq,
241 void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
242 bool dccp_qpolicy_full(struct sock *sk);
243 void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
244 struct sk_buff *dccp_qpolicy_top(struct sock *sk);
[all …]
Dminisocks.c35 void dccp_time_wait(struct sock *sk, int state, int timeo) in dccp_time_wait() argument
39 tw = inet_twsk_alloc(sk, &dccp_death_row, state); in dccp_time_wait()
42 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_time_wait()
46 tw->tw_v6_daddr = sk->sk_v6_daddr; in dccp_time_wait()
47 tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr; in dccp_time_wait()
48 tw->tw_ipv6only = sk->sk_ipv6only; in dccp_time_wait()
62 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); in dccp_time_wait()
72 dccp_done(sk); in dccp_time_wait()
75 struct sock *dccp_create_openreq_child(const struct sock *sk, in dccp_create_openreq_child() argument
85 struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); in dccp_create_openreq_child()
[all …]
Ddiag.c19 static void dccp_get_info(struct sock *sk, struct tcp_info *info) in dccp_get_info() argument
21 struct dccp_sock *dp = dccp_sk(sk); in dccp_get_info()
22 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_get_info()
26 info->tcpi_state = sk->sk_state; in dccp_get_info()
36 ccid_hc_rx_get_info(dp->dccps_hc_rx_ccid, sk, info); in dccp_get_info()
39 ccid_hc_tx_get_info(dp->dccps_hc_tx_ccid, sk, info); in dccp_get_info()
42 static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, in dccp_diag_get_info() argument
48 dccp_get_info(sk, _info); in dccp_diag_get_info()
/linux-4.4.14/net/nfc/
Drawsock.c34 static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk) in nfc_sock_link() argument
37 sk_add_node(sk, &l->head); in nfc_sock_link()
41 static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk) in nfc_sock_unlink() argument
44 sk_del_node_init(sk); in nfc_sock_unlink()
48 static void rawsock_write_queue_purge(struct sock *sk) in rawsock_write_queue_purge() argument
50 pr_debug("sk=%p\n", sk); in rawsock_write_queue_purge()
52 spin_lock_bh(&sk->sk_write_queue.lock); in rawsock_write_queue_purge()
53 __skb_queue_purge(&sk->sk_write_queue); in rawsock_write_queue_purge()
54 nfc_rawsock(sk)->tx_work_scheduled = false; in rawsock_write_queue_purge()
55 spin_unlock_bh(&sk->sk_write_queue.lock); in rawsock_write_queue_purge()
[all …]
Dllcp_sock.c28 static int sock_wait_state(struct sock *sk, int state, unsigned long timeo) in sock_wait_state() argument
33 pr_debug("sk %p", sk); in sock_wait_state()
35 add_wait_queue(sk_sleep(sk), &wait); in sock_wait_state()
38 while (sk->sk_state != state) { in sock_wait_state()
49 release_sock(sk); in sock_wait_state()
51 lock_sock(sk); in sock_wait_state()
54 err = sock_error(sk); in sock_wait_state()
60 remove_wait_queue(sk_sleep(sk), &wait); in sock_wait_state()
72 struct sock *sk = sock->sk; in llcp_sock_bind() local
73 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); in llcp_sock_bind()
[all …]
Dllcp_core.c35 void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk) in nfc_llcp_sock_link() argument
38 sk_add_node(sk, &l->head); in nfc_llcp_sock_link()
42 void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk) in nfc_llcp_sock_unlink() argument
45 sk_del_node_init(sk); in nfc_llcp_sock_unlink()
60 pr_debug("%p\n", &sock->sk); in nfc_llcp_socket_purge()
70 if (s->sk != &sock->sk) in nfc_llcp_socket_purge()
81 struct sock *sk; in nfc_llcp_socket_release() local
89 sk_for_each_safe(sk, tmp, &local->sockets.head) { in nfc_llcp_socket_release()
90 llcp_sock = nfc_llcp_sock(sk); in nfc_llcp_socket_release()
92 bh_lock_sock(sk); in nfc_llcp_socket_release()
[all …]
/linux-4.4.14/net/iucv/
Daf_iucv.c52 #define __iucv_sock_wait(sk, condition, timeo, ret) \ argument
57 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
67 release_sock(sk); \
69 lock_sock(sk); \
70 ret = sock_error(sk); \
74 finish_wait(sk_sleep(sk), &__wait); \
77 #define iucv_sock_wait(sk, condition, timeo) \ argument
81 __iucv_sock_wait(sk, condition, timeo, __ret); \
85 static void iucv_sock_kill(struct sock *sk);
86 static void iucv_sock_close(struct sock *sk);
[all …]
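
__iucv_sock_wait() above is the usual sleep-until-condition-or-timeout loop: register on the socket's wait queue, drop the socket lock while sleeping, then re-check the condition, the remaining timeout and sock_error() after every wakeup. A user-space analogue of that loop, using a pthread condition variable in place of sk_sleep()/prepare_to_wait(); struct waitable and wait_for_state() are assumptions made for the example.

#include <pthread.h>
#include <time.h>

struct waitable {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             state;      /* the condition is state == wanted */
};

/* Wait until w->state == wanted or timeout_ms elapses.  Returns 0 on
 * success, ETIMEDOUT (or another errno value) otherwise.  The caller
 * holds w->lock, just as lock_sock() is held around the kernel macro. */
static int wait_for_state(struct waitable *w, int wanted, int timeout_ms)
{
    struct timespec deadline;
    int err = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec  += timeout_ms / 1000;
    deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
    if (deadline.tv_nsec >= 1000000000L) {
        deadline.tv_sec++;
        deadline.tv_nsec -= 1000000000L;
    }

    while (w->state != wanted && err == 0)
        /* atomically drops the lock while waiting, re-acquires on wakeup */
        err = pthread_cond_timedwait(&w->cond, &w->lock, &deadline);

    return w->state == wanted ? 0 : err;
}
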
/linux-4.4.14/net/phonet/
Dpep.c81 static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload, in pep_alloc_skb() argument
87 skb_set_owner_w(skb, sk); in pep_alloc_skb()
97 static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code, in pep_reply() argument
105 skb = pep_alloc_skb(sk, data, len, priority); in pep_reply()
116 return pn_skb_send(sk, skb, &peer); in pep_reply()
119 static int pep_indicate(struct sock *sk, u8 id, u8 code, in pep_indicate() argument
122 struct pep_sock *pn = pep_sk(sk); in pep_indicate()
126 skb = pep_alloc_skb(sk, data, len, priority); in pep_indicate()
135 return pn_skb_send(sk, skb, NULL); in pep_indicate()
140 static int pipe_handler_request(struct sock *sk, u8 id, u8 code, in pipe_handler_request() argument
[all …]
Dsocket.c41 struct sock *sk = sock->sk; in pn_socket_release() local
43 if (sk) { in pn_socket_release()
44 sock->sk = NULL; in pn_socket_release()
45 sk->sk_prot->close(sk, 0); in pn_socket_release()
143 void pn_sock_hash(struct sock *sk) in pn_sock_hash() argument
145 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject); in pn_sock_hash()
148 sk_add_node_rcu(sk, hlist); in pn_sock_hash()
153 void pn_sock_unhash(struct sock *sk) in pn_sock_unhash() argument
156 sk_del_node_init_rcu(sk); in pn_sock_unhash()
158 pn_sock_unbind_all_res(sk); in pn_sock_unhash()
[all …]
Dpep-gprs.c38 struct sock *sk; member
68 if (pep_writeable(gp->sk)) in gprs_writeable()
76 static void gprs_state_change(struct sock *sk) in gprs_state_change() argument
78 struct gprs_dev *gp = sk->sk_user_data; in gprs_state_change()
80 if (sk->sk_state == TCP_CLOSE_WAIT) { in gprs_state_change()
149 static void gprs_data_ready(struct sock *sk) in gprs_data_ready() argument
151 struct gprs_dev *gp = sk->sk_user_data; in gprs_data_ready()
154 while ((skb = pep_read(sk)) != NULL) { in gprs_data_ready()
160 static void gprs_write_space(struct sock *sk) in gprs_write_space() argument
162 struct gprs_dev *gp = sk->sk_user_data; in gprs_write_space()
[all …]
Ddatagram.c36 static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
39 static void pn_sock_close(struct sock *sk, long timeout) in pn_sock_close() argument
41 sk_common_release(sk); in pn_sock_close()
44 static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg) in pn_ioctl() argument
51 lock_sock(sk); in pn_ioctl()
52 skb = skb_peek(&sk->sk_receive_queue); in pn_ioctl()
54 release_sock(sk); in pn_ioctl()
65 return pn_sock_bind_res(sk, res); in pn_ioctl()
67 return pn_sock_unbind_res(sk, res); in pn_ioctl()
75 static void pn_destruct(struct sock *sk) in pn_destruct() argument
[all …]
Daf_phonet.c66 struct sock *sk; in pn_socket_create() local
100 sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot, kern); in pn_socket_create()
101 if (sk == NULL) { in pn_socket_create()
106 sock_init_data(sock, sk); in pn_socket_create()
109 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; in pn_socket_create()
110 sk->sk_protocol = protocol; in pn_socket_create()
111 pn = pn_sk(sk); in pn_socket_create()
115 sk->sk_prot->init(sk); in pn_socket_create()
237 int pn_skb_send(struct sock *sk, struct sk_buff *skb, in pn_skb_send() argument
240 struct net *net = sock_net(sk); in pn_skb_send()
[all …]
/linux-4.4.14/net/llc/
Dllc_conn.c33 static void llc_conn_send_pdus(struct sock *sk);
34 static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
35 static int llc_exec_conn_trans_actions(struct sock *sk,
38 static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
59 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) in llc_conn_state_process() argument
62 struct llc_sock *llc = llc_sk(skb->sk); in llc_conn_state_process()
75 rc = llc_conn_service(skb->sk, skb); in llc_conn_state_process()
93 llc_save_primitive(sk, skb, LLC_DATA_PRIM); in llc_conn_state_process()
94 if (unlikely(sock_queue_rcv_skb(sk, skb))) { in llc_conn_state_process()
109 skb_queue_tail(&sk->sk_receive_queue, skb); in llc_conn_state_process()
[all …]
Daf_llc.c41 static int llc_ui_wait_for_conn(struct sock *sk, long timeout);
42 static int llc_ui_wait_for_disc(struct sock *sk, long timeout);
43 static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
95 static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr) in llc_ui_header_len() argument
101 else if (sk->sk_type == SOCK_STREAM) in llc_ui_header_len()
115 static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock) in llc_ui_send_data() argument
117 struct llc_sock* llc = llc_sk(sk); in llc_ui_send_data()
123 long timeout = sock_sndtimeo(sk, noblock); in llc_ui_send_data()
125 rc = llc_ui_wait_for_busy_core(sk, timeout); in llc_ui_send_data()
128 rc = llc_build_and_send_pkt(sk, skb); in llc_ui_send_data()
[all …]
Dllc_c_ac.c32 static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb);
33 static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb);
34 static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *ev);
36 static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb);
38 static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk,
41 static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb);
45 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb) in llc_conn_ac_clear_remote_busy() argument
47 struct llc_sock *llc = llc_sk(sk); in llc_conn_ac_clear_remote_busy()
56 llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); in llc_conn_ac_clear_remote_busy()
61 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) in llc_conn_ac_conn_ind() argument
[all …]
Dllc_c_ev.c75 static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr) in llc_util_nr_inside_tx_window() argument
80 struct llc_sock *llc = llc_sk(sk); in llc_util_nr_inside_tx_window()
99 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_conn_req() argument
107 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_data_req() argument
115 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_disc_req() argument
123 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_rst_req() argument
131 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_local_busy_detected() argument
139 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_local_busy_cleared() argument
147 int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_rx_bad_pdu() argument
152 int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) in llc_conn_ev_rx_disc_cmd_pbit_set_x() argument
[all …]
Dllc_proc.c37 struct sock *sk = NULL; in llc_get_sk_idx() local
46 sk_nulls_for_each(sk, node, head) { in llc_get_sk_idx()
54 sk = NULL; in llc_get_sk_idx()
56 return sk; in llc_get_sk_idx()
70 struct sock *sk = NULL; in laddr_hash_next() local
73 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket]) in laddr_hash_next()
77 return sk; in laddr_hash_next()
82 struct sock* sk, *next; in llc_seq_next() local
88 sk = llc_get_sk_idx(0); in llc_seq_next()
91 sk = v; in llc_seq_next()
[all …]
Dllc_if.c42 int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb) in llc_build_and_send_pkt() argument
46 struct llc_sock *llc = llc_sk(sk); in llc_build_and_send_pkt()
61 rc = llc_conn_state_process(sk, skb); in llc_build_and_send_pkt()
79 int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap) in llc_establish_connection() argument
84 struct llc_sock *llc = llc_sk(sk); in llc_establish_connection()
94 sk = existing; in llc_establish_connection()
99 sock_hold(sk); in llc_establish_connection()
108 skb_set_owner_w(skb, sk); in llc_establish_connection()
109 rc = llc_conn_state_process(sk, skb); in llc_establish_connection()
112 sock_put(sk); in llc_establish_connection()
[all …]
Dllc_sap.c47 struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev, in llc_alloc_frame() argument
63 if (sk != NULL) in llc_alloc_frame()
64 skb_set_owner_w(skb, sk); in llc_alloc_frame()
69 void llc_save_primitive(struct sock *sk, struct sk_buff *skb, u8 prim) in llc_save_primitive() argument
77 addr->sllc_family = sk->sk_family; in llc_save_primitive()
214 if (skb->sk->sk_state == TCP_LISTEN) in llc_sap_state_process()
217 llc_save_primitive(skb->sk, skb, ev->prim); in llc_sap_state_process()
220 if (sock_queue_rcv_skb(skb->sk, skb)) in llc_sap_state_process()
287 struct sock *sk) in llc_sap_rcv() argument
293 skb->sk = sk; in llc_sap_rcv()
[all …]
/linux-4.4.14/drivers/isdn/mISDN/
Dsocket.c31 #define _pms(sk) ((struct mISDN_sock *)sk) argument
55 mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk) in mISDN_sock_link() argument
58 sk_add_node(sk, &l->head); in mISDN_sock_link()
62 static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk) in mISDN_sock_unlink() argument
65 sk_del_node_init(sk); in mISDN_sock_unlink()
78 if (msk->sk.sk_state == MISDN_CLOSED) in mISDN_send()
81 err = sock_queue_rcv_skb(&msk->sk, skb); in mISDN_send()
97 msk->sk.sk_state = MISDN_CLOSED; in mISDN_ctrl()
104 mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) in mISDN_sock_cmsg() argument
108 if (_pms(sk)->cmask & MISDN_TIME_STAMP) { in mISDN_sock_cmsg()
[all …]
Ddsp_dtmf.c123 s32 sk, sk1, sk2; in dsp_dtmf_goertzel_decode() local
160 sk = (*hfccoeff++) >> 4; in dsp_dtmf_goertzel_decode()
161 if (sk > 32767 || sk < -32767 || sk2 > 32767 in dsp_dtmf_goertzel_decode()
167 (sk * sk) - in dsp_dtmf_goertzel_decode()
168 (((cos2pik[k] * sk) >> 15) * sk2) + in dsp_dtmf_goertzel_decode()
185 sk = 0; in dsp_dtmf_goertzel_decode()
191 sk = ((cos2pik_ * sk1) >> 15) - sk2 + (*buf++); in dsp_dtmf_goertzel_decode()
193 sk1 = sk; in dsp_dtmf_goertzel_decode()
195 sk >>= 8; in dsp_dtmf_goertzel_decode()
197 if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767) in dsp_dtmf_goertzel_decode()
[all …]
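
dsp_dtmf_goertzel_decode() above runs the Goertzel recurrence in fixed point: cos2pik[] carries the per-tone coefficient (applied with the >> 15 shift) and sk/sk1/sk2 are the two-sample filter state, with the magnitude term built from sk and sk2 once the block is done. The same computation in floating point, for reference; goertzel_power() and its parameters are illustrative, and the caller supplies the sample rate and tone frequency.

#include <math.h>

/* Squared magnitude of the 'freq' component in n samples of 'buf'. */
static double goertzel_power(const short *buf, int n, double freq,
                             double sample_rate)
{
    double coeff = 2.0 * cos(2.0 * M_PI * freq / sample_rate);
    double s1 = 0.0, s2 = 0.0;
    int i;

    for (i = 0; i < n; i++) {
        double s = coeff * s1 - s2 + buf[i];   /* sk = coeff*sk1 - sk2 + x */
        s2 = s1;
        s1 = s;
    }
    /* |X(k)|^2 = s1^2 + s2^2 - coeff*s1*s2 */
    return s1 * s1 + s2 * s2 - coeff * s1 * s2;
}

Comparing this power against per-tone thresholds, as the kernel code does with the fixed-point result, is what turns the filter output into DTMF digit detection.
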
/linux-4.4.14/sound/usb/usx2y/
Dusb_stream.c27 static unsigned usb_stream_next_packet_size(struct usb_stream_kernel *sk) in usb_stream_next_packet_size() argument
29 struct usb_stream *s = sk->s; in usb_stream_next_packet_size()
30 sk->out_phase_peeked = (sk->out_phase & 0xffff) + sk->freqn; in usb_stream_next_packet_size()
31 return (sk->out_phase_peeked >> 16) * s->cfg.frame_size; in usb_stream_next_packet_size()
34 static void playback_prep_freqn(struct usb_stream_kernel *sk, struct urb *urb) in playback_prep_freqn() argument
36 struct usb_stream *s = sk->s; in playback_prep_freqn()
39 for (pack = 0; pack < sk->n_o_ps; pack++) { in playback_prep_freqn()
40 int l = usb_stream_next_packet_size(sk); in playback_prep_freqn()
44 sk->out_phase = sk->out_phase_peeked; in playback_prep_freqn()
59 static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize, in init_pipe_urbs() argument
[all …]
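
usb_stream_next_packet_size() above keeps the output rate in a 16.16 fixed-point phase accumulator: freqn behaves as frames-per-packet in 16.16 format, the integer part of the accumulated phase gives the next packet's frame count, and the fractional part is carried over once playback_prep_freqn() commits the peeked phase. A self-contained sketch of that accumulator; struct stream, the helper names and the 44.1 kHz example values are assumptions.

#include <stdio.h>

struct stream {
    unsigned int out_phase;         /* committed 16.16 phase           */
    unsigned int out_phase_peeked;  /* phase after the next packet     */
    unsigned int freqn;             /* frames per packet, 16.16 format */
    unsigned int frame_size;        /* bytes per audio frame           */
};

/* Peek the byte size of the next packet without committing the phase. */
static unsigned int next_packet_size(struct stream *s)
{
    s->out_phase_peeked = (s->out_phase & 0xffff) + s->freqn;
    return (s->out_phase_peeked >> 16) * s->frame_size;
}

/* Commit once the packet has really been prepared, mirroring
 * sk->out_phase = sk->out_phase_peeked in the kernel code. */
static void commit_packet(struct stream *s)
{
    s->out_phase = s->out_phase_peeked;
}

int main(void)
{
    /* 44100 frames/s at 1000 packets/s: 44.1 frames per packet on average */
    struct stream s = { 0, 0, (44100u << 16) / 1000, 4 };
    int i;

    for (i = 0; i < 20; i++) {
        /* mostly 44-frame packets, with an occasional 45-frame packet so
         * the long-run average stays at the nominal rate */
        printf("%u bytes\n", next_packet_size(&s));
        commit_packet(&s);
    }
    return 0;
}
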
/linux-4.4.14/net/decnet/
Daf_decnet.c143 struct sock sk; member
147 static void dn_keepalive(struct sock *sk);
163 static struct hlist_head *dn_find_list(struct sock *sk) in dn_find_list() argument
165 struct dn_scp *scp = DN_SK(sk); in dn_find_list()
178 struct sock *sk; in check_port() local
183 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { in check_port()
184 struct dn_scp *scp = DN_SK(sk); in check_port()
191 static unsigned short port_alloc(struct sock *sk) in port_alloc() argument
193 struct dn_scp *scp = DN_SK(sk); in port_alloc()
212 static int dn_hash_sock(struct sock *sk) in dn_hash_sock() argument
[all …]
Ddn_nsp_in.c100 static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack) in dn_ack() argument
102 struct dn_scp *scp = DN_SK(sk); in dn_ack()
110 wakeup |= dn_nsp_check_xmit_queue(sk, skb, in dn_ack()
120 wakeup |= dn_nsp_check_xmit_queue(sk, skb, in dn_ack()
129 if (wakeup && !sock_flag(sk, SOCK_DEAD)) in dn_ack()
130 sk->sk_state_change(sk); in dn_ack()
136 static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth) in dn_process_ack() argument
152 dn_ack(sk, skb, ack); in dn_process_ack()
165 dn_ack(sk, skb, ack); in dn_process_ack()
332 static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb) in dn_nsp_conn_init() argument
[all …]
Ddn_timer.c38 void dn_start_slow_timer(struct sock *sk) in dn_start_slow_timer() argument
40 setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk); in dn_start_slow_timer()
41 sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); in dn_start_slow_timer()
44 void dn_stop_slow_timer(struct sock *sk) in dn_stop_slow_timer() argument
46 sk_stop_timer(sk, &sk->sk_timer); in dn_stop_slow_timer()
51 struct sock *sk = (struct sock *)arg; in dn_slow_timer() local
52 struct dn_scp *scp = DN_SK(sk); in dn_slow_timer()
54 bh_lock_sock(sk); in dn_slow_timer()
56 if (sock_owned_by_user(sk)) { in dn_slow_timer()
57 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10); in dn_slow_timer()
[all …]
Ddn_nsp_out.c76 struct sock *sk = skb->sk; in dn_nsp_send() local
77 struct dn_scp *scp = DN_SK(sk); in dn_nsp_send()
84 dst = sk_dst_check(sk, 0); in dn_nsp_send()
88 dst_output(&init_net, skb->sk, skb); in dn_nsp_send()
93 fld.flowidn_oif = sk->sk_bound_dev_if; in dn_nsp_send()
98 if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) { in dn_nsp_send()
99 dst = sk_dst_get(sk); in dn_nsp_send()
100 sk->sk_route_caps = dst->dev->features; in dn_nsp_send()
104 sk->sk_err = EHOSTUNREACH; in dn_nsp_send()
105 if (!sock_flag(sk, SOCK_DEAD)) in dn_nsp_send()
[all …]
/linux-4.4.14/net/ipv6/
Dudp.c79 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) in ipv6_rcv_saddr_equal() argument
83 int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); in ipv6_rcv_saddr_equal()
89 (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr || in ipv6_rcv_saddr_equal()
90 sk->sk_rcv_saddr == sk2->sk_rcv_saddr)); in ipv6_rcv_saddr_equal()
97 !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED)) in ipv6_rcv_saddr_equal()
101 ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6)) in ipv6_rcv_saddr_equal()
123 int udp_v6_get_port(struct sock *sk, unsigned short snum) in udp_v6_get_port() argument
126 udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum); in udp_v6_get_port()
128 udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0); in udp_v6_get_port()
131 udp_sk(sk)->udp_portaddr_hash = hash2_partial; in udp_v6_get_port()
[all …]
Dinet6_connection_sock.c30 int inet6_csk_bind_conflict(const struct sock *sk, in inet6_csk_bind_conflict() argument
34 int reuse = sk->sk_reuse; in inet6_csk_bind_conflict()
35 int reuseport = sk->sk_reuseport; in inet6_csk_bind_conflict()
36 kuid_t uid = sock_i_uid((struct sock *)sk); in inet6_csk_bind_conflict()
44 if (sk != sk2 && in inet6_csk_bind_conflict()
45 (!sk->sk_bound_dev_if || in inet6_csk_bind_conflict()
47 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { in inet6_csk_bind_conflict()
54 if (ipv6_rcv_saddr_equal(sk, sk2)) in inet6_csk_bind_conflict()
59 ipv6_rcv_saddr_equal(sk, sk2)) in inet6_csk_bind_conflict()
68 struct dst_entry *inet6_csk_route_req(const struct sock *sk, in inet6_csk_route_req() argument
[all …]
Draw.c72 static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, in __raw_v6_lookup() argument
78 sk_for_each_from(sk) in __raw_v6_lookup()
79 if (inet_sk(sk)->inet_num == num) { in __raw_v6_lookup()
81 if (!net_eq(sock_net(sk), net)) in __raw_v6_lookup()
84 if (!ipv6_addr_any(&sk->sk_v6_daddr) && in __raw_v6_lookup()
85 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) in __raw_v6_lookup()
88 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) in __raw_v6_lookup()
91 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { in __raw_v6_lookup()
92 if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)) in __raw_v6_lookup()
95 inet6_mc_check(sk, loc_addr, rmt_addr)) in __raw_v6_lookup()
[all …]
Dinet6_hashtables.c59 struct sock *sk; in __inet6_lookup_established() local
72 sk_nulls_for_each_rcu(sk, node, &head->chain) { in __inet6_lookup_established()
73 if (sk->sk_hash != hash) in __inet6_lookup_established()
75 if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif)) in __inet6_lookup_established()
77 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) in __inet6_lookup_established()
80 if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) { in __inet6_lookup_established()
81 sock_gen_put(sk); in __inet6_lookup_established()
89 sk = NULL; in __inet6_lookup_established()
92 return sk; in __inet6_lookup_established()
96 static inline int compute_score(struct sock *sk, struct net *net, in compute_score() argument
[all …]
Dtcp_ipv6.c73 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, in tcp_v6_md5_do_lookup() argument
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) in inet6_sk_rx_dst_set() argument
99 sk->sk_rx_dst = dst; in inet6_sk_rx_dst_set()
100 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; in inet6_sk_rx_dst_set()
101 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); in inet6_sk_rx_dst_set()
113 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, in tcp_v6_connect() argument
117 struct inet_sock *inet = inet_sk(sk); in tcp_v6_connect()
[all …]
Dipv6_sockglue.c61 int ip6_ra_control(struct sock *sk, int sel) in ip6_ra_control() argument
66 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW) in ip6_ra_control()
73 if (ra->sk == sk) { in ip6_ra_control()
83 sock_put(sk); in ip6_ra_control()
92 new_ra->sk = sk; in ip6_ra_control()
96 sock_hold(sk); in ip6_ra_control()
102 struct ipv6_txoptions *ipv6_update_options(struct sock *sk, in ipv6_update_options() argument
105 if (inet_sk(sk)->is_icsk) { in ipv6_update_options()
107 !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && in ipv6_update_options()
108 inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) { in ipv6_update_options()
[all …]
Daf_inet6.c93 static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) in inet6_sk_generic() argument
95 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); in inet6_sk_generic()
97 return (struct ipv6_pinfo *)(((u8 *)sk) + offset); in inet6_sk_generic()
105 struct sock *sk; in inet6_create() local
173 sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, kern); in inet6_create()
174 if (!sk) in inet6_create()
177 sock_init_data(sock, sk); in inet6_create()
181 sk->sk_reuse = SK_CAN_REUSE; in inet6_create()
183 inet = inet_sk(sk); in inet6_create()
192 sk->sk_destruct = inet_sock_destruct; in inet6_create()
[all …]
Dxfrm6_output.c33 struct sock *sk = skb->sk; in xfrm6_local_dontfrag() local
35 if (sk) { in xfrm6_local_dontfrag()
36 if (sk->sk_family != AF_INET6) in xfrm6_local_dontfrag()
39 proto = sk->sk_protocol; in xfrm6_local_dontfrag()
41 return inet6_sk(sk)->dontfrag; in xfrm6_local_dontfrag()
50 struct sock *sk = skb->sk; in xfrm6_local_rxpmtu() local
52 fl6.flowi6_oif = sk->sk_bound_dev_if; in xfrm6_local_rxpmtu()
55 ipv6_local_rxpmtu(sk, &fl6, mtu); in xfrm6_local_rxpmtu()
62 struct sock *sk = skb->sk; in xfrm6_local_error() local
65 fl6.fl6_dport = inet_sk(sk)->inet_dport; in xfrm6_local_error()
[all …]
Dip6_output.c60 static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb) in ip6_finish_output2() argument
74 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) && in ip6_finish_output2()
86 net, sk, newskb, NULL, newskb->dev, in ip6_finish_output2()
124 static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) in ip6_finish_output() argument
129 return ip6_fragment(net, sk, skb, ip6_finish_output2); in ip6_finish_output()
131 return ip6_finish_output2(net, sk, skb); in ip6_finish_output()
134 int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) in ip6_output() argument
146 net, sk, skb, NULL, dev, in ip6_output()
157 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, in ip6_xmit() argument
160 struct net *net = sock_net(sk); in ip6_xmit()
[all …]
Ddatagram.c43 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) in __ip6_datagram_connect() argument
46 struct inet_sock *inet = inet_sk(sk); in __ip6_datagram_connect()
47 struct ipv6_pinfo *np = inet6_sk(sk); in __ip6_datagram_connect()
57 if (__ipv6_only_sock(sk)) in __ip6_datagram_connect()
59 err = __ip4_datagram_connect(sk, uaddr, addr_len); in __ip6_datagram_connect()
73 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); in __ip6_datagram_connect()
93 if (__ipv6_only_sock(sk)) { in __ip6_datagram_connect()
101 err = __ip4_datagram_connect(sk, in __ip6_datagram_connect()
109 ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr); in __ip6_datagram_connect()
115 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) || in __ip6_datagram_connect()
[all …]
Dudp_impl.h14 int udp_v6_get_port(struct sock *sk, unsigned short snum);
16 int udpv6_getsockopt(struct sock *sk, int level, int optname,
18 int udpv6_setsockopt(struct sock *sk, int level, int optname,
21 int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
23 int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
26 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
27 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
29 int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
30 void udpv6_destroy_sock(struct sock *sk);
32 void udp_v6_clear_sk(struct sock *sk, int size);
Dping.c59 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, in dummy_ipv6_recv_error() argument
64 static void dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, in dummy_ip6_datagram_recv_ctl() argument
72 static void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, in dummy_ipv6_icmp_error() argument
80 int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) in ping_v6_sendmsg() argument
82 struct inet_sock *inet = inet_sk(sk); in ping_v6_sendmsg()
83 struct ipv6_pinfo *np = inet6_sk(sk); in ping_v6_sendmsg()
109 if (sk->sk_bound_dev_if && in ping_v6_sendmsg()
110 sk->sk_bound_dev_if != u->sin6_scope_id) { in ping_v6_sendmsg()
116 if (sk->sk_state != TCP_ESTABLISHED) in ping_v6_sendmsg()
118 daddr = &sk->sk_v6_daddr; in ping_v6_sendmsg()
[all …]
Dicmp.c114 struct sock *sk; in icmpv6_xmit_lock() local
118 sk = icmpv6_sk(net); in icmpv6_xmit_lock()
119 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { in icmpv6_xmit_lock()
127 return sk; in icmpv6_xmit_lock()
130 static __inline__ void icmpv6_xmit_unlock(struct sock *sk) in icmpv6_xmit_unlock() argument
132 spin_unlock_bh(&sk->sk_lock.slock); in icmpv6_xmit_unlock()
173 static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, in icmpv6_xrlim_allow() argument
176 struct net *net = sock_net(sk); in icmpv6_xrlim_allow()
193 dst = ip6_route_output(net, sk, fl6); in icmpv6_xrlim_allow()
239 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, in icmpv6_push_pending_frames() argument
[all …]
Dsyncookies.c136 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) in cookie_v6_check() argument
141 struct ipv6_pinfo *np = inet6_sk(sk); in cookie_v6_check()
142 struct tcp_sock *tp = tcp_sk(sk); in cookie_v6_check()
145 struct sock *ret = sk; in cookie_v6_check()
154 if (tcp_synq_no_recent_overflow(sk)) in cookie_v6_check()
159 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); in cookie_v6_check()
163 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); in cookie_v6_check()
173 req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false); in cookie_v6_check()
181 if (security_inet_conn_request(sk, skb, req)) in cookie_v6_check()
189 if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || in cookie_v6_check()
[all …]
/linux-4.4.14/net/vmw_vsock/
Daf_vsock.c102 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
103 static void vsock_sk_destruct(struct sock *sk);
104 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
169 struct sock *sk = sk_vsock(vsk); in vsock_auto_bind() local
175 return __vsock_bind(sk, &local_addr); in vsock_auto_bind()
192 sock_hold(&vsk->sk); in __vsock_insert_bound()
199 sock_hold(&vsk->sk); in __vsock_insert_connected()
206 sock_put(&vsk->sk); in __vsock_remove_bound()
212 sock_put(&vsk->sk); in __vsock_remove_connected()
288 struct sock *sk; in vsock_find_bound_socket() local
[all …]
Dvmci_transport_notify_qstate.c81 vmci_transport_handle_read(struct sock *sk, in vmci_transport_handle_read() argument
86 sk->sk_write_space(sk); in vmci_transport_handle_read()
90 vmci_transport_handle_wrote(struct sock *sk, in vmci_transport_handle_wrote() argument
95 sk->sk_data_ready(sk); in vmci_transport_handle_wrote()
98 static void vsock_block_update_write_window(struct sock *sk) in vsock_block_update_write_window() argument
100 struct vsock_sock *vsk = vsock_sk(sk); in vsock_block_update_write_window()
108 static int vmci_transport_send_read_notification(struct sock *sk) in vmci_transport_send_read_notification() argument
115 vsk = vsock_sk(sk); in vmci_transport_send_read_notification()
131 err = vmci_transport_send_read(sk); in vmci_transport_send_read_notification()
140 sk); in vmci_transport_send_read_notification()
[all …]
Dvmci_transport_notify.c113 vmci_transport_handle_waiting_read(struct sock *sk, in vmci_transport_handle_waiting_read() argument
122 vsk = vsock_sk(sk); in vmci_transport_handle_waiting_read()
134 sent = vmci_transport_send_wrote(sk) > 0; in vmci_transport_handle_waiting_read()
143 vmci_transport_handle_waiting_write(struct sock *sk, in vmci_transport_handle_waiting_write() argument
152 vsk = vsock_sk(sk); in vmci_transport_handle_waiting_write()
164 sent = vmci_transport_send_read(sk) > 0; in vmci_transport_handle_waiting_write()
173 vmci_transport_handle_read(struct sock *sk, in vmci_transport_handle_read() argument
181 vsk = vsock_sk(sk); in vmci_transport_handle_read()
185 sk->sk_write_space(sk); in vmci_transport_handle_read()
188 static bool send_waiting_read(struct sock *sk, u64 room_needed) in send_waiting_read() argument
[all …]
Dvmci_transport.c48 static int vmci_transport_recv_listen(struct sock *sk,
51 struct sock *sk,
55 struct sock *sk,
58 struct sock *sk,
61 struct sock *sk,
63 static int vmci_transport_recv_connected(struct sock *sk,
67 static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
72 struct sock *sk; member
276 vmci_transport_send_control_pkt(struct sock *sk, in vmci_transport_send_control_pkt() argument
288 vsk = vsock_sk(sk); in vmci_transport_send_control_pkt()
[all …]
Dvmci_transport_notify.h48 void (*socket_init) (struct sock *sk);
50 int (*poll_in) (struct sock *sk, size_t target,
52 int (*poll_out) (struct sock *sk, size_t target,
54 void (*handle_notify_pkt) (struct sock *sk,
59 int (*recv_init) (struct sock *sk, size_t target,
61 int (*recv_pre_block) (struct sock *sk, size_t target,
63 int (*recv_pre_dequeue) (struct sock *sk, size_t target,
65 int (*recv_post_dequeue) (struct sock *sk, size_t target,
68 int (*send_init) (struct sock *sk,
70 int (*send_pre_block) (struct sock *sk,
[all …]
/linux-4.4.14/net/unix/
Daf_unix.c138 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE) argument
182 #define unix_peer(sk) (unix_sk(sk)->peer) argument
184 static inline int unix_our_peer(struct sock *sk, struct sock *osk) in unix_our_peer() argument
186 return unix_peer(osk) == sk; in unix_our_peer()
189 static inline int unix_may_send(struct sock *sk, struct sock *osk) in unix_may_send() argument
191 return unix_peer(osk) == NULL || unix_our_peer(sk, osk); in unix_may_send()
194 static inline int unix_recvq_full(struct sock const *sk) in unix_recvq_full() argument
196 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; in unix_recvq_full()
248 static void __unix_remove_socket(struct sock *sk) in __unix_remove_socket() argument
250 sk_del_node_init(sk); in __unix_remove_socket()
[all …]
Ddiag.c11 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) in sk_diag_dump_name() argument
13 struct unix_address *addr = unix_sk(sk)->addr; in sk_diag_dump_name()
22 static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) in sk_diag_dump_vfs() argument
24 struct dentry *dentry = unix_sk(sk)->path.dentry; in sk_diag_dump_vfs()
38 static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) in sk_diag_dump_peer() argument
43 peer = unix_peer_get(sk); in sk_diag_dump_peer()
56 static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb) in sk_diag_dump_icons() argument
63 if (sk->sk_state == TCP_LISTEN) { in sk_diag_dump_icons()
64 spin_lock(&sk->sk_receive_queue.lock); in sk_diag_dump_icons()
67 sk->sk_receive_queue.qlen * sizeof(u32)); in sk_diag_dump_icons()
[all …]
/linux-4.4.14/net/l2tp/
Dl2tp_ip6.c54 static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk) in l2tp_ip6_sk() argument
56 return (struct l2tp_ip6_sock *)sk; in l2tp_ip6_sk()
63 struct sock *sk; in __l2tp_ip6_bind_lookup() local
65 sk_for_each_bound(sk, &l2tp_ip6_bind_table) { in __l2tp_ip6_bind_lookup()
66 const struct in6_addr *addr = inet6_rcv_saddr(sk); in __l2tp_ip6_bind_lookup()
67 struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); in __l2tp_ip6_bind_lookup()
73 net_eq(sock_net(sk), net) && in __l2tp_ip6_bind_lookup()
75 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) in __l2tp_ip6_bind_lookup()
79 sk = NULL; in __l2tp_ip6_bind_lookup()
81 return sk; in __l2tp_ip6_bind_lookup()
[all …]
Dl2tp_ip.c45 static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) in l2tp_ip_sk() argument
47 return (struct l2tp_ip_sock *)sk; in l2tp_ip_sk()
52 struct sock *sk; in __l2tp_ip_bind_lookup() local
54 sk_for_each_bound(sk, &l2tp_ip_bind_table) { in __l2tp_ip_bind_lookup()
55 struct inet_sock *inet = inet_sk(sk); in __l2tp_ip_bind_lookup()
56 struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); in __l2tp_ip_bind_lookup()
62 net_eq(sock_net(sk), net) && in __l2tp_ip_bind_lookup()
64 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) in __l2tp_ip_bind_lookup()
68 sk = NULL; in __l2tp_ip_bind_lookup()
70 return sk; in __l2tp_ip_bind_lookup()
[all …]
Dl2tp_ppp.c143 static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) in pppol2tp_sock_to_session() argument
147 if (sk == NULL) in pppol2tp_sock_to_session()
150 sock_hold(sk); in pppol2tp_sock_to_session()
151 session = (struct l2tp_session *)(sk->sk_user_data); in pppol2tp_sock_to_session()
153 sock_put(sk); in pppol2tp_sock_to_session()
193 struct sock *sk = sock->sk; in pppol2tp_recvmsg() local
196 if (sk->sk_state & PPPOX_BOUND) in pppol2tp_recvmsg()
200 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, in pppol2tp_recvmsg()
222 struct sock *sk = NULL; in pppol2tp_recv() local
227 sk = ps->sock; in pppol2tp_recv()
[all …]
/linux-4.4.14/net/ieee802154/
Dsocket.c90 struct sock *sk = sock->sk; in ieee802154_sock_release() local
92 if (sk) { in ieee802154_sock_release()
93 sock->sk = NULL; in ieee802154_sock_release()
94 sk->sk_prot->close(sk, 0); in ieee802154_sock_release()
102 struct sock *sk = sock->sk; in ieee802154_sock_sendmsg() local
104 return sk->sk_prot->sendmsg(sk, msg, len); in ieee802154_sock_sendmsg()
110 struct sock *sk = sock->sk; in ieee802154_sock_bind() local
112 if (sk->sk_prot->bind) in ieee802154_sock_bind()
113 return sk->sk_prot->bind(sk, uaddr, addr_len); in ieee802154_sock_bind()
121 struct sock *sk = sock->sk; in ieee802154_sock_connect() local
[all …]
/linux-4.4.14/net/can/
Draw.c84 struct sock sk; member
113 static inline struct raw_sock *raw_sk(const struct sock *sk) in raw_sk() argument
115 return (struct raw_sock *)sk; in raw_sk()
120 struct sock *sk = (struct sock *)data; in raw_rcv() local
121 struct raw_sock *ro = raw_sk(sk); in raw_rcv()
127 if (!ro->recv_own_msgs && oskb->sk == sk) in raw_rcv()
175 if (oskb->sk) in raw_rcv()
177 if (oskb->sk == sk) in raw_rcv()
180 if (sock_queue_rcv_skb(sk, skb) < 0) in raw_rcv()
184 static int raw_enable_filters(struct net_device *dev, struct sock *sk, in raw_enable_filters() argument
[all …]
Dbcm.c111 struct sock *sk; member
118 struct sock sk; member
129 static inline struct bcm_sock *bcm_sk(const struct sock *sk) in bcm_sk() argument
131 return (struct bcm_sock *)sk; in bcm_sk()
167 struct sock *sk = (struct sock *)m->private; in bcm_proc_show() local
168 struct bcm_sock *bo = bcm_sk(sk); in bcm_proc_show()
171 seq_printf(m, ">>> socket %pK", sk->sk_socket); in bcm_proc_show()
172 seq_printf(m, " / sk %pK", sk); in bcm_proc_show()
275 can_skb_set_owner(skb, op->sk); in bcm_can_tx()
299 struct sock *sk = op->sk; in bcm_send_to_user() local
[all …]
/linux-4.4.14/net/netlink/
Daf_netlink.c89 static inline int netlink_is_kernel(struct sock *sk) in netlink_is_kernel() argument
91 return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET; in netlink_is_kernel()
99 static int netlink_dump(struct sock *sk);
199 struct sock *sk = skb->sk; in netlink_filter_tap() local
204 switch (sk->sk_protocol) { in netlink_filter_tap()
223 struct sock *sk = skb->sk; in __netlink_deliver_tap_skb() local
234 nskb->protocol = htons((u16) sk->sk_protocol); in __netlink_deliver_tap_skb()
235 nskb->pkt_type = netlink_is_kernel(sk) ? in __netlink_deliver_tap_skb()
279 static void netlink_overrun(struct sock *sk) in netlink_overrun() argument
281 struct netlink_sock *nlk = nlk_sk(sk); in netlink_overrun()
[all …]
Ddiag.c25 static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb) in sk_diag_put_rings_cfg() argument
27 struct netlink_sock *nlk = nlk_sk(sk); in sk_diag_put_rings_cfg()
40 static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb) in sk_diag_put_rings_cfg() argument
46 static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb) in sk_diag_dump_groups() argument
48 struct netlink_sock *nlk = nlk_sk(sk); in sk_diag_dump_groups()
57 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, in sk_diag_fill() argument
63 struct netlink_sock *nlk = nlk_sk(sk); in sk_diag_fill()
72 rep->ndiag_type = sk->sk_type; in sk_diag_fill()
73 rep->ndiag_protocol = sk->sk_protocol; in sk_diag_fill()
74 rep->ndiag_state = sk->sk_state; in sk_diag_fill()
[all …]
/linux-4.4.14/net/sctp/
Dsocket.c83 static int sctp_writeable(struct sock *sk);
87 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
89 static int sctp_wait_for_accept(struct sock *sk, long timeo);
90 static void sctp_wait_for_close(struct sock *sk, long timeo);
91 static void sctp_destruct_sock(struct sock *sk);
101 static int sctp_autobind(struct sock *sk);
109 static void sctp_enter_memory_pressure(struct sock *sk) in sctp_enter_memory_pressure() argument
123 amt = sk_wmem_alloc_get(asoc->base.sk); in sctp_wspace()
125 if (amt >= asoc->base.sk->sk_sndbuf) { in sctp_wspace()
126 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) in sctp_wspace()
[all …]
Dinput.c75 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
109 struct sock *sk; in sctp_rcv() local
181 sk = rcvr->sk; in sctp_rcv()
187 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { in sctp_rcv()
195 sk = net->sctp.ctl_sock; in sctp_rcv()
196 ep = sctp_sk(sk)->ep; in sctp_rcv()
216 if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family)) in sctp_rcv()
220 if (sk_filter(sk, skb)) in sctp_rcv()
224 chunk = sctp_chunkify(skb, asoc, sk); in sctp_rcv()
245 bh_lock_sock(sk); in sctp_rcv()
[all …]
Dendpointola.c58 struct sock *sk, in sctp_endpoint_init() argument
61 struct net *net = sock_net(sk); in sctp_endpoint_init()
130 ep->base.sk = sk; in sctp_endpoint_init()
131 sock_hold(ep->base.sk); in sctp_endpoint_init()
139 sk->sk_data_ready = sctp_data_ready; in sctp_endpoint_init()
140 sk->sk_write_space = sctp_write_space; in sctp_endpoint_init()
141 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); in sctp_endpoint_init()
184 struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) in sctp_endpoint_new() argument
193 if (!sctp_endpoint_init(ep, sk, gfp)) in sctp_endpoint_new()
209 struct sock *sk = ep->base.sk; in sctp_endpoint_add_asoc() local
[all …]
Dipv6.c145 struct sock *sk; in sctp_v6_err() local
160 sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); in sctp_v6_err()
164 if (!sk) { in sctp_v6_err()
175 if (ip6_sk_accept_pmtu(sk)) in sctp_v6_err()
176 sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info)); in sctp_v6_err()
180 sctp_icmp_proto_unreachable(sk, asoc, transport); in sctp_v6_err()
185 sctp_icmp_redirect(sk, transport, skb); in sctp_v6_err()
191 np = inet6_sk(sk); in sctp_v6_err()
193 if (!sock_owned_by_user(sk) && np->recverr) { in sctp_v6_err()
194 sk->sk_err = err; in sctp_v6_err()
[all …]
Dulpqueue.c134 int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc) in sctp_clear_pd() argument
136 struct sctp_sock *sp = sctp_sk(sk); in sctp_clear_pd()
144 sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); in sctp_clear_pd()
145 list = (struct list_head *)&sctp_sk(sk)->pd_lobby; in sctp_clear_pd()
163 __skb_queue_tail(&sk->sk_receive_queue, in sctp_clear_pd()
176 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); in sctp_ulpq_set_pd()
187 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); in sctp_ulpq_clear_pd()
195 struct sock *sk = ulpq->asoc->base.sk; in sctp_ulpq_tail_event() local
205 if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) in sctp_ulpq_tail_event()
209 sk_mark_napi_id(sk, skb); in sctp_ulpq_tail_event()
[all …]
Dproc.c214 struct sock *sk; in sctp_eps_seq_show() local
225 sk = epb->sk; in sctp_eps_seq_show()
226 if (!net_eq(sock_net(sk), seq_file_net(seq))) in sctp_eps_seq_show()
228 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk, in sctp_eps_seq_show()
229 sctp_sk(sk)->type, sk->sk_state, hash, in sctp_eps_seq_show()
231 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), in sctp_eps_seq_show()
232 sock_i_ino(sk)); in sctp_eps_seq_show()
322 struct sock *sk; in sctp_assocs_seq_show() local
333 sk = epb->sk; in sctp_assocs_seq_show()
334 if (!net_eq(sock_net(sk), seq_file_net(seq))) in sctp_assocs_seq_show()
[all …]
/linux-4.4.14/net/rxrpc/
Daf_rxrpc.c55 static inline int rxrpc_writable(struct sock *sk) in rxrpc_writable() argument
57 return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf; in rxrpc_writable()
63 static void rxrpc_write_space(struct sock *sk) in rxrpc_write_space() argument
65 _enter("%p", sk); in rxrpc_write_space()
67 if (rxrpc_writable(sk)) { in rxrpc_write_space()
68 struct socket_wq *wq = rcu_dereference(sk->sk_wq); in rxrpc_write_space()
72 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in rxrpc_write_space()
125 struct sock *sk = sock->sk; in rxrpc_bind() local
127 struct rxrpc_sock *rx = rxrpc_sk(sk), *prx; in rxrpc_bind()
137 lock_sock(&rx->sk); in rxrpc_bind()
[all …]
Dar-recvmsg.c51 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); in rxrpc_recvmsg()
66 timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT); in rxrpc_recvmsg()
69 lock_sock(&rx->sk); in rxrpc_recvmsg()
77 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) { in rxrpc_recvmsg()
78 release_sock(&rx->sk); in rxrpc_recvmsg()
86 skb = skb_peek(&rx->sk.sk_receive_queue); in rxrpc_recvmsg()
94 release_sock(&rx->sk); in rxrpc_recvmsg()
95 prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait, in rxrpc_recvmsg()
97 ret = sock_error(&rx->sk); in rxrpc_recvmsg()
101 if (skb_queue_empty(&rx->sk.sk_receive_queue)) { in rxrpc_recvmsg()
[all …]
/linux-4.4.14/net/ax25/
Daf_ax25.c58 static void ax25_free_sock(struct sock *sk) in ax25_free_sock() argument
60 ax25_cb_put(sk_to_ax25(sk)); in ax25_free_sock()
164 if (s->sk && !ax25cmp(&s->source_addr, addr) && in ax25_find_listener()
165 s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) { in ax25_find_listener()
168 sock_hold(s->sk); in ax25_find_listener()
170 return s->sk; in ax25_find_listener()
185 struct sock *sk = NULL; in ax25_get_socket() local
190 if (s->sk && !ax25cmp(&s->source_addr, my_addr) && in ax25_get_socket()
192 s->sk->sk_type == type) { in ax25_get_socket()
193 sk = s->sk; in ax25_get_socket()
[all …]
Dax25_std_timer.c34 struct sock *sk = ax25->sk; in ax25_std_heartbeat_expiry() local
36 if (sk) in ax25_std_heartbeat_expiry()
37 bh_lock_sock(sk); in ax25_std_heartbeat_expiry()
43 if (!sk || sock_flag(sk, SOCK_DESTROY) || in ax25_std_heartbeat_expiry()
44 (sk->sk_state == TCP_LISTEN && in ax25_std_heartbeat_expiry()
45 sock_flag(sk, SOCK_DEAD))) { in ax25_std_heartbeat_expiry()
46 if (sk) { in ax25_std_heartbeat_expiry()
47 sock_hold(sk); in ax25_std_heartbeat_expiry()
49 bh_unlock_sock(sk); in ax25_std_heartbeat_expiry()
50 sock_put(sk); in ax25_std_heartbeat_expiry()
[all …]
Dax25_ds_timer.c97 struct sock *sk=ax25->sk; in ax25_ds_heartbeat_expiry() local
99 if (sk) in ax25_ds_heartbeat_expiry()
100 bh_lock_sock(sk); in ax25_ds_heartbeat_expiry()
107 if (!sk || sock_flag(sk, SOCK_DESTROY) || in ax25_ds_heartbeat_expiry()
108 (sk->sk_state == TCP_LISTEN && in ax25_ds_heartbeat_expiry()
109 sock_flag(sk, SOCK_DEAD))) { in ax25_ds_heartbeat_expiry()
110 if (sk) { in ax25_ds_heartbeat_expiry()
111 sock_hold(sk); in ax25_ds_heartbeat_expiry()
113 bh_unlock_sock(sk); in ax25_ds_heartbeat_expiry()
114 sock_put(sk); in ax25_ds_heartbeat_expiry()
[all …]
Dax25_in.c144 if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) { in ax25_rx_iframe()
145 if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) || in ax25_rx_iframe()
147 if (sock_queue_rcv_skb(ax25->sk, skb) == 0) in ax25_rx_iframe()
191 struct sock *make, *sk; in ax25_rcv() local
263 sk = ax25_get_socket(&dest, &src, SOCK_DGRAM); in ax25_rcv()
264 if (sk != NULL) { in ax25_rcv()
265 bh_lock_sock(sk); in ax25_rcv()
266 if (atomic_read(&sk->sk_rmem_alloc) >= in ax25_rcv()
267 sk->sk_rcvbuf) { in ax25_rcv()
274 if (sock_queue_rcv_skb(sk, skb) != 0) in ax25_rcv()
[all …]
/linux-4.4.14/net/dccp/ccids/
Dccid3.c63 static void ccid3_hc_tx_set_state(struct sock *sk, in ccid3_hc_tx_set_state() argument
66 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); in ccid3_hc_tx_set_state()
70 dccp_role(sk), sk, ccid3_tx_state_name(oldstate), in ccid3_hc_tx_set_state()
85 static inline u64 rfc3390_initial_rate(struct sock *sk) in rfc3390_initial_rate() argument
87 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); in rfc3390_initial_rate()
124 static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) in ccid3_hc_tx_update_x() argument
126 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); in ccid3_hc_tx_update_x()
138 min_rate = rfc3390_initial_rate(sk); in ccid3_hc_tx_update_x()
200 struct sock *sk = (struct sock *)data; in ccid3_hc_tx_no_feedback_timer() local
201 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); in ccid3_hc_tx_no_feedback_timer()
[all …]
Dccid2.c79 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) in ccid2_hc_tx_send_packet() argument
81 if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk))) in ccid2_hc_tx_send_packet()
86 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) in ccid2_change_l_ack_ratio() argument
88 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2); in ccid2_change_l_ack_ratio()
100 dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO, in ccid2_change_l_ack_ratio()
104 static void ccid2_check_l_ack_ratio(struct sock *sk) in ccid2_check_l_ack_ratio() argument
106 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); in ccid2_check_l_ack_ratio()
118 if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd) in ccid2_check_l_ack_ratio()
119 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U); in ccid2_check_l_ack_ratio()
122 static void ccid2_change_l_seq_window(struct sock *sk, u64 val) in ccid2_change_l_seq_window() argument
[all …]
/linux-4.4.14/net/rds/
Dtcp_listen.c81 ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, in rds_tcp_accept_one()
82 sock->sk->sk_type, sock->sk->sk_protocol, in rds_tcp_accept_one()
99 inet = inet_sk(new_sock->sk); in rds_tcp_accept_one()
105 conn = rds_conn_create(sock_net(sock->sk), in rds_tcp_accept_one()
120 struct sock *nsk = new_sock->sk; in rds_tcp_accept_one()
145 void rds_tcp_listen_data_ready(struct sock *sk) in rds_tcp_listen_data_ready() argument
147 void (*ready)(struct sock *sk); in rds_tcp_listen_data_ready()
149 rdsdebug("listen data ready sk %p\n", sk); in rds_tcp_listen_data_ready()
151 read_lock(&sk->sk_callback_lock); in rds_tcp_listen_data_ready()
152 ready = sk->sk_user_data; in rds_tcp_listen_data_ready()
[all …]
Dtcp.c81 return tcp_sk(tc->t_sock->sk)->snd_nxt; in rds_tcp_snd_nxt()
86 return tcp_sk(tc->t_sock->sk)->snd_una; in rds_tcp_snd_una()
93 write_lock_bh(&sock->sk->sk_callback_lock); in rds_tcp_restore_callbacks()
103 sock->sk->sk_write_space = tc->t_orig_write_space; in rds_tcp_restore_callbacks()
104 sock->sk->sk_data_ready = tc->t_orig_data_ready; in rds_tcp_restore_callbacks()
105 sock->sk->sk_state_change = tc->t_orig_state_change; in rds_tcp_restore_callbacks()
106 sock->sk->sk_user_data = NULL; in rds_tcp_restore_callbacks()
108 write_unlock_bh(&sock->sk->sk_callback_lock); in rds_tcp_restore_callbacks()
121 write_lock_bh(&sock->sk->sk_callback_lock); in rds_tcp_set_callbacks()
130 if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready) in rds_tcp_set_callbacks()
[all …]
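
rds_tcp_set_callbacks() and rds_tcp_restore_callbacks() above swap a socket's sk_data_ready/sk_write_space/sk_state_change handlers under sk_callback_lock, remembering the originals so they can be chained to and later put back when the transport lets go of the socket. A user-space sketch of that save/replace/restore pattern; struct sock_cb, struct hook and the function names are illustrative stand-ins.

#include <pthread.h>
#include <stddef.h>

struct sock_cb {
    pthread_mutex_t callback_lock;
    void (*data_ready)(struct sock_cb *);
    void (*write_space)(struct sock_cb *);
    void *user_data;
};

struct hook {
    struct sock_cb *s;
    void (*orig_data_ready)(struct sock_cb *);
    void (*orig_write_space)(struct sock_cb *);
};

static void hook_install(struct hook *h, struct sock_cb *s,
                         void (*data_ready)(struct sock_cb *),
                         void (*write_space)(struct sock_cb *))
{
    pthread_mutex_lock(&s->callback_lock);
    h->s = s;
    h->orig_data_ready  = s->data_ready;    /* remember the originals */
    h->orig_write_space = s->write_space;
    s->user_data   = h;                     /* callbacks find their hook here */
    s->data_ready  = data_ready;
    s->write_space = write_space;
    pthread_mutex_unlock(&s->callback_lock);
}

static void hook_remove(struct hook *h)
{
    struct sock_cb *s = h->s;

    pthread_mutex_lock(&s->callback_lock);
    s->data_ready  = h->orig_data_ready;    /* put the originals back */
    s->write_space = h->orig_write_space;
    s->user_data   = NULL;
    pthread_mutex_unlock(&s->callback_lock);
}

Stashing the hook in user_data is what lets the replacement handlers recover their private state, the same role sk_user_data plays in the callbacks listed above.
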
Daf_rds.c60 struct sock *sk = sock->sk; in rds_release() local
63 if (!sk) in rds_release()
66 rs = rds_sk_to_rs(sk); in rds_release()
68 sock_orphan(sk); in rds_release()
88 sock->sk = NULL; in rds_release()
89 sock_put(sk); in rds_release()
116 struct rds_sock *rs = rds_sk_to_rs(sock->sk); in rds_getname()
158 struct sock *sk = sock->sk; in rds_poll() local
159 struct rds_sock *rs = rds_sk_to_rs(sk); in rds_poll()
163 poll_wait(file, sk_sleep(sk), wait); in rds_poll()
[all …]
Dtcp_connect.c40 void rds_tcp_state_change(struct sock *sk) in rds_tcp_state_change() argument
42 void (*state_change)(struct sock *sk); in rds_tcp_state_change()
46 read_lock(&sk->sk_callback_lock); in rds_tcp_state_change()
47 conn = sk->sk_user_data; in rds_tcp_state_change()
49 state_change = sk->sk_state_change; in rds_tcp_state_change()
55 rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state); in rds_tcp_state_change()
57 switch(sk->sk_state) { in rds_tcp_state_change()
72 read_unlock(&sk->sk_callback_lock); in rds_tcp_state_change()
73 state_change(sk); in rds_tcp_state_change()
146 lock_sock(sock->sk); in rds_tcp_conn_shutdown()
[all …]
Dtcp_send.c108 set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags); in rds_tcp_xmit()
177 void rds_tcp_write_space(struct sock *sk) in rds_tcp_write_space() argument
179 void (*write_space)(struct sock *sk); in rds_tcp_write_space()
183 read_lock(&sk->sk_callback_lock); in rds_tcp_write_space()
184 conn = sk->sk_user_data; in rds_tcp_write_space()
186 write_space = sk->sk_write_space; in rds_tcp_write_space()
199 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) in rds_tcp_write_space()
203 read_unlock(&sk->sk_callback_lock); in rds_tcp_write_space()
217 write_space(sk); in rds_tcp_write_space()
219 if (sk->sk_socket) in rds_tcp_write_space()
[all …]
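The rds/tcp* hits above all follow one pattern: the original sk_data_ready/sk_write_space/sk_state_change pointers and sk_user_data are saved under write_lock_bh(&sk->sk_callback_lock), replaced with RDS trampolines, and each trampoline later chains to the callback it saved. A minimal kernel-style sketch of that pattern follows; struct my_conn, my_data_ready() and my_set_callbacks() are invented stand-ins, not the RDS code itself.

/* Minimal sketch only -- names with a my_ prefix are invented. */
#include <linux/net.h>
#include <net/sock.h>

struct my_conn {
        struct socket   *sock;
        void            (*orig_data_ready)(struct sock *sk);
};

static void my_data_ready(struct sock *sk)
{
        struct my_conn *conn;
        void (*ready)(struct sock *sk) = NULL;

        read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;                /* installed in my_set_callbacks() */
        if (conn) {
                /* ... wake the connection's receive path here ... */
                ready = conn->orig_data_ready;
        }
        read_unlock(&sk->sk_callback_lock);

        if (ready)
                ready(sk);                      /* chain to the saved callback */
}

static void my_set_callbacks(struct socket *sock, struct my_conn *conn)
{
        struct sock *sk = sock->sk;

        write_lock_bh(&sk->sk_callback_lock);
        conn->sock = sock;
        conn->orig_data_ready = sk->sk_data_ready;
        sk->sk_user_data = conn;
        sk->sk_data_ready = my_data_ready;
        write_unlock_bh(&sk->sk_callback_lock);
}

The same save/override/chain shape recurs in the sunrpc/xprtsock.c and drivers/target iSCSI hits later on this page.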
/linux-4.4.14/net/tipc/
Dsocket.c82 struct sock sk; member
105 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
106 static void tipc_data_ready(struct sock *sk);
107 static void tipc_write_space(struct sock *sk);
108 static void tipc_sock_destruct(struct sock *sk);
232 static struct tipc_sock *tipc_sk(const struct sock *sk) in tipc_sk() argument
234 return container_of(sk, struct tipc_sock, sk); in tipc_sk()
247 static void tsk_advance_rx_queue(struct sock *sk) in tsk_advance_rx_queue() argument
249 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); in tsk_advance_rx_queue()
254 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) in tipc_sk_respond() argument
[all …]
Dserver.c95 struct sock *sk; in tipc_conn_kref_release() local
98 sk = sock->sk; in tipc_conn_kref_release()
101 __module_get(sk->sk_prot_creator->owner); in tipc_conn_kref_release()
135 static void sock_data_ready(struct sock *sk) in sock_data_ready() argument
139 read_lock(&sk->sk_callback_lock); in sock_data_ready()
140 con = sock2con(sk); in sock_data_ready()
146 read_unlock(&sk->sk_callback_lock); in sock_data_ready()
149 static void sock_write_space(struct sock *sk) in sock_write_space() argument
153 read_lock(&sk->sk_callback_lock); in sock_write_space()
154 con = sock2con(sk); in sock_write_space()
[all …]
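tipc_sk() above recovers the outer struct tipc_sock from an embedded struct sock with container_of(); the struct sock member sits first so the core socket allocator and (struct sock *) casts keep working. A hedged sketch of that accessor shape (struct my_sock and my_sk() are invented):

/* Sketch: embed struct sock, recover the outer object with container_of(). */
#include <linux/kernel.h>
#include <net/sock.h>

struct my_sock {
        struct sock     sk;             /* kept first so (struct sock *) casts stay valid */
        u32             portid;         /* invented private field */
};

static inline struct my_sock *my_sk(const struct sock *sk)
{
        return container_of(sk, struct my_sock, sk);
}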
/linux-4.4.14/net/irda/
Daf_irda.c84 struct sock *sk; in irda_data_indication() local
88 sk = instance; in irda_data_indication()
90 err = sock_queue_rcv_skb(sk, skb); in irda_data_indication()
112 struct sock *sk; in irda_disconnect_indication() local
122 sk = instance; in irda_disconnect_indication()
123 if (sk == NULL) { in irda_disconnect_indication()
130 bh_lock_sock(sk); in irda_disconnect_indication()
131 if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) { in irda_disconnect_indication()
132 sk->sk_state = TCP_CLOSE; in irda_disconnect_indication()
133 sk->sk_shutdown |= SEND_SHUTDOWN; in irda_disconnect_indication()
[all …]
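The af_irda.c hits show indications arriving from the IrDA stack in softirq context: the socket is taken with bh_lock_sock(), and only sockets that are neither dead nor already closed are moved to TCP_CLOSE and woken. A small hedged sketch of that disconnect handling (my_disconnect_ind() is invented):

/* Sketch only: disconnect indication delivered in BH context. */
#include <net/sock.h>
#include <net/tcp_states.h>

static void my_disconnect_ind(struct sock *sk)
{
        bh_lock_sock(sk);
        if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
                sk->sk_state = TCP_CLOSE;
                sk->sk_shutdown |= SEND_SHUTDOWN;
                sk->sk_state_change(sk);        /* wake anyone blocked in connect/recv */
        }
        bh_unlock_sock(sk);
}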
/linux-4.4.14/net/netfilter/
Dxt_TPROXY.c43 static bool tproxy_sk_is_transparent(struct sock *sk) in tproxy_sk_is_transparent() argument
45 switch (sk->sk_state) { in tproxy_sk_is_transparent()
47 if (inet_twsk(sk)->tw_transparent) in tproxy_sk_is_transparent()
51 if (inet_rsk(inet_reqsk(sk))->no_srccheck) in tproxy_sk_is_transparent()
55 if (inet_sk(sk)->transparent) in tproxy_sk_is_transparent()
59 sock_gen_put(sk); in tproxy_sk_is_transparent()
114 struct sock *sk; in nf_tproxy_get_sock_v4() local
120 sk = inet_lookup_listener(net, &tcp_hashinfo, in nf_tproxy_get_sock_v4()
132 sk = inet_lookup_established(net, &tcp_hashinfo, in nf_tproxy_get_sock_v4()
141 sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, in nf_tproxy_get_sock_v4()
[all …]
Dnf_sockopt.c61 static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf, in nf_sockopt_find() argument
92 static int nf_sockopt(struct sock *sk, u_int8_t pf, int val, in nf_sockopt() argument
98 ops = nf_sockopt_find(sk, pf, val, get); in nf_sockopt()
103 ret = ops->get(sk, val, opt, len); in nf_sockopt()
105 ret = ops->set(sk, val, opt, *len); in nf_sockopt()
111 int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, in nf_setsockopt() argument
114 return nf_sockopt(sk, pf, val, opt, &len, 0); in nf_setsockopt()
118 int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, in nf_getsockopt() argument
121 return nf_sockopt(sk, pf, val, opt, len, 1); in nf_getsockopt()
126 static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val, in compat_nf_sockopt() argument
[all …]
Dxt_socket.c132 static bool xt_socket_sk_is_transparent(struct sock *sk) in xt_socket_sk_is_transparent() argument
134 switch (sk->sk_state) { in xt_socket_sk_is_transparent()
136 return inet_twsk(sk)->tw_transparent; in xt_socket_sk_is_transparent()
139 return inet_rsk(inet_reqsk(sk))->no_srccheck; in xt_socket_sk_is_transparent()
142 return inet_sk(sk)->transparent; in xt_socket_sk_is_transparent()
210 struct sock *sk = skb->sk; in socket_match() local
212 if (!sk) in socket_match()
213 sk = xt_socket_lookup_slow_v4(par->net, skb, par->in); in socket_match()
214 if (sk) { in socket_match()
222 sk_fullsock(sk) && in socket_match()
[all …]
Dnft_meta.c34 struct sock *sk; in nft_meta_get_eval() local
90 sk = skb_to_full_sk(skb); in nft_meta_get_eval()
91 if (!sk || !sk_fullsock(sk)) in nft_meta_get_eval()
94 read_lock_bh(&sk->sk_callback_lock); in nft_meta_get_eval()
95 if (sk->sk_socket == NULL || in nft_meta_get_eval()
96 sk->sk_socket->file == NULL) { in nft_meta_get_eval()
97 read_unlock_bh(&sk->sk_callback_lock); in nft_meta_get_eval()
102 sk->sk_socket->file->f_cred->fsuid); in nft_meta_get_eval()
103 read_unlock_bh(&sk->sk_callback_lock); in nft_meta_get_eval()
106 sk = skb_to_full_sk(skb); in nft_meta_get_eval()
[all …]
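Both xt_TPROXY.c and xt_socket.c above test "transparency" differently per socket state, because time-wait and request minisocks are not full sockets and carry the flag in different structures. The sketch below restates that switch; it mirrors the listed helpers rather than adding anything new.

/* Sketch: state-dependent "is this socket transparent?" test. */
#include <net/inet_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/request_sock.h>
#include <net/tcp_states.h>

static bool sk_is_transparent(struct sock *sk)
{
        switch (sk->sk_state) {
        case TCP_TIME_WAIT:
                return inet_twsk(sk)->tw_transparent;
        case TCP_NEW_SYN_RECV:
                return inet_rsk(inet_reqsk(sk))->no_srccheck;
        default:
                return inet_sk(sk)->transparent;
        }
}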
/linux-4.4.14/crypto/
Dalgif_aead.c57 static inline int aead_sndbuf(struct sock *sk) in aead_sndbuf() argument
59 struct alg_sock *ask = alg_sk(sk); in aead_sndbuf()
62 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - in aead_sndbuf()
66 static inline bool aead_writable(struct sock *sk) in aead_writable() argument
68 return PAGE_SIZE <= aead_sndbuf(sk); in aead_writable()
78 static void aead_put_sgl(struct sock *sk) in aead_put_sgl() argument
80 struct alg_sock *ask = alg_sk(sk); in aead_put_sgl()
100 static void aead_wmem_wakeup(struct sock *sk) in aead_wmem_wakeup() argument
104 if (!aead_writable(sk)) in aead_wmem_wakeup()
108 wq = rcu_dereference(sk->sk_wq); in aead_wmem_wakeup()
[all …]
Dalgif_skcipher.c106 static inline int skcipher_sndbuf(struct sock *sk) in skcipher_sndbuf() argument
108 struct alg_sock *ask = alg_sk(sk); in skcipher_sndbuf()
111 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - in skcipher_sndbuf()
115 static inline bool skcipher_writable(struct sock *sk) in skcipher_writable() argument
117 return PAGE_SIZE <= skcipher_sndbuf(sk); in skcipher_writable()
120 static int skcipher_alloc_sgl(struct sock *sk) in skcipher_alloc_sgl() argument
122 struct alg_sock *ask = alg_sk(sk); in skcipher_alloc_sgl()
132 sgl = sock_kmalloc(sk, sizeof(*sgl) + in skcipher_alloc_sgl()
150 static void skcipher_pull_sgl(struct sock *sk, int used, int put) in skcipher_pull_sgl() argument
152 struct alg_sock *ask = alg_sk(sk); in skcipher_pull_sgl()
[all …]
Dalgif_hash.c46 struct sock *sk = sock->sk; in hash_sendmsg() local
47 struct alg_sock *ask = alg_sk(sk); in hash_sendmsg()
52 if (limit > sk->sk_sndbuf) in hash_sendmsg()
53 limit = sk->sk_sndbuf; in hash_sendmsg()
55 lock_sock(sk); in hash_sendmsg()
99 release_sock(sk); in hash_sendmsg()
107 struct sock *sk = sock->sk; in hash_sendpage() local
108 struct alg_sock *ask = alg_sk(sk); in hash_sendpage()
115 lock_sock(sk); in hash_sendpage()
144 release_sock(sk); in hash_sendpage()
[all …]
Daf_alg.c124 if (sock->sk) in af_alg_release()
125 sock_put(sock->sk); in af_alg_release()
130 void af_alg_release_parent(struct sock *sk) in af_alg_release_parent() argument
132 struct alg_sock *ask = alg_sk(sk); in af_alg_release_parent()
136 sk = ask->parent; in af_alg_release_parent()
137 ask = alg_sk(sk); in af_alg_release_parent()
139 lock_sock(sk); in af_alg_release_parent()
143 release_sock(sk); in af_alg_release_parent()
146 sock_put(sk); in af_alg_release_parent()
153 struct sock *sk = sock->sk; in alg_bind() local
[all …]
Dalgif_rng.c61 struct sock *sk = sock->sk; in rng_recvmsg() local
62 struct alg_sock *ask = alg_sk(sk); in rng_recvmsg()
127 static void rng_sock_destruct(struct sock *sk) in rng_sock_destruct() argument
129 struct alg_sock *ask = alg_sk(sk); in rng_sock_destruct()
132 sock_kfree_s(sk, ctx, ctx->len); in rng_sock_destruct()
133 af_alg_release_parent(sk); in rng_sock_destruct()
136 static int rng_accept_parent(void *private, struct sock *sk) in rng_accept_parent() argument
139 struct alg_sock *ask = alg_sk(sk); in rng_accept_parent()
142 ctx = sock_kmalloc(sk, len, GFP_KERNEL); in rng_accept_parent()
156 sk->sk_destruct = rng_sock_destruct; in rng_accept_parent()
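The algif_aead.c/algif_skcipher.c hits compute writable space the same way: sk_sndbuf rounded down to a page, minus what the request context already holds, with the socket considered writable while at least one page remains. A hedged sketch of that accounting (struct my_ctx and its used counter are invented; in the real files the context hangs off the alg_sock):

/* Sketch only: send-buffer accounting in the algif_* style. */
#include <net/sock.h>
#include <crypto/if_alg.h>

struct my_ctx {
        size_t used;            /* bytes already queued for the cipher (invented) */
};

static inline int my_sndbuf(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct my_ctx *ctx = ask->private;

        return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
                          ctx->used, 0);
}

static inline bool my_writable(struct sock *sk)
{
        return PAGE_SIZE <= my_sndbuf(sk);      /* writable while a full page is left */
}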
/linux-4.4.14/net/atm/
Dsvc.c49 struct sock *sk = sk_atm(vcc); in svc_disconnect() local
55 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); in svc_disconnect()
60 finish_wait(sk_sleep(sk), &wait); in svc_disconnect()
64 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { in svc_disconnect()
76 struct sock *sk = sock->sk; in svc_release() local
79 if (sk) { in svc_release()
98 struct sock *sk = sock->sk; in svc_bind() local
105 lock_sock(sk); in svc_bind()
131 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); in svc_bind()
136 finish_wait(sk_sleep(sk), &wait); in svc_bind()
[all …]
Dcommon.c42 static void __vcc_insert_socket(struct sock *sk) in __vcc_insert_socket() argument
44 struct atm_vcc *vcc = atm_sk(sk); in __vcc_insert_socket()
46 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); in __vcc_insert_socket()
47 sk_add_node(sk, head); in __vcc_insert_socket()
50 void vcc_insert_socket(struct sock *sk) in vcc_insert_socket() argument
53 __vcc_insert_socket(sk); in vcc_insert_socket()
58 static void vcc_remove_socket(struct sock *sk) in vcc_remove_socket() argument
61 sk_del_node_init(sk); in vcc_remove_socket()
68 struct sock *sk = sk_atm(vcc); in alloc_tx() local
70 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { in alloc_tx()
[all …]
Dproc.c70 struct sock *sk; member
74 static inline int compare_family(struct sock *sk, int family) in compare_family() argument
76 return !family || (sk->sk_family == family); in compare_family()
81 struct sock *sk = *sock; in __vcc_walk() local
83 if (sk == SEQ_START_TOKEN) { in __vcc_walk()
87 sk = hlist_empty(head) ? NULL : __sk_head(head); in __vcc_walk()
88 if (sk) in __vcc_walk()
94 for (; sk; sk = sk_next(sk)) { in __vcc_walk()
95 l -= compare_family(sk, family); in __vcc_walk()
99 if (!sk && ++*bucket < VCC_HTABLE_SIZE) { in __vcc_walk()
[all …]
Draw.c25 struct sock *sk = sk_atm(vcc); in atm_push_raw() local
27 skb_queue_tail(&sk->sk_receive_queue, skb); in atm_push_raw()
28 sk->sk_data_ready(sk); in atm_push_raw()
34 struct sock *sk = sk_atm(vcc); in atm_pop_raw() local
37 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize); in atm_pop_raw()
38 atomic_sub(skb->truesize, &sk->sk_wmem_alloc); in atm_pop_raw()
40 sk->sk_write_space(sk); in atm_pop_raw()
Dpvc.c29 struct sock *sk = sock->sk; in pvc_bind() local
39 lock_sock(sk); in pvc_bind()
54 release_sock(sk); in pvc_bind()
67 struct sock *sk = sock->sk; in pvc_setsockopt() local
70 lock_sock(sk); in pvc_setsockopt()
72 release_sock(sk); in pvc_setsockopt()
79 struct sock *sk = sock->sk; in pvc_getsockopt() local
82 lock_sock(sk); in pvc_getsockopt()
84 release_sock(sk); in pvc_getsockopt()
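atm/raw.c above shows the simplest receive/transmit-completion pair: inbound buffers are appended to sk_receive_queue and readers are woken via sk_data_ready(); when the driver pops a transmitted buffer, its truesize is subtracted from sk_wmem_alloc by hand (raw ATM skbs carry no destructor) and sk_write_space() wakes blocked senders. A hedged sketch of that pair (deliver() and complete_tx() are invented wrappers):

/* Sketch only: assumes the skb was charged to sk_wmem_alloc without a
 * destructor, as the ATM core does for raw VCCs. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static void deliver(struct sock *sk, struct sk_buff *skb)
{
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk);                          /* wake readers */
}

static void complete_tx(struct sock *sk, struct sk_buff *skb)
{
        atomic_sub(skb->truesize, &sk->sk_wmem_alloc);  /* return the send-buffer charge */
        dev_kfree_skb_any(skb);                         /* completion may run in IRQ context */
        sk->sk_write_space(sk);                         /* wake writers waiting for space */
}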
/linux-4.4.14/net/sched/
Dem_meta.c275 (unlikely(skb->sk == NULL))
283 dst->value = skb->sk->sk_family; in META_COLLECTOR()
292 dst->value = skb->sk->sk_state; in META_COLLECTOR()
301 dst->value = skb->sk->sk_reuse; in META_COLLECTOR()
311 dst->value = skb->sk->sk_bound_dev_if; in META_COLLECTOR()
321 if (skb->sk->sk_bound_dev_if == 0) { in META_COLLECTOR()
328 dev = dev_get_by_index_rcu(sock_net(skb->sk), in META_COLLECTOR()
329 skb->sk->sk_bound_dev_if); in META_COLLECTOR()
341 dst->value = atomic_read(&skb->sk->sk_refcnt); in META_COLLECTOR()
346 const struct sock *sk = skb_to_full_sk(skb); in META_COLLECTOR() local
[all …]
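The em_meta.c collectors above must not assume a socket is attached to the skb: each one bails out when skb->sk is NULL, and the credential-style collectors go through skb_to_full_sk() so that request socks map back to the listener. A hedged sketch of that guard (meta_bound_dev() is an invented collector):

/* Sketch only: NULL-safe access to socket metadata from qdisc context. */
#include <linux/skbuff.h>
#include <net/inet_sock.h>
#include <net/sock.h>

static int meta_bound_dev(const struct sk_buff *skb)
{
        const struct sock *sk = skb_to_full_sk(skb);    /* maps request socks to the listener */

        if (unlikely(!sk || !sk_fullsock(sk)))
                return -1;                              /* no (full) socket attached */

        return sk->sk_bound_dev_if;
}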
/linux-4.4.14/drivers/net/ppp/
Dpppoe.c92 static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
289 struct sock *sk; in pppoe_flush_dev() local
299 sk = sk_pppox(po); in pppoe_flush_dev()
309 sock_hold(sk); in pppoe_flush_dev()
311 lock_sock(sk); in pppoe_flush_dev()
314 sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { in pppoe_flush_dev()
315 pppox_unbind_sock(sk); in pppoe_flush_dev()
316 sk->sk_state_change(sk); in pppoe_flush_dev()
321 release_sock(sk); in pppoe_flush_dev()
322 sock_put(sk); in pppoe_flush_dev()
[all …]
Dpptp.c173 struct sock *sk = (struct sock *) chan->private; in pptp_xmit() local
174 struct pppox_sock *po = pppox_sk(sk); in pptp_xmit()
175 struct net *net = sock_net(sk); in pptp_xmit()
212 if (skb->sk) in pptp_xmit()
213 skb_set_owner_w(new_skb, skb->sk); in pptp_xmit()
269 if (ip_dont_fragment(sk, &rt->dst)) in pptp_xmit()
289 ip_local_out(net, skb->sk, skb); in pptp_xmit()
297 static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) in pptp_rcv_core() argument
299 struct pppox_sock *po = pppox_sk(sk); in pptp_rcv_core()
305 if (!(sk->sk_state & PPPOX_CONNECTED)) { in pptp_rcv_core()
[all …]
Dpppox.c57 void pppox_unbind_sock(struct sock *sk) in pppox_unbind_sock() argument
61 if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) { in pppox_unbind_sock()
62 ppp_unregister_channel(&pppox_sk(sk)->chan); in pppox_unbind_sock()
63 sk->sk_state = PPPOX_DEAD; in pppox_unbind_sock()
73 struct sock *sk = sock->sk; in pppox_ioctl() local
74 struct pppox_sock *po = pppox_sk(sk); in pppox_ioctl()
77 lock_sock(sk); in pppox_ioctl()
83 if (!(sk->sk_state & PPPOX_CONNECTED)) in pppox_ioctl()
92 sk->sk_state |= PPPOX_BOUND; in pppox_ioctl()
96 rc = pppox_protos[sk->sk_protocol]->ioctl ? in pppox_ioctl()
[all …]
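pppox.c above keeps a bit-mask of PPPOX_* states in sk->sk_state and tests it under lock_sock() before touching the registered ppp channel; pppox_unbind_sock() flips the state to PPPOX_DEAD on teardown. A hedged fragment of that lock-and-test shape (my_ioctl_connected() is invented):

/* Sketch only: state bits checked under the socket lock. */
#include <linux/if_pppox.h>
#include <net/sock.h>

static int my_ioctl_connected(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int rc = -ENOTCONN;

        lock_sock(sk);
        if (sk->sk_state & PPPOX_CONNECTED) {
                /* ... channel is registered, safe to talk to it ... */
                rc = 0;
        }
        release_sock(sk);
        return rc;
}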
/linux-4.4.14/include/linux/
Dudp.h45 #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
46 #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
47 #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
72 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
73 void (*encap_destroy)(struct sock *sk);
76 static inline struct udp_sock *udp_sk(const struct sock *sk) in udp_sk() argument
78 return (struct udp_sock *)sk; in udp_sk()
81 static inline void udp_set_no_check6_tx(struct sock *sk, bool val) in udp_set_no_check6_tx() argument
83 udp_sk(sk)->no_check6_tx = val; in udp_set_no_check6_tx()
86 static inline void udp_set_no_check6_rx(struct sock *sk, bool val) in udp_set_no_check6_rx() argument
[all …]
Dsock_diag.h17 int (*get_info)(struct sk_buff *skb, struct sock *sk);
26 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
27 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
29 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
30 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
34 enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) in sock_diag_destroy_group() argument
36 switch (sk->sk_family) { in sock_diag_destroy_group()
38 switch (sk->sk_protocol) { in sock_diag_destroy_group()
47 switch (sk->sk_protocol) { in sock_diag_destroy_group()
61 bool sock_diag_has_destroy_listeners(const struct sock *sk) in sock_diag_has_destroy_listeners() argument
[all …]
Dipv6.h260 extern int inet6_sk_rebuild_header(struct sock *sk);
272 static inline struct raw6_sock *raw6_sk(const struct sock *sk) in raw6_sk() argument
274 return (struct raw6_sock *)sk; in raw6_sk()
288 #define __ipv6_only_sock(sk) (sk->sk_ipv6only) argument
289 #define ipv6_only_sock(sk) (__ipv6_only_sock(sk)) argument
290 #define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \ argument
291 inet6_sk(sk)->rxopt.bits.rxinfo)
293 static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk) in inet6_rcv_saddr() argument
295 if (sk->sk_family == AF_INET6) in inet6_rcv_saddr()
296 return &sk->sk_v6_rcv_saddr; in inet6_rcv_saddr()
[all …]
Dnetfilter.h56 struct sock *sk; member
68 struct sock *sk, in nf_hook_state_init() argument
77 p->sk = sk; in nf_hook_state_init()
108 int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
110 int (*compat_set)(struct sock *sk, int optval,
115 int (*get)(struct sock *sk, int optval, void __user *user, int *len);
117 int (*compat_get)(struct sock *sk, int optval,
173 struct sock *sk, in nf_hook_thresh() argument
186 pf, indev, outdev, sk, net, okfn); in nf_hook_thresh()
193 struct sock *sk, struct sk_buff *skb, in nf_hook() argument
[all …]
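The udp.h hits rely on plain casts rather than container_of(): struct udp_sock begins with struct inet_sock, which begins with struct sock, so udp_sk() is just a cast, and one-line inline setters wrap individual fields. A tiny hedged illustration (my_udp_option() is invented; it touches the same field as the listed udp_set_no_check6_tx()):

/* Sketch only: cast-style accessor plus a one-field inline setter. */
#include <linux/udp.h>
#include <net/sock.h>

static inline void my_udp_option(struct sock *sk, bool on)
{
        udp_sk(sk)->no_check6_tx = on;  /* valid because struct udp_sock starts with struct sock */
}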
/linux-4.4.14/security/selinux/
Dnetlabel.c81 static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk) in selinux_netlbl_sock_genattr() argument
84 struct sk_security_struct *sksec = sk->sk_security; in selinux_netlbl_sock_genattr()
113 const struct sock *sk, in selinux_netlbl_sock_getattr() argument
116 struct sk_security_struct *sksec = sk->sk_security; in selinux_netlbl_sock_getattr()
244 struct sock *sk; in selinux_netlbl_skbuff_setsid() local
248 sk = skb_to_full_sk(skb); in selinux_netlbl_skbuff_setsid()
249 if (sk != NULL) { in selinux_netlbl_skbuff_setsid()
250 struct sk_security_struct *sksec = sk->sk_security; in selinux_netlbl_skbuff_setsid()
253 secattr = selinux_netlbl_sock_getattr(sk, sid); in selinux_netlbl_skbuff_setsid()
310 void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family) in selinux_netlbl_inet_csk_clone() argument
[all …]
/linux-4.4.14/net/appletalk/
Dddp.c80 static inline void __atalk_insert_socket(struct sock *sk) in __atalk_insert_socket() argument
82 sk_add_node(sk, &atalk_sockets); in __atalk_insert_socket()
85 static inline void atalk_remove_socket(struct sock *sk) in atalk_remove_socket() argument
88 sk_del_node_init(sk); in atalk_remove_socket()
139 static struct sock *atalk_find_or_insert_socket(struct sock *sk, in atalk_find_or_insert_socket() argument
155 __atalk_insert_socket(sk); /* Wheee, it's free, assign and insert. */ in atalk_find_or_insert_socket()
163 struct sock *sk = (struct sock *)data; in atalk_destroy_timer() local
165 if (sk_has_allocations(sk)) { in atalk_destroy_timer()
166 sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME; in atalk_destroy_timer()
167 add_timer(&sk->sk_timer); in atalk_destroy_timer()
[all …]
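ddp.c above defers socket destruction with sk_timer: while the socket still owns buffer memory, the timer is simply re-armed and destruction is retried later. A hedged sketch of that retry loop (my_destroy_timer() and MY_DESTROY_TIME are invented; the unsigned-long callback signature is the pre-4.15 timer style used throughout 4.4):

/* Sketch only: retry destruction until all allocations are gone. */
#include <linux/timer.h>
#include <net/sock.h>

#define MY_DESTROY_TIME (10 * HZ)

static void my_destroy_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;

        if (sk_has_allocations(sk)) {
                sk->sk_timer.expires = jiffies + MY_DESTROY_TIME;
                add_timer(&sk->sk_timer);       /* buffers still charged, try again later */
                return;
        }
        sock_put(sk);                           /* last reference, free the socket */
}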
/linux-4.4.14/net/packet/
Daf_packet.c169 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
217 static void packet_flush_mclist(struct sock *sk);
246 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
247 static void __fanout_link(struct sock *sk, struct packet_sock *po);
340 static void register_prot_hook(struct sock *sk) in register_prot_hook() argument
342 struct packet_sock *po = pkt_sk(sk); in register_prot_hook()
346 __fanout_link(sk, po); in register_prot_hook()
350 sock_hold(sk); in register_prot_hook()
362 static void __unregister_prot_hook(struct sock *sk, bool sync) in __unregister_prot_hook() argument
364 struct packet_sock *po = pkt_sk(sk); in __unregister_prot_hook()
[all …]
/linux-4.4.14/drivers/target/iscsi/
Discsi_target_nego.c406 static void iscsi_target_sk_data_ready(struct sock *sk) in iscsi_target_sk_data_ready() argument
408 struct iscsi_conn *conn = sk->sk_user_data; in iscsi_target_sk_data_ready()
413 write_lock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
414 if (!sk->sk_user_data) { in iscsi_target_sk_data_ready()
415 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
419 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
424 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
429 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
439 write_unlock_bh(&sk->sk_callback_lock); in iscsi_target_sk_data_ready()
446 struct sock *sk; in iscsi_target_set_sock_callbacks() local
[all …]
/linux-4.4.14/drivers/scsi/
Discsi_tcp.c114 static inline int iscsi_sw_sk_state_check(struct sock *sk) in iscsi_sw_sk_state_check() argument
116 struct iscsi_conn *conn = sk->sk_user_data; in iscsi_sw_sk_state_check()
118 if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && in iscsi_sw_sk_state_check()
120 !atomic_read(&sk->sk_rmem_alloc)) { in iscsi_sw_sk_state_check()
128 static void iscsi_sw_tcp_data_ready(struct sock *sk) in iscsi_sw_tcp_data_ready() argument
134 read_lock(&sk->sk_callback_lock); in iscsi_sw_tcp_data_ready()
135 conn = sk->sk_user_data; in iscsi_sw_tcp_data_ready()
137 read_unlock(&sk->sk_callback_lock); in iscsi_sw_tcp_data_ready()
150 tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv); in iscsi_sw_tcp_data_ready()
152 iscsi_sw_sk_state_check(sk); in iscsi_sw_tcp_data_ready()
[all …]
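iscsi_tcp.c above consumes received data directly from ->sk_data_ready() through tcp_read_sock() and a recv actor, instead of calling recvmsg() from process context. A hedged sketch of that shape (my_recv() and my_data_ready() are invented; the actor's return value tells TCP how many bytes were consumed):

/* Sketch only: pull queued TCP data from the data_ready callback. */
#include <linux/fs.h>
#include <net/sock.h>
#include <net/tcp.h>

static int my_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
                   unsigned int offset, size_t len)
{
        /* consume up to len bytes of skb starting at offset; report bytes used */
        return len;
}

static void my_data_ready(struct sock *sk)
{
        read_descriptor_t rd_desc;

        read_lock(&sk->sk_callback_lock);
        rd_desc.arg.data = sk->sk_user_data;    /* per-connection state */
        rd_desc.count = 1;
        tcp_read_sock(sk, &rd_desc, my_recv);
        read_unlock(&sk->sk_callback_lock);
}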
/linux-4.4.14/net/ipx/
Daf_ipx.c114 static void ipx_remove_socket(struct sock *sk) in ipx_remove_socket() argument
117 struct ipx_interface *intrfc = ipx_sk(sk)->intrfc; in ipx_remove_socket()
124 sk_del_node_init(sk); in ipx_remove_socket()
131 static void ipx_destroy_socket(struct sock *sk) in ipx_destroy_socket() argument
133 ipx_remove_socket(sk); in ipx_destroy_socket()
134 skb_queue_purge(&sk->sk_receive_queue); in ipx_destroy_socket()
135 sk_refcnt_debug_dec(sk); in ipx_destroy_socket()
201 static void ipxitf_insert_socket(struct ipx_interface *intrfc, struct sock *sk) in ipxitf_insert_socket() argument
205 ipx_sk(sk)->intrfc = intrfc; in ipxitf_insert_socket()
206 sk_add_node(sk, &intrfc->if_sklist); in ipxitf_insert_socket()
[all …]
/linux-4.4.14/net/sunrpc/
Dxprtsock.c219 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) in xprt_from_sock() argument
221 return (struct rpc_xprt *) sk->sk_user_data; in xprt_from_sock()
456 struct sock *sk = transport->inet; in xs_nospace() local
469 sk->sk_write_pending++; in xs_nospace()
477 sk->sk_write_space(sk); in xs_nospace()
742 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk) in xs_save_old_callbacks() argument
744 transport->old_data_ready = sk->sk_data_ready; in xs_save_old_callbacks()
745 transport->old_state_change = sk->sk_state_change; in xs_save_old_callbacks()
746 transport->old_write_space = sk->sk_write_space; in xs_save_old_callbacks()
747 transport->old_error_report = sk->sk_error_report; in xs_save_old_callbacks()
[all …]
Dsvcsock.c86 struct sock *sk = sock->sk; in svc_reclassify_socket() local
88 WARN_ON_ONCE(sock_owned_by_user(sk)); in svc_reclassify_socket()
89 if (sock_owned_by_user(sk)) in svc_reclassify_socket()
92 switch (sk->sk_family) { in svc_reclassify_socket()
94 sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", in svc_reclassify_socket()
101 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD", in svc_reclassify_socket()
282 const struct sock *sk = svsk->sk_sk; in svc_one_sock_name() local
283 const char *proto_name = sk->sk_protocol == IPPROTO_UDP ? in svc_one_sock_name()
287 switch (sk->sk_family) { in svc_one_sock_name()
291 &inet_sk(sk)->inet_rcv_saddr, in svc_one_sock_name()
[all …]
/linux-4.4.14/net/bluetooth/cmtp/
Dsock.c51 struct sock *sk = sock->sk; in cmtp_sock_release() local
53 BT_DBG("sock %p sk %p", sock, sk); in cmtp_sock_release()
55 if (!sk) in cmtp_sock_release()
58 bt_sock_unlink(&cmtp_sk_list, sk); in cmtp_sock_release()
60 sock_orphan(sk); in cmtp_sock_release()
61 sock_put(sk); in cmtp_sock_release()
90 if (nsock->sk->sk_state != BT_CONNECTED) { in cmtp_sock_ioctl()
201 struct sock *sk; in cmtp_sock_create() local
208 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, kern); in cmtp_sock_create()
209 if (!sk) in cmtp_sock_create()
[all …]
/linux-4.4.14/net/bluetooth/bnep/
Dsock.c38 struct sock *sk = sock->sk; in bnep_sock_release() local
40 BT_DBG("sock %p sk %p", sock, sk); in bnep_sock_release()
42 if (!sk) in bnep_sock_release()
45 bt_sock_unlink(&bnep_sk_list, sk); in bnep_sock_release()
47 sock_orphan(sk); in bnep_sock_release()
48 sock_put(sk); in bnep_sock_release()
77 if (nsock->sk->sk_state != BT_CONNECTED) { in bnep_sock_ioctl()
198 struct sock *sk; in bnep_sock_create() local
205 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern); in bnep_sock_create()
206 if (!sk) in bnep_sock_create()
[all …]
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/socklnd/
Dsocklnd_lib.c65 int caps = conn->ksnc_sock->sk->sk_route_caps; in ksocknal_lib_zc_capable()
132 struct sock *sk = sock->sk; in ksocknal_lib_send_kiov() local
145 if (sk->sk_prot->sendpage != NULL) { in ksocknal_lib_send_kiov()
146 rc = sk->sk_prot->sendpage(sk, page, in ksocknal_lib_send_kiov()
149 rc = tcp_sendpage(sk, page, offset, fragsize, msgflg); in ksocknal_lib_send_kiov()
464 sock->sk->sk_allocation = GFP_NOFS; in ksocknal_lib_setup_sock()
554 struct sock *sk; in ksocknal_lib_push_conn() local
564 sk = conn->ksnc_sock->sk; in ksocknal_lib_push_conn()
565 tp = tcp_sk(sk); in ksocknal_lib_push_conn()
567 lock_sock(sk); in ksocknal_lib_push_conn()
[all …]
/linux-4.4.14/security/smack/
Dsmack_netfilter.c29 struct sock *sk = skb_to_full_sk(skb); in smack_ipv6_output() local
33 if (sk && sk->sk_security) { in smack_ipv6_output()
34 ssp = sk->sk_security; in smack_ipv6_output()
47 struct sock *sk = skb_to_full_sk(skb); in smack_ipv4_output() local
51 if (sk && sk->sk_security) { in smack_ipv4_output()
52 ssp = sk->sk_security; in smack_ipv4_output()
/linux-4.4.14/include/net/phonet/
Dphonet.h37 struct sock sk; member
43 static inline struct pn_sock *pn_sk(struct sock *sk) in pn_sk() argument
45 return (struct pn_sock *)sk; in pn_sk()
54 void pn_sock_hash(struct sock *sk);
55 void pn_sock_unhash(struct sock *sk);
56 int pn_sock_get_port(struct sock *sk, unsigned short sport);
60 int pn_sock_unbind_res(struct sock *sk, u8 res);
61 void pn_sock_unbind_all_res(struct sock *sk);
63 int pn_skb_send(struct sock *sk, struct sk_buff *skb,
/linux-4.4.14/net/bluetooth/hidp/
Dsock.c34 struct sock *sk = sock->sk; in hidp_sock_release() local
36 BT_DBG("sock %p sk %p", sock, sk); in hidp_sock_release()
38 if (!sk) in hidp_sock_release()
41 bt_sock_unlink(&hidp_sk_list, sk); in hidp_sock_release()
43 sock_orphan(sk); in hidp_sock_release()
44 sock_put(sk); in hidp_sock_release()
231 struct sock *sk; in hidp_sock_create() local
238 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern); in hidp_sock_create()
239 if (!sk) in hidp_sock_create()
242 sock_init_data(sock, sk); in hidp_sock_create()
[all …]
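The cmtp, bnep and hidp sock.c hits on this page repeat the same create/release boilerplate: sk_alloc() paired with sock_init_data() on the way in, sock_orphan() plus sock_put() (after unlinking from the protocol's socket list) on the way out. A hedged sketch of both halves (my_proto and the my_sock_* functions are invented, and the bt_sock_link/bt_sock_unlink bookkeeping is elided):

/* Sketch only: minimal protocol-socket create/release pair. */
#include <linux/module.h>
#include <net/sock.h>

static struct proto my_proto = {
        .name           = "MY_PROTO",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct sock),
};

static int my_sock_create(struct net *net, struct socket *sock, int kern)
{
        struct sock *sk;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &my_proto, kern);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);       /* link sk and sock, set defaults */
        return 0;
}

static int my_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (!sk)
                return 0;

        sock_orphan(sk);                /* detach from the struct socket */
        sock->sk = NULL;
        sock_put(sk);                   /* drop the creation reference */
        return 0;
}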
