Lines matching refs: sk (identifier cross-reference, net/iucv/af_iucv.c, the AF_IUCV socket layer)
52 #define __iucv_sock_wait(sk, condition, timeo, ret) \ argument
57 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
67 release_sock(sk); \
69 lock_sock(sk); \
70 ret = sock_error(sk); \
74 finish_wait(sk_sleep(sk), &__wait); \
77 #define iucv_sock_wait(sk, condition, timeo) \ argument
81 __iucv_sock_wait(sk, condition, timeo, __ret); \
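The references at 52-81 are the socket wait helpers. The fragments recover the standard prepare_to_wait()/schedule_timeout() idiom, with the socket lock dropped across the sleep. A reconstruction; the timeout and signal checks inside the loop are the usual kernel idiom, assumed rather than visible in the fragments:

    #define __iucv_sock_wait(sk, condition, timeo, ret)                     \
    do {                                                                    \
            DEFINE_WAIT(__wait);                                            \
            long __timeo = timeo;                                           \
            ret = 0;                                                        \
            prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
            while (!(condition)) {                                          \
                    if (!__timeo) {                 /* timed out */         \
                            ret = -EAGAIN;                                  \
                            break;                                          \
                    }                                                       \
                    if (signal_pending(current)) {  /* interrupted */       \
                            ret = sock_intr_errno(__timeo);                 \
                            break;                                          \
                    }                                                       \
                    release_sock(sk);               /* sleep unlocked */    \
                    __timeo = schedule_timeout(__timeo);                    \
                    lock_sock(sk);                                          \
                    ret = sock_error(sk);           /* peer error? */       \
                    if (ret)                                                \
                            break;                                          \
            }                                                               \
            finish_wait(sk_sleep(sk), &__wait);                             \
    } while (0)

    #define iucv_sock_wait(sk, condition, timeo)                            \
    ({                                                                      \
            int __ret = 0;                                                  \
            if (!(condition))                                               \
                    __iucv_sock_wait(sk, condition, timeo, __ret);          \
            __ret;                                                          \
    })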
85 static void iucv_sock_kill(struct sock *sk);
86 static void iucv_sock_close(struct sock *sk);
151 struct sock *sk; in afiucv_pm_freeze() local
158 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_pm_freeze()
159 iucv = iucv_sk(sk); in afiucv_pm_freeze()
160 switch (sk->sk_state) { in afiucv_pm_freeze()
164 iucv_sever_path(sk, 0); in afiucv_pm_freeze()
188 struct sock *sk; in afiucv_pm_restore_thaw() local
194 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_pm_restore_thaw()
195 switch (sk->sk_state) { in afiucv_pm_restore_thaw()
197 sk->sk_err = EPIPE; in afiucv_pm_restore_thaw()
198 sk->sk_state = IUCV_DISCONN; in afiucv_pm_restore_thaw()
199 sk->sk_state_change(sk); in afiucv_pm_restore_thaw()
271 static int iucv_sock_in_state(struct sock *sk, int state, int state2) in iucv_sock_in_state() argument
273 return (sk->sk_state == state || sk->sk_state == state2); in iucv_sock_in_state()
284 static inline int iucv_below_msglim(struct sock *sk) in iucv_below_msglim() argument
286 struct iucv_sock *iucv = iucv_sk(sk); in iucv_below_msglim()
288 if (sk->sk_state != IUCV_CONNECTED) in iucv_below_msglim()
300 static void iucv_sock_wake_msglim(struct sock *sk) in iucv_sock_wake_msglim() argument
305 wq = rcu_dereference(sk->sk_wq); in iucv_sock_wake_msglim()
308 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in iucv_sock_wake_msglim()
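The helpers at 271-308 gate the send path: iucv_below_msglim() only passes while the socket is IUCV_CONNECTED and under the transport's message limit (the path msglim on the classic transport, the peer-announced window on the HiperSockets transport), and writers parked by the limit are woken through the socket wait queue. A sketch of the wakeup helper, assuming the usual RCU-protected sk_wq access:

    static void iucv_sock_wake_msglim(struct sock *sk)
    {
            struct socket_wq *wq;

            rcu_read_lock();
            wq = rcu_dereference(sk->sk_wq);
            if (skwq_has_sleeper(wq))       /* writers blocked in poll/sendmsg */
                    wake_up_interruptible_all(&wq->wait);
            sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
            rcu_read_unlock();
    }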
383 struct sock *sk; in __iucv_get_sock_by_name() local
385 sk_for_each(sk, &iucv_sk_list.head) in __iucv_get_sock_by_name()
386 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) in __iucv_get_sock_by_name()
387 return sk; in __iucv_get_sock_by_name()
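Name lookup at 383-387 is a linear walk of the global iucv_sk_list; src_name is an 8-byte blank-padded field, hence memcmp() with a fixed length rather than strcmp(). Reconstructed:

    static struct sock *__iucv_get_sock_by_name(char *nm)
    {
            struct sock *sk;

            sk_for_each(sk, &iucv_sk_list.head)
                    if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                            return sk;

            return NULL;
    }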
392 static void iucv_sock_destruct(struct sock *sk) in iucv_sock_destruct() argument
394 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_destruct()
395 skb_queue_purge(&sk->sk_error_queue); in iucv_sock_destruct()
397 sk_mem_reclaim(sk); in iucv_sock_destruct()
399 if (!sock_flag(sk, SOCK_DEAD)) { in iucv_sock_destruct()
400 pr_err("Attempt to release alive iucv socket %p\n", sk); in iucv_sock_destruct()
404 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in iucv_sock_destruct()
405 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); in iucv_sock_destruct()
406 WARN_ON(sk->sk_wmem_queued); in iucv_sock_destruct()
407 WARN_ON(sk->sk_forward_alloc); in iucv_sock_destruct()
413 struct sock *sk; in iucv_sock_cleanup_listen() local
416 while ((sk = iucv_accept_dequeue(parent, NULL))) { in iucv_sock_cleanup_listen()
417 iucv_sock_close(sk); in iucv_sock_cleanup_listen()
418 iucv_sock_kill(sk); in iucv_sock_cleanup_listen()
425 static void iucv_sock_kill(struct sock *sk) in iucv_sock_kill() argument
427 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) in iucv_sock_kill()
430 iucv_sock_unlink(&iucv_sk_list, sk); in iucv_sock_kill()
431 sock_set_flag(sk, SOCK_DEAD); in iucv_sock_kill()
432 sock_put(sk); in iucv_sock_kill()
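iucv_sock_kill() (425-432) only frees a socket that is both zapped and no longer attached to a struct socket; the final sock_put() drops the reference taken when the sock was linked into the list. Reconstructed from the fragments:

    static void iucv_sock_kill(struct sock *sk)
    {
            /* still bound to a struct socket, or not yet zapped: not ours to free */
            if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                    return;

            iucv_sock_unlink(&iucv_sk_list, sk);
            sock_set_flag(sk, SOCK_DEAD);
            sock_put(sk);
    }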
436 static void iucv_sever_path(struct sock *sk, int with_user_data) in iucv_sever_path() argument
439 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sever_path()
456 static int iucv_send_ctrl(struct sock *sk, u8 flags) in iucv_send_ctrl() argument
463 skb = sock_alloc_send_skb(sk, blen, 1, &err); in iucv_send_ctrl()
466 err = afiucv_hs_send(NULL, sk, skb, flags); in iucv_send_ctrl()
472 static void iucv_sock_close(struct sock *sk) in iucv_sock_close() argument
474 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_close()
478 lock_sock(sk); in iucv_sock_close()
480 switch (sk->sk_state) { in iucv_sock_close()
482 iucv_sock_cleanup_listen(sk); in iucv_sock_close()
487 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in iucv_sock_close()
488 sk->sk_state = IUCV_DISCONN; in iucv_sock_close()
489 sk->sk_state_change(sk); in iucv_sock_close()
492 sk->sk_state = IUCV_CLOSING; in iucv_sock_close()
493 sk->sk_state_change(sk); in iucv_sock_close()
496 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) in iucv_sock_close()
497 timeo = sk->sk_lingertime; in iucv_sock_close()
500 iucv_sock_wait(sk, in iucv_sock_close()
501 iucv_sock_in_state(sk, IUCV_CLOSED, 0), in iucv_sock_close()
506 sk->sk_state = IUCV_CLOSED; in iucv_sock_close()
507 sk->sk_state_change(sk); in iucv_sock_close()
509 sk->sk_err = ECONNRESET; in iucv_sock_close()
510 sk->sk_state_change(sk); in iucv_sock_close()
516 iucv_sever_path(sk, 1); in iucv_sock_close()
522 sk->sk_bound_dev_if = 0; in iucv_sock_close()
526 sock_set_flag(sk, SOCK_ZAPPED); in iucv_sock_close()
528 release_sock(sk); in iucv_sock_close()
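iucv_sock_close() (472-528) is a small state machine: a listener drains its accept queue, a connected socket sends AF_IUCV_FLAG_FIN and may linger until the peer confirms the close, and every path ends with the IUCV path severed and SOCK_ZAPPED set. A condensed skeleton; the transport checks and error handling of the real function are simplified here:

    static void iucv_sock_close(struct sock *sk)
    {
            unsigned long timeo;

            lock_sock(sk);
            switch (sk->sk_state) {
            case IUCV_LISTEN:
                    iucv_sock_cleanup_listen(sk);
                    break;

            case IUCV_CONNECTED:
                    /* HiperSockets transport: announce the close to the peer */
                    iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                    sk->sk_state = IUCV_DISCONN;
                    sk->sk_state_change(sk);
                    /* fall through */
            case IUCV_DISCONN:
                    sk->sk_state = IUCV_CLOSING;
                    sk->sk_state_change(sk);

                    /* honour SO_LINGER while waiting for the peer */
                    timeo = 0;
                    if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                            timeo = sk->sk_lingertime;
                    iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CLOSED, 0),
                                   timeo);
                    /* fall through */
            case IUCV_CLOSING:
                    sk->sk_state = IUCV_CLOSED;
                    sk->sk_state_change(sk);
                    sk->sk_err = ECONNRESET;
                    sk->sk_state_change(sk);
                    /* fall through */
            default:
                    iucv_sever_path(sk, 1);
                    break;
            }

            sk->sk_bound_dev_if = 0;
            sock_set_flag(sk, SOCK_ZAPPED);
            release_sock(sk);
    }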
531 static void iucv_sock_init(struct sock *sk, struct sock *parent) in iucv_sock_init() argument
534 sk->sk_type = parent->sk_type; in iucv_sock_init()
539 struct sock *sk; in iucv_sock_alloc() local
542 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern); in iucv_sock_alloc()
543 if (!sk) in iucv_sock_alloc()
545 iucv = iucv_sk(sk); in iucv_sock_alloc()
547 sock_init_data(sock, sk); in iucv_sock_alloc()
568 sk->sk_destruct = iucv_sock_destruct; in iucv_sock_alloc()
569 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; in iucv_sock_alloc()
570 sk->sk_allocation = GFP_DMA; in iucv_sock_alloc()
572 sock_reset_flag(sk, SOCK_ZAPPED); in iucv_sock_alloc()
574 sk->sk_protocol = proto; in iucv_sock_alloc()
575 sk->sk_state = IUCV_OPEN; in iucv_sock_alloc()
577 iucv_sock_link(&iucv_sk_list, sk); in iucv_sock_alloc()
578 return sk; in iucv_sock_alloc()
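Allocation (539-578) wires a generic struct sock into the AF_IUCV lifecycle: GFP_DMA allocation because IUCV buffers must be 31-bit addressable, a private destructor, and immediate linkage into iucv_sk_list. Condensed sketch; the queue and lock initializers are elided:

    static struct sock *iucv_sock_alloc(struct socket *sock, int proto,
                                        gfp_t prio, int kern)
    {
            struct sock *sk;

            sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
            if (!sk)
                    return NULL;

            sock_init_data(sock, sk);
            /* ... init accept_q, backlog_skb_q, message_q, send_skb_q ... */

            sk->sk_destruct   = iucv_sock_destruct;
            sk->sk_sndtimeo   = IUCV_CONN_TIMEOUT;
            sk->sk_allocation = GFP_DMA;    /* classic IUCV writes below 2 GB */

            sock_reset_flag(sk, SOCK_ZAPPED);
            sk->sk_protocol = proto;
            sk->sk_state    = IUCV_OPEN;

            iucv_sock_link(&iucv_sk_list, sk);
            return sk;
    }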
585 struct sock *sk; in iucv_sock_create() local
604 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern); in iucv_sock_create()
605 if (!sk) in iucv_sock_create()
608 iucv_sock_init(sk, NULL); in iucv_sock_create()
613 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_link() argument
616 sk_add_node(sk, &l->head); in iucv_sock_link()
620 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_unlink() argument
623 sk_del_node_init(sk); in iucv_sock_unlink()
627 void iucv_accept_enqueue(struct sock *parent, struct sock *sk) in iucv_accept_enqueue() argument
632 sock_hold(sk); in iucv_accept_enqueue()
634 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); in iucv_accept_enqueue()
636 iucv_sk(sk)->parent = parent; in iucv_accept_enqueue()
640 void iucv_accept_unlink(struct sock *sk) in iucv_accept_unlink() argument
643 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); in iucv_accept_unlink()
646 list_del_init(&iucv_sk(sk)->accept_q); in iucv_accept_unlink()
648 sk_acceptq_removed(iucv_sk(sk)->parent); in iucv_accept_unlink()
649 iucv_sk(sk)->parent = NULL; in iucv_accept_unlink()
650 sock_put(sk); in iucv_accept_unlink()
656 struct sock *sk; in iucv_accept_dequeue() local
659 sk = (struct sock *) isk; in iucv_accept_dequeue()
660 lock_sock(sk); in iucv_accept_dequeue()
662 if (sk->sk_state == IUCV_CLOSED) { in iucv_accept_dequeue()
663 iucv_accept_unlink(sk); in iucv_accept_dequeue()
664 release_sock(sk); in iucv_accept_dequeue()
668 if (sk->sk_state == IUCV_CONNECTED || in iucv_accept_dequeue()
669 sk->sk_state == IUCV_DISCONN || in iucv_accept_dequeue()
671 iucv_accept_unlink(sk); in iucv_accept_dequeue()
673 sock_graft(sk, newsock); in iucv_accept_dequeue()
675 release_sock(sk); in iucv_accept_dequeue()
676 return sk; in iucv_accept_dequeue()
679 release_sock(sk); in iucv_accept_dequeue()
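The accept machinery (627-679) chains child iucv_socks off the listener's accept_q; each queued child pins a socket reference that unlink drops again, and the dequeue walk grafts the first usable child onto the new struct socket. Sketch of the enqueue/unlink pair, with the accept_q spinlock elided:

    void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
    {
            struct iucv_sock *par = iucv_sk(parent);

            sock_hold(sk);                  /* the queue owns a reference */
            list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
            iucv_sk(sk)->parent = parent;
            sk_acceptq_added(parent);
    }

    void iucv_accept_unlink(struct sock *sk)
    {
            list_del_init(&iucv_sk(sk)->accept_q);
            sk_acceptq_removed(iucv_sk(sk)->parent);
            iucv_sk(sk)->parent = NULL;
            sock_put(sk);                   /* drop the queue's reference */
    }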
701 struct sock *sk = sock->sk; in iucv_sock_bind() local
714 lock_sock(sk); in iucv_sock_bind()
715 if (sk->sk_state != IUCV_OPEN) { in iucv_sock_bind()
722 iucv = iucv_sk(sk); in iucv_sock_bind()
747 sk->sk_bound_dev_if = dev->ifindex; in iucv_sock_bind()
750 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
764 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
776 release_sock(sk); in iucv_sock_bind()
781 static int iucv_sock_autobind(struct sock *sk) in iucv_sock_autobind() argument
783 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_autobind()
804 struct sock *sk = sock->sk; in afiucv_path_connect() local
805 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_path_connect()
822 sk); in afiucv_path_connect()
851 struct sock *sk = sock->sk; in iucv_sock_connect() local
852 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_connect()
858 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) in iucv_sock_connect()
861 if (sk->sk_state == IUCV_OPEN && in iucv_sock_connect()
865 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) in iucv_sock_connect()
868 if (sk->sk_state == IUCV_OPEN) { in iucv_sock_connect()
869 err = iucv_sock_autobind(sk); in iucv_sock_connect()
874 lock_sock(sk); in iucv_sock_connect()
881 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); in iucv_sock_connect()
887 if (sk->sk_state != IUCV_CONNECTED) in iucv_sock_connect()
888 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, in iucv_sock_connect()
890 sock_sndtimeo(sk, flags & O_NONBLOCK)); in iucv_sock_connect()
892 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) in iucv_sock_connect()
896 iucv_sever_path(sk, 0); in iucv_sock_connect()
899 release_sock(sk); in iucv_sock_connect()
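bind/connect (701-899) implement BSD semantics over IUCV addressing: an 8-byte blank-padded z/VM user ID plus an 8-byte application name. From user space that looks roughly like the following; AF_IUCV is 32 on Linux, the struct layout mirrors the kernel's af_iucv header, and the peer IDs ("PEERGUES", "APPSRV  ") are placeholders:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_IUCV
    #define AF_IUCV 32
    #endif

    /* layout as defined by the kernel's af_iucv header */
    struct sockaddr_iucv {
            sa_family_t    siucv_family;
            unsigned short siucv_port;        /* reserved, must be 0 */
            unsigned int   siucv_addr;        /* reserved, must be 0 */
            char           siucv_nodeid[8];   /* reserved, blanks */
            char           siucv_user_id[8];  /* z/VM user id, blank padded */
            char           siucv_name[8];     /* application name, blank padded */
    };

    int main(void)
    {
            struct sockaddr_iucv addr;
            int fd = socket(AF_IUCV, SOCK_STREAM, 0);

            if (fd < 0) {
                    perror("socket(AF_IUCV)");  /* kernel without af_iucv */
                    return 1;
            }

            memset(&addr, 0, sizeof(addr));
            addr.siucv_family = AF_IUCV;
            memcpy(addr.siucv_user_id, "PEERGUES", 8); /* placeholder guest */
            memcpy(addr.siucv_name,    "APPSRV  ", 8); /* placeholder name  */

            /* enters iucv_sock_connect(); autobinds if not yet bound */
            if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                    perror("connect");

            close(fd);
            return 0;
    }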
906 struct sock *sk = sock->sk; in iucv_sock_listen() local
909 lock_sock(sk); in iucv_sock_listen()
912 if (sk->sk_state != IUCV_BOUND) in iucv_sock_listen()
918 sk->sk_max_ack_backlog = backlog; in iucv_sock_listen()
919 sk->sk_ack_backlog = 0; in iucv_sock_listen()
920 sk->sk_state = IUCV_LISTEN; in iucv_sock_listen()
924 release_sock(sk); in iucv_sock_listen()
933 struct sock *sk = sock->sk, *nsk; in iucv_sock_accept() local
937 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
939 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
944 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); in iucv_sock_accept()
947 add_wait_queue_exclusive(sk_sleep(sk), &wait); in iucv_sock_accept()
948 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { in iucv_sock_accept()
955 release_sock(sk); in iucv_sock_accept()
957 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
959 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
971 remove_wait_queue(sk_sleep(sk), &wait); in iucv_sock_accept()
979 release_sock(sk); in iucv_sock_accept()
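iucv_sock_accept() (933-979) is the classic exclusive-waiter loop: park on the listener's wait queue, drop the lock while sleeping, and re-check the listen state after each wakeup because close() may have raced in. The loop as recoverable from the fragments (wait is a DECLARE_WAITQUEUE(wait, current) entry):

    timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
    add_wait_queue_exclusive(sk_sleep(sk), &wait);
    while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (!timeo) {
                    err = -EAGAIN;
                    break;
            }

            release_sock(sk);
            timeo = schedule_timeout(timeo);
            lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

            if (sk->sk_state != IUCV_LISTEN) {      /* raced with close() */
                    err = -EBADFD;
                    break;
            }

            if (signal_pending(current)) {
                    err = sock_intr_errno(timeo);
                    break;
            }
    }
    set_current_state(TASK_RUNNING);
    remove_wait_queue(sk_sleep(sk), &wait);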
987 struct sock *sk = sock->sk; in iucv_sock_getname() local
988 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getname()
1034 struct sock *sk = sock->sk; in iucv_sock_sendmsg() local
1035 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_sendmsg()
1046 err = sock_error(sk); in iucv_sock_sendmsg()
1054 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) in iucv_sock_sendmsg()
1057 lock_sock(sk); in iucv_sock_sendmsg()
1059 if (sk->sk_shutdown & SEND_SHUTDOWN) { in iucv_sock_sendmsg()
1065 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1114 skb = sock_alloc_send_skb(sk, in iucv_sock_sendmsg()
1118 skb = sock_alloc_send_skb(sk, len, noblock, &err); in iucv_sock_sendmsg()
1129 timeo = sock_sndtimeo(sk, noblock); in iucv_sock_sendmsg()
1130 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); in iucv_sock_sendmsg()
1135 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1146 err = afiucv_hs_send(&txmsg, sk, skb, 0); in iucv_sock_sendmsg()
1194 release_sock(sk); in iucv_sock_sendmsg()
1200 release_sock(sk); in iucv_sock_sendmsg()
1208 static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) in iucv_fragment_skb() argument
1215 if (dataleft >= sk->sk_rcvbuf / 4) in iucv_fragment_skb()
1216 size = sk->sk_rcvbuf / 4; in iucv_fragment_skb()
1236 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); in iucv_fragment_skb()
1246 static void iucv_process_message(struct sock *sk, struct sk_buff *skb, in iucv_process_message() argument
1276 if (sk->sk_type == SOCK_STREAM && in iucv_process_message()
1277 skb->truesize >= sk->sk_rcvbuf / 4) { in iucv_process_message()
1278 rc = iucv_fragment_skb(sk, skb, len); in iucv_process_message()
1285 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); in iucv_process_message()
1294 if (sock_queue_rcv_skb(sk, skb)) in iucv_process_message()
1295 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); in iucv_process_message()
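On SOCK_STREAM, an inbound message whose skb would exceed a quarter of the receive buffer is split (1208-1236) so a single large IUCV message cannot monopolize sk_rmem; the pieces land on the private backlog_skb_q and are promoted to sk_receive_queue as space permits (1294-1295). A condensed sketch of the split loop; the control-buffer metadata copy of the real function is omitted:

    static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
    {
            int dataleft = len, size, copied = 0;
            struct sk_buff *nskb;

            while (dataleft) {
                    /* cap each fragment at a quarter of the receive buffer */
                    size = (dataleft >= sk->sk_rcvbuf / 4) ? sk->sk_rcvbuf / 4
                                                           : dataleft;

                    nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
                    if (!nskb)
                            return -ENOMEM;

                    memcpy(skb_put(nskb, size), skb->data + copied, size);
                    copied   += size;
                    dataleft -= size;

                    skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
            }
            return 0;
    }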
1302 static void iucv_process_message_q(struct sock *sk) in iucv_process_message_q() argument
1304 struct iucv_sock *iucv = iucv_sk(sk); in iucv_process_message_q()
1312 iucv_process_message(sk, skb, p->path, &p->msg); in iucv_process_message_q()
1324 struct sock *sk = sock->sk; in iucv_sock_recvmsg() local
1325 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_recvmsg()
1331 if ((sk->sk_state == IUCV_DISCONN) && in iucv_sock_recvmsg()
1333 skb_queue_empty(&sk->sk_receive_queue) && in iucv_sock_recvmsg()
1342 skb = skb_recv_datagram(sk, flags, noblock, &err); in iucv_sock_recvmsg()
1344 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_recvmsg()
1353 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; in iucv_sock_recvmsg()
1358 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1363 if (sk->sk_type == SOCK_SEQPACKET) { in iucv_sock_recvmsg()
1378 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1386 if (sk->sk_type == SOCK_STREAM) { in iucv_sock_recvmsg()
1389 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1399 iucv_sock_close(sk); in iucv_sock_recvmsg()
1409 if (sock_queue_rcv_skb(sk, rskb)) { in iucv_sock_recvmsg()
1419 iucv_process_message_q(sk); in iucv_sock_recvmsg()
1422 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); in iucv_sock_recvmsg()
1424 sk->sk_state = IUCV_DISCONN; in iucv_sock_recvmsg()
1425 sk->sk_state_change(sk); in iucv_sock_recvmsg()
1434 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) in iucv_sock_recvmsg()
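After recvmsg() consumes an skb, the tail of the function (1399-1425) refills sk_receive_queue from the backlog, then pulls any still-parked IUCV messages, and on the HiperSockets transport signals the reopened window with AF_IUCV_FLAG_WIN. A sketch of that drain, with the message_q locking and window bookkeeping of the real code elided:

    /* refill sk_receive_queue from the backlog */
    rskb = skb_dequeue(&iucv->backlog_skb_q);
    while (rskb) {
            if (sock_queue_rcv_skb(sk, rskb)) {
                    /* receive buffer full again: push back and stop */
                    skb_queue_head(&iucv->backlog_skb_q, rskb);
                    break;
            }
            rskb = skb_dequeue(&iucv->backlog_skb_q);
    }
    if (skb_queue_empty(&iucv->backlog_skb_q)) {
            if (!list_empty(&iucv->message_q.list))
                    iucv_process_message_q(sk);     /* pull queued IUCV msgs */
            if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                    /* tell the peer our window reopened */
                    if (iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN)) {
                            sk->sk_state = IUCV_DISCONN;
                            sk->sk_state_change(sk);
                    }
            }
    }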
1443 struct sock *sk; in iucv_accept_poll() local
1446 sk = (struct sock *) isk; in iucv_accept_poll()
1448 if (sk->sk_state == IUCV_CONNECTED) in iucv_accept_poll()
1458 struct sock *sk = sock->sk; in iucv_sock_poll() local
1461 sock_poll_wait(file, sk_sleep(sk), wait); in iucv_sock_poll()
1463 if (sk->sk_state == IUCV_LISTEN) in iucv_sock_poll()
1464 return iucv_accept_poll(sk); in iucv_sock_poll()
1466 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in iucv_sock_poll()
1468 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); in iucv_sock_poll()
1470 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_poll()
1473 if (sk->sk_shutdown == SHUTDOWN_MASK) in iucv_sock_poll()
1476 if (!skb_queue_empty(&sk->sk_receive_queue) || in iucv_sock_poll()
1477 (sk->sk_shutdown & RCV_SHUTDOWN)) in iucv_sock_poll()
1480 if (sk->sk_state == IUCV_CLOSED) in iucv_sock_poll()
1483 if (sk->sk_state == IUCV_DISCONN) in iucv_sock_poll()
1486 if (sock_writeable(sk) && iucv_below_msglim(sk)) in iucv_sock_poll()
1489 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in iucv_sock_poll()
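iucv_sock_poll() (1456-1489) composes the mask from the usual ingredients; note that writability additionally requires iucv_below_msglim(), so a full peer window reads as not writable even when sk_wmem has room. Reconstruction:

    static unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                                       poll_table *wait)
    {
            struct sock *sk = sock->sk;
            unsigned int mask = 0;

            sock_poll_wait(file, sk_sleep(sk), wait);

            if (sk->sk_state == IUCV_LISTEN)
                    return iucv_accept_poll(sk);

            if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                    mask |= POLLERR |
                            (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

            if (sk->sk_shutdown & RCV_SHUTDOWN)
                    mask |= POLLRDHUP;
            if (sk->sk_shutdown == SHUTDOWN_MASK)
                    mask |= POLLHUP;

            if (!skb_queue_empty(&sk->sk_receive_queue) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
                    mask |= POLLIN | POLLRDNORM;

            if (sk->sk_state == IUCV_CLOSED)
                    mask |= POLLHUP;
            if (sk->sk_state == IUCV_DISCONN)
                    mask |= POLLIN;

            if (sock_writeable(sk) && iucv_below_msglim(sk))
                    mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
            else
                    sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

            return mask;
    }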
1496 struct sock *sk = sock->sk; in iucv_sock_shutdown() local
1497 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_shutdown()
1506 lock_sock(sk); in iucv_sock_shutdown()
1507 switch (sk->sk_state) { in iucv_sock_shutdown()
1538 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); in iucv_sock_shutdown()
1541 sk->sk_shutdown |= how; in iucv_sock_shutdown()
1550 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_shutdown()
1554 sk->sk_state_change(sk); in iucv_sock_shutdown()
1557 release_sock(sk); in iucv_sock_shutdown()
1563 struct sock *sk = sock->sk; in iucv_sock_release() local
1566 if (!sk) in iucv_sock_release()
1569 iucv_sock_close(sk); in iucv_sock_release()
1571 sock_orphan(sk); in iucv_sock_release()
1572 iucv_sock_kill(sk); in iucv_sock_release()
1580 struct sock *sk = sock->sk; in iucv_sock_setsockopt() local
1581 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_setsockopt()
1596 lock_sock(sk); in iucv_sock_setsockopt()
1605 switch (sk->sk_state) { in iucv_sock_setsockopt()
1622 release_sock(sk); in iucv_sock_setsockopt()
1630 struct sock *sk = sock->sk; in iucv_sock_getsockopt() local
1631 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getsockopt()
1651 lock_sock(sk); in iucv_sock_getsockopt()
1654 release_sock(sk); in iucv_sock_getsockopt()
1657 if (sk->sk_state == IUCV_OPEN) in iucv_sock_getsockopt()
1683 struct sock *sk, *nsk; in iucv_callback_connreq() local
1692 sk = NULL; in iucv_callback_connreq()
1693 sk_for_each(sk, &iucv_sk_list.head) in iucv_callback_connreq()
1694 if (sk->sk_state == IUCV_LISTEN && in iucv_callback_connreq()
1695 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { in iucv_callback_connreq()
1700 iucv = iucv_sk(sk); in iucv_callback_connreq()
1708 bh_lock_sock(sk); in iucv_callback_connreq()
1714 if (sk->sk_state != IUCV_LISTEN) { in iucv_callback_connreq()
1721 if (sk_acceptq_is_full(sk)) { in iucv_callback_connreq()
1728 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); in iucv_callback_connreq()
1736 iucv_sock_init(nsk, sk); in iucv_callback_connreq()
1761 iucv_accept_enqueue(sk, nsk); in iucv_callback_connreq()
1765 sk->sk_data_ready(sk); in iucv_callback_connreq()
1768 bh_unlock_sock(sk); in iucv_callback_connreq()
1774 struct sock *sk = path->private; in iucv_callback_connack() local
1776 sk->sk_state = IUCV_CONNECTED; in iucv_callback_connack()
1777 sk->sk_state_change(sk); in iucv_callback_connack()
1782 struct sock *sk = path->private; in iucv_callback_rx() local
1783 struct iucv_sock *iucv = iucv_sk(sk); in iucv_callback_rx()
1788 if (sk->sk_shutdown & RCV_SHUTDOWN) { in iucv_callback_rx()
1799 len = atomic_read(&sk->sk_rmem_alloc); in iucv_callback_rx()
1801 if (len > sk->sk_rcvbuf) in iucv_callback_rx()
1808 iucv_process_message(sk, skb, path, msg); in iucv_callback_rx()
1827 struct sock *sk = path->private; in iucv_callback_txdone() local
1829 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; in iucv_callback_txdone()
1833 bh_lock_sock(sk); in iucv_callback_txdone()
1852 iucv_sock_wake_msglim(sk); in iucv_callback_txdone()
1856 if (sk->sk_state == IUCV_CLOSING) { in iucv_callback_txdone()
1857 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { in iucv_callback_txdone()
1858 sk->sk_state = IUCV_CLOSED; in iucv_callback_txdone()
1859 sk->sk_state_change(sk); in iucv_callback_txdone()
1862 bh_unlock_sock(sk); in iucv_callback_txdone()
1868 struct sock *sk = path->private; in iucv_callback_connrej() local
1870 if (sk->sk_state == IUCV_CLOSED) in iucv_callback_connrej()
1873 bh_lock_sock(sk); in iucv_callback_connrej()
1874 iucv_sever_path(sk, 1); in iucv_callback_connrej()
1875 sk->sk_state = IUCV_DISCONN; in iucv_callback_connrej()
1877 sk->sk_state_change(sk); in iucv_callback_connrej()
1878 bh_unlock_sock(sk); in iucv_callback_connrej()
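The classic-transport path callbacks (1774-1893) share one shape: bh_lock_sock(), flip sk_state, fire sk_state_change(). The peer-sever handler, recoverable almost verbatim from the fragments:

    static void iucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
    {
            struct sock *sk = path->private;

            if (sk->sk_state == IUCV_CLOSED)
                    return;

            bh_lock_sock(sk);
            iucv_sever_path(sk, 1);         /* drop the IUCV path first */
            sk->sk_state = IUCV_DISCONN;
            sk->sk_state_change(sk);
            bh_unlock_sock(sk);
    }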
1886 struct sock *sk = path->private; in iucv_callback_shutdown() local
1888 bh_lock_sock(sk); in iucv_callback_shutdown()
1889 if (sk->sk_state != IUCV_CLOSED) { in iucv_callback_shutdown()
1890 sk->sk_shutdown |= SEND_SHUTDOWN; in iucv_callback_shutdown()
1891 sk->sk_state_change(sk); in iucv_callback_shutdown()
1893 bh_unlock_sock(sk); in iucv_callback_shutdown()
1921 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_syn() argument
1928 iucv = iucv_sk(sk); in afiucv_hs_callback_syn()
1938 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); in afiucv_hs_callback_syn()
1939 bh_lock_sock(sk); in afiucv_hs_callback_syn()
1940 if ((sk->sk_state != IUCV_LISTEN) || in afiucv_hs_callback_syn()
1941 sk_acceptq_is_full(sk) || in afiucv_hs_callback_syn()
1948 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1953 iucv_sock_init(nsk, sk); in afiucv_hs_callback_syn()
1964 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; in afiucv_hs_callback_syn()
1973 iucv_accept_enqueue(sk, nsk); in afiucv_hs_callback_syn()
1975 sk->sk_data_ready(sk); in afiucv_hs_callback_syn()
1978 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1987 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synack() argument
1989 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synack()
1995 if (sk->sk_state != IUCV_BOUND) in afiucv_hs_callback_synack()
1997 bh_lock_sock(sk); in afiucv_hs_callback_synack()
1999 sk->sk_state = IUCV_CONNECTED; in afiucv_hs_callback_synack()
2000 sk->sk_state_change(sk); in afiucv_hs_callback_synack()
2001 bh_unlock_sock(sk); in afiucv_hs_callback_synack()
2010 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synfin() argument
2012 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synfin()
2016 if (sk->sk_state != IUCV_BOUND) in afiucv_hs_callback_synfin()
2018 bh_lock_sock(sk); in afiucv_hs_callback_synfin()
2019 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_synfin()
2020 sk->sk_state_change(sk); in afiucv_hs_callback_synfin()
2021 bh_unlock_sock(sk); in afiucv_hs_callback_synfin()
2030 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_fin() argument
2032 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_fin()
2037 bh_lock_sock(sk); in afiucv_hs_callback_fin()
2038 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_fin()
2039 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_fin()
2040 sk->sk_state_change(sk); in afiucv_hs_callback_fin()
2042 bh_unlock_sock(sk); in afiucv_hs_callback_fin()
2051 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_win() argument
2053 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_win()
2060 if (sk->sk_state != IUCV_CONNECTED) in afiucv_hs_callback_win()
2064 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_win()
2071 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_rx() argument
2073 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_rx()
2080 if (sk->sk_state != IUCV_CONNECTED) { in afiucv_hs_callback_rx()
2085 if (sk->sk_shutdown & RCV_SHUTDOWN) { in afiucv_hs_callback_rx()
2101 if (sock_queue_rcv_skb(sk, skb)) { in afiucv_hs_callback_rx()
2106 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); in afiucv_hs_callback_rx()
2119 struct sock *sk; in afiucv_hs_rcv() local
2133 sk = NULL; in afiucv_hs_rcv()
2135 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_hs_rcv()
2137 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2139 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2141 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && in afiucv_hs_rcv()
2142 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2144 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2148 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2150 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2152 (!memcmp(&iucv_sk(sk)->dst_name, in afiucv_hs_rcv()
2154 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2156 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2163 sk = NULL; in afiucv_hs_rcv()
2177 err = afiucv_hs_callback_syn(sk, skb); in afiucv_hs_rcv()
2181 err = afiucv_hs_callback_synack(sk, skb); in afiucv_hs_rcv()
2185 err = afiucv_hs_callback_synfin(sk, skb); in afiucv_hs_rcv()
2189 err = afiucv_hs_callback_fin(sk, skb); in afiucv_hs_rcv()
2192 err = afiucv_hs_callback_win(sk, skb); in afiucv_hs_rcv()
2204 err = afiucv_hs_callback_rx(sk, skb); in afiucv_hs_rcv()
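afiucv_hs_rcv() (2119-2204) first matches the frame against the socket list (an unbound listener matches on source name/user ID with blank destination; an established socket on all four fields), then dispatches on the transport header flags. Sketch of the dispatch, where trans_hdr is the parsed af_iucv_trans_hdr and the combined-flag and error paths are elided:

    switch (trans_hdr->flags) {
    case AF_IUCV_FLAG_SYN:
            err = afiucv_hs_callback_syn(sk, skb);          /* connect request */
            break;
    case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
            err = afiucv_hs_callback_synack(sk, skb);       /* connect confirmed */
            break;
    case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
            err = afiucv_hs_callback_synfin(sk, skb);       /* connect rejected */
            break;
    case AF_IUCV_FLAG_FIN:
            err = afiucv_hs_callback_fin(sk, skb);          /* peer shutdown */
            break;
    case AF_IUCV_FLAG_WIN:
            err = afiucv_hs_callback_win(sk, skb);          /* window reopened */
            break;
    default:
            err = afiucv_hs_callback_rx(sk, skb);           /* payload frame */
            break;
    }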
2220 struct sock *isk = skb->sk; in afiucv_hs_callback_txnotify()
2221 struct sock *sk = NULL; in afiucv_hs_callback_txnotify() local
2229 sk_for_each(sk, &iucv_sk_list.head) in afiucv_hs_callback_txnotify()
2230 if (sk == isk) { in afiucv_hs_callback_txnotify()
2231 iucv = iucv_sk(sk); in afiucv_hs_callback_txnotify()
2236 if (!iucv || sock_flag(sk, SOCK_ZAPPED)) in afiucv_hs_callback_txnotify()
2251 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2260 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2270 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_txnotify()
2271 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_txnotify()
2272 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2284 if (sk->sk_state == IUCV_CLOSING) { in afiucv_hs_callback_txnotify()
2285 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { in afiucv_hs_callback_txnotify()
2286 sk->sk_state = IUCV_CLOSED; in afiucv_hs_callback_txnotify()
2287 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2300 struct sock *sk; in afiucv_netdev_event() local
2306 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_netdev_event()
2307 iucv = iucv_sk(sk); in afiucv_netdev_event()
2309 (sk->sk_state == IUCV_CONNECTED)) { in afiucv_netdev_event()
2311 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in afiucv_netdev_event()
2312 sk->sk_state = IUCV_DISCONN; in afiucv_netdev_event()
2313 sk->sk_state_change(sk); in afiucv_netdev_event()
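Finally, the netdevice notifier (2300-2313) sweeps the socket list when a bound interface goes away: any HiperSockets-transport socket still IUCV_CONNECTED on that device gets a best-effort FIN and moves to IUCV_DISCONN. Sketch, assuming the NETDEV_GOING_DOWN/NETDEV_REBOOT cases suggested by the fragments:

    static int afiucv_netdev_event(struct notifier_block *this,
                                   unsigned long event, void *ptr)
    {
            struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
            struct sock *sk;
            struct iucv_sock *iucv;

            switch (event) {
            case NETDEV_REBOOT:
            case NETDEV_GOING_DOWN:
                    sk_for_each(sk, &iucv_sk_list.head) {
                            iucv = iucv_sk(sk);
                            if ((iucv->hs_dev == event_dev) &&
                                (sk->sk_state == IUCV_CONNECTED)) {
                                    if (event == NETDEV_GOING_DOWN)
                                            iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                                    sk->sk_state = IUCV_DISCONN;
                                    sk->sk_state_change(sk);
                            }
                    }
                    break;
            }
            return NOTIFY_DONE;
    }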