1/*
2 *  linux/fs/ncpfs/sock.c
3 *
4 *  Copyright (C) 1992, 1993  Rick Sladkey
5 *
6 *  Modified 1995, 1996 by Volker Lendecke to be usable for ncp
7 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
8 *
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/time.h>
14#include <linux/errno.h>
15#include <linux/socket.h>
16#include <linux/fcntl.h>
17#include <linux/stat.h>
18#include <linux/string.h>
19#include <asm/uaccess.h>
20#include <linux/in.h>
21#include <linux/net.h>
22#include <linux/mm.h>
23#include <linux/netdevice.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <net/scm.h>
27#include <net/sock.h>
28#include <linux/ipx.h>
29#include <linux/poll.h>
30#include <linux/file.h>
31
32#include "ncp_fs.h"
33
34#include "ncpsign_kernel.h"
35
36static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
37{
38	struct msghdr msg = {NULL, };
39	struct kvec iov = {buf, size};
40	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
41}
42
43static inline int do_send(struct socket *sock, struct kvec *vec, int count,
44			  int len, unsigned flags)
45{
46	struct msghdr msg = { .msg_flags = flags };
47	return kernel_sendmsg(sock, &msg, vec, count, len);
48}
49
50static int _send(struct socket *sock, const void *buff, int len)
51{
52	struct kvec vec;
53	vec.iov_base = (void *) buff;
54	vec.iov_len = len;
55	return do_send(sock, &vec, 1, len, 0);
56}
57
/*
 * One outstanding NCP request and the state needed to transmit it and
 * match its reply.  Reference counted: the submitter and the I/O path
 * each hold a reference (see ncp_req_get()/ncp_req_put()).
 */
struct ncp_request_reply {
	struct list_head req;		/* link in server->tx.requests queue */
	wait_queue_head_t wq;		/* submitter sleeps here until RQ_DONE */
	atomic_t refs;			/* lifetime refcount; kfree() at zero */
	unsigned char* reply_buf;	/* caller's buffer the reply is copied into */
	size_t datalen;			/* reply_buf capacity, then actual reply length */
	int result;			/* completion status handed to the waiter */
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct kvec* tx_ciov;		/* current transmit position within tx_iov */
	size_t tx_totallen;		/* total (TCP: remaining) bytes to transmit */
	size_t tx_iovlen;		/* live iovec entries starting at tx_ciov */
	struct kvec tx_iov[3];		/* [0] TCP framing, [1] packet, [2] UDP signature */
	u_int16_t tx_type;		/* request type (first 16 bits of the packet) */
	u_int32_t sign[6];		/* TCP framing words + packet signature */
};
73
74static inline struct ncp_request_reply* ncp_alloc_req(void)
75{
76	struct ncp_request_reply *req;
77
78	req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
79	if (!req)
80		return NULL;
81
82	init_waitqueue_head(&req->wq);
83	atomic_set(&req->refs, (1));
84	req->status = RQ_IDLE;
85
86	return req;
87}
88
89static void ncp_req_get(struct ncp_request_reply *req)
90{
91	atomic_inc(&req->refs);
92}
93
94static void ncp_req_put(struct ncp_request_reply *req)
95{
96	if (atomic_dec_and_test(&req->refs))
97		kfree(req);
98}
99
/*
 * sk_data_ready callback installed on the NCP socket: chain to the
 * socket's original callback, then kick the receive worker.
 */
void ncp_tcp_data_ready(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk);
	schedule_work(&server->rcv.tq);
}
107
/*
 * sk_error_report callback: chain to the original callback and schedule
 * the receive worker so it can observe (and handle) the socket error.
 */
void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}
115
/*
 * sk_write_space callback: chain to the original callback and, when a
 * partially-sent request is pending in tx.creq, schedule the transmit
 * worker to push the rest of it.
 */
void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}
126
/*
 * Retransmit-timer callback (datagram transports).  Runs in timer
 * context, so defer the real work to the timeout workqueue item.
 */
void ncpdgram_timeout_call(unsigned long v)
{
	struct ncp_server *server = (void*)v;

	schedule_work(&server->timeout_tq);
}
133
/*
 * Complete @req with @result: copy the reply out of server->rxbuf into
 * the caller's buffer (skipped when the waiter already gave up and the
 * request is RQ_ABANDONED), mark it RQ_DONE, wake all waiters and drop
 * the I/O path's reference.  Callers hold rcv.creq_mutex.
 */
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
143
/*
 * Tear the connection down after a fatal error: invalidate it, stop the
 * retransmit timer and fail every queued and in-flight request with -EIO.
 * Callers hold rcv.creq_mutex.
 */
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	/* Fail everything still waiting in the transmit queue. */
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	/* Fail the request currently awaiting its reply, if any. */
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	/* Fail the request currently being transmitted, if any. */
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}
169
170static inline int get_conn_number(struct ncp_reply_header *rp)
171{
172	return rp->conn_low | (rp->conn_high << 8);
173}
174
/*
 * Abort @req with error @err.  What to do depends on how far the request
 * has progressed.  Callers hold rcv.creq_mutex.
 */
static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
		case RQ_IDLE:
		case RQ_DONE:
			/* Never submitted, or already completed: nothing to do. */
			break;
		case RQ_QUEUED:
			/* Still in the transmit queue: unlink and fail it now. */
			list_del_init(&req->req);
			ncp_finish_request(server, req, err);
			break;
		case RQ_INPROGRESS:
			/* On the wire: let the I/O path complete it, but tell it
			   not to copy the reply into the caller's buffer. */
			req->status = RQ_ABANDONED;
			break;
		case RQ_ABANDONED:
			break;
	}
}
193
/* Locked wrapper around __ncp_abort_request(). */
static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}
200
/* Abort the whole TCP connection.  Callers hold rcv.creq_mutex. */
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
205
206static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
207{
208	struct kvec vec[3];
209	/* sock_sendmsg updates iov pointers for us :-( */
210	memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
211	return do_send(sock, vec, req->tx_iovlen,
212		       req->tx_totallen, MSG_DONTWAIT);
213}
214
215static void __ncptcp_try_send(struct ncp_server *server)
216{
217	struct ncp_request_reply *rq;
218	struct kvec *iov;
219	struct kvec iovc[3];
220	int result;
221
222	rq = server->tx.creq;
223	if (!rq)
224		return;
225
226	/* sock_sendmsg updates iov pointers for us :-( */
227	memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
228	result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
229			 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
230
231	if (result == -EAGAIN)
232		return;
233
234	if (result < 0) {
235		pr_err("tcp: Send failed: %d\n", result);
236		__ncp_abort_request(server, rq, result);
237		return;
238	}
239	if (result >= rq->tx_totallen) {
240		server->rcv.creq = rq;
241		server->tx.creq = NULL;
242		return;
243	}
244	rq->tx_totallen -= result;
245	iov = rq->tx_ciov;
246	while (iov->iov_len <= result) {
247		result -= iov->iov_len;
248		iov++;
249		rq->tx_iovlen--;
250	}
251	iov->iov_base += result;
252	iov->iov_len -= result;
253	rq->tx_ciov = iov;
254}
255
/*
 * Stamp the common NCP request header: the connection number split into
 * low/high bytes plus the next sequence number, and mark @req in flight.
 */
static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
	h->sequence = ++server->sequence;
}
263
/*
 * Transmit @req over a datagram transport: fill in the request header,
 * append the packet signature when signing is active, send the datagram
 * and arm the retransmit timer.  Callers hold rcv.creq_mutex.
 */
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header* h;

	/* Datagrams do not use the tx_iov[0] TCP-framing slot. */
	req->tx_ciov = req->tx_iov + 1;

	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(req->tx_totallen), req->sign);
	if (signlen) {
		/* The signature travels as an extra trailing iovec entry. */
		req->tx_ciov[1].iov_base = req->sign;
		req->tx_ciov[1].iov_len = signlen;
		req->tx_iovlen += 1;
		req->tx_totallen += signlen;
	}
	/* Reply matching starts now; set up the timeout/retry budget. */
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}
288
/* NCP-over-TCP framing magics: "DmdT" marks frames we transmit,
 * "tNcP" marks frames received from the server. */
#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
#define NCP_TCP_XMIT_VERSION	(1)
#define NCP_TCP_RCVD_MAGIC	(0x744E6350)
292
/*
 * Transmit @req over TCP: sign the packet, build the NCP-over-TCP
 * framing header (magic, total length, version, max reply length) in
 * tx_iov[0], then push as much as the socket will take.
 * Callers hold rcv.creq_mutex.
 */
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header* h;

	req->tx_ciov = req->tx_iov;
	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	/* 16 bytes of framing always precede the payload; sign_packet()'s
	   result (if any) is added on top of that. */
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(req->tx_totallen + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	req->tx_iovlen += 1;
	req->tx_totallen += signlen;

	server->tx.creq = req;
	__ncptcp_try_send(server);
}
317
318static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
319{
320	/* we copy the data so that we do not depend on the caller
321	   staying alive */
322	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
323	req->tx_iov[1].iov_base = server->txbuf;
324
325	if (server->ncp_sock->type == SOCK_STREAM)
326		ncptcp_start_request(server, req);
327	else
328		ncpdgram_start_request(server, req);
329}
330
/*
 * Submit @req: start it immediately when the line is idle, otherwise
 * queue it behind the in-flight request.  Takes a reference on behalf
 * of the I/O path.  Returns 0, or -EIO if the connection is dead.
 */
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		pr_err("tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	if (server->tx.creq || server->rcv.creq) {
		/* Another request is being sent or awaited: wait our turn. */
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}
350
351static void __ncp_next_request(struct ncp_server *server)
352{
353	struct ncp_request_reply *req;
354
355	server->rcv.creq = NULL;
356	if (list_empty(&server->tx.requests)) {
357		return;
358	}
359	req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
360	list_del_init(&req->req);
361	__ncp_start_request(server, req);
362}
363
364static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
365{
366	if (server->info_sock) {
367		struct kvec iov[2];
368		__be32 hdr[2];
369
370		hdr[0] = cpu_to_be32(len + 8);
371		hdr[1] = cpu_to_be32(id);
372
373		iov[0].iov_base = hdr;
374		iov[0].iov_len = 8;
375		iov[1].iov_base = (void *) data;
376		iov[1].iov_len = len;
377
378		do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
379	}
380}
381
/*
 * Receive worker for datagram (UDP/IPX) transports: drain every packet
 * queued on the socket, answering watchdog probes, forwarding unknown
 * packet types to the info socket, and completing the in-flight request
 * on a matching reply.
 */
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		/* Peek at the header first; the datagram stays queued until
		   the trailing _recv() at "drop" consumes it. */
		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;	/* no more queued packets (or socket error) */
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				/* Server liveness probe: echo it back with the
				   trailing signature byte flipped '?' -> 'Y'. */
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					ncp_dbg(1, "recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					ncp_dbg(1, "too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				/* Unknown type: hand the whole packet to
				   userspace via the info socket and move on. */
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			/* Match against the in-flight request.  An ALLOC_SLOT
			   reply cannot be matched by sequence/connection
			   number, so it is accepted unconditionally. */
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					/* Server is still working on it: reset
					   the retry budget, stretch the timer. */
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							/* last 8 bytes hold the signature */
							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								pr_info("Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;	/* reply already consumed */
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		/* Consume (and discard) the datagram we peeked at above. */
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
472
473static void __ncpdgram_timeout_proc(struct ncp_server *server)
474{
475	/* If timer is pending, we are processing another request... */
476	if (!timer_pending(&server->timeout_tm)) {
477		struct ncp_request_reply* req;
478
479		req = server->rcv.creq;
480		if (req) {
481			int timeout;
482
483			if (server->m.flags & NCP_MOUNT_SOFT) {
484				if (server->timeout_retries-- == 0) {
485					__ncp_abort_request(server, req, -ETIMEDOUT);
486					return;
487				}
488			}
489			/* Ignore errors */
490			ncpdgram_send(server->ncp_sock, req);
491			timeout = server->timeout_last << 1;
492			if (timeout > NCP_MAX_RPC_TIMEOUT) {
493				timeout = NCP_MAX_RPC_TIMEOUT;
494			}
495			server->timeout_last = timeout;
496			mod_timer(&server->timeout_tm, jiffies + timeout);
497		}
498	}
499}
500
/* Work item scheduled by ncpdgram_timeout_call(): run the retransmit
 * logic under the request mutex. */
void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);
	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
509
510static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
511{
512	int result;
513
514	if (buffer) {
515		result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
516	} else {
517		static unsigned char dummy[1024];
518
519		if (len > sizeof(dummy)) {
520			len = sizeof(dummy);
521		}
522		result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
523	}
524	if (result < 0) {
525		return result;
526	}
527	if (result > len) {
528		pr_err("tcp: bug in recvmsg (%u > %Zu)\n", result, len);
529		return -EIO;
530	}
531	return result;
532}
533
534static int __ncptcp_rcv_proc(struct ncp_server *server)
535{
536	/* We have to check the result, so store the complete header */
537	while (1) {
538		int result;
539		struct ncp_request_reply *req;
540		int datalen;
541		int type;
542
543		while (server->rcv.len) {
544			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
545			if (result == -EAGAIN) {
546				return 0;
547			}
548			if (result <= 0) {
549				req = server->rcv.creq;
550				if (req) {
551					__ncp_abort_request(server, req, -EIO);
552				} else {
553					__ncptcp_abort(server);
554				}
555				if (result < 0) {
556					pr_err("tcp: error in recvmsg: %d\n", result);
557				} else {
558					ncp_dbg(1, "tcp: EOF\n");
559				}
560				return -EIO;
561			}
562			if (server->rcv.ptr) {
563				server->rcv.ptr += result;
564			}
565			server->rcv.len -= result;
566		}
567		switch (server->rcv.state) {
568			case 0:
569				if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
570					pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
571					__ncptcp_abort(server);
572					return -EIO;
573				}
574				datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
575				if (datalen < 10) {
576					pr_err("tcp: Unexpected reply len %d\n", datalen);
577					__ncptcp_abort(server);
578					return -EIO;
579				}
580#ifdef CONFIG_NCPFS_PACKET_SIGNING
581				if (server->sign_active) {
582					if (datalen < 18) {
583						pr_err("tcp: Unexpected reply len %d\n", datalen);
584						__ncptcp_abort(server);
585						return -EIO;
586					}
587					server->rcv.buf.len = datalen - 8;
588					server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
589					server->rcv.len = 8;
590					server->rcv.state = 4;
591					break;
592				}
593#endif
594				type = ntohs(server->rcv.buf.type);
595#ifdef CONFIG_NCPFS_PACKET_SIGNING
596cont:;
597#endif
598				if (type != NCP_REPLY) {
599					if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
600						*(__u16*)(server->unexpected_packet.data) = htons(type);
601						server->unexpected_packet.len = datalen - 8;
602
603						server->rcv.state = 5;
604						server->rcv.ptr = server->unexpected_packet.data + 2;
605						server->rcv.len = datalen - 10;
606						break;
607					}
608					ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
609skipdata2:;
610					server->rcv.state = 2;
611skipdata:;
612					server->rcv.ptr = NULL;
613					server->rcv.len = datalen - 10;
614					break;
615				}
616				req = server->rcv.creq;
617				if (!req) {
618					ncp_dbg(1, "Reply without appropriate request\n");
619					goto skipdata2;
620				}
621				if (datalen > req->datalen + 8) {
622					pr_err("tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8);
623					server->rcv.state = 3;
624					goto skipdata;
625				}
626				req->datalen = datalen - 8;
627				((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
628				server->rcv.ptr = server->rxbuf + 2;
629				server->rcv.len = datalen - 10;
630				server->rcv.state = 1;
631				break;
632#ifdef CONFIG_NCPFS_PACKET_SIGNING
633			case 4:
634				datalen = server->rcv.buf.len;
635				type = ntohs(server->rcv.buf.type2);
636				goto cont;
637#endif
638			case 1:
639				req = server->rcv.creq;
640				if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
641					if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
642						pr_err("tcp: Bad sequence number\n");
643						__ncp_abort_request(server, req, -EIO);
644						return -EIO;
645					}
646					if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
647						pr_err("tcp: Connection number mismatch\n");
648						__ncp_abort_request(server, req, -EIO);
649						return -EIO;
650					}
651				}
652#ifdef CONFIG_NCPFS_PACKET_SIGNING
653				if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
654					if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
655						pr_err("tcp: Signature violation\n");
656						__ncp_abort_request(server, req, -EIO);
657						return -EIO;
658					}
659				}
660#endif
661				ncp_finish_request(server, req, req->datalen);
662			nextreq:;
663				__ncp_next_request(server);
664			case 2:
665			next:;
666				server->rcv.ptr = (unsigned char*)&server->rcv.buf;
667				server->rcv.len = 10;
668				server->rcv.state = 0;
669				break;
670			case 3:
671				ncp_finish_request(server, server->rcv.creq, -EIO);
672				goto nextreq;
673			case 5:
674				info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
675				goto next;
676		}
677	}
678}
679
/* Receive work item for TCP: run the state machine under the lock. */
void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
689
/* Transmit work item for TCP: push more of tx.creq under the lock. */
void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
699
/*
 * Submit one RPC (already marshalled in server->packet, @size bytes)
 * and sleep until it completes.  A signal aborts the request and yields
 * -EINTR; otherwise the completion result set by ncp_finish_request()
 * is returned.  The reply is copied into @reply_buf (at most
 * @max_reply_size bytes).
 */
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	req->tx_iovlen = 1;
	req->tx_totallen = size;
	/* The first 16 bits of the packet identify the request type. */
	req->tx_type = *(u_int16_t*)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}

	result = req->result;

out:
	/* Drop the submitter's reference; the I/O path may still hold one. */
	ncp_req_put(req);

	return result;
}
735
736/*
737 * We need the server to be locked here, so check!
738 */
739
/*
 * Perform one request with the server locked (enforced below) and with
 * all signals blocked except SIGKILL — plus default-handled SIGINT and
 * SIGQUIT on "intr" mounts — so the interruptible wait inside
 * do_ncp_rpc_call() only wakes for those.
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		pr_err("Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		/* mask holds the signals left UNblocked (inverted below). */
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		/* Restore the caller's original signal mask. */
		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);

	return result;
}
789
790/* ncp_do_request assures that at least a complete reply header is
791 * received. It assumes that server->current_size contains the ncp
792 * request size
793 */
/*
 * Issue NCP function @function using the request already marshalled in
 * server->packet, copying the reply into @rpl (at most @size bytes).
 * Returns the server's completion code (0 on success) or a negative
 * errno on transport failure.
 */
int ncp_request2(struct ncp_server *server, int function,
		void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		/* The 16-bit word right after the header carries the length
		   of the subfunction payload. */
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		ncp_dbg(1, "ncp_request_error: %d\n", result);
		goto out;
	}
	/* Record reply metadata for the caller's convenience. */
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;

	if (result != 0)
		ncp_vdbg("completion code=%x\n", result);
out:
	return result;
}
830
/*
 * Open the NCP connection: send an ALLOC SLOT request and remember the
 * connection number the server assigned.  Returns 0 or a negative errno.
 */
int ncp_connect(struct ncp_server *server)
{
	struct ncp_request_header *h;
	int result;

	server->connection = 0xFFFF;	/* "not connected yet" sentinel */
	server->sequence = 255;		/* incremented before first use, see ncp_init_header() */

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_ALLOC_SLOT_REQUEST;
	h->task		= 2; /* see above */
	h->function	= 0;

	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
	if (result < 0)
		goto out;
	/* The reply (in server->packet) carries our connection number. */
	server->connection = h->conn_low + (h->conn_high * 256);
	result = 0;
out:
	return result;
}
852
853int ncp_disconnect(struct ncp_server *server)
854{
855	struct ncp_request_header *h;
856
857	h = (struct ncp_request_header *) (server->packet);
858	h->type = NCP_DEALLOC_SLOT_REQUEST;
859	h->task		= 2; /* see above */
860	h->function	= 0;
861
862	return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
863}
864
/* Lock the server; ncp_do_request() refuses to run without this held. */
void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		pr_warn("%s: was locked!\n", __func__);
	server->lock = 1;
}
872
/* Release the server lock taken by ncp_lock_server(). */
void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		/* Unbalanced unlock: warn and leave the mutex alone. */
		pr_warn("%s: was not locked!\n", __func__);
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}
882