This source file includes the following definitions:
- cifs_wake_up_task
- AllocMidQEntry
- _cifs_mid_q_entry_release
- cifs_mid_q_entry_release
- DeleteMidQEntry
- cifs_delete_mid
- smb_send_kvec
- smb_rqst_len
- __smb_send_rqst
- smb_send_rqst
- smb_send
- wait_for_free_credits
- wait_for_free_request
- wait_for_compound_request
- cifs_wait_mtu_credits
- allocate_mid
- wait_for_response
- cifs_setup_async_request
- cifs_call_async
- SendReceiveNoRsp
- cifs_sync_mid_result
- send_cancel
- cifs_check_receive
- cifs_setup_request
- cifs_compound_callback
- cifs_compound_last_callback
- cifs_cancelled_callback
- compound_send_recv
- cifs_send_recv
- SendReceive2
- SendReceive
- send_lock_cancel
- SendReceiveBlockingLock
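
/*
 * CIFS/SMB transport routines: allocating and tracking mid (multiplex id)
 * queue entries, sending requests on the socket, waiting for credits, and
 * matching server responses to waiting callers.
 */
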
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
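	/* when mid allocated can be before when sent */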
	temp->when_alloc = jiffies;
	temp->server = server;

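	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */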
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}

	/*
	 * Commands taking longer than slow_rsp_threshold seconds are counted
	 * and logged as slow responses. Blocking lock commands (lock_cmd) are
	 * excluded since they are expected to take a long time.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/* NB: le16_to_cpu returns unsigned so smb_cmd can not be negative */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
				    midEntry->when_sent,
				    midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
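
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */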
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
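		/*
		 * Retry -EAGAIN sends with exponentially increasing sleeps.
		 * A blocking socket gets three tries; a nonblocking one is
		 * retried for roughly 15 seconds in total before the send is
		 * reported as stuck.
		 */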
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0;
	}
	return 0;
}

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

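	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set
	 * to PAGE_SIZE.
	 */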
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

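	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */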
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

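		/* now walk the page array and send each page in it */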
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

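	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so the
	 * effect of this signal will be ignored by the caller.
	 */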
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
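		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */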
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform "
				"callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

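			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */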
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

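			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */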
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
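		/*
		 * Fail right away if not enough credits are available now and
		 * the requests already in flight could not plausibly return
		 * the credits we are short of; otherwise we would block
		 * forever waiting for credits nobody will grant.
		 */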
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* the only exception is SMB logoff while tearing down */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}
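
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */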
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

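	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits
	 * and return -EAGAIN in such cases to let callers handle it.
	 */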
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}
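
/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */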
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;
	struct TCP_Server_Info *server;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	server = ses->server;
	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

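	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */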
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	mutex_lock(&server->srv_mutex);

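	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session instance. We can not use credits obtained from a
	 * previous session to send this request. Check if there were
	 * reconnects after we obtained credits and return -EAGAIN in such
	 * cases to let callers handle it.
	 */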
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;

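		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */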
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

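	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */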
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
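	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */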
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */
	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
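
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */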
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */
	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* wait until the response arrives or the connection goes down */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}