/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#ifdef CONFIG_CIFS_SMB2
#include "smb2pdu.h"
#endif

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

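/*
 * Allocate and initialize a new session structure: zeroed, marked CifsNew,
 * with a reference count of one and its lists and mutex initialized.
 */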
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
	}
	return ret_buf;
}

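/*
 * Free a session structure, zeroing the password first so the cleartext
 * credential does not linger in freed memory.
 */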
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree(buf_to_free->auth_key.response);
	kfree(buf_to_free);
}

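/*
 * Allocate and initialize a new tree connection structure: zeroed, marked
 * CifsNew, with a reference count of one.
 */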
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
#ifdef CONFIG_CIFS_STATS
		spin_lock_init(&ret_buf->stat_lock);
#endif
	}
	return ret_buf;
}

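/* Free a tree connection structure, zeroing any stored share password first */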
void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free);
}

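/*
 * Allocate a large SMB request buffer from the cifs_req_poolp mempool and
 * clear the header portion of it.
 */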
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	size_t buf_size = sizeof(struct smb_hdr);

#ifdef CONFIG_CIFS_SMB2
	/*
	 * The SMB2 header is bigger than the CIFS one, so it does no harm
	 * to clear a few more bytes for CIFS.
	 */
	buf_size = sizeof(struct smb2_hdr);
#endif
	/*
	 * We could use the negotiated size instead of max_msgsize, but it
	 * may be more efficient to always allocate the same size, albeit
	 * slightly larger than necessary; maxbuffersize defaults to this
	 * and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	if (ret_buf) {
		memset(ret_buf, 0, buf_size + 3);
		atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

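/*
 * Allocate a small SMB request buffer from the cifs_sm_req_poolp mempool;
 * unlike cifs_buf_get, clearing the header is left to header_assemble.
 */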
struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use the negotiated size instead of max_msgsize, but it may be
   more efficient to always allocate the same size, albeit slightly larger
   than necessary; maxbuffersize defaults to this and cannot be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	if (ret_buf) {
	/* No need to clear memory here, cleared in header assemble */
	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
		atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}
	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

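/* Return a response buffer to whichever pool it came from */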
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/* NB: MID can not be set if treeCon not passed in, in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /*  RFC 1001 length field does not count */  +
	    2 /* for bcc field itself */);

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags  |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

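/*
 * Sanity check a received SMB: verify the protocol signature and that the
 * RFC1001 length, the number of bytes read, and the length calculated from
 * the SMB fields are consistent. Returns 0 if the frame looks valid,
 * -EIO otherwise.
 */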
int
checkSMB(char *buf, unsigned int total_read)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

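/*
 * Check whether an unexpected frame from the server is an oplock break or
 * a directory change notification and, for an oplock break, queue the
 * break work for the matching open file. Returns true if the frame was
 * recognized and handled here, false otherwise.
 */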
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - a harmless race between a close request and an
		   oplock break response is expected from time to time when
		   writing out large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&cifs_file_list_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				/*
				 * Set flag if the server downgrades the oplock
				 * to L2 else clear.
				 */
				if (pSMB->OplockLevel)
					set_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);
				else
					clear_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);

				queue_work(cifsiod_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&cifs_file_list_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&cifs_file_list_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

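/*
 * Stop using server inode numbers on this mount and warn the user; called
 * when the server does not appear to report them correctly.
 */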
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
			 cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}

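/*
 * Translate an SMB1 oplock level into the inode's caching flags: an
 * exclusive oplock allows read and write caching, a level II oplock allows
 * read caching only, and anything else disables caching.
 */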
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

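/*
 * Drop a writer reference taken by cifs_get_writer; the last writer clears
 * the pending-writers flag and wakes up anyone waiting for writes to drain.
 */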
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

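/*
 * Called when oplock break processing for the inode is finished: clear the
 * pending flag and wake up anyone blocked in cifs_get_writer.
 */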
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

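/*
 * Return true if the current task may use the backup intent on this mount,
 * i.e. it matches the configured backupuid or is in the backupgid group.
 */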
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&cifs_file_list_lock);
	list_del(&open->olist);
	spin_unlock(&cifs_file_list_lock);
}

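/*
 * Add a pending open to the tcon's pending_opens list; the caller must
 * already hold cifs_file_list_lock (cifs_add_pending_open below is the
 * locking wrapper).
 */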
void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
#ifdef CONFIG_CIFS_SMB2
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
#endif
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

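/* Locking wrapper around cifs_add_pending_open_locked */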
void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&cifs_file_list_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&cifs_file_list_lock);
}