This source file includes the following definitions:
- cifs_convert_flags
- cifs_posix_convert_flags
- cifs_get_disposition
- cifs_posix_open
- cifs_nt_open
- cifs_has_mand_locks
- cifs_down_write
- cifs_new_fileinfo
- cifsFileInfo_get
- cifsFileInfo_put_final
- cifsFileInfo_put_work
- cifsFileInfo_put
- _cifsFileInfo_put
- cifs_open
- cifs_relock_file
- cifs_reopen_file
- cifs_close
- cifs_reopen_persistent_handles
- cifs_closedir
- cifs_lock_init
- cifs_del_lock_waiters
- cifs_find_fid_lock_conflict
- cifs_find_lock_conflict
- cifs_lock_test
- cifs_lock_add
- cifs_lock_add_if
- cifs_posix_lock_test
- cifs_posix_lock_set
- cifs_push_mandatory_locks
- hash_lockowner
- cifs_push_posix_locks
- cifs_push_locks
- cifs_read_flock
- cifs_getlk
- cifs_move_llist
- cifs_free_llist
- cifs_unlock_range
- cifs_setlk
- cifs_lock
- cifs_update_eof
- cifs_write
- find_readable_file
- cifs_get_writable_file
- find_writable_file
- cifs_get_writable_path
- cifs_get_readable_path
- cifs_partialpagewrite
- wdata_alloc_and_fillpages
- wdata_prepare_pages
- wdata_send_pages
- cifs_writepages
- cifs_writepage_locked
- cifs_writepage
- cifs_write_end
- cifs_strict_fsync
- cifs_fsync
- cifs_flush
- cifs_write_allocate_pages
- get_numpages
- cifs_uncached_writedata_release
- cifs_uncached_writev_complete
- wdata_fill_from_iovec
- cifs_resend_wdata
- cifs_write_from_iter
- collect_uncached_write_data
- __cifs_writev
- cifs_direct_writev
- cifs_user_writev
- cifs_writev
- cifs_strict_writev
- cifs_readdata_direct_alloc
- cifs_readdata_alloc
- cifs_readdata_release
- cifs_read_allocate_pages
- cifs_uncached_readdata_release
- cifs_readdata_to_iov
- cifs_uncached_readv_complete
- uncached_fill_pages
- cifs_uncached_read_into_pages
- cifs_uncached_copy_into_pages
- cifs_resend_rdata
- cifs_send_async_read
- collect_uncached_read_data
- __cifs_readv
- cifs_direct_readv
- cifs_user_readv
- cifs_strict_readv
- cifs_read
- cifs_page_mkwrite
- cifs_file_strict_mmap
- cifs_file_mmap
- cifs_readv_complete
- readpages_fill_pages
- cifs_readpages_read_into_pages
- cifs_readpages_copy_into_pages
- readpages_get_pages
- cifs_readpages
- cifs_readpage_worker
- cifs_readpage
- is_inode_writable
- is_size_safe_to_change
- cifs_write_begin
- cifs_release_page
- cifs_invalidate_page
- cifs_launder_page
- cifs_oplock_break
- cifs_direct_io
1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   Distributed under the GNU Lesser General Public License v2.1 or later.
11  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/mm.h>
37 #include <asm/div64.h>
38 #include "cifsfs.h"
39 #include "cifspdu.h"
40 #include "cifsglob.h"
41 #include "cifsproto.h"
42 #include "cifs_unicode.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
45 #include "fscache.h"
46 #include "smbdirect.h"
47
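/* Map the VFS O_ACCMODE open flags onto the SMB desired-access bits requested on a non-POSIX open. */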
48 static inline int cifs_convert_flags(unsigned int flags)
49 {
50 if ((flags & O_ACCMODE) == O_RDONLY)
51 return GENERIC_READ;
52 else if ((flags & O_ACCMODE) == O_WRONLY)
53 return GENERIC_WRITE;
54 else if ((flags & O_ACCMODE) == O_RDWR) {
55 /* GENERIC_ALL is too much permission to request
56    can cause unnecessary access denied on create */
57 /* return GENERIC_ALL; */
58 return (GENERIC_READ | GENERIC_WRITE);
59 }
60
61 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
62 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
63 FILE_READ_DATA);
64 }
65
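/* Map VFS open flags onto the SMB_O_* flags used by the SMB1 POSIX open/create call. */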
66 static u32 cifs_posix_convert_flags(unsigned int flags)
67 {
68 u32 posix_flags = 0;
69
70 if ((flags & O_ACCMODE) == O_RDONLY)
71 posix_flags = SMB_O_RDONLY;
72 else if ((flags & O_ACCMODE) == O_WRONLY)
73 posix_flags = SMB_O_WRONLY;
74 else if ((flags & O_ACCMODE) == O_RDWR)
75 posix_flags = SMB_O_RDWR;
76
77 if (flags & O_CREAT) {
78 posix_flags |= SMB_O_CREAT;
79 if (flags & O_EXCL)
80 posix_flags |= SMB_O_EXCL;
81 } else if (flags & O_EXCL)
82 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
83 current->comm, current->tgid);
84
85 if (flags & O_TRUNC)
86 posix_flags |= SMB_O_TRUNC;
87
88 if (flags & O_DSYNC)
89 posix_flags |= SMB_O_SYNC;
90 if (flags & O_DIRECTORY)
91 posix_flags |= SMB_O_DIRECTORY;
92 if (flags & O_NOFOLLOW)
93 posix_flags |= SMB_O_NOFOLLOW;
94 if (flags & O_DIRECT)
95 posix_flags |= SMB_O_DIRECT;
96
97 return posix_flags;
98 }
99
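/* Pick the SMB create disposition from the O_CREAT/O_EXCL/O_TRUNC combination. */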
100 static inline int cifs_get_disposition(unsigned int flags)
101 {
102 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
103 return FILE_CREATE;
104 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
105 return FILE_OVERWRITE_IF;
106 else if ((flags & O_CREAT) == O_CREAT)
107 return FILE_OPEN_IF;
108 else if ((flags & O_TRUNC) == O_TRUNC)
109 return FILE_OVERWRITE;
110 else
111 return FILE_OPEN;
112 }
113
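/*
 * Issue an SMB1 POSIX create/open and, on success, instantiate or refresh
 * the inode from the returned FILE_UNIX_BASIC_INFO.
 */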
114 int cifs_posix_open(char *full_path, struct inode **pinode,
115 struct super_block *sb, int mode, unsigned int f_flags,
116 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
117 {
118 int rc;
119 FILE_UNIX_BASIC_INFO *presp_data;
120 __u32 posix_flags = 0;
121 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
122 struct cifs_fattr fattr;
123 struct tcon_link *tlink;
124 struct cifs_tcon *tcon;
125
126 cifs_dbg(FYI, "posix open %s\n", full_path);
127
128 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
129 if (presp_data == NULL)
130 return -ENOMEM;
131
132 tlink = cifs_sb_tlink(cifs_sb);
133 if (IS_ERR(tlink)) {
134 rc = PTR_ERR(tlink);
135 goto posix_open_ret;
136 }
137
138 tcon = tlink_tcon(tlink);
139 mode &= ~current_umask();
140
141 posix_flags = cifs_posix_convert_flags(f_flags);
142 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
143 poplock, full_path, cifs_sb->local_nls,
144 cifs_remap(cifs_sb));
145 cifs_put_tlink(tlink);
146
147 if (rc)
148 goto posix_open_ret;
149
150 if (presp_data->Type == cpu_to_le32(-1))
151 goto posix_open_ret;
152
153 if (!pinode)
154 goto posix_open_ret;
155
156 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
157
158 /* get new inode and set it up */
159 if (*pinode == NULL) {
160 cifs_fill_uniqueid(sb, &fattr);
161 *pinode = cifs_iget(sb, &fattr);
162 if (!*pinode) {
163 rc = -ENOMEM;
164 goto posix_open_ret;
165 }
166 } else {
167 cifs_fattr_to_inode(*pinode, &fattr);
168 }
169
170 posix_open_ret:
171 kfree(presp_data);
172 return rc;
173 }
174
175 static int
176 cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
177 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
178 struct cifs_fid *fid, unsigned int xid)
179 {
180 int rc;
181 int desired_access;
182 int disposition;
183 int create_options = CREATE_NOT_DIR;
184 FILE_ALL_INFO *buf;
185 struct TCP_Server_Info *server = tcon->ses->server;
186 struct cifs_open_parms oparms;
187
188 if (!server->ops->open)
189 return -ENOSYS;
190
191 desired_access = cifs_convert_flags(f_flags);
192
193 /*
194  * Open flag to CIFS create disposition mapping used below:
195  *
196  *	POSIX flags           CIFS disposition
197  *	-----------           ----------------
198  *	O_CREAT               FILE_OPEN_IF
199  *	O_CREAT | O_EXCL      FILE_CREATE
200  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
201  *	O_TRUNC               FILE_OVERWRITE
202  *	none of the above     FILE_OPEN
203  *
204  *	There is no direct match for FILE_SUPERSEDE (create whether or not
205  *	the file exists); O_CREAT | O_TRUNC is similar but truncates the
206  *	existing file instead of creating a new one.  O_SYNC maps reasonably
207  *	to the CIFS write-through flag, and O_LARGEFILE is irrelevant because
208  *	large file support is always used by this client.
209  */
217 disposition = cifs_get_disposition(f_flags);
218
219 /* BB pass O_SYNC flag through on file attributes .. BB */
220
221 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
222 if (!buf)
223 return -ENOMEM;
224
225 if (backup_cred(cifs_sb))
226 create_options |= CREATE_OPEN_BACKUP_INTENT;
227
228 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
229 if (f_flags & O_SYNC)
230 create_options |= CREATE_WRITE_THROUGH;
231
232 if (f_flags & O_DIRECT)
233 create_options |= CREATE_NO_BUFFER;
234
235 oparms.tcon = tcon;
236 oparms.cifs_sb = cifs_sb;
237 oparms.desired_access = desired_access;
238 oparms.create_options = create_options;
239 oparms.disposition = disposition;
240 oparms.path = full_path;
241 oparms.fid = fid;
242 oparms.reconnect = false;
243
244 rc = server->ops->open(xid, &oparms, oplock, buf);
245
246 if (rc)
247 goto out;
248
249 if (tcon->unix_ext)
250 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
251 xid);
252 else
253 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
254 xid, fid);
255
256 if (rc) {
257 server->ops->close(xid, tcon, fid);
258 if (rc == -ESTALE)
259 rc = -EOPENSTALE;
260 }
261
262 out:
263 kfree(buf);
264 return rc;
265 }
266
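/* Return true if any open fid on this inode still holds cached mandatory byte-range locks. */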
267 static bool
268 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
269 {
270 struct cifs_fid_locks *cur;
271 bool has_locks = false;
272
273 down_read(&cinode->lock_sem);
274 list_for_each_entry(cur, &cinode->llist, llist) {
275 if (!list_empty(&cur->locks)) {
276 has_locks = true;
277 break;
278 }
279 }
280 up_read(&cinode->lock_sem);
281 return has_locks;
282 }
283
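/* Acquire a rw_semaphore for writing by polling down_write_trylock() with a short sleep between attempts. */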
284 void
285 cifs_down_write(struct rw_semaphore *sem)
286 {
287 while (!down_write_trylock(sem))
288 msleep(10);
289 }
290
291 static void cifsFileInfo_put_work(struct work_struct *work);
292
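/*
 * Allocate and initialize the per-open cifsFileInfo, link it onto the tcon
 * and inode open-file lists and store it in file->private_data.
 */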
293 struct cifsFileInfo *
294 cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
295 struct tcon_link *tlink, __u32 oplock)
296 {
297 struct dentry *dentry = file_dentry(file);
298 struct inode *inode = d_inode(dentry);
299 struct cifsInodeInfo *cinode = CIFS_I(inode);
300 struct cifsFileInfo *cfile;
301 struct cifs_fid_locks *fdlocks;
302 struct cifs_tcon *tcon = tlink_tcon(tlink);
303 struct TCP_Server_Info *server = tcon->ses->server;
304
305 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
306 if (cfile == NULL)
307 return cfile;
308
309 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
310 if (!fdlocks) {
311 kfree(cfile);
312 return NULL;
313 }
314
315 INIT_LIST_HEAD(&fdlocks->locks);
316 fdlocks->cfile = cfile;
317 cfile->llist = fdlocks;
318
319 cfile->count = 1;
320 cfile->pid = current->tgid;
321 cfile->uid = current_fsuid();
322 cfile->dentry = dget(dentry);
323 cfile->f_flags = file->f_flags;
324 cfile->invalidHandle = false;
325 cfile->tlink = cifs_get_tlink(tlink);
326 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
327 INIT_WORK(&cfile->put, cifsFileInfo_put_work);
328 mutex_init(&cfile->fh_mutex);
329 spin_lock_init(&cfile->file_info_lock);
330
331 cifs_sb_active(inode->i_sb);
332
333 /*
334  * If the server returned a read oplock and we have mandatory byte-range
335  * locks, set the oplock level to None.
336  */
337 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
338 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
339 oplock = 0;
340 }
341
342 cifs_down_write(&cinode->lock_sem);
343 list_add(&fdlocks->llist, &cinode->llist);
344 up_write(&cinode->lock_sem);
345
346 spin_lock(&tcon->open_file_lock);
347 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
348 oplock = fid->pending_open->oplock;
349 list_del(&fid->pending_open->olist);
350
351 fid->purge_cache = false;
352 server->ops->set_fid(cfile, fid, oplock);
353
354 list_add(&cfile->tlist, &tcon->openFileList);
355 atomic_inc(&tcon->num_local_opens);
356
357 /* if readable file instance put first in list */
358 spin_lock(&cinode->open_file_lock);
359 if (file->f_mode & FMODE_READ)
360 list_add(&cfile->flist, &cinode->openFileList);
361 else
362 list_add_tail(&cfile->flist, &cinode->openFileList);
363 spin_unlock(&cinode->open_file_lock);
364 spin_unlock(&tcon->open_file_lock);
365
366 if (fid->purge_cache)
367 cifs_zap_mapping(inode);
368
369 file->private_data = cfile;
370 return cfile;
371 }
372
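/* Take an extra reference on the file private data; dropped by cifsFileInfo_put(). */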
373 struct cifsFileInfo *
374 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
375 {
376 spin_lock(&cifs_file->file_info_lock);
377 cifsFileInfo_get_locked(cifs_file);
378 spin_unlock(&cifs_file->file_info_lock);
379 return cifs_file;
380 }
381
382 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
383 {
384 struct inode *inode = d_inode(cifs_file->dentry);
385 struct cifsInodeInfo *cifsi = CIFS_I(inode);
386 struct cifsLockInfo *li, *tmp;
387 struct super_block *sb = inode->i_sb;
388
389 /*
390  * Delete any outstanding lock records. We'll lose them when the file
391  * is closed anyway.
392  */
393 cifs_down_write(&cifsi->lock_sem);
394 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
395 list_del(&li->llist);
396 cifs_del_lock_waiters(li);
397 kfree(li);
398 }
399 list_del(&cifs_file->llist->llist);
400 kfree(cifs_file->llist);
401 up_write(&cifsi->lock_sem);
402
403 cifs_put_tlink(cifs_file->tlink);
404 dput(cifs_file->dentry);
405 cifs_sb_deactive(sb);
406 kfree(cifs_file);
407 }
408
409 static void cifsFileInfo_put_work(struct work_struct *work)
410 {
411 struct cifsFileInfo *cifs_file = container_of(work,
412 struct cifsFileInfo, put);
413
414 cifsFileInfo_put_final(cifs_file);
415 }
416
417 /**
418  * cifsFileInfo_put - release a reference of file priv data
419  *
420  * Always potentially wait for oplock handler. See _cifsFileInfo_put().
421  */
422 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
423 {
424 _cifsFileInfo_put(cifs_file, true, true);
425 }
426
427 /**
428  * _cifsFileInfo_put - release a reference of file priv data
429  *
430  * This may involve closing the filehandle @cifs_file out on the
431  * server. Must be called without holding tcon->open_file_lock,
432  * cinode->open_file_lock and cifs_file->file_info_lock.
433  *
434  * If @wait_oplock_handler is true and we are releasing the last
435  * reference, wait for any running oplock break handler of the file
436  * and cancel any pending one.
437  * If @offload is true, the final release is deferred to the
438  * fileinfo_put workqueue instead of being done inline.
439  */
440 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
441 bool wait_oplock_handler, bool offload)
442 {
443 struct inode *inode = d_inode(cifs_file->dentry);
444 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
445 struct TCP_Server_Info *server = tcon->ses->server;
446 struct cifsInodeInfo *cifsi = CIFS_I(inode);
447 struct super_block *sb = inode->i_sb;
448 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
449 struct cifs_fid fid;
450 struct cifs_pending_open open;
451 bool oplock_break_cancelled;
452
453 spin_lock(&tcon->open_file_lock);
454 spin_lock(&cifsi->open_file_lock);
455 spin_lock(&cifs_file->file_info_lock);
456 if (--cifs_file->count > 0) {
457 spin_unlock(&cifs_file->file_info_lock);
458 spin_unlock(&cifsi->open_file_lock);
459 spin_unlock(&tcon->open_file_lock);
460 return;
461 }
462 spin_unlock(&cifs_file->file_info_lock);
463
464 if (server->ops->get_lease_key)
465 server->ops->get_lease_key(inode, &fid);
466
467 /* store open in pending opens to make sure we don't miss lease break */
468 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
469
470 /* remove it from the lists */
471 list_del(&cifs_file->flist);
472 list_del(&cifs_file->tlist);
473 atomic_dec(&tcon->num_local_opens);
474
475 if (list_empty(&cifsi->openFileList)) {
476 cifs_dbg(FYI, "closing last open instance for inode %p\n",
477 d_inode(cifs_file->dentry));
478
479 /*
480  * In strict cache mode we need to invalidate the mapping on the last
481  * close because reopening with at least a level II oplock could error.
482  */
483 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
484 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
485 cifs_set_oplock_level(cifsi, 0);
486 }
487
488 spin_unlock(&cifsi->open_file_lock);
489 spin_unlock(&tcon->open_file_lock);
490
491 oplock_break_cancelled = wait_oplock_handler ?
492 cancel_work_sync(&cifs_file->oplock_break) : false;
493
494 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
495 struct TCP_Server_Info *server = tcon->ses->server;
496 unsigned int xid;
497
498 xid = get_xid();
499 if (server->ops->close)
500 server->ops->close(xid, tcon, &cifs_file->fid);
501 _free_xid(xid);
502 }
503
504 if (oplock_break_cancelled)
505 cifs_done_oplock_break(cifsi);
506
507 cifs_del_pending_open(&open);
508
509 if (offload)
510 queue_work(fileinfo_put_wq, &cifs_file->put);
511 else
512 cifsFileInfo_put_final(cifs_file);
513 }
514
515 int cifs_open(struct inode *inode, struct file *file)
516
517 {
518 int rc = -EACCES;
519 unsigned int xid;
520 __u32 oplock;
521 struct cifs_sb_info *cifs_sb;
522 struct TCP_Server_Info *server;
523 struct cifs_tcon *tcon;
524 struct tcon_link *tlink;
525 struct cifsFileInfo *cfile = NULL;
526 char *full_path = NULL;
527 bool posix_open_ok = false;
528 struct cifs_fid fid;
529 struct cifs_pending_open open;
530
531 xid = get_xid();
532
533 cifs_sb = CIFS_SB(inode->i_sb);
534 tlink = cifs_sb_tlink(cifs_sb);
535 if (IS_ERR(tlink)) {
536 free_xid(xid);
537 return PTR_ERR(tlink);
538 }
539 tcon = tlink_tcon(tlink);
540 server = tcon->ses->server;
541
542 full_path = build_path_from_dentry(file_dentry(file));
543 if (full_path == NULL) {
544 rc = -ENOMEM;
545 goto out;
546 }
547
548 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
549 inode, file->f_flags, full_path);
550
551 if (file->f_flags & O_DIRECT &&
552 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
553 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
554 file->f_op = &cifs_file_direct_nobrl_ops;
555 else
556 file->f_op = &cifs_file_direct_ops;
557 }
558
559 if (server->oplocks)
560 oplock = REQ_OPLOCK;
561 else
562 oplock = 0;
563
564 if (!tcon->broken_posix_open && tcon->unix_ext &&
565 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
566 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
567
568 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
569 cifs_sb->mnt_file_mode /* ignored */,
570 file->f_flags, &oplock, &fid.netfid, xid);
571 if (rc == 0) {
572 cifs_dbg(FYI, "posix open succeeded\n");
573 posix_open_ok = true;
574 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
575 if (tcon->ses->serverNOS)
576 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
577 tcon->ses->serverName,
578 tcon->ses->serverNOS);
579 tcon->broken_posix_open = true;
580 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
581 (rc != -EOPNOTSUPP))
582 goto out;
583
584 /*
585  * Else fall through to retry the open the old way on network i/o or DFS errors.
586  */
587 }
588
589 if (server->ops->get_lease_key)
590 server->ops->get_lease_key(inode, &fid);
591
592 cifs_add_pending_open(&fid, tlink, &open);
593
594 if (!posix_open_ok) {
595 if (server->ops->get_lease_key)
596 server->ops->get_lease_key(inode, &fid);
597
598 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
599 file->f_flags, &oplock, &fid, xid);
600 if (rc) {
601 cifs_del_pending_open(&open);
602 goto out;
603 }
604 }
605
606 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
607 if (cfile == NULL) {
608 if (server->ops->close)
609 server->ops->close(xid, tcon, &fid);
610 cifs_del_pending_open(&open);
611 rc = -ENOMEM;
612 goto out;
613 }
614
615 cifs_fscache_set_inode_cookie(inode, file);
616
617 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
618 /*
619  * Time to set mode which we can not set earlier due to
620  * problems creating new read-only files.
621  */
622 struct cifs_unix_set_info_args args = {
623 .mode = inode->i_mode,
624 .uid = INVALID_UID,
625 .gid = INVALID_GID,
626 .ctime = NO_CHANGE_64,
627 .atime = NO_CHANGE_64,
628 .mtime = NO_CHANGE_64,
629 .device = 0,
630 };
631 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
632 cfile->pid);
633 }
634
635 out:
636 kfree(full_path);
637 free_xid(xid);
638 cifs_put_tlink(tlink);
639 return rc;
640 }
641
642 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
643
644 /*
645  * Try to reacquire byte range locks that were released when session
646  * to server was lost.
647  */
648 static int
649 cifs_relock_file(struct cifsFileInfo *cfile)
650 {
651 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
652 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
653 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
654 int rc = 0;
655
656 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
657 if (cinode->can_cache_brlcks) {
658 /* can cache locks - no need to relock */
659 up_read(&cinode->lock_sem);
660 return rc;
661 }
662
663 if (cap_unix(tcon->ses) &&
664 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
665 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
666 rc = cifs_push_posix_locks(cfile);
667 else
668 rc = tcon->ses->server->ops->push_mand_locks(cfile);
669
670 up_read(&cinode->lock_sem);
671 return rc;
672 }
673
674 static int
675 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
676 {
677 int rc = -EACCES;
678 unsigned int xid;
679 __u32 oplock;
680 struct cifs_sb_info *cifs_sb;
681 struct cifs_tcon *tcon;
682 struct TCP_Server_Info *server;
683 struct cifsInodeInfo *cinode;
684 struct inode *inode;
685 char *full_path = NULL;
686 int desired_access;
687 int disposition = FILE_OPEN;
688 int create_options = CREATE_NOT_DIR;
689 struct cifs_open_parms oparms;
690
691 xid = get_xid();
692 mutex_lock(&cfile->fh_mutex);
693 if (!cfile->invalidHandle) {
694 mutex_unlock(&cfile->fh_mutex);
695 rc = 0;
696 free_xid(xid);
697 return rc;
698 }
699
700 inode = d_inode(cfile->dentry);
701 cifs_sb = CIFS_SB(inode->i_sb);
702 tcon = tlink_tcon(cfile->tlink);
703 server = tcon->ses->server;
704
705 /*
706  * Can not grab rename sem here because various ops, including those
707  * that already have the rename sem can end up causing writepage to get
708  * called and if the server was down that means we end up here, and we
709  * can never tell if the caller already has the rename_sem.
710  */
711 full_path = build_path_from_dentry(cfile->dentry);
712 if (full_path == NULL) {
713 rc = -ENOMEM;
714 mutex_unlock(&cfile->fh_mutex);
715 free_xid(xid);
716 return rc;
717 }
718
719 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
720 inode, cfile->f_flags, full_path);
721
722 if (tcon->ses->server->oplocks)
723 oplock = REQ_OPLOCK;
724 else
725 oplock = 0;
726
727 if (tcon->unix_ext && cap_unix(tcon->ses) &&
728 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
729 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
730 /*
731  * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
732  * original open or creation of this file, so strip them for reopen.
733  */
734 unsigned int oflags = cfile->f_flags &
735 ~(O_CREAT | O_EXCL | O_TRUNC);
736
737 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
738 cifs_sb->mnt_file_mode /* ignored */,
739 oflags, &oplock, &cfile->fid.netfid, xid);
740 if (rc == 0) {
741 cifs_dbg(FYI, "posix reopen succeeded\n");
742 oparms.reconnect = true;
743 goto reopen_success;
744 }
745
746 /* fallthrough to retry open the old way on errors, especially
747    in the reconnect path it is important to retry hard */
748
749 }
750
751 desired_access = cifs_convert_flags(cfile->f_flags);
752
753 if (backup_cred(cifs_sb))
754 create_options |= CREATE_OPEN_BACKUP_INTENT;
755
756 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
757 if (cfile->f_flags & O_SYNC)
758 create_options |= CREATE_WRITE_THROUGH;
759
760 if (cfile->f_flags & O_DIRECT)
761 create_options |= CREATE_NO_BUFFER;
762
763 if (server->ops->get_lease_key)
764 server->ops->get_lease_key(inode, &cfile->fid);
765
766 oparms.tcon = tcon;
767 oparms.cifs_sb = cifs_sb;
768 oparms.desired_access = desired_access;
769 oparms.create_options = create_options;
770 oparms.disposition = disposition;
771 oparms.path = full_path;
772 oparms.fid = &cfile->fid;
773 oparms.reconnect = true;
774
775 /*
776  * Can not refresh inode by passing in file_info buf to be returned by
777  * ops->open and then calling get_inode_info with returned buf since
778  * file might have write behind data that needs to be flushed and server
779  * version of file size can be stale. If we knew for sure that inode was
780  * not dirty locally we could do this.
781  */
782 rc = server->ops->open(xid, &oparms, &oplock, NULL);
783 if (rc == -ENOENT && oparms.reconnect == false) {
784 /* durable handle timeout is expired - open the file again */
785 rc = server->ops->open(xid, &oparms, &oplock, NULL);
786 /* indicate that we need to relock the file */
787 oparms.reconnect = true;
788 }
789
790 if (rc) {
791 mutex_unlock(&cfile->fh_mutex);
792 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
793 cifs_dbg(FYI, "oplock: %d\n", oplock);
794 goto reopen_error_exit;
795 }
796
797 reopen_success:
798 cfile->invalidHandle = false;
799 mutex_unlock(&cfile->fh_mutex);
800 cinode = CIFS_I(inode);
801
802 if (can_flush) {
803 rc = filemap_write_and_wait(inode->i_mapping);
804 if (!is_interrupt_error(rc))
805 mapping_set_error(inode->i_mapping, rc);
806
807 if (tcon->unix_ext)
808 rc = cifs_get_inode_info_unix(&inode, full_path,
809 inode->i_sb, xid);
810 else
811 rc = cifs_get_inode_info(&inode, full_path, NULL,
812 inode->i_sb, xid, NULL);
813 }
814
815 /*
816  * Else we are writing out data to server already and could deadlock if
817  * we tried to flush data, and since we do not know if we have data that
818  * would invalidate the current end of file on the server we can not go
819  * to the server to get the new inode info.
820  */
821
822 /* If the server returned a read oplock and we have mandatory
823  * byte-range locks, reset the oplock level to None.
824  */
825 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
826 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
827 oplock = 0;
828 }
829
830 server->ops->set_fid(cfile, &cfile->fid, oplock);
831 if (oparms.reconnect)
832 cifs_relock_file(cfile);
833
834 reopen_error_exit:
835 kfree(full_path);
836 free_xid(xid);
837 return rc;
838 }
839
840 int cifs_close(struct inode *inode, struct file *file)
841 {
842 if (file->private_data != NULL) {
843 _cifsFileInfo_put(file->private_data, true, false);
844 file->private_data = NULL;
845 }
846
847 /* return code from the ->release op is always ignored */
848 return 0;
849 }
850
851 void
852 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
853 {
854 struct cifsFileInfo *open_file;
855 struct list_head *tmp;
856 struct list_head *tmp1;
857 struct list_head tmp_list;
858
859 if (!tcon->use_persistent || !tcon->need_reopen_files)
860 return;
861
862 tcon->need_reopen_files = false;
863
864 cifs_dbg(FYI, "Reopen persistent handles");
865 INIT_LIST_HEAD(&tmp_list);
866
867 /* list all files open on tree connection, reopen resilient handles */
868 spin_lock(&tcon->open_file_lock);
869 list_for_each(tmp, &tcon->openFileList) {
870 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
871 if (!open_file->invalidHandle)
872 continue;
873 cifsFileInfo_get(open_file);
874 list_add_tail(&open_file->rlist, &tmp_list);
875 }
876 spin_unlock(&tcon->open_file_lock);
877
878 list_for_each_safe(tmp, tmp1, &tmp_list) {
879 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
880 if (cifs_reopen_file(open_file, false /* do not flush */))
881 tcon->need_reopen_files = true;
882 list_del_init(&open_file->rlist);
883 cifsFileInfo_put(open_file);
884 }
885 }
886
887 int cifs_closedir(struct inode *inode, struct file *file)
888 {
889 int rc = 0;
890 unsigned int xid;
891 struct cifsFileInfo *cfile = file->private_data;
892 struct cifs_tcon *tcon;
893 struct TCP_Server_Info *server;
894 char *buf;
895
896 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
897
898 if (cfile == NULL)
899 return rc;
900
901 xid = get_xid();
902 tcon = tlink_tcon(cfile->tlink);
903 server = tcon->ses->server;
904
905 cifs_dbg(FYI, "Freeing private data in close dir\n");
906 spin_lock(&cfile->file_info_lock);
907 if (server->ops->dir_needs_close(cfile)) {
908 cfile->invalidHandle = true;
909 spin_unlock(&cfile->file_info_lock);
910 if (server->ops->close_dir)
911 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
912 else
913 rc = -ENOSYS;
914 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
915 /* not much we can do if it fails anyway, ignore rc */
916 rc = 0;
917 } else
918 spin_unlock(&cfile->file_info_lock);
919
920 buf = cfile->srch_inf.ntwrk_buf_start;
921 if (buf) {
922 cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
923 cfile->srch_inf.ntwrk_buf_start = NULL;
924 if (cfile->srch_inf.smallBuf)
925 cifs_small_buf_release(buf);
926 else
927 cifs_buf_release(buf);
928 }
929
930 cifs_put_tlink(cfile->tlink);
931 kfree(file->private_data);
932 file->private_data = NULL;
933
934 free_xid(xid);
935 return rc;
936 }
937
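/* Allocate and initialize a cifsLockInfo describing one byte-range lock request. */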
938 static struct cifsLockInfo *
939 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
940 {
941 struct cifsLockInfo *lock =
942 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
943 if (!lock)
944 return lock;
945 lock->offset = offset;
946 lock->length = length;
947 lock->type = type;
948 lock->pid = current->tgid;
949 lock->flags = flags;
950 INIT_LIST_HEAD(&lock->blist);
951 init_waitqueue_head(&lock->block_q);
952 return lock;
953 }
954
955 void
956 cifs_del_lock_waiters(struct cifsLockInfo *lock)
957 {
958 struct cifsLockInfo *li, *tmp;
959 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
960 list_del_init(&li->blist);
961 wake_up(&li->block_q);
962 }
963 }
964
965 #define CIFS_LOCK_OP 0
966 #define CIFS_READ_OP 1
967 #define CIFS_WRITE_OP 2
968
969 /* @rw_check : 0 - no op, 1 - read op, 2 - write op */
970 static bool
971 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
972 __u64 length, __u8 type, __u16 flags,
973 struct cifsFileInfo *cfile,
974 struct cifsLockInfo **conf_lock, int rw_check)
975 {
976 struct cifsLockInfo *li;
977 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
978 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
979
980 list_for_each_entry(li, &fdlocks->locks, llist) {
981 if (offset + length <= li->offset ||
982 offset >= li->offset + li->length)
983 continue;
984 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
985 server->ops->compare_fids(cfile, cur_cfile)) {
986 /* shared lock prevents write op through the same fid */
987 if (!(li->type & server->vals->shared_lock_type) ||
988 rw_check != CIFS_WRITE_OP)
989 continue;
990 }
991 if ((type & server->vals->shared_lock_type) &&
992 ((server->ops->compare_fids(cfile, cur_cfile) &&
993 current->tgid == li->pid) || type == li->type))
994 continue;
995 if (rw_check == CIFS_LOCK_OP &&
996 (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
997 server->ops->compare_fids(cfile, cur_cfile))
998 continue;
999 if (conf_lock)
1000 *conf_lock = li;
1001 return true;
1002 }
1003 return false;
1004 }
1005
1006 bool
1007 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1008 __u8 type, __u16 flags,
1009 struct cifsLockInfo **conf_lock, int rw_check)
1010 {
1011 bool rc = false;
1012 struct cifs_fid_locks *cur;
1013 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1014
1015 list_for_each_entry(cur, &cinode->llist, llist) {
1016 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
1017 flags, cfile, conf_lock,
1018 rw_check);
1019 if (rc)
1020 break;
1021 }
1022
1023 return rc;
1024 }
1025
1026 /*
1027  * Check if there is another lock that prevents us to set the lock (mandatory
1028  * style). If such a lock exists, update the flock structure with its
1029  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1030  * or leave it the same if we can't. Returns 0 if we don't need to request to
1031  * the server or 1 otherwise.
1032  */
1033 static int
1034 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1035 __u8 type, struct file_lock *flock)
1036 {
1037 int rc = 0;
1038 struct cifsLockInfo *conf_lock;
1039 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1040 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1041 bool exist;
1042
1043 down_read(&cinode->lock_sem);
1044
1045 exist = cifs_find_lock_conflict(cfile, offset, length, type,
1046 flock->fl_flags, &conf_lock,
1047 CIFS_LOCK_OP);
1048 if (exist) {
1049 flock->fl_start = conf_lock->offset;
1050 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1051 flock->fl_pid = conf_lock->pid;
1052 if (conf_lock->type & server->vals->shared_lock_type)
1053 flock->fl_type = F_RDLCK;
1054 else
1055 flock->fl_type = F_WRLCK;
1056 } else if (!cinode->can_cache_brlcks)
1057 rc = 1;
1058 else
1059 flock->fl_type = F_UNLCK;
1060
1061 up_read(&cinode->lock_sem);
1062 return rc;
1063 }
1064
1065 static void
1066 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1067 {
1068 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1069 cifs_down_write(&cinode->lock_sem);
1070 list_add_tail(&lock->llist, &cfile->llist->locks);
1071 up_write(&cinode->lock_sem);
1072 }
1073
1074 /*
1075  * Set the byte-range lock (mandatory style). Returns:
1076  * 1) 0, if we set the lock and don't need to request to the server;
1077  * 2) 1, if no locks prevent us but we need to request to the server;
1078  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1079  */
1080 static int
1081 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1082 bool wait)
1083 {
1084 struct cifsLockInfo *conf_lock;
1085 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1086 bool exist;
1087 int rc = 0;
1088
1089 try_again:
1090 exist = false;
1091 cifs_down_write(&cinode->lock_sem);
1092
1093 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1094 lock->type, lock->flags, &conf_lock,
1095 CIFS_LOCK_OP);
1096 if (!exist && cinode->can_cache_brlcks) {
1097 list_add_tail(&lock->llist, &cfile->llist->locks);
1098 up_write(&cinode->lock_sem);
1099 return rc;
1100 }
1101
1102 if (!exist)
1103 rc = 1;
1104 else if (!wait)
1105 rc = -EACCES;
1106 else {
1107 list_add_tail(&lock->blist, &conf_lock->blist);
1108 up_write(&cinode->lock_sem);
1109 rc = wait_event_interruptible(lock->block_q,
1110 (lock->blist.prev == &lock->blist) &&
1111 (lock->blist.next == &lock->blist));
1112 if (!rc)
1113 goto try_again;
1114 cifs_down_write(&cinode->lock_sem);
1115 list_del_init(&lock->blist);
1116 }
1117
1118 up_write(&cinode->lock_sem);
1119 return rc;
1120 }
1121
1122 /*
1123  * Check if there is another lock that prevents us to set the lock (posix
1124  * style). If such a lock exists, update the flock structure with its
1125  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1126  * or leave it the same if we can't. Returns 0 if we don't need to request to
1127  * the server or 1 otherwise.
1128  */
1129 static int
1130 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1131 {
1132 int rc = 0;
1133 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1134 unsigned char saved_type = flock->fl_type;
1135
1136 if ((flock->fl_flags & FL_POSIX) == 0)
1137 return 1;
1138
1139 down_read(&cinode->lock_sem);
1140 posix_test_lock(file, flock);
1141
1142 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1143 flock->fl_type = saved_type;
1144 rc = 1;
1145 }
1146
1147 up_read(&cinode->lock_sem);
1148 return rc;
1149 }
1150
1151 /*
1152  * Set the byte-range lock (posix style). Returns:
1153  * 1) <0, if the error occurs while setting the lock;
1154  * 2) 0, if we set the lock and don't need to request to the server;
1155  * 3) 1, if we need to request to the server.
1156  */
1157 static int
1158 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1159 {
1160 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1161 int rc = 1;
1162
1163 if ((flock->fl_flags & FL_POSIX) == 0)
1164 return rc;
1165
1166 try_again:
1167 cifs_down_write(&cinode->lock_sem);
1168 if (!cinode->can_cache_brlcks) {
1169 up_write(&cinode->lock_sem);
1170 return rc;
1171 }
1172
1173 rc = posix_lock_file(file, flock, NULL);
1174 up_write(&cinode->lock_sem);
1175 if (rc == FILE_LOCK_DEFERRED) {
1176 rc = wait_event_interruptible(flock->fl_wait,
1177 list_empty(&flock->fl_blocked_member));
1178 if (!rc)
1179 goto try_again;
1180 locks_delete_block(flock);
1181 }
1182 return rc;
1183 }
1184
1185 int
1186 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1187 {
1188 unsigned int xid;
1189 int rc = 0, stored_rc;
1190 struct cifsLockInfo *li, *tmp;
1191 struct cifs_tcon *tcon;
1192 unsigned int num, max_num, max_buf;
1193 LOCKING_ANDX_RANGE *buf, *cur;
1194 static const int types[] = {
1195 LOCKING_ANDX_LARGE_FILES,
1196 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1197 };
1198 int i;
1199
1200 xid = get_xid();
1201 tcon = tlink_tcon(cfile->tlink);
1202
1203 /*
1204  * Accessing maxBuf is racy with cifs_reconnect - need to store value
1205  * and check it for not 0 before using it.
1206  */
1207 max_buf = tcon->ses->server->maxBuf;
1208 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1209 free_xid(xid);
1210 return -EINVAL;
1211 }
1212
1213 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1214 PAGE_SIZE);
1215 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1216 PAGE_SIZE);
1217 max_num = (max_buf - sizeof(struct smb_hdr)) /
1218 sizeof(LOCKING_ANDX_RANGE);
1219 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1220 if (!buf) {
1221 free_xid(xid);
1222 return -ENOMEM;
1223 }
1224
1225 for (i = 0; i < 2; i++) {
1226 cur = buf;
1227 num = 0;
1228 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1229 if (li->type != types[i])
1230 continue;
1231 cur->Pid = cpu_to_le16(li->pid);
1232 cur->LengthLow = cpu_to_le32((u32)li->length);
1233 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1234 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1235 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1236 if (++num == max_num) {
1237 stored_rc = cifs_lockv(xid, tcon,
1238 cfile->fid.netfid,
1239 (__u8)li->type, 0, num,
1240 buf);
1241 if (stored_rc)
1242 rc = stored_rc;
1243 cur = buf;
1244 num = 0;
1245 } else
1246 cur++;
1247 }
1248
1249 if (num) {
1250 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1251 (__u8)types[i], 0, num, buf);
1252 if (stored_rc)
1253 rc = stored_rc;
1254 }
1255 }
1256
1257 kfree(buf);
1258 free_xid(xid);
1259 return rc;
1260 }
1261
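/* Derive the 32-bit lock-owner id that is sent to the server in place of a pid for POSIX locks. */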
1262 static __u32
1263 hash_lockowner(fl_owner_t owner)
1264 {
1265 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1266 }
1267
1268 struct lock_to_push {
1269 struct list_head llist;
1270 __u64 offset;
1271 __u64 length;
1272 __u32 pid;
1273 __u16 netfid;
1274 __u8 type;
1275 };
1276
1277 static int
1278 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1279 {
1280 struct inode *inode = d_inode(cfile->dentry);
1281 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1282 struct file_lock *flock;
1283 struct file_lock_context *flctx = inode->i_flctx;
1284 unsigned int count = 0, i;
1285 int rc = 0, xid, type;
1286 struct list_head locks_to_send, *el;
1287 struct lock_to_push *lck, *tmp;
1288 __u64 length;
1289
1290 xid = get_xid();
1291
1292 if (!flctx)
1293 goto out;
1294
1295 spin_lock(&flctx->flc_lock);
1296 list_for_each(el, &flctx->flc_posix) {
1297 count++;
1298 }
1299 spin_unlock(&flctx->flc_lock);
1300
1301 INIT_LIST_HEAD(&locks_to_send);
1302
1303 /*
1304  * Allocating count locks is enough because no FL_POSIX locks can be
1305  * added to the list while we are holding cinode->lock_sem that
1306  * protects locking operations of this inode.
1307  */
1308 for (i = 0; i < count; i++) {
1309 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1310 if (!lck) {
1311 rc = -ENOMEM;
1312 goto err_out;
1313 }
1314 list_add_tail(&lck->llist, &locks_to_send);
1315 }
1316
1317 el = locks_to_send.next;
1318 spin_lock(&flctx->flc_lock);
1319 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
1320 if (el == &locks_to_send) {
1321 /*
1322  * The list ended. We don't have enough allocated
1323  * structures - something is really wrong.
1324  */
1325 cifs_dbg(VFS, "Can't push all brlocks!\n");
1326 break;
1327 }
1328 length = 1 + flock->fl_end - flock->fl_start;
1329 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1330 type = CIFS_RDLCK;
1331 else
1332 type = CIFS_WRLCK;
1333 lck = list_entry(el, struct lock_to_push, llist);
1334 lck->pid = hash_lockowner(flock->fl_owner);
1335 lck->netfid = cfile->fid.netfid;
1336 lck->length = length;
1337 lck->type = type;
1338 lck->offset = flock->fl_start;
1339 }
1340 spin_unlock(&flctx->flc_lock);
1341
1342 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1343 int stored_rc;
1344
1345 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1346 lck->offset, lck->length, NULL,
1347 lck->type, 0);
1348 if (stored_rc)
1349 rc = stored_rc;
1350 list_del(&lck->llist);
1351 kfree(lck);
1352 }
1353
1354 out:
1355 free_xid(xid);
1356 return rc;
1357 err_out:
1358 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1359 list_del(&lck->llist);
1360 kfree(lck);
1361 }
1362 goto out;
1363 }
1364
1365 static int
1366 cifs_push_locks(struct cifsFileInfo *cfile)
1367 {
1368 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1369 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1370 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1371 int rc = 0;
1372
1373 /* we are going to update can_cache_brlcks here - need a write access */
1374 cifs_down_write(&cinode->lock_sem);
1375 if (!cinode->can_cache_brlcks) {
1376 up_write(&cinode->lock_sem);
1377 return rc;
1378 }
1379
1380 if (cap_unix(tcon->ses) &&
1381 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1382 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1383 rc = cifs_push_posix_locks(cfile);
1384 else
1385 rc = tcon->ses->server->ops->push_mand_locks(cfile);
1386
1387 cinode->can_cache_brlcks = false;
1388 up_write(&cinode->lock_sem);
1389 return rc;
1390 }
1391
1392 static void
1393 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1394 bool *wait_flag, struct TCP_Server_Info *server)
1395 {
1396 if (flock->fl_flags & FL_POSIX)
1397 cifs_dbg(FYI, "Posix\n");
1398 if (flock->fl_flags & FL_FLOCK)
1399 cifs_dbg(FYI, "Flock\n");
1400 if (flock->fl_flags & FL_SLEEP) {
1401 cifs_dbg(FYI, "Blocking lock\n");
1402 *wait_flag = true;
1403 }
1404 if (flock->fl_flags & FL_ACCESS)
1405 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1406 if (flock->fl_flags & FL_LEASE)
1407 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1408 if (flock->fl_flags &
1409 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1410 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1411 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
1412
1413 *type = server->vals->large_lock_type;
1414 if (flock->fl_type == F_WRLCK) {
1415 cifs_dbg(FYI, "F_WRLCK\n");
1416 *type |= server->vals->exclusive_lock_type;
1417 *lock = 1;
1418 } else if (flock->fl_type == F_UNLCK) {
1419 cifs_dbg(FYI, "F_UNLCK\n");
1420 *type |= server->vals->unlock_lock_type;
1421 *unlock = 1;
1422
1423 } else if (flock->fl_type == F_RDLCK) {
1424 cifs_dbg(FYI, "F_RDLCK\n");
1425 *type |= server->vals->shared_lock_type;
1426 *lock = 1;
1427 } else if (flock->fl_type == F_EXLCK) {
1428 cifs_dbg(FYI, "F_EXLCK\n");
1429 *type |= server->vals->exclusive_lock_type;
1430 *lock = 1;
1431 } else if (flock->fl_type == F_SHLCK) {
1432 cifs_dbg(FYI, "F_SHLCK\n");
1433 *type |= server->vals->shared_lock_type;
1434 *lock = 1;
1435 } else
1436 cifs_dbg(FYI, "Unknown type of lock\n");
1437 }
1438
1439 static int
1440 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1441 bool wait_flag, bool posix_lck, unsigned int xid)
1442 {
1443 int rc = 0;
1444 __u64 length = 1 + flock->fl_end - flock->fl_start;
1445 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1446 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1447 struct TCP_Server_Info *server = tcon->ses->server;
1448 __u16 netfid = cfile->fid.netfid;
1449
1450 if (posix_lck) {
1451 int posix_lock_type;
1452
1453 rc = cifs_posix_lock_test(file, flock);
1454 if (!rc)
1455 return rc;
1456
1457 if (type & server->vals->shared_lock_type)
1458 posix_lock_type = CIFS_RDLCK;
1459 else
1460 posix_lock_type = CIFS_WRLCK;
1461 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1462 hash_lockowner(flock->fl_owner),
1463 flock->fl_start, length, flock,
1464 posix_lock_type, wait_flag);
1465 return rc;
1466 }
1467
1468 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
1469 if (!rc)
1470 return rc;
1471
1472 /* BB we could chain these into one lock request BB */
1473 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1474 1, 0, false);
1475 if (rc == 0) {
1476 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1477 type, 0, 1, false);
1478 flock->fl_type = F_UNLCK;
1479 if (rc != 0)
1480 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1481 rc);
1482 return 0;
1483 }
1484
1485 if (type & server->vals->shared_lock_type) {
1486 flock->fl_type = F_WRLCK;
1487 return 0;
1488 }
1489
1490 type &= ~server->vals->exclusive_lock_type;
1491
1492 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1493 type | server->vals->shared_lock_type,
1494 1, 0, false);
1495 if (rc == 0) {
1496 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1497 type | server->vals->shared_lock_type, 0, 1, false);
1498 flock->fl_type = F_RDLCK;
1499 if (rc != 0)
1500 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1501 rc);
1502 } else
1503 flock->fl_type = F_WRLCK;
1504
1505 return 0;
1506 }
1507
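/* Splice all byte-range lock entries from the source list onto the destination list. */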
1508 void
1509 cifs_move_llist(struct list_head *source, struct list_head *dest)
1510 {
1511 struct list_head *li, *tmp;
1512 list_for_each_safe(li, tmp, source)
1513 list_move(li, dest);
1514 }
1515
1516 void
1517 cifs_free_llist(struct list_head *llist)
1518 {
1519 struct cifsLockInfo *li, *tmp;
1520 list_for_each_entry_safe(li, tmp, llist, llist) {
1521 cifs_del_lock_waiters(li);
1522 list_del(&li->llist);
1523 kfree(li);
1524 }
1525 }
1526
1527 int
1528 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1529 unsigned int xid)
1530 {
1531 int rc = 0, stored_rc;
1532 static const int types[] = {
1533 LOCKING_ANDX_LARGE_FILES,
1534 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1535 };
1536 unsigned int i;
1537 unsigned int max_num, num, max_buf;
1538 LOCKING_ANDX_RANGE *buf, *cur;
1539 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1540 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1541 struct cifsLockInfo *li, *tmp;
1542 __u64 length = 1 + flock->fl_end - flock->fl_start;
1543 struct list_head tmp_llist;
1544
1545 INIT_LIST_HEAD(&tmp_llist);
1546
1547 /*
1548  * Accessing maxBuf is racy with cifs_reconnect - need to store value
1549  * and check it for not 0 before using it.
1550  */
1551 max_buf = tcon->ses->server->maxBuf;
1552 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
1553 return -EINVAL;
1554
1555 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1556 PAGE_SIZE);
1557 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1558 PAGE_SIZE);
1559 max_num = (max_buf - sizeof(struct smb_hdr)) /
1560 sizeof(LOCKING_ANDX_RANGE);
1561 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1562 if (!buf)
1563 return -ENOMEM;
1564
1565 cifs_down_write(&cinode->lock_sem);
1566 for (i = 0; i < 2; i++) {
1567 cur = buf;
1568 num = 0;
1569 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1570 if (flock->fl_start > li->offset ||
1571 (flock->fl_start + length) <
1572 (li->offset + li->length))
1573 continue;
1574 if (current->tgid != li->pid)
1575 continue;
1576 if (types[i] != li->type)
1577 continue;
1578 if (cinode->can_cache_brlcks) {
1579 /*
1580  * We can cache brlock requests - simply remove a lock
1581  * from the file's list.
1582  */
1583 list_del(&li->llist);
1584 cifs_del_lock_waiters(li);
1585 kfree(li);
1586 continue;
1587 }
1588 cur->Pid = cpu_to_le16(li->pid);
1589 cur->LengthLow = cpu_to_le32((u32)li->length);
1590 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1591 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1592 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1593
1594 /*
1595  * We need to save a lock here to let us add it again to the
1596  * file's list if the unlock range request fails on the server.
1597  */
1598 list_move(&li->llist, &tmp_llist);
1599 if (++num == max_num) {
1600 stored_rc = cifs_lockv(xid, tcon,
1601 cfile->fid.netfid,
1602 li->type, num, 0, buf);
1603 if (stored_rc) {
1604 /*
1605  * We failed on the unlock range request - add all locks
1606  * from the tmp list to the head of the file's list.
1607  */
1609 cifs_move_llist(&tmp_llist,
1610 &cfile->llist->locks);
1611 rc = stored_rc;
1612 } else
1613 /*
1614  * The unlock range request succeeded - free the tmp list.
1615  */
1617 cifs_free_llist(&tmp_llist);
1618 cur = buf;
1619 num = 0;
1620 } else
1621 cur++;
1622 }
1623 if (num) {
1624 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1625 types[i], num, 0, buf);
1626 if (stored_rc) {
1627 cifs_move_llist(&tmp_llist,
1628 &cfile->llist->locks);
1629 rc = stored_rc;
1630 } else
1631 cifs_free_llist(&tmp_llist);
1632 }
1633 }
1634
1635 up_write(&cinode->lock_sem);
1636 kfree(buf);
1637 return rc;
1638 }
1639
1640 static int
1641 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1642 bool wait_flag, bool posix_lck, int lock, int unlock,
1643 unsigned int xid)
1644 {
1645 int rc = 0;
1646 __u64 length = 1 + flock->fl_end - flock->fl_start;
1647 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1648 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1649 struct TCP_Server_Info *server = tcon->ses->server;
1650 struct inode *inode = d_inode(cfile->dentry);
1651
1652 if (posix_lck) {
1653 int posix_lock_type;
1654
1655 rc = cifs_posix_lock_set(file, flock);
1656 if (!rc || rc < 0)
1657 return rc;
1658
1659 if (type & server->vals->shared_lock_type)
1660 posix_lock_type = CIFS_RDLCK;
1661 else
1662 posix_lock_type = CIFS_WRLCK;
1663
1664 if (unlock == 1)
1665 posix_lock_type = CIFS_UNLCK;
1666
1667 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1668 hash_lockowner(flock->fl_owner),
1669 flock->fl_start, length,
1670 NULL, posix_lock_type, wait_flag);
1671 goto out;
1672 }
1673
1674 if (lock) {
1675 struct cifsLockInfo *lock;
1676
1677 lock = cifs_lock_init(flock->fl_start, length, type,
1678 flock->fl_flags);
1679 if (!lock)
1680 return -ENOMEM;
1681
1682 rc = cifs_lock_add_if(cfile, lock, wait_flag);
1683 if (rc < 0) {
1684 kfree(lock);
1685 return rc;
1686 }
1687 if (!rc)
1688 goto out;
1689
1690 /*
1691  * Windows 7 server can delay breaking lease from read to None if we set
1692  * a byte-range lock on a file - break it explicitly before sending the
1693  * lock to the server to be sure the next read won't conflict with
1694  * non-cached data.
1695  */
1697 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1698 CIFS_CACHE_READ(CIFS_I(inode))) {
1699 cifs_zap_mapping(inode);
1700 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1701 inode);
1702 CIFS_I(inode)->oplock = 0;
1703 }
1704
1705 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1706 type, 1, 0, wait_flag);
1707 if (rc) {
1708 kfree(lock);
1709 return rc;
1710 }
1711
1712 cifs_lock_add(cfile, lock);
1713 } else if (unlock)
1714 rc = server->ops->mand_unlock_range(cfile, flock, xid);
1715
1716 out:
1717 if (flock->fl_flags & FL_POSIX) {
1718 /*
1719  * If this is a request to remove all locks because we
1720  * are closing the file, it doesn't matter if the
1721  * unlocking failed as both locks are also removed from
1722  * the server file anyway in cifsFileInfo_put.
1723  */
1724 if (rc) {
1725 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1726 if (!(flock->fl_flags & FL_CLOSE))
1727 return rc;
1728 }
1729 rc = locks_lock_file_wait(file, flock);
1730 }
1731 return rc;
1732 }
1733
1734 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1735 {
1736 int rc, xid;
1737 int lock = 0, unlock = 0;
1738 bool wait_flag = false;
1739 bool posix_lck = false;
1740 struct cifs_sb_info *cifs_sb;
1741 struct cifs_tcon *tcon;
1742 struct cifsFileInfo *cfile;
1743 __u32 type;
1744
1745 rc = -EACCES;
1746 xid = get_xid();
1747
1748 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1749 cmd, flock->fl_flags, flock->fl_type,
1750 flock->fl_start, flock->fl_end);
1751
1752 cfile = (struct cifsFileInfo *)file->private_data;
1753 tcon = tlink_tcon(cfile->tlink);
1754
1755 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1756 tcon->ses->server);
1757 cifs_sb = CIFS_FILE_SB(file);
1758
1759 if (cap_unix(tcon->ses) &&
1760 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1761 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1762 posix_lck = true;
1763
1764 /* BB add code here to normalize offset and length to account for
1765    negative length which we can not accept over the wire */
1766
1767 if (IS_GETLK(cmd)) {
1768 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1769 free_xid(xid);
1770 return rc;
1771 }
1772
1773 if (!lock && !unlock) {
1774 /*
1775  * if no lock or unlock then nothing to do since we do not
1776  * know what it is
1777  */
1778 free_xid(xid);
1779 return -EOPNOTSUPP;
1780 }
1781
1782 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1783 xid);
1784 free_xid(xid);
1785 return rc;
1786 }
1787
1788 /*
1789  * update the file size (if needed) after a write. Should be called with
1790  * the inode->i_lock held
1791  */
1792 void
1793 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1794 unsigned int bytes_written)
1795 {
1796 loff_t end_of_write = offset + bytes_written;
1797
1798 if (end_of_write > cifsi->server_eof)
1799 cifsi->server_eof = end_of_write;
1800 }
1801
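/*
 * Synchronous write helper: send write_size bytes at *offset through the
 * given open file, reopening invalidated handles and retrying on -EAGAIN,
 * then update the cached server EOF and the local i_size.
 */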
1802 static ssize_t
1803 cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1804 size_t write_size, loff_t *offset)
1805 {
1806 int rc = 0;
1807 unsigned int bytes_written = 0;
1808 unsigned int total_written;
1809 struct cifs_tcon *tcon;
1810 struct TCP_Server_Info *server;
1811 unsigned int xid;
1812 struct dentry *dentry = open_file->dentry;
1813 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
1814 struct cifs_io_parms io_parms;
1815
1816 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1817 write_size, *offset, dentry);
1818
1819 tcon = tlink_tcon(open_file->tlink);
1820 server = tcon->ses->server;
1821
1822 if (!server->ops->sync_write)
1823 return -ENOSYS;
1824
1825 xid = get_xid();
1826
1827 for (total_written = 0; write_size > total_written;
1828 total_written += bytes_written) {
1829 rc = -EAGAIN;
1830 while (rc == -EAGAIN) {
1831 struct kvec iov[2];
1832 unsigned int len;
1833
1834 if (open_file->invalidHandle) {
1835 /* we could deadlock if we called
1836    filemap_fdatawait from here so tell
1837    reopen_file not to flush data to
1838    server now */
1839 rc = cifs_reopen_file(open_file, false);
1840 if (rc != 0)
1841 break;
1842 }
1843
1844 len = min(server->ops->wp_retry_size(d_inode(dentry)),
1845 (unsigned int)write_size - total_written);
1846
1847 iov[1].iov_base = (char *)write_data + total_written;
1848 iov[1].iov_len = len;
1849 io_parms.pid = pid;
1850 io_parms.tcon = tcon;
1851 io_parms.offset = *offset;
1852 io_parms.length = len;
1853 rc = server->ops->sync_write(xid, &open_file->fid,
1854 &io_parms, &bytes_written, iov, 1);
1855 }
1856 if (rc || (bytes_written == 0)) {
1857 if (total_written)
1858 break;
1859 else {
1860 free_xid(xid);
1861 return rc;
1862 }
1863 } else {
1864 spin_lock(&d_inode(dentry)->i_lock);
1865 cifs_update_eof(cifsi, *offset, bytes_written);
1866 spin_unlock(&d_inode(dentry)->i_lock);
1867 *offset += bytes_written;
1868 }
1869 }
1870
1871 cifs_stats_bytes_written(tcon, total_written);
1872
1873 if (total_written > 0) {
1874 spin_lock(&d_inode(dentry)->i_lock);
1875 if (*offset > d_inode(dentry)->i_size)
1876 i_size_write(d_inode(dentry), *offset);
1877 spin_unlock(&d_inode(dentry)->i_lock);
1878 }
1879 mark_inode_dirty_sync(d_inode(dentry));
1880 free_xid(xid);
1881 return total_written;
1882 }
1883
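/* Find a valid readable handle open on this inode and take a reference on it; returns NULL if none. */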
1884 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1885 bool fsuid_only)
1886 {
1887 struct cifsFileInfo *open_file = NULL;
1888 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1889
1890 /* only filter by fsuid on multiuser mounts */
1891 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1892 fsuid_only = false;
1893
1894 spin_lock(&cifs_inode->open_file_lock);
1895
1896 /* write-only entries are always at the end of the list, but the first
1897    entry might have a close pending, so walk the whole list */
1898 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1899 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1900 continue;
1901 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1902 if (!open_file->invalidHandle) {
1903 /* found a good file */
1904 /* lock it so it will not be closed on us */
1905 cifsFileInfo_get(open_file);
1906 spin_unlock(&cifs_inode->open_file_lock);
1907 return open_file;
1908 }
1909 /* else might as well continue, and look for
1910    another, or simply have the caller reopen it again */
1911 } else
1912 break;
1913 }
1914 spin_unlock(&cifs_inode->open_file_lock);
1915 return NULL;
1916 }
1917
1918
1919 int
1920 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
1921 struct cifsFileInfo **ret_file)
1922 {
1923 struct cifsFileInfo *open_file, *inv_file = NULL;
1924 struct cifs_sb_info *cifs_sb;
1925 bool any_available = false;
1926 int rc = -EBADF;
1927 unsigned int refind = 0;
1928 bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
1929 bool with_delete = flags & FIND_WR_WITH_DELETE;
1930 *ret_file = NULL;
1931
1932
1933 /*
1934  * Having a null inode here (because mapping->host was set to zero by
1935  * the VFS or MM) should not happen but we had reports of on oops (due
1936  * to it being zero) during stress testcases so we need to check for it
1937  */
1938 if (cifs_inode == NULL) {
1939 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
1940 dump_stack();
1941 return rc;
1942 }
1943
1944 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1945
1946 /* only filter by fsuid on multiuser mounts */
1947 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1948 fsuid_only = false;
1949
1950 spin_lock(&cifs_inode->open_file_lock);
1951 refind_writable:
1952 if (refind > MAX_REOPEN_ATT) {
1953 spin_unlock(&cifs_inode->open_file_lock);
1954 return rc;
1955 }
1956 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1957 if (!any_available && open_file->pid != current->tgid)
1958 continue;
1959 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1960 continue;
1961 if (with_delete && !(open_file->fid.access & DELETE))
1962 continue;
1963 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1964 if (!open_file->invalidHandle) {
1965 /* found a good writable file */
1966 cifsFileInfo_get(open_file);
1967 spin_unlock(&cifs_inode->open_file_lock);
1968 *ret_file = open_file;
1969 return 0;
1970 } else {
1971 if (!inv_file)
1972 inv_file = open_file;
1973 }
1974 }
1975 }
1976
1977 if (!any_available) {
1978 any_available = true;
1979 goto refind_writable;
1980 }
1981
1982 if (inv_file) {
1983 any_available = false;
1984 cifsFileInfo_get(inv_file);
1985 }
1986
1987 spin_unlock(&cifs_inode->open_file_lock);
1988
1989 if (inv_file) {
1990 rc = cifs_reopen_file(inv_file, false);
1991 if (!rc) {
1992 *ret_file = inv_file;
1993 return 0;
1994 }
1995
1996 spin_lock(&cifs_inode->open_file_lock);
1997 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
1998 spin_unlock(&cifs_inode->open_file_lock);
1999 cifsFileInfo_put(inv_file);
2000 ++refind;
2001 inv_file = NULL;
2002 spin_lock(&cifs_inode->open_file_lock);
2003 goto refind_writable;
2004 }
2005
2006 return rc;
2007 }
2008
2009 struct cifsFileInfo *
2010 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2011 {
2012 struct cifsFileInfo *cfile;
2013 int rc;
2014
2015 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2016 if (rc)
2017 cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
2018
2019 return cfile;
2020 }
2021
2022 int
2023 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2024 int flags,
2025 struct cifsFileInfo **ret_file)
2026 {
2027 struct list_head *tmp;
2028 struct cifsFileInfo *cfile;
2029 struct cifsInodeInfo *cinode;
2030 char *full_path;
2031
2032 *ret_file = NULL;
2033
2034 spin_lock(&tcon->open_file_lock);
2035 list_for_each(tmp, &tcon->openFileList) {
2036 cfile = list_entry(tmp, struct cifsFileInfo,
2037 tlist);
2038 full_path = build_path_from_dentry(cfile->dentry);
2039 if (full_path == NULL) {
2040 spin_unlock(&tcon->open_file_lock);
2041 return -ENOMEM;
2042 }
2043 if (strcmp(full_path, name)) {
2044 kfree(full_path);
2045 continue;
2046 }
2047
2048 kfree(full_path);
2049 cinode = CIFS_I(d_inode(cfile->dentry));
2050 spin_unlock(&tcon->open_file_lock);
2051 return cifs_get_writable_file(cinode, flags, ret_file);
2052 }
2053
2054 spin_unlock(&tcon->open_file_lock);
2055 return -ENOENT;
2056 }
2057
2058 int
2059 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2060 struct cifsFileInfo **ret_file)
2061 {
2062 struct list_head *tmp;
2063 struct cifsFileInfo *cfile;
2064 struct cifsInodeInfo *cinode;
2065 char *full_path;
2066
2067 *ret_file = NULL;
2068
2069 spin_lock(&tcon->open_file_lock);
2070 list_for_each(tmp, &tcon->openFileList) {
2071 cfile = list_entry(tmp, struct cifsFileInfo,
2072 tlist);
2073 full_path = build_path_from_dentry(cfile->dentry);
2074 if (full_path == NULL) {
2075 spin_unlock(&tcon->open_file_lock);
2076 return -ENOMEM;
2077 }
2078 if (strcmp(full_path, name)) {
2079 kfree(full_path);
2080 continue;
2081 }
2082
2083 kfree(full_path);
2084 cinode = CIFS_I(d_inode(cfile->dentry));
2085 spin_unlock(&tcon->open_file_lock);
2086 *ret_file = find_readable_file(cinode, 0);
2087 return *ret_file ? 0 : -ENOENT;
2088 }
2089
2090 spin_unlock(&tcon->open_file_lock);
2091 return -ENOENT;
2092 }
2093
2094 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2095 {
2096 struct address_space *mapping = page->mapping;
2097 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
2098 char *write_data;
2099 int rc = -EFAULT;
2100 int bytes_written = 0;
2101 struct inode *inode;
2102 struct cifsFileInfo *open_file;
2103
2104 if (!mapping || !mapping->host)
2105 return -EFAULT;
2106
2107 inode = page->mapping->host;
2108
2109 offset += (loff_t)from;
2110 write_data = kmap(page);
2111 write_data += from;
2112
2113 if ((to > PAGE_SIZE) || (from > to)) {
2114 kunmap(page);
2115 return -EIO;
2116 }
2117
2118 /* racing with truncate? */
2119 if (offset > mapping->host->i_size) {
2120 kunmap(page);
2121 return 0;
2122 }
2123
2124 /* check to make sure that we are not extending the file */
2125 if (mapping->host->i_size - offset < (loff_t)to)
2126 to = (unsigned)(mapping->host->i_size - offset);
2127
2128 rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
2129 &open_file);
2130 if (!rc) {
2131 bytes_written = cifs_write(open_file, open_file->pid,
2132 write_data, to - from, &offset);
2133 cifsFileInfo_put(open_file);
2134 /* Does mm or vfs already set times? */
2135 inode->i_atime = inode->i_mtime = current_time(inode);
2136 if ((bytes_written > 0) && (offset))
2137 rc = 0;
2138 else if (bytes_written < 0)
2139 rc = bytes_written;
2140 else
2141 rc = -EFAULT;
2142 } else {
2143 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2144 if (!is_retryable_error(rc))
2145 rc = -EIO;
2146 }
2147
2148 kunmap(page);
2149 return rc;
2150 }
2151
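/* Allocate a writedata descriptor and gather up to tofind dirty pages from the mapping, starting at *index. */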
2152 static struct cifs_writedata *
2153 wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2154 pgoff_t end, pgoff_t *index,
2155 unsigned int *found_pages)
2156 {
2157 struct cifs_writedata *wdata;
2158
2159 wdata = cifs_writedata_alloc((unsigned int)tofind,
2160 cifs_writev_complete);
2161 if (!wdata)
2162 return NULL;
2163
2164 *found_pages = find_get_pages_range_tag(mapping, index, end,
2165 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
2166 return wdata;
2167 }
2168
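/*
 * Lock and tag for writeback a contiguous run of the pages gathered above;
 * returns how many of found_pages will be written and releases the rest.
 */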
2169 static unsigned int
2170 wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2171 struct address_space *mapping,
2172 struct writeback_control *wbc,
2173 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2174 {
2175 unsigned int nr_pages = 0, i;
2176 struct page *page;
2177
2178 for (i = 0; i < found_pages; i++) {
2179 page = wdata->pages[i];
2180
2181 /*
2182  * At this point we hold neither the i_pages lock nor the page lock:
2183  * the page may have been truncated or invalidated (changing
2184  * page->mapping to NULL), so everything is rechecked once the page
2185  * is locked below.
2186  */
2187 if (nr_pages == 0)
2188 lock_page(page);
2189 else if (!trylock_page(page))
2190 break;
2191
2192 if (unlikely(page->mapping != mapping)) {
2193 unlock_page(page);
2194 break;
2195 }
2196
2197 if (!wbc->range_cyclic && page->index > end) {
2198 *done = true;
2199 unlock_page(page);
2200 break;
2201 }
2202
2203 if (*next && (page->index != *next)) {
2204 /* not the next consecutive index -- stop the contiguous batch here */
2205 unlock_page(page);
2206 break;
2207 }
2208
2209 if (wbc->sync_mode != WB_SYNC_NONE)
2210 wait_on_page_writeback(page);
2211
2212 if (PageWriteback(page) ||
2213 !clear_page_dirty_for_io(page)) {
2214 unlock_page(page);
2215 break;
2216 }
2217
2218 /*
2219  * Mark the page for writeback now, while it is still locked;
2220  * clear_page_dirty_for_io() above has already cleared the dirty tag.
2221  */
2222 set_page_writeback(page);
2223 if (page_offset(page) >= i_size_read(mapping->host)) {
2224 *done = true;
2225 unlock_page(page);
2226 end_page_writeback(page);
2227 break;
2228 }
2229
2230 wdata->pages[i] = page;
2231 *next = page->index + 1;
2232 ++nr_pages;
2233 }
2234
2235
2236 if (nr_pages == 0)
2237 *index = wdata->pages[0]->index + 1;
2238
2239 /* put any pages we are not going to use */
2240 for (i = nr_pages; i < found_pages; i++) {
2241 put_page(wdata->pages[i]);
2242 wdata->pages[i] = NULL;
2243 }
2244
2245 return nr_pages;
2246 }
2247
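/*
 * Finish filling in the wdata (offset, sizes, pid), adjust the credits to
 * the actual request size and hand the batch to the async write path.
 */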
2248 static int
2249 wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2250 struct address_space *mapping, struct writeback_control *wbc)
2251 {
2252 int rc;
2253 struct TCP_Server_Info *server =
2254 tlink_tcon(wdata->cfile->tlink)->ses->server;
2255
2256 wdata->sync_mode = wbc->sync_mode;
2257 wdata->nr_pages = nr_pages;
2258 wdata->offset = page_offset(wdata->pages[0]);
2259 wdata->pagesz = PAGE_SIZE;
2260 wdata->tailsz = min(i_size_read(mapping->host) -
2261 page_offset(wdata->pages[nr_pages - 1]),
2262 (loff_t)PAGE_SIZE);
2263 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
2264 wdata->pid = wdata->cfile->pid;
2265
2266 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2267 if (rc)
2268 return rc;
2269
2270 if (wdata->cfile->invalidHandle)
2271 rc = -EAGAIN;
2272 else
2273 rc = server->ops->async_writev(wdata, cifs_writedata_release);
2274
2275 return rc;
2276 }
2277
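/*
 * Write back dirty pages for the mapping: gather contiguous dirty pages into
 * wsize-sized batches and issue them as asynchronous writes.
 */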
2278 static int cifs_writepages(struct address_space *mapping,
2279 struct writeback_control *wbc)
2280 {
2281 struct inode *inode = mapping->host;
2282 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2283 struct TCP_Server_Info *server;
2284 bool done = false, scanned = false, range_whole = false;
2285 pgoff_t end, index;
2286 struct cifs_writedata *wdata;
2287 struct cifsFileInfo *cfile = NULL;
2288 int rc = 0;
2289 int saved_rc = 0;
2290 unsigned int xid;
2291
2292 /*
2293  * If wsize is smaller than the page cache size, default to writing
2294  * one page at a time via cifs_writepage().
2295  */
2296 if (cifs_sb->wsize < PAGE_SIZE)
2297 return generic_writepages(mapping, wbc);
2298
2299 xid = get_xid();
2300 if (wbc->range_cyclic) {
2301 index = mapping->writeback_index;
2302 end = -1;
2303 } else {
2304 index = wbc->range_start >> PAGE_SHIFT;
2305 end = wbc->range_end >> PAGE_SHIFT;
2306 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2307 range_whole = true;
2308 scanned = true;
2309 }
2310 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
2311 retry:
2312 while (!done && index <= end) {
2313 unsigned int i, nr_pages, found_pages, wsize;
2314 pgoff_t next = 0, tofind, saved_index = index;
2315 struct cifs_credits credits_on_stack;
2316 struct cifs_credits *credits = &credits_on_stack;
2317 int get_file_rc = 0;
2318
2319 if (cfile)
2320 cifsFileInfo_put(cfile);
2321
2322 rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
2323
2324 /* in case of an error, store it so it can be reported later */
2325 if (rc)
2326 get_file_rc = rc;
2327
2328 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2329 &wsize, credits);
2330 if (rc != 0) {
2331 done = true;
2332 break;
2333 }
2334
2335 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2336
2337 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2338 &found_pages);
2339 if (!wdata) {
2340 rc = -ENOMEM;
2341 done = true;
2342 add_credits_and_wake_if(server, credits, 0);
2343 break;
2344 }
2345
2346 if (found_pages == 0) {
2347 kref_put(&wdata->refcount, cifs_writedata_release);
2348 add_credits_and_wake_if(server, credits, 0);
2349 break;
2350 }
2351
2352 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2353 end, &index, &next, &done);
2354
2355 /* nothing left to write? */
2356 if (nr_pages == 0) {
2357 kref_put(&wdata->refcount, cifs_writedata_release);
2358 add_credits_and_wake_if(server, credits, 0);
2359 continue;
2360 }
2361
2362 wdata->credits = credits_on_stack;
2363 wdata->cfile = cfile;
2364 cfile = NULL;
2365
2366 if (!wdata->cfile) {
2367 cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
2368 get_file_rc);
2369 if (is_retryable_error(get_file_rc))
2370 rc = get_file_rc;
2371 else
2372 rc = -EBADF;
2373 } else
2374 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
2375
2376 for (i = 0; i < nr_pages; ++i)
2377 unlock_page(wdata->pages[i]);
2378
2379 /* send failed -- clean up this batch of pages */
2380 if (rc != 0) {
2381 add_credits_and_wake_if(server, &wdata->credits, 0);
2382 for (i = 0; i < nr_pages; ++i) {
2383 if (is_retryable_error(rc))
2384 redirty_page_for_writepage(wbc,
2385 wdata->pages[i]);
2386 else
2387 SetPageError(wdata->pages[i]);
2388 end_page_writeback(wdata->pages[i]);
2389 put_page(wdata->pages[i]);
2390 }
2391 if (!is_retryable_error(rc))
2392 mapping_set_error(mapping, rc);
2393 }
2394 kref_put(&wdata->refcount, cifs_writedata_release);
2395
2396 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2397 index = saved_index;
2398 continue;
2399 }
2400
2401 /* stop immediately if we were interrupted by a signal */
2402 if (is_interrupt_error(rc)) {
2403 done = true;
2404 break;
2405 }
2406
2407 if (rc != 0 && saved_rc == 0)
2408 saved_rc = rc;
2409
2410 wbc->nr_to_write -= nr_pages;
2411 if (wbc->nr_to_write <= 0)
2412 done = true;
2413
2414 index = next;
2415 }
2416
2417 if (!scanned && !done) {
2418 /*
2419  * We hit the last page and there is more work to be done:
2420  * wrap back to the beginning of the file.
2421  */
2422 scanned = true;
2423 index = 0;
2424 goto retry;
2425 }
2426
2427 if (saved_rc != 0)
2428 rc = saved_rc;
2429
2430 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2431 mapping->writeback_index = index;
2432
2433 if (cfile)
2434 cifsFileInfo_put(cfile);
2435 free_xid(xid);
2436 return rc;
2437 }
2438
2439 static int
2440 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2441 {
2442 int rc;
2443 unsigned int xid;
2444
2445 xid = get_xid();
2446
2447 get_page(page);
2448 if (!PageUptodate(page))
2449 cifs_dbg(FYI, "ppw - page not up to date\n");
2450
2451 /*
2452  * Set the "writeback" flag, and clear "dirty" in the radix tree.
2453  *
2454  * A writepage() implementation always needs to do either this,
2455  * or re-dirty the page with "redirty_page_for_writepage()" in
2456  * the case of a failure.
2457  *
2458  * Just unlocking the page will cause the radix tree tag-bits
2459  * to fail to update with the state of the page correctly.
2460  */
2461 set_page_writeback(page);
2462 retry_write:
2463 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2464 if (is_retryable_error(rc)) {
2465 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
2466 goto retry_write;
2467 redirty_page_for_writepage(wbc, page);
2468 } else if (rc != 0) {
2469 SetPageError(page);
2470 mapping_set_error(page->mapping, rc);
2471 } else {
2472 SetPageUptodate(page);
2473 }
2474 end_page_writeback(page);
2475 put_page(page);
2476 free_xid(xid);
2477 return rc;
2478 }
2479
2480 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2481 {
2482 int rc = cifs_writepage_locked(page, wbc);
2483 unlock_page(page);
2484 return rc;
2485 }
2486
2487 static int cifs_write_end(struct file *file, struct address_space *mapping,
2488 loff_t pos, unsigned len, unsigned copied,
2489 struct page *page, void *fsdata)
2490 {
2491 int rc;
2492 struct inode *inode = mapping->host;
2493 struct cifsFileInfo *cfile = file->private_data;
2494 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2495 __u32 pid;
2496
2497 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2498 pid = cfile->pid;
2499 else
2500 pid = current->tgid;
2501
2502 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2503 page, pos, copied);
2504
2505 if (PageChecked(page)) {
2506 if (copied == len)
2507 SetPageUptodate(page);
2508 ClearPageChecked(page);
2509 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2510 SetPageUptodate(page);
2511
2512 if (!PageUptodate(page)) {
2513 char *page_data;
2514 unsigned offset = pos & (PAGE_SIZE - 1);
2515 unsigned int xid;
2516
2517 xid = get_xid();
2518
2519 /*
2520  * Page is not uptodate: push the copied bytes to the server
2521  * synchronously via cifs_write() using the handle we already hold.
2522  */
2523 page_data = kmap(page);
2524 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
2525
2526 kunmap(page);
2527
2528 free_xid(xid);
2529 } else {
2530 rc = copied;
2531 pos += copied;
2532 set_page_dirty(page);
2533 }
2534
2535 if (rc > 0) {
2536 spin_lock(&inode->i_lock);
2537 if (pos > inode->i_size)
2538 i_size_write(inode, pos);
2539 spin_unlock(&inode->i_lock);
2540 }
2541
2542 unlock_page(page);
2543 put_page(page);
2544
2545 return rc;
2546 }
2547
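/*
 * Strict fsync: flush dirty pages, zap the page cache if we do not hold a
 * read lease/oplock, then ask the server to flush the file handle.
 */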
2548 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2549 int datasync)
2550 {
2551 unsigned int xid;
2552 int rc = 0;
2553 struct cifs_tcon *tcon;
2554 struct TCP_Server_Info *server;
2555 struct cifsFileInfo *smbfile = file->private_data;
2556 struct inode *inode = file_inode(file);
2557 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2558
2559 rc = file_write_and_wait_range(file, start, end);
2560 if (rc)
2561 return rc;
2562
2563 xid = get_xid();
2564
2565 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2566 file, datasync);
2567
2568 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2569 rc = cifs_zap_mapping(inode);
2570 if (rc) {
2571 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2572 rc = 0;
2573 }
2574 }
2575
2576 tcon = tlink_tcon(smbfile->tlink);
2577 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2578 server = tcon->ses->server;
2579 if (server->ops->flush)
2580 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2581 else
2582 rc = -ENOSYS;
2583 }
2584
2585 free_xid(xid);
2586 return rc;
2587 }
2588
2589 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2590 {
2591 unsigned int xid;
2592 int rc = 0;
2593 struct cifs_tcon *tcon;
2594 struct TCP_Server_Info *server;
2595 struct cifsFileInfo *smbfile = file->private_data;
2596 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2597
2598 rc = file_write_and_wait_range(file, start, end);
2599 if (rc)
2600 return rc;
2601
2602 xid = get_xid();
2603
2604 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2605 file, datasync);
2606
2607 tcon = tlink_tcon(smbfile->tlink);
2608 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2609 server = tcon->ses->server;
2610 if (server->ops->flush)
2611 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2612 else
2613 rc = -ENOSYS;
2614 }
2615
2616 free_xid(xid);
2617 return rc;
2618 }
2619
2620 /*
2621  * As the file closes, flush all cached write data for this inode,
2622  * checking for write-behind errors.
2623  */
2624 int cifs_flush(struct file *file, fl_owner_t id)
2625 {
2626 struct inode *inode = file_inode(file);
2627 int rc = 0;
2628
2629 if (file->f_mode & FMODE_WRITE)
2630 rc = filemap_write_and_wait(inode->i_mapping);
2631
2632 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2633
2634 return rc;
2635 }
2636
2637 static int
2638 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2639 {
2640 int rc = 0;
2641 unsigned long i;
2642
2643 for (i = 0; i < num_pages; i++) {
2644 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2645 if (!pages[i]) {
2646 /*
2647  * Save the number of pages allocated so far and return -ENOMEM;
2648  * the partial allocation is freed below.
2649  */
2650 num_pages = i;
2651 rc = -ENOMEM;
2652 break;
2653 }
2654 }
2655
2656 if (rc) {
2657 for (i = 0; i < num_pages; i++)
2658 put_page(pages[i]);
2659 }
2660 return rc;
2661 }
2662
2663 static inline
2664 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2665 {
2666 size_t num_pages;
2667 size_t clen;
2668
2669 clen = min_t(const size_t, len, wsize);
2670 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2671
2672 if (cur_len)
2673 *cur_len = clen;
2674
2675 return num_pages;
2676 }
2677
2678 static void
2679 cifs_uncached_writedata_release(struct kref *refcount)
2680 {
2681 int i;
2682 struct cifs_writedata *wdata = container_of(refcount,
2683 struct cifs_writedata, refcount);
2684
2685 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
2686 for (i = 0; i < wdata->nr_pages; i++)
2687 put_page(wdata->pages[i]);
2688 cifs_writedata_release(refcount);
2689 }
2690
2691 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2692
2693 static void
2694 cifs_uncached_writev_complete(struct work_struct *work)
2695 {
2696 struct cifs_writedata *wdata = container_of(work,
2697 struct cifs_writedata, work);
2698 struct inode *inode = d_inode(wdata->cfile->dentry);
2699 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2700
2701 spin_lock(&inode->i_lock);
2702 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2703 if (cifsi->server_eof > inode->i_size)
2704 i_size_write(inode, cifsi->server_eof);
2705 spin_unlock(&inode->i_lock);
2706
2707 complete(&wdata->done);
2708 collect_uncached_write_data(wdata->ctx);
2709
2710 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2711 }
2712
2713 static int
2714 wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2715 size_t *len, unsigned long *num_pages)
2716 {
2717 size_t save_len, copied, bytes, cur_len = *len;
2718 unsigned long i, nr_pages = *num_pages;
2719
2720 save_len = cur_len;
2721 for (i = 0; i < nr_pages; i++) {
2722 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2723 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2724 cur_len -= copied;
2725
2726 /*
2727  * If we didn't copy as much as expected, we probably hit an
2728  * unmapped area in the user buffer. Stop copying here; the
2729  * next pass through the outer loop will then see a zero-length
2730  * copy and bail out.
2731  */
2732 if (copied < bytes)
2733 break;
2734 }
2735 cur_len = save_len - cur_len;
2736 *len = cur_len;
2737
2738
2739 /*
2740  * If we have no data to send, the copy above failed altogether,
2741  * most likely because the address in the iovec was bogus.
2742  * Return -EFAULT and let the caller clean up and bail out.
2743  */
2744 if (!cur_len)
2745 return -EFAULT;
2746
2747
2748 /*
2749  * i + 1 is the number of pages actually filled in the copy above.
2750  */
2751 *num_pages = i + 1;
2752 return 0;
2753 }
2754
2755 static int
2756 cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
2757 struct cifs_aio_ctx *ctx)
2758 {
2759 unsigned int wsize;
2760 struct cifs_credits credits;
2761 int rc;
2762 struct TCP_Server_Info *server =
2763 tlink_tcon(wdata->cfile->tlink)->ses->server;
2764
2765 do {
2766 if (wdata->cfile->invalidHandle) {
2767 rc = cifs_reopen_file(wdata->cfile, false);
2768 if (rc == -EAGAIN)
2769 continue;
2770 else if (rc)
2771 break;
2772 }
2773
2774
2775 /*
2776  * Wait for credits to resend this wdata.
2777  * Note: we are attempting to resend the whole wdata rather
2778  * than in segments.
2779  */
2780 do {
2781 rc = server->ops->wait_mtu_credits(server, wdata->bytes,
2782 &wsize, &credits);
2783 if (rc)
2784 goto fail;
2785
2786 if (wsize < wdata->bytes) {
2787 add_credits_and_wake_if(server, &credits, 0);
2788 msleep(1000);
2789 }
2790 } while (wsize < wdata->bytes);
2791 wdata->credits = credits;
2792
2793 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2794
2795 if (!rc) {
2796 if (wdata->cfile->invalidHandle)
2797 rc = -EAGAIN;
2798 else
2799 rc = server->ops->async_writev(wdata,
2800 cifs_uncached_writedata_release);
2801 }
2802
2803 /* if the write was successfully resent, we are done */
2804 if (!rc) {
2805 list_add_tail(&wdata->list, wdata_list);
2806 return 0;
2807 }
2808
2809
2810 add_credits_and_wake_if(server, &wdata->credits, 0);
2811 } while (rc == -EAGAIN);
2812
2813 fail:
2814 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2815 return rc;
2816 }
2817
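/*
 * Split an uncached (or direct) write into wsize-sized requests, pinning the
 * user pages (direct I/O) or copying the data into freshly allocated pages,
 * and queue each request asynchronously on @wdata_list.
 */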
2818 static int
2819 cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2820 struct cifsFileInfo *open_file,
2821 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
2822 struct cifs_aio_ctx *ctx)
2823 {
2824 int rc = 0;
2825 size_t cur_len;
2826 unsigned long nr_pages, num_pages, i;
2827 struct cifs_writedata *wdata;
2828 struct iov_iter saved_from = *from;
2829 loff_t saved_offset = offset;
2830 pid_t pid;
2831 struct TCP_Server_Info *server;
2832 struct page **pagevec;
2833 size_t start;
2834 unsigned int xid;
2835
2836 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2837 pid = open_file->pid;
2838 else
2839 pid = current->tgid;
2840
2841 server = tlink_tcon(open_file->tlink)->ses->server;
2842 xid = get_xid();
2843
2844 do {
2845 unsigned int wsize;
2846 struct cifs_credits credits_on_stack;
2847 struct cifs_credits *credits = &credits_on_stack;
2848
2849 if (open_file->invalidHandle) {
2850 rc = cifs_reopen_file(open_file, false);
2851 if (rc == -EAGAIN)
2852 continue;
2853 else if (rc)
2854 break;
2855 }
2856
2857 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2858 &wsize, credits);
2859 if (rc)
2860 break;
2861
2862 cur_len = min_t(const size_t, len, wsize);
2863
2864 if (ctx->direct_io) {
2865 ssize_t result;
2866
2867 result = iov_iter_get_pages_alloc(
2868 from, &pagevec, cur_len, &start);
2869 if (result < 0) {
2870 cifs_dbg(VFS,
2871 "direct_writev couldn't get user pages "
2872 "(rc=%zd) iter type %d iov_offset %zd "
2873 "count %zd\n",
2874 result, from->type,
2875 from->iov_offset, from->count);
2876 dump_stack();
2877
2878 rc = result;
2879 add_credits_and_wake_if(server, credits, 0);
2880 break;
2881 }
2882 cur_len = (size_t)result;
2883 iov_iter_advance(from, cur_len);
2884
2885 nr_pages =
2886 (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
2887
2888 wdata = cifs_writedata_direct_alloc(pagevec,
2889 cifs_uncached_writev_complete);
2890 if (!wdata) {
2891 rc = -ENOMEM;
2892 add_credits_and_wake_if(server, credits, 0);
2893 break;
2894 }
2895
2896
2897 wdata->page_offset = start;
2898 wdata->tailsz =
2899 nr_pages > 1 ?
2900 cur_len - (PAGE_SIZE - start) -
2901 (nr_pages - 2) * PAGE_SIZE :
2902 cur_len;
2903 } else {
2904 nr_pages = get_numpages(wsize, len, &cur_len);
2905 wdata = cifs_writedata_alloc(nr_pages,
2906 cifs_uncached_writev_complete);
2907 if (!wdata) {
2908 rc = -ENOMEM;
2909 add_credits_and_wake_if(server, credits, 0);
2910 break;
2911 }
2912
2913 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2914 if (rc) {
2915 kvfree(wdata->pages);
2916 kfree(wdata);
2917 add_credits_and_wake_if(server, credits, 0);
2918 break;
2919 }
2920
2921 num_pages = nr_pages;
2922 rc = wdata_fill_from_iovec(
2923 wdata, from, &cur_len, &num_pages);
2924 if (rc) {
2925 for (i = 0; i < nr_pages; i++)
2926 put_page(wdata->pages[i]);
2927 kvfree(wdata->pages);
2928 kfree(wdata);
2929 add_credits_and_wake_if(server, credits, 0);
2930 break;
2931 }
2932
2933 /*
2934  * Bring nr_pages down to the number of pages we actually used,
2935  * and free any pages that we did not use.
2936  */
2937 for ( ; nr_pages > num_pages; nr_pages--)
2938 put_page(wdata->pages[nr_pages - 1]);
2939
2940 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2941 }
2942
2943 wdata->sync_mode = WB_SYNC_ALL;
2944 wdata->nr_pages = nr_pages;
2945 wdata->offset = (__u64)offset;
2946 wdata->cfile = cifsFileInfo_get(open_file);
2947 wdata->pid = pid;
2948 wdata->bytes = cur_len;
2949 wdata->pagesz = PAGE_SIZE;
2950 wdata->credits = credits_on_stack;
2951 wdata->ctx = ctx;
2952 kref_get(&ctx->refcount);
2953
2954 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2955
2956 if (!rc) {
2957 if (wdata->cfile->invalidHandle)
2958 rc = -EAGAIN;
2959 else
2960 rc = server->ops->async_writev(wdata,
2961 cifs_uncached_writedata_release);
2962 }
2963
2964 if (rc) {
2965 add_credits_and_wake_if(server, &wdata->credits, 0);
2966 kref_put(&wdata->refcount,
2967 cifs_uncached_writedata_release);
2968 if (rc == -EAGAIN) {
2969 *from = saved_from;
2970 iov_iter_advance(from, offset - saved_offset);
2971 continue;
2972 }
2973 break;
2974 }
2975
2976 list_add_tail(&wdata->list, wdata_list);
2977 offset += cur_len;
2978 len -= cur_len;
2979 } while (len > 0);
2980
2981 free_xid(xid);
2982 return rc;
2983 }
2984
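/*
 * Collect the results of the outstanding uncached writes tracked in @ctx,
 * resending any that failed with a retryable error, and complete the aio
 * context once everything has finished.
 */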
2985 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2986 {
2987 struct cifs_writedata *wdata, *tmp;
2988 struct cifs_tcon *tcon;
2989 struct cifs_sb_info *cifs_sb;
2990 struct dentry *dentry = ctx->cfile->dentry;
2991 int rc;
2992
2993 tcon = tlink_tcon(ctx->cfile->tlink);
2994 cifs_sb = CIFS_SB(dentry->d_sb);
2995
2996 mutex_lock(&ctx->aio_mutex);
2997
2998 if (list_empty(&ctx->list)) {
2999 mutex_unlock(&ctx->aio_mutex);
3000 return;
3001 }
3002
3003 rc = ctx->rc;
3004
3005 /*
3006  * Wait for and collect replies for any successful sends in order of
3007  * increasing offset; once an error is hit, stop waiting for more.
3008  */
3009 restart_loop:
3010 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
3011 if (!rc) {
3012 if (!try_wait_for_completion(&wdata->done)) {
3013 mutex_unlock(&ctx->aio_mutex);
3014 return;
3015 }
3016
3017 if (wdata->result)
3018 rc = wdata->result;
3019 else
3020 ctx->total_len += wdata->bytes;
3021
3022 /* resend the request if it failed with a retryable error */
3023 if (rc == -EAGAIN) {
3024 struct list_head tmp_list;
3025 struct iov_iter tmp_from = ctx->iter;
3026
3027 INIT_LIST_HEAD(&tmp_list);
3028 list_del_init(&wdata->list);
3029
3030 if (ctx->direct_io)
3031 rc = cifs_resend_wdata(
3032 wdata, &tmp_list, ctx);
3033 else {
3034 iov_iter_advance(&tmp_from,
3035 wdata->offset - ctx->pos);
3036
3037 rc = cifs_write_from_iter(wdata->offset,
3038 wdata->bytes, &tmp_from,
3039 ctx->cfile, cifs_sb, &tmp_list,
3040 ctx);
3041
3042 kref_put(&wdata->refcount,
3043 cifs_uncached_writedata_release);
3044 }
3045
3046 list_splice(&tmp_list, &ctx->list);
3047 goto restart_loop;
3048 }
3049 }
3050 list_del_init(&wdata->list);
3051 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3052 }
3053
3054 cifs_stats_bytes_written(tcon, ctx->total_len);
3055 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
3056
3057 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3058
3059 mutex_unlock(&ctx->aio_mutex);
3060
3061 if (ctx->iocb && ctx->iocb->ki_complete)
3062 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3063 else
3064 complete(&ctx->done);
3065 }
3066
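/*
 * Common implementation for uncached and direct writes: set up an aio
 * context, queue the write requests and either wait for completion
 * (synchronous callers) or return -EIOCBQUEUED.
 */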
3067 static ssize_t __cifs_writev(
3068 struct kiocb *iocb, struct iov_iter *from, bool direct)
3069 {
3070 struct file *file = iocb->ki_filp;
3071 ssize_t total_written = 0;
3072 struct cifsFileInfo *cfile;
3073 struct cifs_tcon *tcon;
3074 struct cifs_sb_info *cifs_sb;
3075 struct cifs_aio_ctx *ctx;
3076 struct iov_iter saved_from = *from;
3077 size_t len = iov_iter_count(from);
3078 int rc;
3079
3080
3081 /*
3082  * iov_iter_get_pages_alloc() does not work with ITER_KVEC,
3083  * so fall back to the non-direct write path in that case.
3084  */
3085 if (direct && from->type & ITER_KVEC) {
3086 cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
3087 direct = false;
3088 }
3089
3090 rc = generic_write_checks(iocb, from);
3091 if (rc <= 0)
3092 return rc;
3093
3094 cifs_sb = CIFS_FILE_SB(file);
3095 cfile = file->private_data;
3096 tcon = tlink_tcon(cfile->tlink);
3097
3098 if (!tcon->ses->server->ops->async_writev)
3099 return -ENOSYS;
3100
3101 ctx = cifs_aio_ctx_alloc();
3102 if (!ctx)
3103 return -ENOMEM;
3104
3105 ctx->cfile = cifsFileInfo_get(cfile);
3106
3107 if (!is_sync_kiocb(iocb))
3108 ctx->iocb = iocb;
3109
3110 ctx->pos = iocb->ki_pos;
3111
3112 if (direct) {
3113 ctx->direct_io = true;
3114 ctx->iter = *from;
3115 ctx->len = len;
3116 } else {
3117 rc = setup_aio_ctx_iter(ctx, from, WRITE);
3118 if (rc) {
3119 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3120 return rc;
3121 }
3122 }
3123
3124 /* grab the mutex: write completion handlers may access ctx concurrently */
3125 mutex_lock(&ctx->aio_mutex);
3126
3127 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
3128 cfile, cifs_sb, &ctx->list, ctx);
3129
3130
3131 /*
3132  * If at least one write was successfully sent, ignore the error
3133  * from queueing later ones; any failure will be picked up when
3134  * the completed requests are collected.
3135  */
3136 if (!list_empty(&ctx->list))
3137 rc = 0;
3138
3139 mutex_unlock(&ctx->aio_mutex);
3140
3141 if (rc) {
3142 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3143 return rc;
3144 }
3145
3146 if (!is_sync_kiocb(iocb)) {
3147 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3148 return -EIOCBQUEUED;
3149 }
3150
3151 rc = wait_for_completion_killable(&ctx->done);
3152 if (rc) {
3153 mutex_lock(&ctx->aio_mutex);
3154 ctx->rc = rc = -EINTR;
3155 total_written = ctx->total_len;
3156 mutex_unlock(&ctx->aio_mutex);
3157 } else {
3158 rc = ctx->rc;
3159 total_written = ctx->total_len;
3160 }
3161
3162 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3163
3164 if (unlikely(!total_written))
3165 return rc;
3166
3167 iocb->ki_pos += total_written;
3168 return total_written;
3169 }
3170
3171 ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3172 {
3173 return __cifs_writev(iocb, from, true);
3174 }
3175
3176 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3177 {
3178 return __cifs_writev(iocb, from, false);
3179 }
3180
3181 static ssize_t
3182 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
3183 {
3184 struct file *file = iocb->ki_filp;
3185 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
3186 struct inode *inode = file->f_mapping->host;
3187 struct cifsInodeInfo *cinode = CIFS_I(inode);
3188 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
3189 ssize_t rc;
3190
3191 inode_lock(inode);
3192
3193 /*
3194  * Hold lock_sem so nobody adds a brlock that conflicts with this write.
3195  */
3196 down_read(&cinode->lock_sem);
3197
3198 rc = generic_write_checks(iocb, from);
3199 if (rc <= 0)
3200 goto out;
3201
3202 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
3203 server->vals->exclusive_lock_type, 0,
3204 NULL, CIFS_WRITE_OP))
3205 rc = __generic_file_write_iter(iocb, from);
3206 else
3207 rc = -EACCES;
3208 out:
3209 up_read(&cinode->lock_sem);
3210 inode_unlock(inode);
3211
3212 if (rc > 0)
3213 rc = generic_write_sync(iocb, rc);
3214 return rc;
3215 }
3216
3217 ssize_t
3218 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
3219 {
3220 struct inode *inode = file_inode(iocb->ki_filp);
3221 struct cifsInodeInfo *cinode = CIFS_I(inode);
3222 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3223 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3224 iocb->ki_filp->private_data;
3225 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3226 ssize_t written;
3227
3228 written = cifs_get_writer(cinode);
3229 if (written)
3230 return written;
3231
3232 if (CIFS_CACHE_WRITE(cinode)) {
3233 if (cap_unix(tcon->ses) &&
3234 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
3235 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
3236 written = generic_file_write_iter(iocb, from);
3237 goto out;
3238 }
3239 written = cifs_writev(iocb, from);
3240 goto out;
3241 }
3242
3243 /*
3244  * For non-oplocked files in strict cache mode, write the data
3245  * straight to the server for exactly [pos, pos+len-1] rather than
3246  * flushing whole pages, which could trip over mandatory locks.
3247  */
3248 written = cifs_user_writev(iocb, from);
3249 if (CIFS_CACHE_READ(cinode)) {
3250
3251 /*
3252  * We hold read-level caching and have just sent a write to the
3253  * server, so the cached data is now stale. Zap the cache and drop
3254  * the oplock/lease to NONE so that subsequent reads fetch fresh
3255  * data from the server.
3256  */
3257 cifs_zap_mapping(inode);
3258 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
3259 inode);
3260 cinode->oplock = 0;
3261 }
3262 out:
3263 cifs_put_writer(cinode);
3264 return written;
3265 }
3266
3267 static struct cifs_readdata *
3268 cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
3269 {
3270 struct cifs_readdata *rdata;
3271
3272 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
3273 if (rdata != NULL) {
3274 rdata->pages = pages;
3275 kref_init(&rdata->refcount);
3276 INIT_LIST_HEAD(&rdata->list);
3277 init_completion(&rdata->done);
3278 INIT_WORK(&rdata->work, complete);
3279 }
3280
3281 return rdata;
3282 }
3283
3284 static struct cifs_readdata *
3285 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3286 {
3287 struct page **pages =
3288 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
3289 struct cifs_readdata *ret = NULL;
3290
3291 if (pages) {
3292 ret = cifs_readdata_direct_alloc(pages, complete);
3293 if (!ret)
3294 kfree(pages);
3295 }
3296
3297 return ret;
3298 }
3299
3300 void
3301 cifs_readdata_release(struct kref *refcount)
3302 {
3303 struct cifs_readdata *rdata = container_of(refcount,
3304 struct cifs_readdata, refcount);
3305 #ifdef CONFIG_CIFS_SMB_DIRECT
3306 if (rdata->mr) {
3307 smbd_deregister_mr(rdata->mr);
3308 rdata->mr = NULL;
3309 }
3310 #endif
3311 if (rdata->cfile)
3312 cifsFileInfo_put(rdata->cfile);
3313
3314 kvfree(rdata->pages);
3315 kfree(rdata);
3316 }
3317
3318 static int
3319 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
3320 {
3321 int rc = 0;
3322 struct page *page;
3323 unsigned int i;
3324
3325 for (i = 0; i < nr_pages; i++) {
3326 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3327 if (!page) {
3328 rc = -ENOMEM;
3329 break;
3330 }
3331 rdata->pages[i] = page;
3332 }
3333
3334 if (rc) {
3335 unsigned int nr_page_failed = i;
3336
3337 for (i = 0; i < nr_page_failed; i++) {
3338 put_page(rdata->pages[i]);
3339 rdata->pages[i] = NULL;
3340 }
3341 }
3342 return rc;
3343 }
3344
3345 static void
3346 cifs_uncached_readdata_release(struct kref *refcount)
3347 {
3348 struct cifs_readdata *rdata = container_of(refcount,
3349 struct cifs_readdata, refcount);
3350 unsigned int i;
3351
3352 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
3353 for (i = 0; i < rdata->nr_pages; i++) {
3354 put_page(rdata->pages[i]);
3355 }
3356 cifs_readdata_release(refcount);
3357 }
3358
3359 /**
3360  * cifs_readdata_to_iov - copy data from pages in response to an iovec
3361  * @rdata:	the readdata response with list of pages holding data
3362  * @iter:	destination for our data
3363  *
3364  * This function copies data from a list of pages in a readdata response
3365  * into an array of iovecs. It figures out where the data should go based
3366  * on the info in the readdata and then copies it into that spot.
3367  */
3368 static int
3369 cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
3370 {
3371 size_t remaining = rdata->got_bytes;
3372 unsigned int i;
3373
3374 for (i = 0; i < rdata->nr_pages; i++) {
3375 struct page *page = rdata->pages[i];
3376 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
3377 size_t written;
3378
3379 if (unlikely(iov_iter_is_pipe(iter))) {
3380 void *addr = kmap_atomic(page);
3381
3382 written = copy_to_iter(addr, copy, iter);
3383 kunmap_atomic(addr);
3384 } else
3385 written = copy_page_to_iter(page, 0, copy, iter);
3386 remaining -= written;
3387 if (written < copy && iov_iter_count(iter) > 0)
3388 break;
3389 }
3390 return remaining ? -EFAULT : 0;
3391 }
3392
3393 static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3394
3395 static void
3396 cifs_uncached_readv_complete(struct work_struct *work)
3397 {
3398 struct cifs_readdata *rdata = container_of(work,
3399 struct cifs_readdata, work);
3400
3401 complete(&rdata->done);
3402 collect_uncached_read_data(rdata->ctx);
3403
3404 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3405 }
3406
3407 static int
3408 uncached_fill_pages(struct TCP_Server_Info *server,
3409 struct cifs_readdata *rdata, struct iov_iter *iter,
3410 unsigned int len)
3411 {
3412 int result = 0;
3413 unsigned int i;
3414 unsigned int nr_pages = rdata->nr_pages;
3415 unsigned int page_offset = rdata->page_offset;
3416
3417 rdata->got_bytes = 0;
3418 rdata->tailsz = PAGE_SIZE;
3419 for (i = 0; i < nr_pages; i++) {
3420 struct page *page = rdata->pages[i];
3421 size_t n;
3422 unsigned int segment_size = rdata->pagesz;
3423
3424 if (i == 0)
3425 segment_size -= page_offset;
3426 else
3427 page_offset = 0;
3428
3429
3430 if (len <= 0) {
3431
3432 rdata->pages[i] = NULL;
3433 rdata->nr_pages--;
3434 put_page(page);
3435 continue;
3436 }
3437
3438 n = len;
3439 if (len >= segment_size)
3440 /* enough data to fill this whole segment */
3441 n = segment_size;
3442 else
3443 rdata->tailsz = len;
3444 len -= n;
3445
3446 if (iter)
3447 result = copy_page_from_iter(
3448 page, page_offset, n, iter);
3449 #ifdef CONFIG_CIFS_SMB_DIRECT
3450 else if (rdata->mr)
3451 result = n;
3452 #endif
3453 else
3454 result = cifs_read_page_from_socket(
3455 server, page, page_offset, n);
3456 if (result < 0)
3457 break;
3458
3459 rdata->got_bytes += result;
3460 }
3461
3462 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3463 rdata->got_bytes : result;
3464 }
3465
3466 static int
3467 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3468 struct cifs_readdata *rdata, unsigned int len)
3469 {
3470 return uncached_fill_pages(server, rdata, NULL, len);
3471 }
3472
3473 static int
3474 cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3475 struct cifs_readdata *rdata,
3476 struct iov_iter *iter)
3477 {
3478 return uncached_fill_pages(server, rdata, iter, iter->count);
3479 }
3480
3481 static int cifs_resend_rdata(struct cifs_readdata *rdata,
3482 struct list_head *rdata_list,
3483 struct cifs_aio_ctx *ctx)
3484 {
3485 unsigned int rsize;
3486 struct cifs_credits credits;
3487 int rc;
3488 struct TCP_Server_Info *server =
3489 tlink_tcon(rdata->cfile->tlink)->ses->server;
3490
3491 do {
3492 if (rdata->cfile->invalidHandle) {
3493 rc = cifs_reopen_file(rdata->cfile, true);
3494 if (rc == -EAGAIN)
3495 continue;
3496 else if (rc)
3497 break;
3498 }
3499
3500
3501 /*
3502  * Wait for credits to resend this rdata.
3503  * Note: we resend the whole rdata rather than in segments.
3504  */
3505 do {
3506 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
3507 &rsize, &credits);
3508
3509 if (rc)
3510 goto fail;
3511
3512 if (rsize < rdata->bytes) {
3513 add_credits_and_wake_if(server, &credits, 0);
3514 msleep(1000);
3515 }
3516 } while (rsize < rdata->bytes);
3517 rdata->credits = credits;
3518
3519 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3520 if (!rc) {
3521 if (rdata->cfile->invalidHandle)
3522 rc = -EAGAIN;
3523 else
3524 rc = server->ops->async_readv(rdata);
3525 }
3526
3527
3528 if (!rc) {
3529
3530 list_add_tail(&rdata->list, rdata_list);
3531 return 0;
3532 }
3533
3534
3535 add_credits_and_wake_if(server, &rdata->credits, 0);
3536 } while (rc == -EAGAIN);
3537
3538 fail:
3539 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3540 return rc;
3541 }
3542
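/*
 * Split an uncached (or direct) read into rsize-sized requests, pinning the
 * user pages (direct I/O) or allocating temporary pages, and queue each
 * request asynchronously on @rdata_list.
 */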
3543 static int
3544 cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3545 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3546 struct cifs_aio_ctx *ctx)
3547 {
3548 struct cifs_readdata *rdata;
3549 unsigned int npages, rsize;
3550 struct cifs_credits credits_on_stack;
3551 struct cifs_credits *credits = &credits_on_stack;
3552 size_t cur_len;
3553 int rc;
3554 pid_t pid;
3555 struct TCP_Server_Info *server;
3556 struct page **pagevec;
3557 size_t start;
3558 struct iov_iter direct_iov = ctx->iter;
3559
3560 server = tlink_tcon(open_file->tlink)->ses->server;
3561
3562 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3563 pid = open_file->pid;
3564 else
3565 pid = current->tgid;
3566
3567 if (ctx->direct_io)
3568 iov_iter_advance(&direct_iov, offset - ctx->pos);
3569
3570 do {
3571 if (open_file->invalidHandle) {
3572 rc = cifs_reopen_file(open_file, true);
3573 if (rc == -EAGAIN)
3574 continue;
3575 else if (rc)
3576 break;
3577 }
3578
3579 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3580 &rsize, credits);
3581 if (rc)
3582 break;
3583
3584 cur_len = min_t(const size_t, len, rsize);
3585
3586 if (ctx->direct_io) {
3587 ssize_t result;
3588
3589 result = iov_iter_get_pages_alloc(
3590 &direct_iov, &pagevec,
3591 cur_len, &start);
3592 if (result < 0) {
3593 cifs_dbg(VFS,
3594 "couldn't get user pages (rc=%zd)"
3595 " iter type %d"
3596 " iov_offset %zd count %zd\n",
3597 result, direct_iov.type,
3598 direct_iov.iov_offset,
3599 direct_iov.count);
3600 dump_stack();
3601
3602 rc = result;
3603 add_credits_and_wake_if(server, credits, 0);
3604 break;
3605 }
3606 cur_len = (size_t)result;
3607 iov_iter_advance(&direct_iov, cur_len);
3608
3609 rdata = cifs_readdata_direct_alloc(
3610 pagevec, cifs_uncached_readv_complete);
3611 if (!rdata) {
3612 add_credits_and_wake_if(server, credits, 0);
3613 rc = -ENOMEM;
3614 break;
3615 }
3616
3617 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3618 rdata->page_offset = start;
3619 rdata->tailsz = npages > 1 ?
3620 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3621 cur_len;
3622
3623 } else {
3624
3625 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3626
3627 rdata = cifs_readdata_alloc(npages,
3628 cifs_uncached_readv_complete);
3629 if (!rdata) {
3630 add_credits_and_wake_if(server, credits, 0);
3631 rc = -ENOMEM;
3632 break;
3633 }
3634
3635 rc = cifs_read_allocate_pages(rdata, npages);
3636 if (rc) {
3637 kvfree(rdata->pages);
3638 kfree(rdata);
3639 add_credits_and_wake_if(server, credits, 0);
3640 break;
3641 }
3642
3643 rdata->tailsz = PAGE_SIZE;
3644 }
3645
3646 rdata->cfile = cifsFileInfo_get(open_file);
3647 rdata->nr_pages = npages;
3648 rdata->offset = offset;
3649 rdata->bytes = cur_len;
3650 rdata->pid = pid;
3651 rdata->pagesz = PAGE_SIZE;
3652 rdata->read_into_pages = cifs_uncached_read_into_pages;
3653 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
3654 rdata->credits = credits_on_stack;
3655 rdata->ctx = ctx;
3656 kref_get(&ctx->refcount);
3657
3658 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3659
3660 if (!rc) {
3661 if (rdata->cfile->invalidHandle)
3662 rc = -EAGAIN;
3663 else
3664 rc = server->ops->async_readv(rdata);
3665 }
3666
3667 if (rc) {
3668 add_credits_and_wake_if(server, &rdata->credits, 0);
3669 kref_put(&rdata->refcount,
3670 cifs_uncached_readdata_release);
3671 if (rc == -EAGAIN) {
3672 iov_iter_revert(&direct_iov, cur_len);
3673 continue;
3674 }
3675 break;
3676 }
3677
3678 list_add_tail(&rdata->list, rdata_list);
3679 offset += cur_len;
3680 len -= cur_len;
3681 } while (len > 0);
3682
3683 return rc;
3684 }
3685
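/*
 * Collect the results of the outstanding uncached reads tracked in @ctx,
 * copying data to the destination iterator (unless direct I/O), resending
 * short or failed requests, and completing the aio context at the end.
 */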
3686 static void
3687 collect_uncached_read_data(struct cifs_aio_ctx *ctx)
3688 {
3689 struct cifs_readdata *rdata, *tmp;
3690 struct iov_iter *to = &ctx->iter;
3691 struct cifs_sb_info *cifs_sb;
3692 int rc;
3693
3694 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
3695
3696 mutex_lock(&ctx->aio_mutex);
3697
3698 if (list_empty(&ctx->list)) {
3699 mutex_unlock(&ctx->aio_mutex);
3700 return;
3701 }
3702
3703 rc = ctx->rc;
3704
3705 again:
3706 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
3707 if (!rc) {
3708 if (!try_wait_for_completion(&rdata->done)) {
3709 mutex_unlock(&ctx->aio_mutex);
3710 return;
3711 }
3712
3713 if (rdata->result == -EAGAIN) {
3714 /* the request hit a retryable error -- resend it */
3715 struct list_head tmp_list;
3716 unsigned int got_bytes = rdata->got_bytes;
3717
3718 list_del_init(&rdata->list);
3719 INIT_LIST_HEAD(&tmp_list);
3720
3721 /*
3722  * We got part of the data and then a reconnect happened:
3723  * copy what we have to the user buffer and resend the
3724  * remainder of the request.
3725  */
3726 if (got_bytes && got_bytes < rdata->bytes) {
3727 rc = 0;
3728 if (!ctx->direct_io)
3729 rc = cifs_readdata_to_iov(rdata, to);
3730 if (rc) {
3731 kref_put(&rdata->refcount,
3732 cifs_uncached_readdata_release);
3733 continue;
3734 }
3735 }
3736
3737 if (ctx->direct_io) {
3738 /*
3739  * For direct I/O, the rdata (with its pinned user
3740  * pages) is reused for the resend.
3741  */
3742 rc = cifs_resend_rdata(
3743 rdata,
3744 &tmp_list, ctx);
3745 } else {
3746 rc = cifs_send_async_read(
3747 rdata->offset + got_bytes,
3748 rdata->bytes - got_bytes,
3749 rdata->cfile, cifs_sb,
3750 &tmp_list, ctx);
3751
3752 kref_put(&rdata->refcount,
3753 cifs_uncached_readdata_release);
3754 }
3755
3756 list_splice(&tmp_list, &ctx->list);
3757
3758 goto again;
3759 } else if (rdata->result)
3760 rc = rdata->result;
3761 else if (!ctx->direct_io)
3762 rc = cifs_readdata_to_iov(rdata, to);
3763
3764 /* a short read leaves rc = -ENODATA; it is masked again below */
3765 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3766 rc = -ENODATA;
3767
3768 ctx->total_len += rdata->got_bytes;
3769 }
3770 list_del_init(&rdata->list);
3771 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3772 }
3773
3774 if (!ctx->direct_io)
3775 ctx->total_len = ctx->len - iov_iter_count(to);
3776
3777 /* mask -ENODATA: a short read still returns the bytes we did get */
3778 if (rc == -ENODATA)
3779 rc = 0;
3780
3781 ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
3782
3783 mutex_unlock(&ctx->aio_mutex);
3784
3785 if (ctx->iocb && ctx->iocb->ki_complete)
3786 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3787 else
3788 complete(&ctx->done);
3789 }
3790
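/*
 * Common implementation for uncached and direct reads: set up an aio
 * context, queue the read requests and either wait for completion
 * (synchronous callers) or return -EIOCBQUEUED.
 */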
3791 static ssize_t __cifs_readv(
3792 struct kiocb *iocb, struct iov_iter *to, bool direct)
3793 {
3794 size_t len;
3795 struct file *file = iocb->ki_filp;
3796 struct cifs_sb_info *cifs_sb;
3797 struct cifsFileInfo *cfile;
3798 struct cifs_tcon *tcon;
3799 ssize_t rc, total_read = 0;
3800 loff_t offset = iocb->ki_pos;
3801 struct cifs_aio_ctx *ctx;
3802
3803
3804 /*
3805  * iov_iter_get_pages_alloc() does not work with ITER_KVEC,
3806  * so fall back to the copying read path in that case.
3807  */
3808 if (direct && to->type & ITER_KVEC) {
3809 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3810 direct = false;
3811 }
3812
3813 len = iov_iter_count(to);
3814 if (!len)
3815 return 0;
3816
3817 cifs_sb = CIFS_FILE_SB(file);
3818 cfile = file->private_data;
3819 tcon = tlink_tcon(cfile->tlink);
3820
3821 if (!tcon->ses->server->ops->async_readv)
3822 return -ENOSYS;
3823
3824 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3825 cifs_dbg(FYI, "attempting read on write only file instance\n");
3826
3827 ctx = cifs_aio_ctx_alloc();
3828 if (!ctx)
3829 return -ENOMEM;
3830
3831 ctx->cfile = cifsFileInfo_get(cfile);
3832
3833 if (!is_sync_kiocb(iocb))
3834 ctx->iocb = iocb;
3835
3836 if (iter_is_iovec(to))
3837 ctx->should_dirty = true;
3838
3839 if (direct) {
3840 ctx->pos = offset;
3841 ctx->direct_io = true;
3842 ctx->iter = *to;
3843 ctx->len = len;
3844 } else {
3845 rc = setup_aio_ctx_iter(ctx, to, READ);
3846 if (rc) {
3847 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3848 return rc;
3849 }
3850 len = ctx->len;
3851 }
3852
3853 /* grab the mutex: read completion handlers may access ctx concurrently */
3854 mutex_lock(&ctx->aio_mutex);
3855
3856 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3857
3858 /* if at least one read request was successfully sent, reset rc */
3859 if (!list_empty(&ctx->list))
3860 rc = 0;
3861
3862 mutex_unlock(&ctx->aio_mutex);
3863
3864 if (rc) {
3865 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3866 return rc;
3867 }
3868
3869 if (!is_sync_kiocb(iocb)) {
3870 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3871 return -EIOCBQUEUED;
3872 }
3873
3874 rc = wait_for_completion_killable(&ctx->done);
3875 if (rc) {
3876 mutex_lock(&ctx->aio_mutex);
3877 ctx->rc = rc = -EINTR;
3878 total_read = ctx->total_len;
3879 mutex_unlock(&ctx->aio_mutex);
3880 } else {
3881 rc = ctx->rc;
3882 total_read = ctx->total_len;
3883 }
3884
3885 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3886
3887 if (total_read) {
3888 iocb->ki_pos += total_read;
3889 return total_read;
3890 }
3891 return rc;
3892 }
3893
3894 ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3895 {
3896 return __cifs_readv(iocb, to, true);
3897 }
3898
3899 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3900 {
3901 return __cifs_readv(iocb, to, false);
3902 }
3903
3904 ssize_t
3905 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
3906 {
3907 struct inode *inode = file_inode(iocb->ki_filp);
3908 struct cifsInodeInfo *cinode = CIFS_I(inode);
3909 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3910 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3911 iocb->ki_filp->private_data;
3912 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3913 int rc = -EACCES;
3914
3915 /*
3916  * In strict cache mode we need to read from the server all the
3917  * time if we don't have a level II oplock, because the server can
3918  * delay mtime changes and so we cannot decide whether to
3919  * invalidate the inode. Page reads could also fail because of
3920  * mandatory locks on pages affected by this read but outside the
3921  * region from pos to pos+len-1.
3922  */
3923 if (!CIFS_CACHE_READ(cinode))
3924 return cifs_user_readv(iocb, to);
3925
3926 if (cap_unix(tcon->ses) &&
3927 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3928 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
3929 return generic_file_read_iter(iocb, to);
3930
3931 /*
3932  * Hold lock_sem so nobody adds a brlock that would prevent
3933  * reading while we check for conflicts.
3934  */
3935 down_read(&cinode->lock_sem);
3936 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
3937 tcon->ses->server->vals->shared_lock_type,
3938 0, NULL, CIFS_READ_OP))
3939 rc = generic_file_read_iter(iocb, to);
3940 up_read(&cinode->lock_sem);
3941 return rc;
3942 }
3943
3944 static ssize_t
3945 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
3946 {
3947 int rc = -EACCES;
3948 unsigned int bytes_read = 0;
3949 unsigned int total_read;
3950 unsigned int current_read_size;
3951 unsigned int rsize;
3952 struct cifs_sb_info *cifs_sb;
3953 struct cifs_tcon *tcon;
3954 struct TCP_Server_Info *server;
3955 unsigned int xid;
3956 char *cur_offset;
3957 struct cifsFileInfo *open_file;
3958 struct cifs_io_parms io_parms;
3959 int buf_type = CIFS_NO_BUFFER;
3960 __u32 pid;
3961
3962 xid = get_xid();
3963 cifs_sb = CIFS_FILE_SB(file);
3964
3965 /* cap the read size at the negotiated rsize and our maximum buffer size */
3966 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3967
3968 if (file->private_data == NULL) {
3969 rc = -EBADF;
3970 free_xid(xid);
3971 return rc;
3972 }
3973 open_file = file->private_data;
3974 tcon = tlink_tcon(open_file->tlink);
3975 server = tcon->ses->server;
3976
3977 if (!server->ops->sync_read) {
3978 free_xid(xid);
3979 return -ENOSYS;
3980 }
3981
3982 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3983 pid = open_file->pid;
3984 else
3985 pid = current->tgid;
3986
3987 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3988 cifs_dbg(FYI, "attempting read on write only file instance\n");
3989
3990 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3991 total_read += bytes_read, cur_offset += bytes_read) {
3992 do {
3993 current_read_size = min_t(uint, read_size - total_read,
3994 rsize);
3995
3996 /*
3997  * Servers without the large-file capability refuse reads bigger
3998  * than they negotiated, so clamp the request to CIFSMaxBufSize.
3999  */
4000 if (!(tcon->ses->capabilities &
4001 tcon->ses->server->vals->cap_large_files)) {
4002 current_read_size = min_t(uint,
4003 current_read_size, CIFSMaxBufSize);
4004 }
4005 if (open_file->invalidHandle) {
4006 rc = cifs_reopen_file(open_file, true);
4007 if (rc != 0)
4008 break;
4009 }
4010 io_parms.pid = pid;
4011 io_parms.tcon = tcon;
4012 io_parms.offset = *offset;
4013 io_parms.length = current_read_size;
4014 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
4015 &bytes_read, &cur_offset,
4016 &buf_type);
4017 } while (rc == -EAGAIN);
4018
4019 if (rc || (bytes_read == 0)) {
4020 if (total_read) {
4021 break;
4022 } else {
4023 free_xid(xid);
4024 return rc;
4025 }
4026 } else {
4027 cifs_stats_bytes_read(tcon, total_read);
4028 *offset += bytes_read;
4029 }
4030 }
4031 free_xid(xid);
4032 return total_read;
4033 }
4034
4035 /*
4036  * If the page is mmap'ed into a process' page tables, then we need
4037  * to make sure that it doesn't change while being written back.
4038  */
4039 static vm_fault_t
4040 cifs_page_mkwrite(struct vm_fault *vmf)
4041 {
4042 struct page *page = vmf->page;
4043
4044 lock_page(page);
4045 return VM_FAULT_LOCKED;
4046 }
4047
4048 static const struct vm_operations_struct cifs_file_vm_ops = {
4049 .fault = filemap_fault,
4050 .map_pages = filemap_map_pages,
4051 .page_mkwrite = cifs_page_mkwrite,
4052 };
4053
4054 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4055 {
4056 int xid, rc = 0;
4057 struct inode *inode = file_inode(file);
4058
4059 xid = get_xid();
4060
4061 if (!CIFS_CACHE_READ(CIFS_I(inode)))
4062 rc = cifs_zap_mapping(inode);
4063 if (!rc)
4064 rc = generic_file_mmap(file, vma);
4065 if (!rc)
4066 vma->vm_ops = &cifs_file_vm_ops;
4067
4068 free_xid(xid);
4069 return rc;
4070 }
4071
4072 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4073 {
4074 int rc, xid;
4075
4076 xid = get_xid();
4077
4078 rc = cifs_revalidate_file(file);
4079 if (rc)
4080 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4081 rc);
4082 if (!rc)
4083 rc = generic_file_mmap(file, vma);
4084 if (!rc)
4085 vma->vm_ops = &cifs_file_vm_ops;
4086
4087 free_xid(xid);
4088 return rc;
4089 }
4090
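/*
 * Completion handler for readpages: mark the pages uptodate on success, add
 * them to the LRU, push them to fscache and drop the readdata reference.
 */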
4091 static void
4092 cifs_readv_complete(struct work_struct *work)
4093 {
4094 unsigned int i, got_bytes;
4095 struct cifs_readdata *rdata = container_of(work,
4096 struct cifs_readdata, work);
4097
4098 got_bytes = rdata->got_bytes;
4099 for (i = 0; i < rdata->nr_pages; i++) {
4100 struct page *page = rdata->pages[i];
4101
4102 lru_cache_add_file(page);
4103
4104 if (rdata->result == 0 ||
4105 (rdata->result == -EAGAIN && got_bytes)) {
4106 flush_dcache_page(page);
4107 SetPageUptodate(page);
4108 }
4109
4110 unlock_page(page);
4111
4112 if (rdata->result == 0 ||
4113 (rdata->result == -EAGAIN && got_bytes))
4114 cifs_readpage_to_fscache(rdata->mapping->host, page);
4115
4116 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
4117
4118 put_page(page);
4119 rdata->pages[i] = NULL;
4120 }
4121 kref_put(&rdata->refcount, cifs_readdata_release);
4122 }
4123
4124 static int
4125 readpages_fill_pages(struct TCP_Server_Info *server,
4126 struct cifs_readdata *rdata, struct iov_iter *iter,
4127 unsigned int len)
4128 {
4129 int result = 0;
4130 unsigned int i;
4131 u64 eof;
4132 pgoff_t eof_index;
4133 unsigned int nr_pages = rdata->nr_pages;
4134 unsigned int page_offset = rdata->page_offset;
4135
4136
4137 eof = CIFS_I(rdata->mapping->host)->server_eof;
4138 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
4139 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
4140
4141 rdata->got_bytes = 0;
4142 rdata->tailsz = PAGE_SIZE;
4143 for (i = 0; i < nr_pages; i++) {
4144 struct page *page = rdata->pages[i];
4145 unsigned int to_read = rdata->pagesz;
4146 size_t n;
4147
4148 if (i == 0)
4149 to_read -= page_offset;
4150 else
4151 page_offset = 0;
4152
4153 n = to_read;
4154
4155 if (len >= to_read) {
4156 len -= to_read;
4157 } else if (len > 0) {
4158
4159 zero_user(page, len + page_offset, to_read - len);
4160 n = rdata->tailsz = len;
4161 len = 0;
4162 } else if (page->index > eof_index) {
4163 /*
4164  * The VFS will not try to do readahead past i_size, but it is
4165  * possible that we have outstanding writes with gaps in the
4166  * middle, or that i_size has not been updated yet.  This page
4167  * lies beyond the EOF we know about, so hand back a zero-filled,
4168  * uptodate page rather than issuing a read that the server
4169  * would shorten.
4170  */
4171 zero_user(page, 0, PAGE_SIZE);
4172 lru_cache_add_file(page);
4173 flush_dcache_page(page);
4174 SetPageUptodate(page);
4175 unlock_page(page);
4176 put_page(page);
4177 rdata->pages[i] = NULL;
4178 rdata->nr_pages--;
4179 continue;
4180 } else {
4181
4182 lru_cache_add_file(page);
4183 unlock_page(page);
4184 put_page(page);
4185 rdata->pages[i] = NULL;
4186 rdata->nr_pages--;
4187 continue;
4188 }
4189
4190 if (iter)
4191 result = copy_page_from_iter(
4192 page, page_offset, n, iter);
4193 #ifdef CONFIG_CIFS_SMB_DIRECT
4194 else if (rdata->mr)
4195 result = n;
4196 #endif
4197 else
4198 result = cifs_read_page_from_socket(
4199 server, page, page_offset, n);
4200 if (result < 0)
4201 break;
4202
4203 rdata->got_bytes += result;
4204 }
4205
4206 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4207 rdata->got_bytes : result;
4208 }
4209
4210 static int
4211 cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4212 struct cifs_readdata *rdata, unsigned int len)
4213 {
4214 return readpages_fill_pages(server, rdata, NULL, len);
4215 }
4216
4217 static int
4218 cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4219 struct cifs_readdata *rdata,
4220 struct iov_iter *iter)
4221 {
4222 return readpages_fill_pages(server, rdata, iter, iter->count);
4223 }
4224
4225 static int
4226 readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4227 unsigned int rsize, struct list_head *tmplist,
4228 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4229 {
4230 struct page *page, *tpage;
4231 unsigned int expected_index;
4232 int rc;
4233 gfp_t gfp = readahead_gfp_mask(mapping);
4234
4235 INIT_LIST_HEAD(tmplist);
4236
4237 page = lru_to_page(page_list);
4238
4239 /*
4240  * Lock the page and put it in the cache. Since no one else
4241  * should have access to this page, we can safely set PG_locked
4242  * without checking it first.
4243  */
4244 __SetPageLocked(page);
4245 rc = add_to_page_cache_locked(page, mapping,
4246 page->index, gfp);
4247
4248 /* give up if we cannot stick the page in the cache */
4249 if (rc) {
4250 __ClearPageLocked(page);
4251 return rc;
4252 }
4253
4254
4255 *offset = (loff_t)page->index << PAGE_SHIFT;
4256 *bytes = PAGE_SIZE;
4257 *nr_pages = 1;
4258 list_move_tail(&page->lru, tmplist);
4259
4260 /* now try to add more contiguous pages onto the request */
4261 expected_index = page->index + 1;
4262 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4263 /* discontinuity? */
4264 if (page->index != expected_index)
4265 break;
4266
4267 /* would this page push the read over the rsize limit? */
4268 if (*bytes + PAGE_SIZE > rsize)
4269 break;
4270
4271 __SetPageLocked(page);
4272 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
4273 __ClearPageLocked(page);
4274 break;
4275 }
4276 list_move_tail(&page->lru, tmplist);
4277 (*bytes) += PAGE_SIZE;
4278 expected_index++;
4279 (*nr_pages)++;
4280 }
4281 return rc;
4282 }
4283
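/*
 * readpages: batch contiguous pages from the readahead list into rsize-sized
 * asynchronous read requests, satisfying what we can from fscache first.
 */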
4284 static int cifs_readpages(struct file *file, struct address_space *mapping,
4285 struct list_head *page_list, unsigned num_pages)
4286 {
4287 int rc;
4288 struct list_head tmplist;
4289 struct cifsFileInfo *open_file = file->private_data;
4290 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
4291 struct TCP_Server_Info *server;
4292 pid_t pid;
4293 unsigned int xid;
4294
4295 xid = get_xid();
4296
4297
4298 /*
4299  * Read as many pages as possible from fscache first; returns 0 if
4300  * everything was satisfied from the cache. After this point, pages
4301  * may have PG_fscache set and must be cleaned up if unused.
4302  */
4303 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4304 &num_pages);
4305 if (rc == 0) {
4306 free_xid(xid);
4307 return rc;
4308 }
4309
4310 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4311 pid = open_file->pid;
4312 else
4313 pid = current->tgid;
4314
4315 rc = 0;
4316 server = tlink_tcon(open_file->tlink)->ses->server;
4317
4318 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4319 __func__, file, mapping, num_pages);
4320
4321 /*
4322  * Start with the page at the end of the list and move it to the
4323  * private list. Do the same with any following pages until we hit
4324  * the rsize limit, hit an index discontinuity, or run out of
4325  * pages. Issue the async read and then start the loop again until
4326  * the list is empty.
4327  *
4328  * Note that list order is important. The page_list is in order of
4329  * declining indexes; when we put the pages in rdata->pages, we
4330  * want them in increasing order.
4331  */
4332 while (!list_empty(page_list)) {
4333 unsigned int i, nr_pages, bytes, rsize;
4334 loff_t offset;
4335 struct page *page, *tpage;
4336 struct cifs_readdata *rdata;
4337 struct cifs_credits credits_on_stack;
4338 struct cifs_credits *credits = &credits_on_stack;
4339
4340 if (open_file->invalidHandle) {
4341 rc = cifs_reopen_file(open_file, true);
4342 if (rc == -EAGAIN)
4343 continue;
4344 else if (rc)
4345 break;
4346 }
4347
4348 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
4349 &rsize, credits);
4350 if (rc)
4351 break;
4352
4353 /*
4354  * Give up immediately if rsize is too small to read an entire
4355  * page. The VFS will fall back to readpage. We should never
4356  * reach this point however since we set ra_pages to 0 when the
4357  * rsize is smaller than a cache page.
4358  */
4359 if (unlikely(rsize < PAGE_SIZE)) {
4360 add_credits_and_wake_if(server, credits, 0);
4361 free_xid(xid);
4362 return 0;
4363 }
4364
4365 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4366 &nr_pages, &offset, &bytes);
4367 if (rc) {
4368 add_credits_and_wake_if(server, credits, 0);
4369 break;
4370 }
4371
4372 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
4373 if (!rdata) {
4374 /* best to give up if we are out of memory */
4375 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4376 list_del(&page->lru);
4377 lru_cache_add_file(page);
4378 unlock_page(page);
4379 put_page(page);
4380 }
4381 rc = -ENOMEM;
4382 add_credits_and_wake_if(server, credits, 0);
4383 break;
4384 }
4385
4386 rdata->cfile = cifsFileInfo_get(open_file);
4387 rdata->mapping = mapping;
4388 rdata->offset = offset;
4389 rdata->bytes = bytes;
4390 rdata->pid = pid;
4391 rdata->pagesz = PAGE_SIZE;
4392 rdata->tailsz = PAGE_SIZE;
4393 rdata->read_into_pages = cifs_readpages_read_into_pages;
4394 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
4395 rdata->credits = credits_on_stack;
4396
4397 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4398 list_del(&page->lru);
4399 rdata->pages[rdata->nr_pages++] = page;
4400 }
4401
4402 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4403
4404 if (!rc) {
4405 if (rdata->cfile->invalidHandle)
4406 rc = -EAGAIN;
4407 else
4408 rc = server->ops->async_readv(rdata);
4409 }
4410
4411 if (rc) {
4412 add_credits_and_wake_if(server, &rdata->credits, 0);
4413 for (i = 0; i < rdata->nr_pages; i++) {
4414 page = rdata->pages[i];
4415 lru_cache_add_file(page);
4416 unlock_page(page);
4417 put_page(page);
4418 }
4419
4420 kref_put(&rdata->refcount, cifs_readdata_release);
4421 break;
4422 }
4423
4424 kref_put(&rdata->refcount, cifs_readdata_release);
4425 }
4426
4427 /*
4428  * Pages exposed to fscache but never added to the page cache must
4429  * be uncached before they are returned to the allocator.
4430  */
4431 cifs_fscache_readpages_cancel(mapping->host, page_list);
4432 free_xid(xid);
4433 return rc;
4434 }
4435
4436 /*
4437  * Read one page synchronously into the page cache, trying fscache first.
4438  */
4439 static int cifs_readpage_worker(struct file *file, struct page *page,
4440 loff_t *poffset)
4441 {
4442 char *read_data;
4443 int rc;
4444
4445
4446 rc = cifs_readpage_from_fscache(file_inode(file), page);
4447 if (rc == 0)
4448 goto read_complete;
4449
4450 read_data = kmap(page);
4451
4452
4453 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
4454
4455 if (rc < 0)
4456 goto io_error;
4457 else
4458 cifs_dbg(FYI, "Bytes read %d\n", rc);
4459
4460 /* keep atime from appearing older than mtime; that broke some apps */
4461 file_inode(file)->i_atime = current_time(file_inode(file));
4462 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4463 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4464 else
4465 file_inode(file)->i_atime = current_time(file_inode(file));
4466
4467 if (PAGE_SIZE > rc)
4468 memset(read_data + rc, 0, PAGE_SIZE - rc);
4469
4470 flush_dcache_page(page);
4471 SetPageUptodate(page);
4472
4473
4474 cifs_readpage_to_fscache(file_inode(file), page);
4475
4476 rc = 0;
4477
4478 io_error:
4479 kunmap(page);
4480 unlock_page(page);
4481
4482 read_complete:
4483 return rc;
4484 }
4485
4486 static int cifs_readpage(struct file *file, struct page *page)
4487 {
4488 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
4489 int rc = -EACCES;
4490 unsigned int xid;
4491
4492 xid = get_xid();
4493
4494 if (file->private_data == NULL) {
4495 rc = -EBADF;
4496 free_xid(xid);
4497 return rc;
4498 }
4499
4500 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
4501 page, (int)offset, (int)offset);
4502
4503 rc = cifs_readpage_worker(file, page, &offset);
4504
4505 free_xid(xid);
4506 return rc;
4507 }
4508
4509 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4510 {
4511 struct cifsFileInfo *open_file;
4512
4513 spin_lock(&cifs_inode->open_file_lock);
4514 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
4515 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4516 spin_unlock(&cifs_inode->open_file_lock);
4517 return 1;
4518 }
4519 }
4520 spin_unlock(&cifs_inode->open_file_lock);
4521 return 0;
4522 }
4523
4524 /*
4525  * We do not want to update the file size from the server for inodes
4526  * open for write, to avoid races with writepage extending the file.
4527  * Refreshing the inode only on size increases would still be tricky
4528  * without racing with write-behind page caching, so be conservative
4529  * and refuse the change while any writable handle is open.
4530  */
4530 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
4531 {
4532 if (!cifsInode)
4533 return true;
4534
4535 if (is_inode_writable(cifsInode)) {
4536 /* this inode is open for write at least once */
4537 struct cifs_sb_info *cifs_sb;
4538
4539 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
4540 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
4541 /* directio mounts bypass the page cache, so the size
4542    can be changed safely */
4543 return true;
4544 }
4545
4546 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4547 return true;
4548
4549 return false;
4550 } else
4551 return true;
4552 }
4553
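/*
 * write_begin: grab (and if necessary read or zero) the page that the
 * upcoming copy will land in.
 */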
4554 static int cifs_write_begin(struct file *file, struct address_space *mapping,
4555 loff_t pos, unsigned len, unsigned flags,
4556 struct page **pagep, void **fsdata)
4557 {
4558 int oncethru = 0;
4559 pgoff_t index = pos >> PAGE_SHIFT;
4560 loff_t offset = pos & (PAGE_SIZE - 1);
4561 loff_t page_start = pos & PAGE_MASK;
4562 loff_t i_size;
4563 struct page *page;
4564 int rc = 0;
4565
4566 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
4567
4568 start:
4569 page = grab_cache_page_write_begin(mapping, index, flags);
4570 if (!page) {
4571 rc = -ENOMEM;
4572 goto out;
4573 }
4574
4575 if (PageUptodate(page))
4576 goto out;
4577
4578
4579 /*
4580  * If we write a full page it will be uptodate, so no need to read
4581  * from the server; a short write ends up as a sync write instead.
4582  */
4583 if (len == PAGE_SIZE)
4584 goto out;
4585
4586 /*
4587 * optimize away the read when we have an oplock, and we're not
4588 * expecting to use any of the data we'd be reading in. That
4589 * is, when the page lies beyond the EOF, or straddles the EOF
4590 * and the write will cover all of the existing data.
4591 */
4592 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
4593 i_size = i_size_read(mapping->host);
4594 if (page_start >= i_size ||
4595 (offset == 0 && (pos + len) >= i_size)) {
4596 zero_user_segments(page, 0, offset,
4597 offset + len,
4598 PAGE_SIZE);
4599 /*
4600 * PageChecked means that the parts of the page
4601 * to which we're not writing are considered up
4602 * to date. Once the data is copied to the
4603 * page, it can be set uptodate.
4604 */
4605 SetPageChecked(page);
4606 goto out;
4607 }
4608 }
4609
4610 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
4611 /*
4612 * might as well read a page, it is fast enough. If we get
4613 * an error, we don't need to return it. cifs_write_end will
4614 * do a sync write instead since PG_uptodate isn't set.
4615 */
4616 cifs_readpage_worker(file, page, &page_start);
4617 put_page(page);
4618 oncethru = 1;
4619 goto start;
4620 } else {
4621 /*
4622 * Page is not uptodate and we either cannot or already tried to
4623 * read it in; cifs_write_end() will do a sync write instead.
4624 */
4625 }
4626 out:
4627 *pagep = page;
4628 return rc;
4629 }
4630
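/*
 * ->releasepage(): never release a page that still carries private state;
 * otherwise let fscache decide whether the page can be freed.
 */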
4631 static int cifs_release_page(struct page *page, gfp_t gfp)
4632 {
4633 if (PagePrivate(page))
4634 return 0;
4635
4636 return cifs_fscache_release_page(page, gfp);
4637 }
4638
4639 static void cifs_invalidate_page(struct page *page, unsigned int offset,
4640 unsigned int length)
4641 {
4642 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4643
4644 if (offset == 0 && length == PAGE_SIZE)
4645 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4646 }
4647
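/*
 * ->launder_page(): synchronously write back a dirty page (WB_SYNC_ALL)
 * before it is invalidated, then drop any copy held by fscache.
 */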
4648 static int cifs_launder_page(struct page *page)
4649 {
4650 int rc = 0;
4651 loff_t range_start = page_offset(page);
4652 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
4653 struct writeback_control wbc = {
4654 .sync_mode = WB_SYNC_ALL,
4655 .nr_to_write = 0,
4656 .range_start = range_start,
4657 .range_end = range_end,
4658 };
4659
4660 cifs_dbg(FYI, "Launder page: %p\n", page);
4661
4662 if (clear_page_dirty_for_io(page))
4663 rc = cifs_writepage_locked(page, &wbc);
4664
4665 cifs_fscache_invalidate_page(page, page->mapping->host);
4666 return rc;
4667 }
4668
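/*
 * Worker scheduled when the server breaks our oplock/lease: wait for
 * pending writers, downgrade the cached oplock state, flush (and, if read
 * caching is lost, invalidate) the page cache, re-push byte-range locks,
 * and acknowledge the break to the server unless it was cancelled.
 */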
4669 void cifs_oplock_break(struct work_struct *work)
4670 {
4671 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4672 oplock_break);
4673 struct inode *inode = d_inode(cfile->dentry);
4674 struct cifsInodeInfo *cinode = CIFS_I(inode);
4675 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4676 struct TCP_Server_Info *server = tcon->ses->server;
4677 int rc = 0;
4678
4679 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
4680 TASK_UNINTERRUPTIBLE);
4681
4682 server->ops->downgrade_oplock(server, cinode,
4683 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4684
4685 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
4686 cifs_has_mand_locks(cinode)) {
4687 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4688 inode);
4689 cinode->oplock = 0;
4690 }
4691
4692 if (inode && S_ISREG(inode->i_mode)) {
4693 if (CIFS_CACHE_READ(cinode))
4694 break_lease(inode, O_RDONLY);
4695 else
4696 break_lease(inode, O_WRONLY);
4697 rc = filemap_fdatawrite(inode->i_mapping);
4698 if (!CIFS_CACHE_READ(cinode)) {
4699 rc = filemap_fdatawait(inode->i_mapping);
4700 mapping_set_error(inode->i_mapping, rc);
4701 cifs_zap_mapping(inode);
4702 }
4703 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
4704 }
4705
4706 rc = cifs_push_locks(cfile);
4707 if (rc)
4708 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
4709
4710 /*
4711 * releasing stale oplock after recent reconnect of smb session using
4712 * a now incorrect file handle is not a data integrity issue but do
4713 * not bother sending an oplock release if session to server still is
4714 * disconnected since oplock already released by the server
4715 */
4716 if (!cfile->oplock_break_cancelled) {
4717 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4718 cinode);
4719 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
4720 }
4721 _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
4722 cifs_done_oplock_break(cinode);
4723 }
4724
4725 /*
4726 * The presence of cifs_direct_io() in the address space ops vector
4727 * allows open() with O_DIRECT, which would otherwise fail.
4728 *
4729 * Actual uncached reads and writes are handled by the
4730 * ->read_iter()/->write_iter() methods, so this entry only has to
4731 * reject the generic direct_IO path.
4732 */
4733
4734 static ssize_t
4735 cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
4736 {
4737 /*
4738 * FIXME
4739 * Eventually need to support direct IO for non forcedirectio mounts
4740 */
4741 return -EINVAL;
4742 }
4743
4744
4745 const struct address_space_operations cifs_addr_ops = {
4746 .readpage = cifs_readpage,
4747 .readpages = cifs_readpages,
4748 .writepage = cifs_writepage,
4749 .writepages = cifs_writepages,
4750 .write_begin = cifs_write_begin,
4751 .write_end = cifs_write_end,
4752 .set_page_dirty = __set_page_dirty_nobuffers,
4753 .releasepage = cifs_release_page,
4754 .direct_IO = cifs_direct_io,
4755 .invalidatepage = cifs_invalidate_page,
4756 .launder_page = cifs_launder_page,
4757 };
4758
4759 /*
4760 * cifs_readpages requires the server to support a buffer large enough to
4761 * contain the header plus one complete page of data. Otherwise, we need
4762 * to leave cifs_readpages out of the address space operations.
4763 */
4764 const struct address_space_operations cifs_addr_ops_smallbuf = {
4765 .readpage = cifs_readpage,
4766 .writepage = cifs_writepage,
4767 .writepages = cifs_writepages,
4768 .write_begin = cifs_write_begin,
4769 .write_end = cifs_write_end,
4770 .set_page_dirty = __set_page_dirty_nobuffers,
4771 .releasepage = cifs_release_page,
4772 .invalidatepage = cifs_invalidate_page,
4773 .launder_page = cifs_launder_page,
4774 };
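
/*
 * Illustrative sketch only, not part of this file: one way a caller could
 * choose between the two tables above, given that cifs_addr_ops_smallbuf
 * omits ->readpages (and ->direct_IO) for servers whose negotiated buffer
 * cannot hold a header plus a full page of data. The predicate name
 * "server_buf_fits_page" is a hypothetical stand-in for whatever check the
 * caller performs; the real selection logic lives outside this file.
 */
static inline const struct address_space_operations *
cifs_pick_addr_ops_example(bool server_buf_fits_page)
{
	if (server_buf_fits_page)
		return &cifs_addr_ops;		/* readahead via ->readpages is usable */
	return &cifs_addr_ops_smallbuf;		/* skip ->readpages entirely */
}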