/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

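/*
 * Convert VFS open flags (f_flags) to the NT access mask we will request
 * from the server for this handle.
 */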
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request; it can
                   cause an unnecessary access-denied error on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

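/*
 * Convert VFS open flags to the SMB_O_* flags used by the POSIX open
 * protocol extension.
 */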
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT) {
                posix_flags |= SMB_O_CREAT;
                if (flags & O_EXCL)
                        posix_flags |= SMB_O_EXCL;
        } else if (flags & O_EXCL)
                cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
                         current->comm, current->tgid);

        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

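/*
 * Choose the create disposition to send to the server based on the
 * O_CREAT / O_EXCL / O_TRUNC combination (see the mapping table in
 * cifs_nt_open below).
 */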
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

int cifs_posix_open(char *full_path, struct inode **pinode,
                        struct super_block *sb, int mode, unsigned int f_flags,
                        __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cifs_dbg(FYI, "posix open %s\n", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_open_parms oparms;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no direct match for the disposition
 *      FILE_SUPERSEDE (ie create whether or not the file exists).
 *      O_CREAT | O_TRUNC is similar, but it truncates the existing
 *      file rather than creating a new one as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on the open
 *      call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag
 *      and the read/write flags match reasonably.  O_LARGEFILE
 *      is irrelevant because largefile support is always used
 *      by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *      O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = fid;
        oparms.reconnect = false;

        rc = server->ops->open(xid, &oparms, oplock, buf);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, fid);

out:
        kfree(buf);
        return rc;
}

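/*
 * Return true if any open instance of this inode holds cached byte-range
 * locks.
 */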
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
        struct cifs_fid_locks *cur;
        bool has_locks = false;

        down_read(&cinode->lock_sem);
        list_for_each_entry(cur, &cinode->llist, llist) {
                if (!list_empty(&cur->locks)) {
                        has_locks = true;
                        break;
                }
        }
        up_read(&cinode->lock_sem);
        return has_locks;
}

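/*
 * Allocate and initialize the per-open-instance bookkeeping (cifsFileInfo
 * plus its lock list), attach it to the inode and tcon lists, and apply
 * the oplock the server granted.
 */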
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);
        struct TCP_Server_Info *server = tcon->ses->server;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);

        cifs_sb_active(inode->i_sb);

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        spin_lock(&cifs_file_list_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        fid->purge_cache = false;
        server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);
        /* if readable file instance, put it first in the list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        if (fid->purge_cache)
                cifs_zap_mapping(inode);

        file->private_data = cfile;
        return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file_list_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file_list_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;
        bool oplock_break_cancelled;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cifs_dbg(FYI, "closing last open instance for inode %p\n",
                         cifs_file->dentry->d_inode);
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        if (oplock_break_cancelled)
                cifs_done_oplock_break(cifsi);

        cifs_del_pending_open(&open);

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        cifs_sb_deactive(sb);
        kfree(cifs_file);
}

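/*
 * VFS ->open() for files on a cifs mount: try a POSIX open where the
 * server supports it, otherwise fall back to a Windows-style NT open.
 */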
int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);

        if (file->f_flags & O_DIRECT &&
            cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
                        file->f_op = &cifs_file_direct_nobrl_ops;
                else
                        file->f_op = &cifs_file_direct_ops;
        }

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix open succeeded\n");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
                                         tcon->ses->serverName,
                                         tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fallthrough to retry open the old way on network i/o
                 * or DFS errors.
                 */
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = INVALID_UID, /* no change */
                        .gid    = INVALID_GID, /* no change */
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        down_read(&cinode->lock_sem);
        if (cinode->can_cache_brlcks) {
                /* can cache locks - no need to relock */
                up_read(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        up_read(&cinode->lock_sem);
        return rc;
}

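/*
 * Reopen a file handle that has been marked invalid, e.g. after the
 * transport has been reconnected, and relock any byte-range locks that
 * were lost.
 */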
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_open_parms oparms;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = cfile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * Can not grab the rename sem here because various ops, including
         * those that already hold the rename sem, can end up causing
         * writepage to get called, and if the server was down that means
         * we end up here; we can never tell if the caller already has the
         * rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
                 inode, cfile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &cfile->fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
                        oparms.reconnect = true;
                        goto reopen_success;
                }
                /*
                 * Fall through to retry the open the old way on errors;
                 * especially in the reconnect path it is important to retry
                 * hard.
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &cfile->fid);

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = &cfile->fid;
        oparms.reconnect = true;

        /*
         * Can not refresh inode by passing in file_info buf to be returned by
         * ops->open and then calling get_inode_info with returned buf since
         * file might have write behind data that needs to be flushed and server
         * version of file size can be stale. If we knew for sure that inode was
         * not dirty locally we could do this.
         */
        rc = server->ops->open(xid, &oparms, &oplock, NULL);
        if (rc == -ENOENT && oparms.reconnect == false) {
                /* durable handle timeout is expired - open the file again */
                rc = server->ops->open(xid, &oparms, &oplock, NULL);
                /* indicate that we need to relock the file */
                oparms.reconnect = true;
        }

        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
                cifs_dbg(FYI, "oplock: %d\n", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to server already and could deadlock if
         * we tried to flush data, and since we do not know if we have data that
         * would invalidate the current end of file on the server we can not go
         * to the server to get the new inode info.
         */

        server->ops->set_fid(cfile, &cfile->fid, oplock);
        if (oparms.reconnect)
                cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cifs_file_list_lock);
        if (server->ops->dir_needs_close(cfile)) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cifs_file_list_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

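/*
 * Allocate a byte-range lock record for the current task, covering
 * [offset, offset + length).
 */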
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

#define CIFS_LOCK_OP    0
#define CIFS_READ_OP    1
#define CIFS_WRITE_OP   2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock,
                        int rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 cfile, conf_lock, rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock, CIFS_LOCK_OP);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock, CIFS_LOCK_OP);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        down_read(&cinode->lock_sem);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        up_read(&cinode->lock_sem);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                posix_unblock_lock(flock);
        }
        return rc;
}

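/*
 * Push all cached byte-range locks for this open file to the server,
 * batching as many LOCKING_ANDX ranges per request as the server's
 * maximum buffer size allows.
 */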
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        kfree(buf);
        free_xid(xid);
        return rc;
}

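/*
 * Snapshot of a POSIX lock, collected under flc_lock so the lock can be
 * sent to the server after flc_lock is dropped.
 */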
struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct inode *inode = cfile->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock;
        struct file_lock_context *flctx = inode->i_flctx;
        unsigned int count = 0, i;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = get_xid();

        if (!flctx)
                goto out;

        spin_lock(&flctx->flc_lock);
        list_for_each(el, &flctx->flc_posix) {
                count++;
        }
        spin_unlock(&flctx->flc_lock);

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_sem that
         * protects locking operations of this inode.
         */
        for (i = 0; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        el = locks_to_send.next;
        spin_lock(&flctx->flc_lock);
        list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cifs_dbg(VFS, "Can't push all brlocks!\n");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                lck->pid = flock->fl_pid;
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
                /* advance to the next preallocated structure */
                el = el->next;
        }
        spin_unlock(&flctx->flc_lock);

        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;

                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             lck->offset, lck->length, NULL,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        free_xid(xid);
        return rc;
err_out:
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        /* we are going to update can_cache_brlcks here - need write access */
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        cinode->can_cache_brlcks = false;
        up_write(&cinode->lock_sem);
        return rc;
}

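/*
 * Decode the flock flags and type into the server lock type and whether
 * this is a lock or unlock request and whether we should block waiting
 * for it.
 */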
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
                bool *wait_flag, struct TCP_Server_Info *server)
{
        if (flock->fl_flags & FL_POSIX)
                cifs_dbg(FYI, "Posix\n");
        if (flock->fl_flags & FL_FLOCK)
                cifs_dbg(FYI, "Flock\n");
        if (flock->fl_flags & FL_SLEEP) {
                cifs_dbg(FYI, "Blocking lock\n");
                *wait_flag = true;
        }
        if (flock->fl_flags & FL_ACCESS)
                cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
        if (flock->fl_flags & FL_LEASE)
                cifs_dbg(FYI, "Lease on file - not implemented yet\n");
        if (flock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
               FL_ACCESS | FL_LEASE | FL_CLOSE)))
                cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

        *type = server->vals->large_lock_type;
        if (flock->fl_type == F_WRLCK) {
                cifs_dbg(FYI, "F_WRLCK\n");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_UNLCK) {
                cifs_dbg(FYI, "F_UNLCK\n");
                *type |= server->vals->unlock_lock_type;
                *unlock = 1;
                /* Check if unlock includes more than one lock range */
        } else if (flock->fl_type == F_RDLCK) {
                cifs_dbg(FYI, "F_RDLCK\n");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_EXLCK) {
                cifs_dbg(FYI, "F_EXLCK\n");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_SHLCK) {
                cifs_dbg(FYI, "F_SHLCK\n");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else
                cifs_dbg(FYI, "Unknown type of lock\n");
}

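/*
 * Service F_GETLK: test whether the range can be locked, probing the
 * server with a lock/unlock pair when the answer is not cached locally.
 */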
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
           bool wait_flag, bool posix_lck, unsigned int xid)
{
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        __u16 netfid = cfile->fid.netfid;

        if (posix_lck) {
                int posix_lock_type;

                rc = cifs_posix_lock_test(file, flock);
                if (!rc)
                        return rc;

                if (type & server->vals->shared_lock_type)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
                rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
                                      flock->fl_start, length, flock,
                                      posix_lock_type, wait_flag);
                return rc;
        }

        rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
        if (!rc)
                return rc;

        /* BB we could chain these into one lock request BB */
        rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
                                    1, 0, false);
        if (rc == 0) {
                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                            type, 0, 1, false);
                flock->fl_type = F_UNLCK;
                if (rc != 0)
                        cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
                                 rc);
                return 0;
        }

        if (type & server->vals->shared_lock_type) {
                flock->fl_type = F_WRLCK;
                return 0;
        }

        type &= ~server->vals->exclusive_lock_type;

        rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                    type | server->vals->shared_lock_type,
                                    1, 0, false);
        if (rc == 0) {
                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                        type | server->vals->shared_lock_type, 0, 1, false);
                flock->fl_type = F_RDLCK;
                if (rc != 0)
                        cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
                                 rc);
        } else
                flock->fl_type = F_WRLCK;

        return 0;
}

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
        struct list_head *li, *tmp;
        list_for_each_safe(li, tmp, source)
                list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, llist, llist) {
                cifs_del_lock_waiters(li);
                list_del(&li->llist);
                kfree(li);
        }
}

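/*
 * Remove cached lock records covered by an unlock request and, for locks
 * the server already knows about, send batched unlock ranges.
 */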
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
                  unsigned int xid)
{
        int rc = 0, stored_rc;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        unsigned int i;
        unsigned int max_num, num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifsLockInfo *li, *tmp;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct list_head tmp_llist;

        INIT_LIST_HEAD(&tmp_llist);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf)
                return -EINVAL;

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        down_write(&cinode->lock_sem);
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (flock->fl_start > li->offset ||
                            (flock->fl_start + length) <
                            (li->offset + li->length))
                                continue;
                        if (current->tgid != li->pid)
                                continue;
                        if (types[i] != li->type)
                                continue;
                        if (cinode->can_cache_brlcks) {
                                /*
                                 * We can cache brlock requests - simply remove
                                 * a lock from the file's list.
                                 */
                                list_del(&li->llist);
                                cifs_del_lock_waiters(li);
                                kfree(li);
                                continue;
                        }
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        /*
                         * We need to save a lock here to let us add it again to
                         * the file's list if the unlock range request fails on
                         * the server.
                         */
                        list_move(&li->llist, &tmp_llist);
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       li->type, num, 0, buf);
                                if (stored_rc) {
                                        /*
                                         * We failed on the unlock range
                                         * request - add all locks from the tmp
                                         * list to the head of the file's list.
                                         */
                                        cifs_move_llist(&tmp_llist,
                                                        &cfile->llist->locks);
                                        rc = stored_rc;
                                } else
1454                                         /*
1455                                          * The unlock range request succeeded -
1456                                          * free the tmp list.
1457                                          */
1458                                         cifs_free_llist(&tmp_llist);
1459                                 cur = buf;
1460                                 num = 0;
1461                         } else
1462                                 cur++;
1463                 }
1464                 if (num) {
1465                         stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1466                                                types[i], num, 0, buf);
1467                         if (stored_rc) {
1468                                 cifs_move_llist(&tmp_llist,
1469                                                 &cfile->llist->locks);
1470                                 rc = stored_rc;
1471                         } else
1472                                 cifs_free_llist(&tmp_llist);
1473                 }
1474         }
1475
1476         up_write(&cinode->lock_sem);
1477         kfree(buf);
1478         return rc;
1479 }
1480
1481 static int
1482 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1483            bool wait_flag, bool posix_lck, int lock, int unlock,
1484            unsigned int xid)
1485 {
1486         int rc = 0;
1487         __u64 length = 1 + flock->fl_end - flock->fl_start;
1488         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1489         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1490         struct TCP_Server_Info *server = tcon->ses->server;
1491         struct inode *inode = cfile->dentry->d_inode;
1492
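        /*
         * On mounts where the POSIX extensions were negotiated, send the
         * byte-range lock as a POSIX lock request; otherwise fall through
         * to the mandatory (Windows-style) locking path below.
         */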
1493         if (posix_lck) {
1494                 int posix_lock_type;
1495
1496                 rc = cifs_posix_lock_set(file, flock);
1497                 if (!rc || rc < 0)
1498                         return rc;
1499
1500                 if (type & server->vals->shared_lock_type)
1501                         posix_lock_type = CIFS_RDLCK;
1502                 else
1503                         posix_lock_type = CIFS_WRLCK;
1504
1505                 if (unlock == 1)
1506                         posix_lock_type = CIFS_UNLCK;
1507
1508                 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1509                                       current->tgid, flock->fl_start, length,
1510                                       NULL, posix_lock_type, wait_flag);
1511                 goto out;
1512         }
1513
1514         if (lock) {
1515                 struct cifsLockInfo *lock;
1516
1517                 lock = cifs_lock_init(flock->fl_start, length, type);
1518                 if (!lock)
1519                         return -ENOMEM;
1520
1521                 rc = cifs_lock_add_if(cfile, lock, wait_flag);
1522                 if (rc < 0) {
1523                         kfree(lock);
1524                         return rc;
1525                 }
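                /*
                 * rc == 0 means the lock was handled locally and no server
                 * request is needed; a positive rc means we still have to
                 * send the lock to the server below.
                 */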
1526                 if (!rc)
1527                         goto out;
1528
1529                 /*
1530                  * A Windows 7 server can delay breaking a lease from read to
1531                  * None if we set a byte-range lock on a file - break it
1532                  * explicitly before sending the lock to the server, to be
1533                  * sure the next read won't conflict with non-overlapping
1534                  * locks due to page reading.
1535                  */
1536                 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1537                                         CIFS_CACHE_READ(CIFS_I(inode))) {
1538                         cifs_zap_mapping(inode);
1539                         cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1540                                  inode);
1541                         CIFS_I(inode)->oplock = 0;
1542                 }
1543
1544                 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1545                                             type, 1, 0, wait_flag);
1546                 if (rc) {
1547                         kfree(lock);
1548                         return rc;
1549                 }
1550
1551                 cifs_lock_add(cfile, lock);
1552         } else if (unlock)
1553                 rc = server->ops->mand_unlock_range(cfile, flock, xid);
1554
1555 out:
1556         if (flock->fl_flags & FL_POSIX)
1557                 posix_lock_file_wait(file, flock);
1558         return rc;
1559 }
1560
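/*
 * Entry point for the fcntl byte-range locking path: decode the request
 * flags, decide between POSIX and mandatory semantics, and dispatch to
 * cifs_getlk() or cifs_setlk().
 */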
1561 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1562 {
1563         int rc, xid;
1564         int lock = 0, unlock = 0;
1565         bool wait_flag = false;
1566         bool posix_lck = false;
1567         struct cifs_sb_info *cifs_sb;
1568         struct cifs_tcon *tcon;
1569         struct cifsInodeInfo *cinode;
1570         struct cifsFileInfo *cfile;
1571         __u16 netfid;
1572         __u32 type;
1573
1574         rc = -EACCES;
1575         xid = get_xid();
1576
1577         cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1578                  cmd, flock->fl_flags, flock->fl_type,
1579                  flock->fl_start, flock->fl_end);
1580
1581         cfile = (struct cifsFileInfo *)file->private_data;
1582         tcon = tlink_tcon(cfile->tlink);
1583
1584         cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1585                         tcon->ses->server);
1586
1587         cifs_sb = CIFS_FILE_SB(file);
1588         netfid = cfile->fid.netfid;
1589         cinode = CIFS_I(file_inode(file));
1590
1591         if (cap_unix(tcon->ses) &&
1592             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1593             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1594                 posix_lck = true;
1595         /*
1596          * BB add code here to normalize offset and length to account for
1597          * negative length, which we cannot accept over the wire.
1598          */
1599         if (IS_GETLK(cmd)) {
1600                 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1601                 free_xid(xid);
1602                 return rc;
1603         }
1604
1605         if (!lock && !unlock) {
1606                 /*
1607                  * if neither lock nor unlock was requested then there is
1608                  * nothing to do, since we do not know what the request is
1609                  */
1610                 free_xid(xid);
1611                 return -EOPNOTSUPP;
1612         }
1613
1614         rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1615                         xid);
1616         free_xid(xid);
1617         return rc;
1618 }
1619
1620 /*
1621  * update the file size (if needed) after a write. Should be called with
1622  * the inode->i_lock held
1623  */
1624 void
1625 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1626                       unsigned int bytes_written)
1627 {
1628         loff_t end_of_write = offset + bytes_written;
1629
1630         if (end_of_write > cifsi->server_eof)
1631                 cifsi->server_eof = end_of_write;
1632 }
1633
1634 static ssize_t
1635 cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1636            size_t write_size, loff_t *offset)
1637 {
1638         int rc = 0;
1639         unsigned int bytes_written = 0;
1640         unsigned int total_written;
1641         struct cifs_sb_info *cifs_sb;
1642         struct cifs_tcon *tcon;
1643         struct TCP_Server_Info *server;
1644         unsigned int xid;
1645         struct dentry *dentry = open_file->dentry;
1646         struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1647         struct cifs_io_parms io_parms;
1648
1649         cifs_sb = CIFS_SB(dentry->d_sb);
1650
1651         cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1652                  write_size, *offset, dentry);
1653
1654         tcon = tlink_tcon(open_file->tlink);
1655         server = tcon->ses->server;
1656
1657         if (!server->ops->sync_write)
1658                 return -ENOSYS;
1659
1660         xid = get_xid();
1661
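        /*
         * Send the data in chunks bounded by wp_retry_size until all of
         * write_size has gone out; the inner loop reopens an invalidated
         * handle and retries a chunk for as long as the send returns
         * -EAGAIN.
         */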
1662         for (total_written = 0; write_size > total_written;
1663              total_written += bytes_written) {
1664                 rc = -EAGAIN;
1665                 while (rc == -EAGAIN) {
1666                         struct kvec iov[2];
1667                         unsigned int len;
1668
1669                         if (open_file->invalidHandle) {
1670                                 /* we could deadlock if we called
1671                                    filemap_fdatawait from here, so tell
1672                                    reopen_file not to flush data to the
1673                                    server now */
1674                                 rc = cifs_reopen_file(open_file, false);
1675                                 if (rc != 0)
1676                                         break;
1677                         }
1678
1679                         len = min(server->ops->wp_retry_size(dentry->d_inode),
1680                                   (unsigned int)write_size - total_written);
1681                         /* iov[0] is reserved for smb header */
1682                         iov[1].iov_base = (char *)write_data + total_written;
1683                         iov[1].iov_len = len;
1684                         io_parms.pid = pid;
1685                         io_parms.tcon = tcon;
1686                         io_parms.offset = *offset;
1687                         io_parms.length = len;
1688                         rc = server->ops->sync_write(xid, &open_file->fid,
1689                                         &io_parms, &bytes_written, iov, 1);
1690                 }
1691                 if (rc || (bytes_written == 0)) {
1692                         if (total_written)
1693                                 break;
1694                         else {
1695                                 free_xid(xid);
1696                                 return rc;
1697                         }
1698                 } else {
1699                         spin_lock(&dentry->d_inode->i_lock);
1700                         cifs_update_eof(cifsi, *offset, bytes_written);
1701                         spin_unlock(&dentry->d_inode->i_lock);
1702                         *offset += bytes_written;
1703                 }
1704         }
1705
1706         cifs_stats_bytes_written(tcon, total_written);
1707
1708         if (total_written > 0) {
1709                 spin_lock(&dentry->d_inode->i_lock);
1710                 if (*offset > dentry->d_inode->i_size)
1711                         i_size_write(dentry->d_inode, *offset);
1712                 spin_unlock(&dentry->d_inode->i_lock);
1713         }
1714         mark_inode_dirty_sync(dentry->d_inode);
1715         free_xid(xid);
1716         return total_written;
1717 }
1718
1719 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1720                                         bool fsuid_only)
1721 {
1722         struct cifsFileInfo *open_file = NULL;
1723         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1724
1725         /* only filter by fsuid on multiuser mounts */
1726         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1727                 fsuid_only = false;
1728
1729         spin_lock(&cifs_file_list_lock);
1730         /* we could simply take the first list entry since write-only entries
1731            are always at the end of the list, but since the first entry might
1732            have a close pending, we go through the whole list */
1733         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1734                 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1735                         continue;
1736                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1737                         if (!open_file->invalidHandle) {
1738                                 /* found a good file */
1739                                 /* lock it so it will not be closed on us */
1740                                 cifsFileInfo_get_locked(open_file);
1741                                 spin_unlock(&cifs_file_list_lock);
1742                                 return open_file;
1743                         } /* else might as well continue, and look for
1744                              another, or simply have the caller reopen it
1745                              again rather than trying to fix this handle */
1746                 } else /* write only file */
1747                         break; /* write only files are last so must be done */
1748         }
1749         spin_unlock(&cifs_file_list_lock);
1750         return NULL;
1751 }
1752
1753 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1754                                         bool fsuid_only)
1755 {
1756         struct cifsFileInfo *open_file, *inv_file = NULL;
1757         struct cifs_sb_info *cifs_sb;
1758         bool any_available = false;
1759         int rc;
1760         unsigned int refind = 0;
1761
1762         /* Having a null inode here (because mapping->host was set to zero by
1763         the VFS or MM) should not happen, but we had reports of an oops (due to
1764         it being zero) during stress testcases, so we need to check for it */
1765
1766         if (cifs_inode == NULL) {
1767                 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
1768                 dump_stack();
1769                 return NULL;
1770         }
1771
1772         cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1773
1774         /* only filter by fsuid on multiuser mounts */
1775         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1776                 fsuid_only = false;
1777
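        /*
         * Look for a writable handle: prefer handles opened by the current
         * tgid, then fall back to any available handle, and finally try to
         * reopen an invalidated one, giving up after MAX_REOPEN_ATT
         * attempts.
         */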
1778         spin_lock(&cifs_file_list_lock);
1779 refind_writable:
1780         if (refind > MAX_REOPEN_ATT) {
1781                 spin_unlock(&cifs_file_list_lock);
1782                 return NULL;
1783         }
1784         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1785                 if (!any_available && open_file->pid != current->tgid)
1786                         continue;
1787                 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1788                         continue;
1789                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1790                         if (!open_file->invalidHandle) {
1791                                 /* found a good writable file */
1792                                 cifsFileInfo_get_locked(open_file);
1793                                 spin_unlock(&cifs_file_list_lock);
1794                                 return open_file;
1795                         } else {
1796                                 if (!inv_file)
1797                                         inv_file = open_file;
1798                         }
1799                 }
1800         }
1801         /* couldn't find a usable FH with the same pid, try any available */
1802         if (!any_available) {
1803                 any_available = true;
1804                 goto refind_writable;
1805         }
1806
1807         if (inv_file) {
1808                 any_available = false;
1809                 cifsFileInfo_get_locked(inv_file);
1810         }
1811
1812         spin_unlock(&cifs_file_list_lock);
1813
1814         if (inv_file) {
1815                 rc = cifs_reopen_file(inv_file, false);
1816                 if (!rc)
1817                         return inv_file;
1818                 else {
1819                         spin_lock(&cifs_file_list_lock);
1820                         list_move_tail(&inv_file->flist,
1821                                         &cifs_inode->openFileList);
1822                         spin_unlock(&cifs_file_list_lock);
1823                         cifsFileInfo_put(inv_file);
1824                         spin_lock(&cifs_file_list_lock);
1825                         ++refind;
1826                         goto refind_writable;
1827                 }
1828         }
1829
1830         return NULL;
1831 }
1832
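/*
 * Write the byte range [from, to) of the page back to the server through
 * any writable handle cached for the inode - the synchronous helper behind
 * cifs_writepage_locked().
 */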
1833 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1834 {
1835         struct address_space *mapping = page->mapping;
1836         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1837         char *write_data;
1838         int rc = -EFAULT;
1839         int bytes_written = 0;
1840         struct inode *inode;
1841         struct cifsFileInfo *open_file;
1842
1843         if (!mapping || !mapping->host)
1844                 return -EFAULT;
1845
1846         inode = page->mapping->host;
1847
1848         offset += (loff_t)from;
1849         write_data = kmap(page);
1850         write_data += from;
1851
1852         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1853                 kunmap(page);
1854                 return -EIO;
1855         }
1856
1857         /* racing with truncate? */
1858         if (offset > mapping->host->i_size) {
1859                 kunmap(page);
1860                 return 0; /* don't care */
1861         }
1862
1863         /* check to make sure that we are not extending the file */
1864         if (mapping->host->i_size - offset < (loff_t)to)
1865                 to = (unsigned)(mapping->host->i_size - offset);
1866
1867         open_file = find_writable_file(CIFS_I(mapping->host), false);
1868         if (open_file) {
1869                 bytes_written = cifs_write(open_file, open_file->pid,
1870                                            write_data, to - from, &offset);
1871                 cifsFileInfo_put(open_file);
1872                 /* Does mm or vfs already set times? */
1873                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1874                 if ((bytes_written > 0) && (offset))
1875                         rc = 0;
1876                 else if (bytes_written < 0)
1877                         rc = bytes_written;
1878         } else {
1879                 cifs_dbg(FYI, "No writeable filehandles for inode\n");
1880                 rc = -EIO;
1881         }
1882
1883         kunmap(page);
1884         return rc;
1885 }
1886
1887 static struct cifs_writedata *
1888 wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1889                           pgoff_t end, pgoff_t *index,
1890                           unsigned int *found_pages)
1891 {
1892         unsigned int nr_pages;
1893         struct page **pages;
1894         struct cifs_writedata *wdata;
1895
1896         wdata = cifs_writedata_alloc((unsigned int)tofind,
1897                                      cifs_writev_complete);
1898         if (!wdata)
1899                 return NULL;
1900
1901         /*
1902          * find_get_pages_tag seems to return a max of 256 on each
1903          * iteration, so we must call it several times in order to
1904          * fill the array or the wsize is effectively limited to
1905          * 256 * PAGE_CACHE_SIZE.
1906          */
1907         *found_pages = 0;
1908         pages = wdata->pages;
1909         do {
1910                 nr_pages = find_get_pages_tag(mapping, index,
1911                                               PAGECACHE_TAG_DIRTY, tofind,
1912                                               pages);
1913                 *found_pages += nr_pages;
1914                 tofind -= nr_pages;
1915                 pages += nr_pages;
1916         } while (nr_pages && tofind && *index <= end);
1917
1918         return wdata;
1919 }
1920
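/*
 * Lock and mark for writeback as many consecutive dirty pages as possible
 * from those found above, returning the number of pages prepared and
 * releasing any that will not be used.
 */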
1921 static unsigned int
1922 wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
1923                     struct address_space *mapping,
1924                     struct writeback_control *wbc,
1925                     pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
1926 {
1927         unsigned int nr_pages = 0, i;
1928         struct page *page;
1929
1930         for (i = 0; i < found_pages; i++) {
1931                 page = wdata->pages[i];
1932                 /*
1933                  * At this point we hold neither mapping->tree_lock nor
1934                  * lock on the page itself: the page may be truncated or
1935                  * invalidated (changing page->mapping to NULL), or even
1936                  * swizzled back from swapper_space to tmpfs file
1937                  * mapping
1938                  */
1939
1940                 if (nr_pages == 0)
1941                         lock_page(page);
1942                 else if (!trylock_page(page))
1943                         break;
1944
1945                 if (unlikely(page->mapping != mapping)) {
1946                         unlock_page(page);
1947                         break;
1948                 }
1949
1950                 if (!wbc->range_cyclic && page->index > end) {
1951                         *done = true;
1952                         unlock_page(page);
1953                         break;
1954                 }
1955
1956                 if (*next && (page->index != *next)) {
1957                         /* Not next consecutive page */
1958                         unlock_page(page);
1959                         break;
1960                 }
1961
1962                 if (wbc->sync_mode != WB_SYNC_NONE)
1963                         wait_on_page_writeback(page);
1964
1965                 if (PageWriteback(page) ||
1966                                 !clear_page_dirty_for_io(page)) {
1967                         unlock_page(page);
1968                         break;
1969                 }
1970
1971                 /*
1972                  * This actually clears the dirty bit in the radix tree.
1973                  * See cifs_writepage() for more commentary.
1974                  */
1975                 set_page_writeback(page);
1976                 if (page_offset(page) >= i_size_read(mapping->host)) {
1977                         *done = true;
1978                         unlock_page(page);
1979                         end_page_writeback(page);
1980                         break;
1981                 }
1982
1983                 wdata->pages[i] = page;
1984                 *next = page->index + 1;
1985                 ++nr_pages;
1986         }
1987
1988         /* reset index to refind any pages skipped */
1989         if (nr_pages == 0)
1990                 *index = wdata->pages[0]->index + 1;
1991
1992         /* put any pages we aren't going to use */
1993         for (i = nr_pages; i < found_pages; i++) {
1994                 page_cache_release(wdata->pages[i]);
1995                 wdata->pages[i] = NULL;
1996         }
1997
1998         return nr_pages;
1999 }
2000
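/*
 * Fill in the remaining wdata fields (offset, page and tail sizes, target
 * file handle) and hand the prepared pages to the transport's async_writev;
 * the pages are unlocked here whether or not the send succeeded.
 */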
2001 static int
2002 wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2003                  struct address_space *mapping, struct writeback_control *wbc)
2004 {
2005         int rc = 0;
2006         struct TCP_Server_Info *server;
2007         unsigned int i;
2008
2009         wdata->sync_mode = wbc->sync_mode;
2010         wdata->nr_pages = nr_pages;
2011         wdata->offset = page_offset(wdata->pages[0]);
2012         wdata->pagesz = PAGE_CACHE_SIZE;
2013         wdata->tailsz = min(i_size_read(mapping->host) -
2014                         page_offset(wdata->pages[nr_pages - 1]),
2015                         (loff_t)PAGE_CACHE_SIZE);
2016         wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz;
2017
2018         if (wdata->cfile != NULL)
2019                 cifsFileInfo_put(wdata->cfile);
2020         wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2021         if (!wdata->cfile) {
2022                 cifs_dbg(VFS, "No writable handles for inode\n");
2023                 rc = -EBADF;
2024         } else {
2025                 wdata->pid = wdata->cfile->pid;
2026                 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2027                 rc = server->ops->async_writev(wdata, cifs_writedata_release);
2028         }
2029
2030         for (i = 0; i < nr_pages; ++i)
2031                 unlock_page(wdata->pages[i]);
2032
2033         return rc;
2034 }
2035
2036 static int cifs_writepages(struct address_space *mapping,
2037                            struct writeback_control *wbc)
2038 {
2039         struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
2040         struct TCP_Server_Info *server;
2041         bool done = false, scanned = false, range_whole = false;
2042         pgoff_t end, index;
2043         struct cifs_writedata *wdata;
2044         int rc = 0;
2045
2046         /*
2047          * If wsize is smaller than the page cache size, default to writing
2048          * one page at a time via cifs_writepage
2049          */
2050         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
2051                 return generic_writepages(mapping, wbc);
2052
2053         if (wbc->range_cyclic) {
2054                 index = mapping->writeback_index; /* Start from prev offset */
2055                 end = -1;
2056         } else {
2057                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2058                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2059                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2060                         range_whole = true;
2061                 scanned = true;
2062         }
2063         server = cifs_sb_master_tcon(cifs_sb)->ses->server;
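        /*
         * Each pass below reserves up to wsize worth of send credits,
         * gathers that many dirty pages and submits them as a single async
         * write, returning the credits whenever a batch is abandoned.
         */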
2064 retry:
2065         while (!done && index <= end) {
2066                 unsigned int i, nr_pages, found_pages, wsize, credits;
2067                 pgoff_t next = 0, tofind, saved_index = index;
2068
2069                 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2070                                                    &wsize, &credits);
2071                 if (rc)
2072                         break;
2073
2074                 tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1;
2075
2076                 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2077                                                   &found_pages);
2078                 if (!wdata) {
2079                         rc = -ENOMEM;
2080                         add_credits_and_wake_if(server, credits, 0);
2081                         break;
2082                 }
2083
2084                 if (found_pages == 0) {
2085                         kref_put(&wdata->refcount, cifs_writedata_release);
2086                         add_credits_and_wake_if(server, credits, 0);
2087                         break;
2088                 }
2089
2090                 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2091                                                end, &index, &next, &done);
2092
2093                 /* nothing to write? */
2094                 if (nr_pages == 0) {
2095                         kref_put(&wdata->refcount, cifs_writedata_release);
2096                         add_credits_and_wake_if(server, credits, 0);
2097                         continue;
2098                 }
2099
2100                 wdata->credits = credits;
2101
2102                 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
2103
2104                 /* send failure -- clean up the mess */
2105                 if (rc != 0) {
2106                         add_credits_and_wake_if(server, wdata->credits, 0);
2107                         for (i = 0; i < nr_pages; ++i) {
2108                                 if (rc == -EAGAIN)
2109                                         redirty_page_for_writepage(wbc,
2110                                                            wdata->pages[i]);
2111                                 else
2112                                         SetPageError(wdata->pages[i]);
2113                                 end_page_writeback(wdata->pages[i]);
2114                                 page_cache_release(wdata->pages[i]);
2115                         }
2116                         if (rc != -EAGAIN)
2117                                 mapping_set_error(mapping, rc);
2118                 }
2119                 kref_put(&wdata->refcount, cifs_writedata_release);
2120
2121                 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2122                         index = saved_index;
2123                         continue;
2124                 }
2125
2126                 wbc->nr_to_write -= nr_pages;
2127                 if (wbc->nr_to_write <= 0)
2128                         done = true;
2129
2130                 index = next;
2131         }
2132
2133         if (!scanned && !done) {
2134                 /*
2135                  * We hit the last page and there is more work to be done: wrap
2136                  * back to the start of the file
2137                  */
2138                 scanned = true;
2139                 index = 0;
2140                 goto retry;
2141         }
2142
2143         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2144                 mapping->writeback_index = index;
2145
2146         return rc;
2147 }
2148
2149 static int
2150 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2151 {
2152         int rc;
2153         unsigned int xid;
2154
2155         xid = get_xid();
2156 /* BB add check for wbc flags */
2157         page_cache_get(page);
2158         if (!PageUptodate(page))
2159                 cifs_dbg(FYI, "ppw - page not up to date\n");
2160
2161         /*
2162          * Set the "writeback" flag, and clear "dirty" in the radix tree.
2163          *
2164          * A writepage() implementation always needs to do either this,
2165          * or re-dirty the page with "redirty_page_for_writepage()" in
2166          * the case of a failure.
2167          *
2168          * Just unlocking the page will cause the radix tree tag-bits
2169          * to fail to update with the state of the page correctly.
2170          */
2171         set_page_writeback(page);
2172 retry_write:
2173         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
2174         if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2175                 goto retry_write;
2176         else if (rc == -EAGAIN)
2177                 redirty_page_for_writepage(wbc, page);
2178         else if (rc != 0)
2179                 SetPageError(page);
2180         else
2181                 SetPageUptodate(page);
2182         end_page_writeback(page);
2183         page_cache_release(page);
2184         free_xid(xid);
2185         return rc;
2186 }
2187
2188 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2189 {
2190         int rc = cifs_writepage_locked(page, wbc);
2191         unlock_page(page);
2192         return rc;
2193 }
2194
2195 static int cifs_write_end(struct file *file, struct address_space *mapping,
2196                         loff_t pos, unsigned len, unsigned copied,
2197                         struct page *page, void *fsdata)
2198 {
2199         int rc;
2200         struct inode *inode = mapping->host;
2201         struct cifsFileInfo *cfile = file->private_data;
2202         struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2203         __u32 pid;
2204
2205         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2206                 pid = cfile->pid;
2207         else
2208                 pid = current->tgid;
2209
2210         cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2211                  page, pos, copied);
2212
2213         if (PageChecked(page)) {
2214                 if (copied == len)
2215                         SetPageUptodate(page);
2216                 ClearPageChecked(page);
2217         } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
2218                 SetPageUptodate(page);
2219
2220         if (!PageUptodate(page)) {
2221                 char *page_data;
2222                 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
2223                 unsigned int xid;
2224
2225                 xid = get_xid();
2226                 /* this is probably better than directly calling
2227                    cifs_partialpagewrite since in this function the file
2228                    handle is known, which we might as well leverage */
2229                 /* BB check if anything else is missing out of ppw,
2230                    such as updating the last write time */
2231                 page_data = kmap(page);
2232                 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
2233                 /* if (rc < 0) should we set writebehind rc? */
2234                 kunmap(page);
2235
2236                 free_xid(xid);
2237         } else {
2238                 rc = copied;
2239                 pos += copied;
2240                 set_page_dirty(page);
2241         }
2242
2243         if (rc > 0) {
2244                 spin_lock(&inode->i_lock);
2245                 if (pos > inode->i_size)
2246                         i_size_write(inode, pos);
2247                 spin_unlock(&inode->i_lock);
2248         }
2249
2250         unlock_page(page);
2251         page_cache_release(page);
2252
2253         return rc;
2254 }
2255
2256 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2257                       int datasync)
2258 {
2259         unsigned int xid;
2260         int rc = 0;
2261         struct cifs_tcon *tcon;
2262         struct TCP_Server_Info *server;
2263         struct cifsFileInfo *smbfile = file->private_data;
2264         struct inode *inode = file_inode(file);
2265         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2266
2267         rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2268         if (rc)
2269                 return rc;
2270         mutex_lock(&inode->i_mutex);
2271
2272         xid = get_xid();
2273
2274         cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2275                  file, datasync);
2276
2277         if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2278                 rc = cifs_zap_mapping(inode);
2279                 if (rc) {
2280                         cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2281                         rc = 0; /* don't care about it in fsync */
2282                 }
2283         }
2284
2285         tcon = tlink_tcon(smbfile->tlink);
2286         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2287                 server = tcon->ses->server;
2288                 if (server->ops->flush)
2289                         rc = server->ops->flush(xid, tcon, &smbfile->fid);
2290                 else
2291                         rc = -ENOSYS;
2292         }
2293
2294         free_xid(xid);
2295         mutex_unlock(&inode->i_mutex);
2296         return rc;
2297 }
2298
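/* Like cifs_strict_fsync() above, but without invalidating the mapping. */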
2299 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2300 {
2301         unsigned int xid;
2302         int rc = 0;
2303         struct cifs_tcon *tcon;
2304         struct TCP_Server_Info *server;
2305         struct cifsFileInfo *smbfile = file->private_data;
2306         struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2307         struct inode *inode = file->f_mapping->host;
2308
2309         rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2310         if (rc)
2311                 return rc;
2312         mutex_lock(&inode->i_mutex);
2313
2314         xid = get_xid();
2315
2316         cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2317                  file, datasync);
2318
2319         tcon = tlink_tcon(smbfile->tlink);
2320         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2321                 server = tcon->ses->server;
2322                 if (server->ops->flush)
2323                         rc = server->ops->flush(xid, tcon, &smbfile->fid);
2324                 else
2325                         rc = -ENOSYS;
2326         }
2327
2328         free_xid(xid);
2329         mutex_unlock(&inode->i_mutex);
2330         return rc;
2331 }
2332
2333 /*
2334  * As the file closes, flush all cached write data for this inode, checking
2335  * for write-behind errors.
2336  */
2337 int cifs_flush(struct file *file, fl_owner_t id)
2338 {
2339         struct inode *inode = file_inode(file);
2340         int rc = 0;
2341
2342         if (file->f_mode & FMODE_WRITE)
2343                 rc = filemap_write_and_wait(inode->i_mapping);
2344
2345         cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2346
2347         return rc;
2348 }
2349
2350 static int
2351 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2352 {
2353         int rc = 0;
2354         unsigned long i;
2355
2356         for (i = 0; i < num_pages; i++) {
2357                 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2358                 if (!pages[i]) {
2359                         /*
2360                          * save number of pages we have already allocated and
2361                          * return with ENOMEM error
2362                          */
2363                         num_pages = i;
2364                         rc = -ENOMEM;
2365                         break;
2366                 }
2367         }
2368
2369         if (rc) {
2370                 for (i = 0; i < num_pages; i++)
2371                         put_page(pages[i]);
2372         }
2373         return rc;
2374 }
2375
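/*
 * For example, with wsize = 57344 and len = 100000, clen is capped at
 * 57344 and, with 4096-byte pages, DIV_ROUND_UP(57344, 4096) = 14 pages.
 */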
2376 static inline
2377 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2378 {
2379         size_t num_pages;
2380         size_t clen;
2381
2382         clen = min_t(const size_t, len, wsize);
2383         num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2384
2385         if (cur_len)
2386                 *cur_len = clen;
2387
2388         return num_pages;
2389 }
2390
2391 static void
2392 cifs_uncached_writedata_release(struct kref *refcount)
2393 {
2394         int i;
2395         struct cifs_writedata *wdata = container_of(refcount,
2396                                         struct cifs_writedata, refcount);
2397
2398         for (i = 0; i < wdata->nr_pages; i++)
2399                 put_page(wdata->pages[i]);
2400         cifs_writedata_release(refcount);
2401 }
2402
2403 static void
2404 cifs_uncached_writev_complete(struct work_struct *work)
2405 {
2406         struct cifs_writedata *wdata = container_of(work,
2407                                         struct cifs_writedata, work);
2408         struct inode *inode = wdata->cfile->dentry->d_inode;
2409         struct cifsInodeInfo *cifsi = CIFS_I(inode);
2410
2411         spin_lock(&inode->i_lock);
2412         cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2413         if (cifsi->server_eof > inode->i_size)
2414                 i_size_write(inode, cifsi->server_eof);
2415         spin_unlock(&inode->i_lock);
2416
2417         complete(&wdata->done);
2418
2419         kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2420 }
2421
2422 static int
2423 wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2424                       size_t *len, unsigned long *num_pages)
2425 {
2426         size_t save_len, copied, bytes, cur_len = *len;
2427         unsigned long i, nr_pages = *num_pages;
2428
2429         save_len = cur_len;
2430         for (i = 0; i < nr_pages; i++) {
2431                 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2432                 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2433                 cur_len -= copied;
2434                 /*
2435                  * If we didn't copy as much as we expected, then that
2436                  * may mean we trod into an unmapped area. Stop copying
2437                  * at that point. On the next pass through the big
2438                  * loop, we'll likely end up getting a zero-length
2439                  * write and bailing out of it.
2440                  */
2441                 if (copied < bytes)
2442                         break;
2443         }
2444         cur_len = save_len - cur_len;
2445         *len = cur_len;
2446
2447         /*
2448          * If we have no data to send, then that probably means that
2449          * the copy above failed altogether. That's most likely because
2450          * the address in the iovec was bogus. Return -EFAULT and let
2451          * the caller free anything we allocated and bail out.
2452          */
2453         if (!cur_len)
2454                 return -EFAULT;
2455
2456         /*
2457          * i + 1 now represents the number of pages we actually used in
2458          * the copy phase above.
2459          */
2460         *num_pages = i + 1;
2461         return 0;
2462 }
2463
2464 static int
2465 cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2466                      struct cifsFileInfo *open_file,
2467                      struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
2468 {
2469         int rc = 0;
2470         size_t cur_len;
2471         unsigned long nr_pages, num_pages, i;
2472         struct cifs_writedata *wdata;
2473         struct iov_iter saved_from;
2474         loff_t saved_offset = offset;
2475         pid_t pid;
2476         struct TCP_Server_Info *server;
2477
2478         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2479                 pid = open_file->pid;
2480         else
2481                 pid = current->tgid;
2482
2483         server = tlink_tcon(open_file->tlink)->ses->server;
2484         memcpy(&saved_from, from, sizeof(struct iov_iter));
2485
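        /*
         * Each pass reserves up to wsize worth of send credits, copies that
         * much of the iterator into freshly allocated pages and issues an
         * async write; on -EAGAIN the iterator is rewound to the failed
         * offset and the chunk is retried.
         */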
2486         do {
2487                 unsigned int wsize, credits;
2488
2489                 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2490                                                    &wsize, &credits);
2491                 if (rc)
2492                         break;
2493
2494                 nr_pages = get_numpages(wsize, len, &cur_len);
2495                 wdata = cifs_writedata_alloc(nr_pages,
2496                                              cifs_uncached_writev_complete);
2497                 if (!wdata) {
2498                         rc = -ENOMEM;
2499                         add_credits_and_wake_if(server, credits, 0);
2500                         break;
2501                 }
2502
2503                 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2504                 if (rc) {
2505                         kfree(wdata);
2506                         add_credits_and_wake_if(server, credits, 0);
2507                         break;
2508                 }
2509
2510                 num_pages = nr_pages;
2511                 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2512                 if (rc) {
2513                         for (i = 0; i < nr_pages; i++)
2514                                 put_page(wdata->pages[i]);
2515                         kfree(wdata);
2516                         add_credits_and_wake_if(server, credits, 0);
2517                         break;
2518                 }
2519
2520                 /*
2521                  * Bring nr_pages down to the number of pages we actually used,
2522                  * and free any pages that we didn't use.
2523                  */
2524                 for ( ; nr_pages > num_pages; nr_pages--)
2525                         put_page(wdata->pages[nr_pages - 1]);
2526
2527                 wdata->sync_mode = WB_SYNC_ALL;
2528                 wdata->nr_pages = nr_pages;
2529                 wdata->offset = (__u64)offset;
2530                 wdata->cfile = cifsFileInfo_get(open_file);
2531                 wdata->pid = pid;
2532                 wdata->bytes = cur_len;
2533                 wdata->pagesz = PAGE_SIZE;
2534                 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2535                 wdata->credits = credits;
2536
2537                 if (!wdata->cfile->invalidHandle ||
2538                     !cifs_reopen_file(wdata->cfile, false))
2539                         rc = server->ops->async_writev(wdata,
2540                                         cifs_uncached_writedata_release);
2541                 if (rc) {
2542                         add_credits_and_wake_if(server, wdata->credits, 0);
2543                         kref_put(&wdata->refcount,
2544                                  cifs_uncached_writedata_release);
2545                         if (rc == -EAGAIN) {
2546                                 memcpy(from, &saved_from,
2547                                        sizeof(struct iov_iter));
2548                                 iov_iter_advance(from, offset - saved_offset);
2549                                 continue;
2550                         }
2551                         break;
2552                 }
2553
2554                 list_add_tail(&wdata->list, wdata_list);
2555                 offset += cur_len;
2556                 len -= cur_len;
2557         } while (len > 0);
2558
2559         return rc;
2560 }
2561
2562 static ssize_t
2563 cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
2564 {
2565         size_t len;
2566         ssize_t total_written = 0;
2567         struct cifsFileInfo *open_file;
2568         struct cifs_tcon *tcon;
2569         struct cifs_sb_info *cifs_sb;
2570         struct cifs_writedata *wdata, *tmp;
2571         struct list_head wdata_list;
2572         struct iov_iter saved_from;
2573         int rc;
2574
2575         len = iov_iter_count(from);
2576         rc = generic_write_checks(file, poffset, &len, 0);
2577         if (rc)
2578                 return rc;
2579
2580         if (!len)
2581                 return 0;
2582
2583         iov_iter_truncate(from, len);
2584
2585         INIT_LIST_HEAD(&wdata_list);
2586         cifs_sb = CIFS_FILE_SB(file);
2587         open_file = file->private_data;
2588         tcon = tlink_tcon(open_file->tlink);
2589
2590         if (!tcon->ses->server->ops->async_writev)
2591                 return -ENOSYS;
2592
2593         memcpy(&saved_from, from, sizeof(struct iov_iter));
2594
2595         rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb,
2596                                   &wdata_list);
2597
2598         /*
2599          * If at least one write was successfully sent, then discard the rc
2600          * value from the send phase. If the remaining writes succeed, we'll
2601          * end up returning whatever was written; if one fails, we'll pick
2602          * up a new rc value from it below.
2603          */
2604         if (!list_empty(&wdata_list))
2605                 rc = 0;
2606
2607         /*
2608          * Wait for and collect replies for any successful sends in order of
2609          * increasing offset. Once an error is hit or we get a fatal signal
2610          * while waiting, then return without waiting for any more replies.
2611          */
2612 restart_loop:
2613         list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2614                 if (!rc) {
2615                         /* FIXME: freezable too? */
2616                         rc = wait_for_completion_killable(&wdata->done);
2617                         if (rc)
2618                                 rc = -EINTR;
2619                         else if (wdata->result)
2620                                 rc = wdata->result;
2621                         else
2622                                 total_written += wdata->bytes;
2623
2624                         /* resend call if it's a retryable error */
2625                         if (rc == -EAGAIN) {
2626                                 struct list_head tmp_list;
2627                                 struct iov_iter tmp_from;
2628
2629                                 INIT_LIST_HEAD(&tmp_list);
2630                                 list_del_init(&wdata->list);
2631
2632                                 memcpy(&tmp_from, &saved_from,
2633                                        sizeof(struct iov_iter));
2634                                 iov_iter_advance(&tmp_from,
2635                                                  wdata->offset - *poffset);
2636
2637                                 rc = cifs_write_from_iter(wdata->offset,
2638                                                 wdata->bytes, &tmp_from,
2639                                                 open_file, cifs_sb, &tmp_list);
2640
2641                                 list_splice(&tmp_list, &wdata_list);
2642
2643                                 kref_put(&wdata->refcount,
2644                                          cifs_uncached_writedata_release);
2645                                 goto restart_loop;
2646                         }
2647                 }
2648                 list_del_init(&wdata->list);
2649                 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2650         }
2651
2652         if (total_written > 0)
2653                 *poffset += total_written;
2654
2655         cifs_stats_bytes_written(tcon, total_written);
2656         return total_written ? total_written : (ssize_t)rc;
2657 }
2658
2659 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
2660 {
2661         ssize_t written;
2662         struct inode *inode;
2663         loff_t pos = iocb->ki_pos;
2664
2665         inode = file_inode(iocb->ki_filp);
2666
2667         /*
2668          * BB - optimize this path for the case when signing is disabled: we
2669          * can drop the extra memory-to-memory copying and use the iovec
2670          * buffers to construct the write request directly.
2671          */
2672
2673         written = cifs_iovec_write(iocb->ki_filp, from, &pos);
2674         if (written > 0) {
2675                 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
2676                 iocb->ki_pos = pos;
2677         }
2678
2679         return written;
2680 }
2681
2682 static ssize_t
2683 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2684 {
2685         struct file *file = iocb->ki_filp;
2686         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2687         struct inode *inode = file->f_mapping->host;
2688         struct cifsInodeInfo *cinode = CIFS_I(inode);
2689         struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2690         ssize_t rc = -EACCES;
2691         loff_t lock_pos = iocb->ki_pos;
2692
2693         /*
2694          * We need to hold the sem to be sure nobody modifies lock list
2695          * with a brlock that prevents writing.
2696          */
2697         down_read(&cinode->lock_sem);
2698         mutex_lock(&inode->i_mutex);
2699         if (file->f_flags & O_APPEND)
2700                 lock_pos = i_size_read(inode);
2701         if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from),
2702                                      server->vals->exclusive_lock_type, NULL,
2703                                      CIFS_WRITE_OP)) {
2704                 rc = __generic_file_write_iter(iocb, from);
2705                 mutex_unlock(&inode->i_mutex);
2706
2707                 if (rc > 0) {
2708                         ssize_t err;
2709
2710                         err = generic_write_sync(file, iocb->ki_pos - rc, rc);
2711                         if (err < 0)
2712                                 rc = err;
2713                 }
2714         } else {
2715                 mutex_unlock(&inode->i_mutex);
2716         }
2717         up_read(&cinode->lock_sem);
2718         return rc;
2719 }
2720
2721 ssize_t
2722 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2723 {
2724         struct inode *inode = file_inode(iocb->ki_filp);
2725         struct cifsInodeInfo *cinode = CIFS_I(inode);
2726         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2727         struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2728                                                 iocb->ki_filp->private_data;
2729         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2730         ssize_t written;
2731
2732         written = cifs_get_writer(cinode);
2733         if (written)
2734                 return written;
2735
2736         if (CIFS_CACHE_WRITE(cinode)) {
2737                 if (cap_unix(tcon->ses) &&
2738                 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2739                   && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2740                         written = generic_file_write_iter(iocb, from);
2741                         goto out;
2742                 }
2743                 written = cifs_writev(iocb, from);
2744                 goto out;
2745         }
2746         /*
2747          * For non-oplocked files in strict cache mode we need to write the data
2748          * to the server exactly from pos to pos+len-1 rather than flush all
2749          * affected pages, because doing so may cause an error with mandatory
2750          * locks on these pages but not on the region from pos to pos+len-1.
2751          */
2752         written = cifs_user_writev(iocb, from);
2753         if (written > 0 && CIFS_CACHE_READ(cinode)) {
2754                 /*
2755                  * A Windows 7 server can delay breaking a level2 oplock when
2756                  * a write request comes in - break it on the client to
2757                  * prevent reading stale data.
2758                  */
2759                 cifs_zap_mapping(inode);
2760                 cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
2761                          inode);
2762                 cinode->oplock = 0;
2763         }
2764 out:
2765         cifs_put_writer(cinode);
2766         return written;
2767 }
2768
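/*
 * Allocate a readdata structure with room for nr_pages page pointers in its
 * trailing array and initialize the refcount, list head, completion and
 * work item used by the async read machinery.
 */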
2769 static struct cifs_readdata *
2770 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
2771 {
2772         struct cifs_readdata *rdata;
2773
2774         rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2775                         GFP_KERNEL);
2776         if (rdata != NULL) {
2777                 kref_init(&rdata->refcount);
2778                 INIT_LIST_HEAD(&rdata->list);
2779                 init_completion(&rdata->done);
2780                 INIT_WORK(&rdata->work, complete);
2781         }
2782
2783         return rdata;
2784 }
2785
2786 void
2787 cifs_readdata_release(struct kref *refcount)
2788 {
2789         struct cifs_readdata *rdata = container_of(refcount,
2790                                         struct cifs_readdata, refcount);
2791
2792         if (rdata->cfile)
2793                 cifsFileInfo_put(rdata->cfile);
2794
2795         kfree(rdata);
2796 }
2797
2798 static int
2799 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
2800 {
2801         int rc = 0;
2802         struct page *page;
2803         unsigned int i;
2804
2805         for (i = 0; i < nr_pages; i++) {
2806                 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2807                 if (!page) {
2808                         rc = -ENOMEM;
2809                         break;
2810                 }
2811                 rdata->pages[i] = page;
2812         }
2813
2814         if (rc) {
2815                 for (i = 0; i < nr_pages; i++) {
2816                         put_page(rdata->pages[i]);
2817                         rdata->pages[i] = NULL;
2818                 }
2819         }
2820         return rc;
2821 }
2822
2823 static void
2824 cifs_uncached_readdata_release(struct kref *refcount)
2825 {
2826         struct cifs_readdata *rdata = container_of(refcount,
2827                                         struct cifs_readdata, refcount);
2828         unsigned int i;
2829
2830         for (i = 0; i < rdata->nr_pages; i++) {
2831                 put_page(rdata->pages[i]);
2832                 rdata->pages[i] = NULL;
2833         }
2834         cifs_readdata_release(refcount);
2835 }
2836
2837 /**
2838  * cifs_readdata_to_iov - copy data from pages in response to an iovec
2839  * @rdata:      the readdata response with list of pages holding data
2840  * @iter:       destination for our data
2841  *
2842  * This function copies data from a list of pages in a readdata response into
2843  * an array of iovecs. It will first calculate where the data should go
2844  * based on the info in the readdata and then copy the data into that spot.
2845  */
2846 static int
2847 cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
2848 {
2849         size_t remaining = rdata->got_bytes;
2850         unsigned int i;
2851
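        /*
         * copy_page_to_iter() may copy fewer bytes than asked for, e.g. if
         * the destination buffer faults.  A short copy while the iterator
         * still has room means a fault, so stop early; any bytes left in
         * "remaining" turn into -EFAULT below.
         */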
2852         for (i = 0; i < rdata->nr_pages; i++) {
2853                 struct page *page = rdata->pages[i];
2854                 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
2855                 size_t written = copy_page_to_iter(page, 0, copy, iter);
2856                 remaining -= written;
2857                 if (written < copy && iov_iter_count(iter) > 0)
2858                         break;
2859         }
2860         return remaining ? -EFAULT : 0;
2861 }
2862
2863 static void
2864 cifs_uncached_readv_complete(struct work_struct *work)
2865 {
2866         struct cifs_readdata *rdata = container_of(work,
2867                                                 struct cifs_readdata, work);
2868
2869         complete(&rdata->done);
2870         kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2871 }
2872
2873 static int
2874 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2875                         struct cifs_readdata *rdata, unsigned int len)
2876 {
2877         int result = 0;
2878         unsigned int i;
2879         unsigned int nr_pages = rdata->nr_pages;
2880         struct kvec iov;
2881
2882         rdata->got_bytes = 0;
2883         rdata->tailsz = PAGE_SIZE;
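        /*
         * Map as much of "len" as each page can hold.  For example
         * (assuming 4 KiB pages), len = 9000 fills pages 0 and 1 completely
         * and puts the remaining 808 bytes in page 2, so tailsz ends up as
         * 808; any pages after that are released.
         */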
2884         for (i = 0; i < nr_pages; i++) {
2885                 struct page *page = rdata->pages[i];
2886
2887                 if (len >= PAGE_SIZE) {
2888                         /* enough data to fill the page */
2889                         iov.iov_base = kmap(page);
2890                         iov.iov_len = PAGE_SIZE;
2891                         cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
2892                                  i, iov.iov_base, iov.iov_len);
2893                         len -= PAGE_SIZE;
2894                 } else if (len > 0) {
2895                         /* enough for partial page, fill and zero the rest */
2896                         iov.iov_base = kmap(page);
2897                         iov.iov_len = len;
2898                         cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
2899                                  i, iov.iov_base, iov.iov_len);
2900                         memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2901                         rdata->tailsz = len;
2902                         len = 0;
2903                 } else {
2904                         /* no need to hold page hostage */
2905                         rdata->pages[i] = NULL;
2906                         rdata->nr_pages--;
2907                         put_page(page);
2908                         continue;
2909                 }
2910
2911                 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2912                 kunmap(page);
2913                 if (result < 0)
2914                         break;
2915
2916                 rdata->got_bytes += result;
2917         }
2918
2919         return rdata->got_bytes > 0 && result != -ECONNABORTED ?
2920                                                 rdata->got_bytes : result;
2921 }
2922
2923 static int
2924 cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
2925                      struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
2926 {
2927         struct cifs_readdata *rdata;
2928         unsigned int npages, rsize, credits;
2929         size_t cur_len;
2930         int rc;
2931         pid_t pid;
2932         struct TCP_Server_Info *server;
2933
2934         server = tlink_tcon(open_file->tlink)->ses->server;
2935
2936         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2937                 pid = open_file->pid;
2938         else
2939                 pid = current->tgid;
2940
2941         do {
2942                 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
2943                                                    &rsize, &credits);
2944                 if (rc)
2945                         break;
2946
2947                 cur_len = min_t(const size_t, len, rsize);
2948                 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
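                /*
                 * Example of the split (illustrative numbers): a 1 MiB read
                 * with rsize = 64 KiB is issued as 16 async requests of 16
                 * pages each on a 4 KiB-page machine, each request holding
                 * its own credits until it completes or fails.
                 */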
2949
2950                 /* allocate a readdata struct */
2951                 rdata = cifs_readdata_alloc(npages,
2952                                             cifs_uncached_readv_complete);
2953                 if (!rdata) {
2954                         add_credits_and_wake_if(server, credits, 0);
2955                         rc = -ENOMEM;
2956                         break;
2957                 }
2958
2959                 rc = cifs_read_allocate_pages(rdata, npages);
2960                 if (rc)
2961                         goto error;
2962
2963                 rdata->cfile = cifsFileInfo_get(open_file);
2964                 rdata->nr_pages = npages;
2965                 rdata->offset = offset;
2966                 rdata->bytes = cur_len;
2967                 rdata->pid = pid;
2968                 rdata->pagesz = PAGE_SIZE;
2969                 rdata->read_into_pages = cifs_uncached_read_into_pages;
2970                 rdata->credits = credits;
2971
2972                 if (!rdata->cfile->invalidHandle ||
2973                     !cifs_reopen_file(rdata->cfile, true))
2974                         rc = server->ops->async_readv(rdata);
2975 error:
2976                 if (rc) {
2977                         add_credits_and_wake_if(server, rdata->credits, 0);
2978                         kref_put(&rdata->refcount,
2979                                  cifs_uncached_readdata_release);
2980                         if (rc == -EAGAIN)
2981                                 continue;
2982                         break;
2983                 }
2984
2985                 list_add_tail(&rdata->list, rdata_list);
2986                 offset += cur_len;
2987                 len -= cur_len;
2988         } while (len > 0);
2989
2990         return rc;
2991 }
2992
2993 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
2994 {
2995         struct file *file = iocb->ki_filp;
2996         ssize_t rc;
2997         size_t len;
2998         ssize_t total_read = 0;
2999         loff_t offset = iocb->ki_pos;
3000         struct cifs_sb_info *cifs_sb;
3001         struct cifs_tcon *tcon;
3002         struct cifsFileInfo *open_file;
3003         struct cifs_readdata *rdata, *tmp;
3004         struct list_head rdata_list;
3005
3006         len = iov_iter_count(to);
3007         if (!len)
3008                 return 0;
3009
3010         INIT_LIST_HEAD(&rdata_list);
3011         cifs_sb = CIFS_FILE_SB(file);
3012         open_file = file->private_data;
3013         tcon = tlink_tcon(open_file->tlink);
3014
3015         if (!tcon->ses->server->ops->async_readv)
3016                 return -ENOSYS;
3017
3018         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3019                 cifs_dbg(FYI, "attempting read on write only file instance\n");
3020
3021         rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
3022
3023         /* if sending at least one read request succeeded, then reset rc */
3024         if (!list_empty(&rdata_list))
3025                 rc = 0;
3026
3027         len = iov_iter_count(to);
3028         /* the loop below should proceed in the order of increasing offsets */
3029 again:
3030         list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
3031                 if (!rc) {
3032                         /* FIXME: freezable sleep too? */
3033                         rc = wait_for_completion_killable(&rdata->done);
3034                         if (rc)
3035                                 rc = -EINTR;
3036                         else if (rdata->result == -EAGAIN) {
3037                                 /* resend call if it's a retryable error */
3038                                 struct list_head tmp_list;
3039                                 unsigned int got_bytes = rdata->got_bytes;
3040
3041                                 list_del_init(&rdata->list);
3042                                 INIT_LIST_HEAD(&tmp_list);
3043
3044                                 /*
3045                                  * We got part of the data and then a
3046                                  * reconnect happened -- fill the buffer
3047                                  * and continue reading.
3048                                  */
3049                                 if (got_bytes && got_bytes < rdata->bytes) {
3050                                         rc = cifs_readdata_to_iov(rdata, to);
3051                                         if (rc) {
3052                                                 kref_put(&rdata->refcount,
3053                                                 cifs_uncached_readdata_release);
3054                                                 continue;
3055                                         }
3056                                 }
3057
3058                                 rc = cifs_send_async_read(
3059                                                 rdata->offset + got_bytes,
3060                                                 rdata->bytes - got_bytes,
3061                                                 rdata->cfile, cifs_sb,
3062                                                 &tmp_list);
3063
3064                                 list_splice(&tmp_list, &rdata_list);
3065
3066                                 kref_put(&rdata->refcount,
3067                                          cifs_uncached_readdata_release);
3068                                 goto again;
3069                         } else if (rdata->result)
3070                                 rc = rdata->result;
3071                         else
3072                                 rc = cifs_readdata_to_iov(rdata, to);
3073
3074                         /* if there was a short read -- discard anything left */
3075                         if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3076                                 rc = -ENODATA;
3077                 }
3078                 list_del_init(&rdata->list);
3079                 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3080         }
3081
3082         total_read = len - iov_iter_count(to);
3083
3084         cifs_stats_bytes_read(tcon, total_read);
3085
3086         /* mask nodata case */
3087         if (rc == -ENODATA)
3088                 rc = 0;
3089
3090         if (total_read) {
3091                 iocb->ki_pos += total_read;
3092                 return total_read;
3093         }
3094         return rc;
3095 }
3096
3097 ssize_t
3098 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
3099 {
3100         struct inode *inode = file_inode(iocb->ki_filp);
3101         struct cifsInodeInfo *cinode = CIFS_I(inode);
3102         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3103         struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3104                                                 iocb->ki_filp->private_data;
3105         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3106         int rc = -EACCES;
3107
3108         /*
3109          * In strict cache mode we need to read from the server every time
3110          * if we don't have a level II oplock, because the server can delay
3111          * the mtime change - so we can't decide whether to invalidate the
3112          * inode.  Reading cached pages can also fail if there are mandatory
3113          * locks on pages affected by this read but not on the region from
3114          * pos to pos+len-1.
3115          */
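        /*
         * Dispatch summary (informational): without a read oplock we go
         * uncached via cifs_user_readv(); with POSIX byte-range locking
         * handled by the server (unix extensions with the FCNTL capability
         * and no "noposixbrl" mount flag) the generic cached path is safe;
         * otherwise we must first check for conflicting mandatory brlocks
         * under lock_sem.
         */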
3116         if (!CIFS_CACHE_READ(cinode))
3117                 return cifs_user_readv(iocb, to);
3118
3119         if (cap_unix(tcon->ses) &&
3120             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3121             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
3122                 return generic_file_read_iter(iocb, to);
3123
3124         /*
3125          * We need to hold the sem to be sure nobody modifies lock list
3126          * with a brlock that prevents reading.
3127          */
3128         down_read(&cinode->lock_sem);
3129         if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
3130                                      tcon->ses->server->vals->shared_lock_type,
3131                                      NULL, CIFS_READ_OP))
3132                 rc = generic_file_read_iter(iocb, to);
3133         up_read(&cinode->lock_sem);
3134         return rc;
3135 }
3136
3137 static ssize_t
3138 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
3139 {
3140         int rc = -EACCES;
3141         unsigned int bytes_read = 0;
3142         unsigned int total_read;
3143         unsigned int current_read_size;
3144         unsigned int rsize;
3145         struct cifs_sb_info *cifs_sb;
3146         struct cifs_tcon *tcon;
3147         struct TCP_Server_Info *server;
3148         unsigned int xid;
3149         char *cur_offset;
3150         struct cifsFileInfo *open_file;
3151         struct cifs_io_parms io_parms;
3152         int buf_type = CIFS_NO_BUFFER;
3153         __u32 pid;
3154
3155         xid = get_xid();
3156         cifs_sb = CIFS_FILE_SB(file);
3157
3158         /* FIXME: set up handlers for larger reads and/or convert to async */
3159         rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3160
3161         if (file->private_data == NULL) {
3162                 rc = -EBADF;
3163                 free_xid(xid);
3164                 return rc;
3165         }
3166         open_file = file->private_data;
3167         tcon = tlink_tcon(open_file->tlink);
3168         server = tcon->ses->server;
3169
3170         if (!server->ops->sync_read) {
3171                 free_xid(xid);
3172                 return -ENOSYS;
3173         }
3174
3175         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3176                 pid = open_file->pid;
3177         else
3178                 pid = current->tgid;
3179
3180         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3181                 cifs_dbg(FYI, "attempting read on write only file instance\n");
3182
3183         for (total_read = 0, cur_offset = read_data; read_size > total_read;
3184              total_read += bytes_read, cur_offset += bytes_read) {
3185                 do {
3186                         current_read_size = min_t(uint, read_size - total_read,
3187                                                   rsize);
3188                         /*
3189                          * For Windows ME and 9x we do not want to request
3190                          * more than the server negotiated, since it would
3191                          * refuse the read otherwise.
3192                          */
3193                         if ((tcon->ses) && !(tcon->ses->capabilities &
3194                                 tcon->ses->server->vals->cap_large_files)) {
3195                                 current_read_size = min_t(uint,
3196                                         current_read_size, CIFSMaxBufSize);
3197                         }
3198                         if (open_file->invalidHandle) {
3199                                 rc = cifs_reopen_file(open_file, true);
3200                                 if (rc != 0)
3201                                         break;
3202                         }
3203                         io_parms.pid = pid;
3204                         io_parms.tcon = tcon;
3205                         io_parms.offset = *offset;
3206                         io_parms.length = current_read_size;
3207                         rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
3208                                                     &bytes_read, &cur_offset,
3209                                                     &buf_type);
3210                 } while (rc == -EAGAIN);
3211
3212                 if (rc || (bytes_read == 0)) {
3213                         if (total_read) {
3214                                 break;
3215                         } else {
3216                                 free_xid(xid);
3217                                 return rc;
3218                         }
3219                 } else {
3220                         cifs_stats_bytes_read(tcon, total_read);
3221                         *offset += bytes_read;
3222                 }
3223         }
3224         free_xid(xid);
3225         return total_read;
3226 }
3227
3228 /*
3229  * If the page is mmap'ed into a process' page tables, then we need to make
3230  * sure that it doesn't change while being written back.
3231  */
3232 static int
3233 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3234 {
3235         struct page *page = vmf->page;
3236
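        /*
         * Returning VM_FAULT_LOCKED hands the still-locked page back to the
         * fault handler.  Writeback also takes the page lock before writing
         * a page out, so the page cannot change while it is being written
         * back.
         */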
3237         lock_page(page);
3238         return VM_FAULT_LOCKED;
3239 }
3240
3241 static struct vm_operations_struct cifs_file_vm_ops = {
3242         .fault = filemap_fault,
3243         .map_pages = filemap_map_pages,
3244         .page_mkwrite = cifs_page_mkwrite,
3245 };
3246
3247 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3248 {
3249         int rc = 0, xid;
3250         struct inode *inode = file_inode(file);
3251
3252         xid = get_xid();
3253
3254         if (!CIFS_CACHE_READ(CIFS_I(inode)))
3255                 rc = cifs_zap_mapping(inode);
3256
3257         /* fall through so free_xid() also runs on the error path */
3258         if (!rc)
3259                 rc = generic_file_mmap(file, vma);
3260         if (!rc)
3261                 vma->vm_ops = &cifs_file_vm_ops;
3262
3263         free_xid(xid);
3264         return rc;
3265 }
3266
3267 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3268 {
3269         int rc, xid;
3270
3271         xid = get_xid();
3272         rc = cifs_revalidate_file(file);
3273         if (rc) {
3274                 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3275                          rc);
3276                 free_xid(xid);
3277                 return rc;
3278         }
3279         rc = generic_file_mmap(file, vma);
3280         if (rc == 0)
3281                 vma->vm_ops = &cifs_file_vm_ops;
3282         free_xid(xid);
3283         return rc;
3284 }
3285
3286 static void
3287 cifs_readv_complete(struct work_struct *work)
3288 {
3289         unsigned int i, got_bytes;
3290         struct cifs_readdata *rdata = container_of(work,
3291                                                 struct cifs_readdata, work);
3292
3293         got_bytes = rdata->got_bytes;
3294         for (i = 0; i < rdata->nr_pages; i++) {
3295                 struct page *page = rdata->pages[i];
3296
3297                 lru_cache_add_file(page);
3298
3299                 if (rdata->result == 0 ||
3300                     (rdata->result == -EAGAIN && got_bytes)) {
3301                         flush_dcache_page(page);
3302                         SetPageUptodate(page);
3303                 }
3304
3305                 unlock_page(page);
3306
3307                 if (rdata->result == 0 ||
3308                     (rdata->result == -EAGAIN && got_bytes))
3309                         cifs_readpage_to_fscache(rdata->mapping->host, page);
3310
3311                 got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes);
3312
3313                 page_cache_release(page);
3314                 rdata->pages[i] = NULL;
3315         }
3316         kref_put(&rdata->refcount, cifs_readdata_release);
3317 }
3318
3319 static int
3320 cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3321                         struct cifs_readdata *rdata, unsigned int len)
3322 {
3323         int result = 0;
3324         unsigned int i;
3325         u64 eof;
3326         pgoff_t eof_index;
3327         unsigned int nr_pages = rdata->nr_pages;
3328         struct kvec iov;
3329
3330         /* determine the eof that the server (probably) has */
3331         eof = CIFS_I(rdata->mapping->host)->server_eof;
3332         eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
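        /*
         * eof_index is the index of the last page the server is believed
         * to have data for; e.g. with 4 KiB pages, eof = 10000 gives
         * eof_index = (10000 - 1) >> 12 = 2, so pages 0..2 hold data.
         */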
3333         cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
3334
3335         rdata->got_bytes = 0;
3336         rdata->tailsz = PAGE_CACHE_SIZE;
3337         for (i = 0; i < nr_pages; i++) {
3338                 struct page *page = rdata->pages[i];
3339
3340                 if (len >= PAGE_CACHE_SIZE) {
3341                         /* enough data to fill the page */
3342                         iov.iov_base = kmap(page);
3343                         iov.iov_len = PAGE_CACHE_SIZE;
3344                         cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3345                                  i, page->index, iov.iov_base, iov.iov_len);
3346                         len -= PAGE_CACHE_SIZE;
3347                 } else if (len > 0) {
3348                         /* enough for partial page, fill and zero the rest */
3349                         iov.iov_base = kmap(page);
3350                         iov.iov_len = len;
3351                         cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3352                                  i, page->index, iov.iov_base, iov.iov_len);
3353                         memset(iov.iov_base + len,
3354                                 '\0', PAGE_CACHE_SIZE - len);
3355                         rdata->tailsz = len;
3356                         len = 0;
3357                 } else if (page->index > eof_index) {
3358                         /*
3359                          * The VFS will not try to do readahead past the
3360                          * i_size, but it's possible that we have outstanding
3361                          * writes with gaps in the middle and the i_size hasn't
3362                          * caught up yet. Populate those with zeroed out pages
3363                          * to prevent the VFS from repeatedly attempting to
3364                          * fill them until the writes are flushed.
3365                          */
3366                         zero_user(page, 0, PAGE_CACHE_SIZE);
3367                         lru_cache_add_file(page);
3368                         flush_dcache_page(page);
3369                         SetPageUptodate(page);
3370                         unlock_page(page);
3371                         page_cache_release(page);
3372                         rdata->pages[i] = NULL;
3373                         rdata->nr_pages--;
3374                         continue;
3375                 } else {
3376                         /* no need to hold page hostage */
3377                         lru_cache_add_file(page);
3378                         unlock_page(page);
3379                         page_cache_release(page);
3380                         rdata->pages[i] = NULL;
3381                         rdata->nr_pages--;
3382                         continue;
3383                 }
3384
3385                 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3386                 kunmap(page);
3387                 if (result < 0)
3388                         break;
3389
3390                 rdata->got_bytes += result;
3391         }
3392
3393         return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3394                                                 rdata->got_bytes : result;
3395 }
3396
3397 static int
3398 readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3399                     unsigned int rsize, struct list_head *tmplist,
3400                     unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3401 {
3402         struct page *page, *tpage;
3403         unsigned int expected_index;
3404         int rc;
3405
3406         INIT_LIST_HEAD(tmplist);
3407
3408         page = list_entry(page_list->prev, struct page, lru);
3409
3410         /*
3411          * Lock the page and put it in the cache. Since no one else
3412          * should have access to this page, we're safe to simply set
3413          * PG_locked without checking it first.
3414          */
3415         __set_page_locked(page);
3416         rc = add_to_page_cache_locked(page, mapping,
3417                                       page->index, GFP_KERNEL);
3418
3419         /* give up if we can't stick it in the cache */
3420         if (rc) {
3421                 __clear_page_locked(page);
3422                 return rc;
3423         }
3424
3425         /* move first page to the tmplist */
3426         *offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3427         *bytes = PAGE_CACHE_SIZE;
3428         *nr_pages = 1;
3429         list_move_tail(&page->lru, tmplist);
3430
3431         /* now try and add more pages onto the request */
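        /*
         * E.g. with rsize = 16 KiB and 4 KiB pages, at most four pages with
         * consecutive indexes are coalesced into a single read request; a
         * gap in the indexes or hitting the rsize limit ends the batch.
         */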
3432         expected_index = page->index + 1;
3433         list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3434                 /* discontinuity ? */
3435                 if (page->index != expected_index)
3436                         break;
3437
3438                 /* would this page push the read over the rsize? */
3439                 if (*bytes + PAGE_CACHE_SIZE > rsize)
3440                         break;
3441
3442                 __set_page_locked(page);
3443                 if (add_to_page_cache_locked(page, mapping, page->index,
3444                                                                 GFP_KERNEL)) {
3445                         __clear_page_locked(page);
3446                         break;
3447                 }
3448                 list_move_tail(&page->lru, tmplist);
3449                 (*bytes) += PAGE_CACHE_SIZE;
3450                 expected_index++;
3451                 (*nr_pages)++;
3452         }
3453         return rc;
3454 }
3455
3456 static int cifs_readpages(struct file *file, struct address_space *mapping,
3457         struct list_head *page_list, unsigned num_pages)
3458 {
3459         int rc;
3460         struct list_head tmplist;
3461         struct cifsFileInfo *open_file = file->private_data;
3462         struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
3463         struct TCP_Server_Info *server;
3464         pid_t pid;
3465
3466         /*
3467          * Read as many pages as possible from fscache. Returns -ENOBUFS
3468          * immediately if the cookie is negative.
3469          *
3470          * After this point, every page in the list might have PG_fscache set,
3471          * so we will need to clear that on every page we don't use.
3472          */
3473         rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3474                                          &num_pages);
3475         if (rc == 0)
3476                 return rc;
3477
3478         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3479                 pid = open_file->pid;
3480         else
3481                 pid = current->tgid;
3482
3483         rc = 0;
3484         server = tlink_tcon(open_file->tlink)->ses->server;
3485
3486         cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3487                  __func__, file, mapping, num_pages);
3488
3489         /*
3490          * Start with the page at end of list and move it to private
3491          * list. Do the same with any following pages until we hit
3492          * the rsize limit, hit an index discontinuity, or run out of
3493          * pages. Issue the async read and then start the loop again
3494          * until the list is empty.
3495          *
3496          * Note that list order is important. The page_list is in
3497          * the order of declining indexes. When we put the pages in
3498          * the rdata->pages, then we want them in increasing order.
3499          */
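        /*
         * Concretely (informational): if page_list arrives as indexes
         * 11, 10, 9, 8, the tail page (index 8) seeds the first request and
         * 9, 10, 11 are appended in turn, so rdata->pages ends up in
         * increasing index order as required.
         */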
3500         while (!list_empty(page_list)) {
3501                 unsigned int i, nr_pages, bytes, rsize;
3502                 loff_t offset;
3503                 struct page *page, *tpage;
3504                 struct cifs_readdata *rdata;
3505                 unsigned credits;
3506
3507                 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3508                                                    &rsize, &credits);
3509                 if (rc)
3510                         break;
3511
3512                 /*
3513                  * Give up immediately if rsize is too small to read an entire
3514                  * page. The VFS will fall back to readpage. We should never
3515                  * reach this point however since we set ra_pages to 0 when the
3516                  * rsize is smaller than a cache page.
3517                  */
3518                 if (unlikely(rsize < PAGE_CACHE_SIZE)) {
3519                         add_credits_and_wake_if(server, credits, 0);
3520                         return 0;
3521                 }
3522
3523                 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3524                                          &nr_pages, &offset, &bytes);
3525                 if (rc) {
3526                         add_credits_and_wake_if(server, credits, 0);
3527                         break;
3528                 }
3529
3530                 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
3531                 if (!rdata) {
3532                         /* best to give up if we're out of mem */
3533                         list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3534                                 list_del(&page->lru);
3535                                 lru_cache_add_file(page);
3536                                 unlock_page(page);
3537                                 page_cache_release(page);
3538                         }
3539                         rc = -ENOMEM;
3540                         add_credits_and_wake_if(server, credits, 0);
3541                         break;
3542                 }
3543
3544                 rdata->cfile = cifsFileInfo_get(open_file);
3545                 rdata->mapping = mapping;
3546                 rdata->offset = offset;
3547                 rdata->bytes = bytes;
3548                 rdata->pid = pid;
3549                 rdata->pagesz = PAGE_CACHE_SIZE;
3550                 rdata->read_into_pages = cifs_readpages_read_into_pages;
3551                 rdata->credits = credits;
3552
3553                 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3554                         list_del(&page->lru);
3555                         rdata->pages[rdata->nr_pages++] = page;
3556                 }
3557
3558                 if (!rdata->cfile->invalidHandle ||
3559                     !cifs_reopen_file(rdata->cfile, true))
3560                         rc = server->ops->async_readv(rdata);
3561                 if (rc) {
3562                         add_credits_and_wake_if(server, rdata->credits, 0);
3563                         for (i = 0; i < rdata->nr_pages; i++) {
3564                                 page = rdata->pages[i];
3565                                 lru_cache_add_file(page);
3566                                 unlock_page(page);
3567                                 page_cache_release(page);
3568                         }
3569                         /* Fall back to readpage in error/reconnect cases */
3570                         kref_put(&rdata->refcount, cifs_readdata_release);
3571                         break;
3572                 }
3573
3574                 kref_put(&rdata->refcount, cifs_readdata_release);
3575         }
3576
3577         /* Any pages that have been shown to fscache but didn't get added to
3578          * the pagecache must be uncached before they get returned to the
3579          * allocator.
3580          */
3581         cifs_fscache_readpages_cancel(mapping->host, page_list);
3582         return rc;
3583 }
3584
3585 /*
3586  * cifs_readpage_worker must be called with the page pinned
3587  */
3588 static int cifs_readpage_worker(struct file *file, struct page *page,
3589         loff_t *poffset)
3590 {
3591         char *read_data;
3592         int rc;
3593
3594         /* Is the page cached? */
3595         rc = cifs_readpage_from_fscache(file_inode(file), page);
3596         if (rc == 0)
3597                 goto read_complete;
3598
3599         read_data = kmap(page);
3600         /* for reads over a certain size we could initiate async read-ahead */
3601
3602         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
3603
3604         if (rc < 0)
3605                 goto io_error;
3606         else
3607                 cifs_dbg(FYI, "Bytes read %d\n", rc);
3608
3609         file_inode(file)->i_atime =
3610                 current_fs_time(file_inode(file)->i_sb);
3611
3612         if (PAGE_CACHE_SIZE > rc)
3613                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3614
3615         flush_dcache_page(page);
3616         SetPageUptodate(page);
3617
3618         /* send this page to the cache */
3619         cifs_readpage_to_fscache(file_inode(file), page);
3620
3621         rc = 0;
3622
3623 io_error:
3624         kunmap(page);
3625         unlock_page(page);
3626
3627 read_complete:
3628         return rc;
3629 }
3630
3631 static int cifs_readpage(struct file *file, struct page *page)
3632 {
3633         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3634         int rc = -EACCES;
3635         unsigned int xid;
3636
3637         xid = get_xid();
3638
3639         if (file->private_data == NULL) {
3640                 rc = -EBADF;
3641                 free_xid(xid);
3642                 return rc;
3643         }
3644
3645         cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
3646                  page, (int)offset, (int)offset);
3647
3648         rc = cifs_readpage_worker(file, page, &offset);
3649
3650         free_xid(xid);
3651         return rc;
3652 }
3653
3654 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3655 {
3656         struct cifsFileInfo *open_file;
3657
3658         spin_lock(&cifs_file_list_lock);
3659         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3660                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3661                         spin_unlock(&cifs_file_list_lock);
3662                         return 1;
3663                 }
3664         }
3665         spin_unlock(&cifs_file_list_lock);
3666         return 0;
3667 }
3668
3669 /* We do not want to update the file size from the server for inodes
3670    open for write, to avoid races with writepage extending the file.
3671    In the future we could consider allowing a refresh of the inode
3672    only on increases in the file size, but this is tricky to do
3673    without racing with writebehind page caching in the current
3674    Linux kernel design. */
3675 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
3676 {
3677         if (!cifsInode)
3678                 return true;
3679
3680         if (is_inode_writable(cifsInode)) {
3681                 /* This inode is open for write at least once */
3682                 struct cifs_sb_info *cifs_sb;
3683
3684                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
3685                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3686                         /* since there is no page cache to corrupt on
3687                            direct I/O, we can change the size safely */
3688                         return true;
3689                 }
3690
3691                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
3692                         return true;
3693
3694                 return false;
3695         } else
3696                 return true;
3697 }
3698
3699 static int cifs_write_begin(struct file *file, struct address_space *mapping,
3700                         loff_t pos, unsigned len, unsigned flags,
3701                         struct page **pagep, void **fsdata)
3702 {
3703         int oncethru = 0;
3704         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3705         loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
3706         loff_t page_start = pos & PAGE_MASK;
3707         loff_t i_size;
3708         struct page *page;
3709         int rc = 0;
3710
3711         cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
3712
3713 start:
3714         page = grab_cache_page_write_begin(mapping, index, flags);
3715         if (!page) {
3716                 rc = -ENOMEM;
3717                 goto out;
3718         }
3719
3720         if (PageUptodate(page))
3721                 goto out;
3722
3723         /*
3724          * If we write a full page it will be up to date, no need to read from
3725          * the server. If the write is short, we'll end up doing a sync write
3726          * instead.
3727          */
3728         if (len == PAGE_CACHE_SIZE)
3729                 goto out;
3730
3731         /*
3732          * optimize away the read when we have an oplock, and we're not
3733          * expecting to use any of the data we'd be reading in. That
3734          * is, when the page lies beyond the EOF, or straddles the EOF
3735          * and the write will cover all of the existing data.
3736          */
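        /*
         * For example (assuming 4 KiB pages): with i_size = 6000, a write
         * at pos 8192 lands on a page entirely beyond EOF, and a write
         * covering pos 4096..6100 starts at offset 0 and reaches past
         * i_size - either way none of the page's old data is needed, so
         * the read from the server can be skipped.
         */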
3737         if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
3738                 i_size = i_size_read(mapping->host);
3739                 if (page_start >= i_size ||
3740                     (offset == 0 && (pos + len) >= i_size)) {
3741                         zero_user_segments(page, 0, offset,
3742                                            offset + len,
3743                                            PAGE_CACHE_SIZE);
3744                         /*
3745                          * PageChecked means that the parts of the page
3746                          * to which we're not writing are considered up
3747                          * to date. Once the data is copied to the
3748                          * page, it can be set uptodate.
3749                          */
3750                         SetPageChecked(page);
3751                         goto out;
3752                 }
3753         }
3754
3755         if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
3756                 /*
3757                  * might as well read a page, it is fast enough. If we get
3758                  * an error, we don't need to return it. cifs_write_end will
3759                  * do a sync write instead since PG_uptodate isn't set.
3760                  */
3761                 cifs_readpage_worker(file, page, &page_start);
3762                 page_cache_release(page);
3763                 oncethru = 1;
3764                 goto start;
3765         } else {
3766                 /* we could try using another file handle if there is one -
3767                    but how would we lock it to prevent a close of that handle
3768                    racing with this read? In any case this will be written
3769                    out by write_end, so it is fine */
3770         }
3771 out:
3772         *pagep = page;
3773         return rc;
3774 }
3775
3776 static int cifs_release_page(struct page *page, gfp_t gfp)
3777 {
3778         if (PagePrivate(page))
3779                 return 0;
3780
3781         return cifs_fscache_release_page(page, gfp);
3782 }
3783
3784 static void cifs_invalidate_page(struct page *page, unsigned int offset,
3785                                  unsigned int length)
3786 {
3787         struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3788
3789         if (offset == 0 && length == PAGE_CACHE_SIZE)
3790                 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3791 }
3792
3793 static int cifs_launder_page(struct page *page)
3794 {
3795         int rc = 0;
3796         loff_t range_start = page_offset(page);
3797         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3798         struct writeback_control wbc = {
3799                 .sync_mode = WB_SYNC_ALL,
3800                 .nr_to_write = 0,
3801                 .range_start = range_start,
3802                 .range_end = range_end,
3803         };
3804
3805         cifs_dbg(FYI, "Launder page: %p\n", page);
3806
3807         if (clear_page_dirty_for_io(page))
3808                 rc = cifs_writepage_locked(page, &wbc);
3809
3810         cifs_fscache_invalidate_page(page, page->mapping->host);
3811         return rc;
3812 }
3813
3814 void cifs_oplock_break(struct work_struct *work)
3815 {
3816         struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3817                                                   oplock_break);
3818         struct inode *inode = cfile->dentry->d_inode;
3819         struct cifsInodeInfo *cinode = CIFS_I(inode);
3820         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3821         struct TCP_Server_Info *server = tcon->ses->server;
3822         int rc = 0;
3823
3824         wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3825                         TASK_UNINTERRUPTIBLE);
3826
3827         server->ops->downgrade_oplock(server, cinode,
3828                 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
3829
3830         if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3831                                                 cifs_has_mand_locks(cinode)) {
3832                 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3833                          inode);
3834                 cinode->oplock = 0;
3835         }
3836
3837         if (inode && S_ISREG(inode->i_mode)) {
3838                 if (CIFS_CACHE_READ(cinode))
3839                         break_lease(inode, O_RDONLY);
3840                 else
3841                         break_lease(inode, O_WRONLY);
3842                 rc = filemap_fdatawrite(inode->i_mapping);
3843                 if (!CIFS_CACHE_READ(cinode)) {
3844                         rc = filemap_fdatawait(inode->i_mapping);
3845                         mapping_set_error(inode->i_mapping, rc);
3846                         cifs_zap_mapping(inode);
3847                 }
3848                 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3849         }
3850
3851         rc = cifs_push_locks(cfile);
3852         if (rc)
3853                 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3854
3855         /*
3856          * Releasing a stale oplock after a recent reconnect of the smb
3857          * session, using a now incorrect file handle, is not a data
3858          * integrity issue.  But don't bother sending an oplock release if
3859          * the session is still disconnected - the server already released it.
3860          */
3861         if (!cfile->oplock_break_cancelled) {
3862                 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3863                                                              cinode);
3864                 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3865         }
3866         cifs_done_oplock_break(cinode);
3867 }
3868
3869 /*
3870  * The presence of cifs_direct_io() in the address space ops vector
3871  * allows an open() with the O_DIRECT flag, which would have failed
3872  * otherwise.
3873  *
3874  * In the non-cached mode (mount with cache=none) we shunt off direct
3875  * read and write requests, so this method should never be called.
3876  * Direct I/O is not yet supported in the cached mode.
3877  */
3878 static ssize_t
3879 cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter,
3880                loff_t pos)
3881 {
3882         /*
3883          * FIXME
3884          * Eventually need to support direct IO for non forcedirectio mounts
3885          */
3886         return -EINVAL;
3887 }
3888
3889
3890 const struct address_space_operations cifs_addr_ops = {
3891         .readpage = cifs_readpage,
3892         .readpages = cifs_readpages,
3893         .writepage = cifs_writepage,
3894         .writepages = cifs_writepages,
3895         .write_begin = cifs_write_begin,
3896         .write_end = cifs_write_end,
3897         .set_page_dirty = __set_page_dirty_nobuffers,
3898         .releasepage = cifs_release_page,
3899         .direct_IO = cifs_direct_io,
3900         .invalidatepage = cifs_invalidate_page,
3901         .launder_page = cifs_launder_page,
3902 };
3903
3904 /*
3905  * cifs_readpages requires the server to support a buffer large enough to
3906  * contain the header plus one complete page of data.  Otherwise, we need
3907  * to leave cifs_readpages out of the address space operations.
3908  */
3909 const struct address_space_operations cifs_addr_ops_smallbuf = {
3910         .readpage = cifs_readpage,
3911         .writepage = cifs_writepage,
3912         .writepages = cifs_writepages,
3913         .write_begin = cifs_write_begin,
3914         .write_end = cifs_write_end,
3915         .set_page_dirty = __set_page_dirty_nobuffers,
3916         .releasepage = cifs_release_page,
3917         .invalidatepage = cifs_invalidate_page,
3918         .launder_page = cifs_launder_page,
3919 };