]> bbs.cooldavid.org Git - net-next-2.6.git/blame - fs/cifs/file.c
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[net-next-2.6.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
1da177e4
LT
35#include <asm/div64.h>
36#include "cifsfs.h"
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_unicode.h"
41#include "cifs_debug.h"
42#include "cifs_fs_sb.h"
43
1da177e4
LT
44static inline int cifs_convert_flags(unsigned int flags)
45{
46 if ((flags & O_ACCMODE) == O_RDONLY)
47 return GENERIC_READ;
48 else if ((flags & O_ACCMODE) == O_WRONLY)
49 return GENERIC_WRITE;
50 else if ((flags & O_ACCMODE) == O_RDWR) {
51 /* GENERIC_ALL is too much permission to request
52 can cause unnecessary access denied on create */
53 /* return GENERIC_ALL; */
54 return (GENERIC_READ | GENERIC_WRITE);
55 }
56
e10f7b55
JL
57 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
58 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
59 FILE_READ_DATA);
7fc8f4e9 60}
e10f7b55 61
7fc8f4e9
SF
62static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
63{
64 fmode_t posix_flags = 0;
e10f7b55 65
7fc8f4e9
SF
66 if ((flags & O_ACCMODE) == O_RDONLY)
67 posix_flags = FMODE_READ;
68 else if ((flags & O_ACCMODE) == O_WRONLY)
69 posix_flags = FMODE_WRITE;
70 else if ((flags & O_ACCMODE) == O_RDWR) {
71 /* GENERIC_ALL is too much permission to request
72 can cause unnecessary access denied on create */
73 /* return GENERIC_ALL; */
74 posix_flags = FMODE_READ | FMODE_WRITE;
75 }
76 /* can not map O_CREAT or O_EXCL or O_TRUNC flags when
77 reopening a file. They had their effect on the original open */
78 if (flags & O_APPEND)
79 posix_flags |= (fmode_t)O_APPEND;
6b2f3d1f
CH
80 if (flags & O_DSYNC)
81 posix_flags |= (fmode_t)O_DSYNC;
82 if (flags & __O_SYNC)
83 posix_flags |= (fmode_t)__O_SYNC;
7fc8f4e9
SF
84 if (flags & O_DIRECTORY)
85 posix_flags |= (fmode_t)O_DIRECTORY;
86 if (flags & O_NOFOLLOW)
87 posix_flags |= (fmode_t)O_NOFOLLOW;
88 if (flags & O_DIRECT)
89 posix_flags |= (fmode_t)O_DIRECT;
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
276a74a4 108/* all arguments to this function must be checked for validity in caller */
590a3fe0
JL
109static inline int
110cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
51c81764 111 struct cifsInodeInfo *pCifsInode, __u32 oplock,
590a3fe0 112 u16 netfid)
276a74a4 113{
276a74a4 114
276a74a4 115 write_lock(&GlobalSMBSeslock);
276a74a4
SF
116
117 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
118 if (pCifsInode == NULL) {
119 write_unlock(&GlobalSMBSeslock);
120 return -EINVAL;
121 }
122
276a74a4
SF
123 if (pCifsInode->clientCanCacheRead) {
124 /* we have the inode open somewhere else
125 no need to discard cache data */
126 goto psx_client_can_cache;
127 }
128
129 /* BB FIXME need to fix this check to move it earlier into posix_open
130 BB fIX following section BB FIXME */
131
132 /* if not oplocked, invalidate inode pages if mtime or file
133 size changed */
134/* temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
135 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
136 (file->f_path.dentry->d_inode->i_size ==
137 (loff_t)le64_to_cpu(buf->EndOfFile))) {
b6b38f70 138 cFYI(1, "inode unchanged on server");
276a74a4
SF
139 } else {
140 if (file->f_path.dentry->d_inode->i_mapping) {
141 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
142 if (rc != 0)
143 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
144 }
b6b38f70
JP
145 cFYI(1, "invalidating remote inode since open detected it "
146 "changed");
276a74a4
SF
147 invalidate_remote_inode(file->f_path.dentry->d_inode);
148 } */
149
150psx_client_can_cache:
151 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
152 pCifsInode->clientCanCacheAll = true;
153 pCifsInode->clientCanCacheRead = true;
b6b38f70
JP
154 cFYI(1, "Exclusive Oplock granted on inode %p",
155 file->f_path.dentry->d_inode);
276a74a4
SF
156 } else if ((oplock & 0xF) == OPLOCK_READ)
157 pCifsInode->clientCanCacheRead = true;
158
159 /* will have to change the unlock if we reenable the
160 filemap_fdatawrite (which does not seem necessary */
161 write_unlock(&GlobalSMBSeslock);
162 return 0;
163}
164
703a3b8e
SF
165static struct cifsFileInfo *
166cifs_fill_filedata(struct file *file)
167{
168 struct list_head *tmp;
169 struct cifsFileInfo *pCifsFile = NULL;
170 struct cifsInodeInfo *pCifsInode = NULL;
171
172 /* search inode for this file and fill in file->private_data */
173 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
174 read_lock(&GlobalSMBSeslock);
175 list_for_each(tmp, &pCifsInode->openFileList) {
176 pCifsFile = list_entry(tmp, struct cifsFileInfo, flist);
177 if ((pCifsFile->pfile == NULL) &&
178 (pCifsFile->pid == current->tgid)) {
179 /* mode set in cifs_create */
180
181 /* needed for writepage */
182 pCifsFile->pfile = file;
183 file->private_data = pCifsFile;
184 break;
185 }
186 }
187 read_unlock(&GlobalSMBSeslock);
188
189 if (file->private_data != NULL) {
190 return pCifsFile;
191 } else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL))
b6b38f70
JP
192 cERROR(1, "could not find file instance for "
193 "new file %p", file);
703a3b8e
SF
194 return NULL;
195}
196
1da177e4
LT
197/* all arguments to this function must be checked for validity in caller */
198static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
199 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
200 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
201 char *full_path, int xid)
202{
203 struct timespec temp;
204 int rc;
205
1da177e4
LT
206 if (pCifsInode->clientCanCacheRead) {
207 /* we have the inode open somewhere else
208 no need to discard cache data */
209 goto client_can_cache;
210 }
211
212 /* BB need same check in cifs_create too? */
213 /* if not oplocked, invalidate inode pages if mtime or file
214 size changed */
07119a4d 215 temp = cifs_NTtimeToUnix(buf->LastWriteTime);
e6a00296
JJS
216 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
217 (file->f_path.dentry->d_inode->i_size ==
1da177e4 218 (loff_t)le64_to_cpu(buf->EndOfFile))) {
b6b38f70 219 cFYI(1, "inode unchanged on server");
1da177e4 220 } else {
e6a00296 221 if (file->f_path.dentry->d_inode->i_mapping) {
ff215713
SF
222 /* BB no need to lock inode until after invalidate
223 since namei code should already have it locked? */
cea21805
JL
224 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
225 if (rc != 0)
226 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
1da177e4 227 }
b6b38f70
JP
228 cFYI(1, "invalidating remote inode since open detected it "
229 "changed");
e6a00296 230 invalidate_remote_inode(file->f_path.dentry->d_inode);
1da177e4
LT
231 }
232
233client_can_cache:
c18c842b 234 if (pTcon->unix_ext)
e6a00296 235 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
1da177e4
LT
236 full_path, inode->i_sb, xid);
237 else
e6a00296 238 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
8b1327f6 239 full_path, buf, inode->i_sb, xid, NULL);
1da177e4
LT
240
241 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
4b18f2a9
SF
242 pCifsInode->clientCanCacheAll = true;
243 pCifsInode->clientCanCacheRead = true;
b6b38f70
JP
244 cFYI(1, "Exclusive Oplock granted on inode %p",
245 file->f_path.dentry->d_inode);
1da177e4 246 } else if ((*oplock & 0xF) == OPLOCK_READ)
4b18f2a9 247 pCifsInode->clientCanCacheRead = true;
1da177e4
LT
248
249 return rc;
250}
251
252int cifs_open(struct inode *inode, struct file *file)
253{
254 int rc = -EACCES;
590a3fe0
JL
255 int xid;
256 __u32 oplock;
1da177e4 257 struct cifs_sb_info *cifs_sb;
276a74a4 258 struct cifsTconInfo *tcon;
1da177e4
LT
259 struct cifsFileInfo *pCifsFile;
260 struct cifsInodeInfo *pCifsInode;
1da177e4
LT
261 char *full_path = NULL;
262 int desiredAccess;
263 int disposition;
264 __u16 netfid;
265 FILE_ALL_INFO *buf = NULL;
266
267 xid = GetXid();
268
269 cifs_sb = CIFS_SB(inode->i_sb);
276a74a4 270 tcon = cifs_sb->tcon;
1da177e4 271
a6ce4932 272 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
703a3b8e
SF
273 pCifsFile = cifs_fill_filedata(file);
274 if (pCifsFile) {
0f3bc09e 275 rc = 0;
a6ce4932 276 FreeXid(xid);
0f3bc09e 277 return rc;
703a3b8e 278 }
1da177e4 279
e6a00296 280 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 281 if (full_path == NULL) {
0f3bc09e 282 rc = -ENOMEM;
1da177e4 283 FreeXid(xid);
0f3bc09e 284 return rc;
1da177e4
LT
285 }
286
b6b38f70
JP
287 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
288 inode, file->f_flags, full_path);
276a74a4
SF
289
290 if (oplockEnabled)
291 oplock = REQ_OPLOCK;
292 else
293 oplock = 0;
294
64cc2c63
SF
295 if (!tcon->broken_posix_open && tcon->unix_ext &&
296 (tcon->ses->capabilities & CAP_UNIX) &&
276a74a4
SF
297 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
298 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
299 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
fa588e0c 300 oflags |= SMB_O_CREAT;
276a74a4 301 /* can not refresh inode info since size could be stale */
3bc303c2 302 rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
fa588e0c
SF
303 inode->i_sb,
304 cifs_sb->mnt_file_mode /* ignored */,
305 oflags, &oplock, &netfid, xid);
276a74a4 306 if (rc == 0) {
b6b38f70 307 cFYI(1, "posix open succeeded");
276a74a4
SF
308 /* no need for special case handling of setting mode
309 on read only files needed here */
310
703a3b8e 311 pCifsFile = cifs_fill_filedata(file);
276a74a4 312 cifs_posix_open_inode_helper(inode, file, pCifsInode,
51c81764 313 oplock, netfid);
276a74a4 314 goto out;
64cc2c63
SF
315 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
316 if (tcon->ses->serverNOS)
b6b38f70 317 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
318 " unexpected error on SMB posix open"
319 ", disabling posix open support."
320 " Check if server update available.",
321 tcon->ses->serverName,
b6b38f70 322 tcon->ses->serverNOS);
64cc2c63 323 tcon->broken_posix_open = true;
276a74a4
SF
324 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
325 (rc != -EOPNOTSUPP)) /* path not found or net err */
326 goto out;
64cc2c63
SF
327 /* else fallthrough to retry open the old way on network i/o
328 or DFS errors */
276a74a4
SF
329 }
330
1da177e4
LT
331 desiredAccess = cifs_convert_flags(file->f_flags);
332
333/*********************************************************************
334 * open flag mapping table:
fb8c4b14 335 *
1da177e4 336 * POSIX Flag CIFS Disposition
fb8c4b14 337 * ---------- ----------------
1da177e4
LT
338 * O_CREAT FILE_OPEN_IF
339 * O_CREAT | O_EXCL FILE_CREATE
340 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
341 * O_TRUNC FILE_OVERWRITE
342 * none of the above FILE_OPEN
343 *
344 * Note that there is not a direct match between disposition
fb8c4b14 345 * FILE_SUPERSEDE (ie create whether or not file exists although
1da177e4
LT
346 * O_CREAT | O_TRUNC is similar but truncates the existing
347 * file rather than creating a new file as FILE_SUPERSEDE does
348 * (which uses the attributes / metadata passed in on open call)
349 *?
fb8c4b14 350 *? O_SYNC is a reasonable match to CIFS writethrough flag
1da177e4
LT
351 *? and the read write flags match reasonably. O_LARGEFILE
352 *? is irrelevant because largefile support is always used
353 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
354 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
355 *********************************************************************/
356
357 disposition = cifs_get_disposition(file->f_flags);
358
1da177e4
LT
359 /* BB pass O_SYNC flag through on file attributes .. BB */
360
361 /* Also refresh inode by passing in file_info buf returned by SMBOpen
362 and calling get_inode_info with returned buf (at least helps
363 non-Unix server case) */
364
fb8c4b14
SF
365 /* BB we can not do this if this is the second open of a file
366 and the first handle has writebehind data, we might be
1da177e4
LT
367 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
368 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
369 if (!buf) {
370 rc = -ENOMEM;
371 goto out;
372 }
5bafd765
SF
373
374 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
276a74a4 375 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
5bafd765 376 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
737b758c
SF
377 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
378 & CIFS_MOUNT_MAP_SPECIAL_CHR);
5bafd765
SF
379 else
380 rc = -EIO; /* no NT SMB support fall into legacy open below */
381
a9d02ad4
SF
382 if (rc == -EIO) {
383 /* Old server, try legacy style OpenX */
276a74a4 384 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
a9d02ad4
SF
385 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
386 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
387 & CIFS_MOUNT_MAP_SPECIAL_CHR);
388 }
1da177e4 389 if (rc) {
b6b38f70 390 cFYI(1, "cifs_open returned 0x%x", rc);
1da177e4
LT
391 goto out;
392 }
3321b791 393
086f68bd
JL
394 pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
395 file->f_flags);
396 file->private_data = pCifsFile;
1da177e4
LT
397 if (file->private_data == NULL) {
398 rc = -ENOMEM;
399 goto out;
400 }
1da177e4 401
3321b791
JL
402 rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
403 &oplock, buf, full_path, xid);
1da177e4 404
fb8c4b14 405 if (oplock & CIFS_CREATE_ACTION) {
1da177e4
LT
406 /* time to set mode which we can not set earlier due to
407 problems creating new read-only files */
276a74a4 408 if (tcon->unix_ext) {
4e1e7fb9
JL
409 struct cifs_unix_set_info_args args = {
410 .mode = inode->i_mode,
411 .uid = NO_CHANGE_64,
412 .gid = NO_CHANGE_64,
413 .ctime = NO_CHANGE_64,
414 .atime = NO_CHANGE_64,
415 .mtime = NO_CHANGE_64,
416 .device = 0,
417 };
01ea95e3
JL
418 CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
419 cifs_sb->local_nls,
420 cifs_sb->mnt_cifs_flags &
737b758c 421 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4
LT
422 }
423 }
424
425out:
426 kfree(buf);
427 kfree(full_path);
428 FreeXid(xid);
429 return rc;
430}
431
0418726b 432/* Try to reacquire byte range locks that were released when session */
1da177e4
LT
433/* to server was lost */
434static int cifs_relock_file(struct cifsFileInfo *cifsFile)
435{
436 int rc = 0;
437
438/* BB list all locks open on this file and relock */
439
440 return rc;
441}
442
4b18f2a9 443static int cifs_reopen_file(struct file *file, bool can_flush)
1da177e4
LT
444{
445 int rc = -EACCES;
590a3fe0
JL
446 int xid;
447 __u32 oplock;
1da177e4 448 struct cifs_sb_info *cifs_sb;
7fc8f4e9 449 struct cifsTconInfo *tcon;
1da177e4
LT
450 struct cifsFileInfo *pCifsFile;
451 struct cifsInodeInfo *pCifsInode;
fb8c4b14 452 struct inode *inode;
1da177e4
LT
453 char *full_path = NULL;
454 int desiredAccess;
455 int disposition = FILE_OPEN;
456 __u16 netfid;
457
ad7a2926 458 if (file->private_data)
1da177e4 459 pCifsFile = (struct cifsFileInfo *)file->private_data;
ad7a2926 460 else
1da177e4
LT
461 return -EBADF;
462
463 xid = GetXid();
f0a71eb8 464 mutex_lock(&pCifsFile->fh_mutex);
4b18f2a9 465 if (!pCifsFile->invalidHandle) {
f0a71eb8 466 mutex_unlock(&pCifsFile->fh_mutex);
0f3bc09e 467 rc = 0;
1da177e4 468 FreeXid(xid);
0f3bc09e 469 return rc;
1da177e4
LT
470 }
471
e6a00296 472 if (file->f_path.dentry == NULL) {
b6b38f70 473 cERROR(1, "no valid name if dentry freed");
3a9f462f
SF
474 dump_stack();
475 rc = -EBADF;
476 goto reopen_error_exit;
477 }
478
479 inode = file->f_path.dentry->d_inode;
fb8c4b14 480 if (inode == NULL) {
b6b38f70 481 cERROR(1, "inode not valid");
3a9f462f
SF
482 dump_stack();
483 rc = -EBADF;
484 goto reopen_error_exit;
1da177e4 485 }
50c2f753 486
1da177e4 487 cifs_sb = CIFS_SB(inode->i_sb);
7fc8f4e9 488 tcon = cifs_sb->tcon;
3a9f462f 489
1da177e4
LT
490/* can not grab rename sem here because various ops, including
491 those that already have the rename sem can end up causing writepage
492 to get called and if the server was down that means we end up here,
493 and we can never tell if the caller already has the rename_sem */
e6a00296 494 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 495 if (full_path == NULL) {
3a9f462f
SF
496 rc = -ENOMEM;
497reopen_error_exit:
f0a71eb8 498 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4 499 FreeXid(xid);
3a9f462f 500 return rc;
1da177e4
LT
501 }
502
b6b38f70
JP
503 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
504 inode, file->f_flags, full_path);
1da177e4
LT
505
506 if (oplockEnabled)
507 oplock = REQ_OPLOCK;
508 else
4b18f2a9 509 oplock = 0;
1da177e4 510
7fc8f4e9
SF
511 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
512 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
513 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
514 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
515 /* can not refresh inode info since size could be stale */
3bc303c2 516 rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
fa588e0c
SF
517 inode->i_sb,
518 cifs_sb->mnt_file_mode /* ignored */,
519 oflags, &oplock, &netfid, xid);
7fc8f4e9 520 if (rc == 0) {
b6b38f70 521 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
522 goto reopen_success;
523 }
524 /* fallthrough to retry open the old way on errors, especially
525 in the reconnect path it is important to retry hard */
526 }
527
528 desiredAccess = cifs_convert_flags(file->f_flags);
529
1da177e4 530 /* Can not refresh inode by passing in file_info buf to be returned
fb8c4b14
SF
531 by SMBOpen and then calling get_inode_info with returned buf
532 since file might have write behind data that needs to be flushed
1da177e4
LT
533 and server version of file size can be stale. If we knew for sure
534 that inode was not dirty locally we could do this */
535
7fc8f4e9 536 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
1da177e4 537 CREATE_NOT_DIR, &netfid, &oplock, NULL,
fb8c4b14 538 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
737b758c 539 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4 540 if (rc) {
f0a71eb8 541 mutex_unlock(&pCifsFile->fh_mutex);
b6b38f70
JP
542 cFYI(1, "cifs_open returned 0x%x", rc);
543 cFYI(1, "oplock: %d", oplock);
1da177e4 544 } else {
7fc8f4e9 545reopen_success:
1da177e4 546 pCifsFile->netfid = netfid;
4b18f2a9 547 pCifsFile->invalidHandle = false;
f0a71eb8 548 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4
LT
549 pCifsInode = CIFS_I(inode);
550 if (pCifsInode) {
551 if (can_flush) {
cea21805
JL
552 rc = filemap_write_and_wait(inode->i_mapping);
553 if (rc != 0)
554 CIFS_I(inode)->write_behind_rc = rc;
1da177e4
LT
555 /* temporarily disable caching while we
556 go to server to get inode info */
4b18f2a9
SF
557 pCifsInode->clientCanCacheAll = false;
558 pCifsInode->clientCanCacheRead = false;
7fc8f4e9 559 if (tcon->unix_ext)
1da177e4
LT
560 rc = cifs_get_inode_info_unix(&inode,
561 full_path, inode->i_sb, xid);
562 else
563 rc = cifs_get_inode_info(&inode,
564 full_path, NULL, inode->i_sb,
8b1327f6 565 xid, NULL);
1da177e4
LT
566 } /* else we are writing out data to server already
567 and could deadlock if we tried to flush data, and
568 since we do not know if we have data that would
569 invalidate the current end of file on the server
570 we can not go to the server to get the new inod
571 info */
572 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
4b18f2a9
SF
573 pCifsInode->clientCanCacheAll = true;
574 pCifsInode->clientCanCacheRead = true;
b6b38f70
JP
575 cFYI(1, "Exclusive Oplock granted on inode %p",
576 file->f_path.dentry->d_inode);
1da177e4 577 } else if ((oplock & 0xF) == OPLOCK_READ) {
4b18f2a9
SF
578 pCifsInode->clientCanCacheRead = true;
579 pCifsInode->clientCanCacheAll = false;
1da177e4 580 } else {
4b18f2a9
SF
581 pCifsInode->clientCanCacheRead = false;
582 pCifsInode->clientCanCacheAll = false;
1da177e4
LT
583 }
584 cifs_relock_file(pCifsFile);
585 }
586 }
1da177e4
LT
587 kfree(full_path);
588 FreeXid(xid);
589 return rc;
590}
591
592int cifs_close(struct inode *inode, struct file *file)
593{
594 int rc = 0;
15745320 595 int xid, timeout;
1da177e4
LT
596 struct cifs_sb_info *cifs_sb;
597 struct cifsTconInfo *pTcon;
598 struct cifsFileInfo *pSMBFile =
599 (struct cifsFileInfo *)file->private_data;
600
601 xid = GetXid();
602
603 cifs_sb = CIFS_SB(inode->i_sb);
604 pTcon = cifs_sb->tcon;
605 if (pSMBFile) {
7ee1af76 606 struct cifsLockInfo *li, *tmp;
ddb4cbfc 607 write_lock(&GlobalSMBSeslock);
4b18f2a9 608 pSMBFile->closePend = true;
1da177e4
LT
609 if (pTcon) {
610 /* no sense reconnecting to close a file that is
611 already closed */
3b795210 612 if (!pTcon->need_reconnect) {
ddb4cbfc 613 write_unlock(&GlobalSMBSeslock);
15745320 614 timeout = 2;
6ab409b5 615 while ((atomic_read(&pSMBFile->count) != 1)
15745320 616 && (timeout <= 2048)) {
23e7dd7d
SF
617 /* Give write a better chance to get to
618 server ahead of the close. We do not
619 want to add a wait_q here as it would
620 increase the memory utilization as
621 the struct would be in each open file,
fb8c4b14 622 but this should give enough time to
23e7dd7d 623 clear the socket */
b6b38f70 624 cFYI(DBG2, "close delay, write pending");
23e7dd7d
SF
625 msleep(timeout);
626 timeout *= 4;
4891d539 627 }
ddb4cbfc
SF
628 if (!pTcon->need_reconnect &&
629 !pSMBFile->invalidHandle)
630 rc = CIFSSMBClose(xid, pTcon,
1da177e4 631 pSMBFile->netfid);
ddb4cbfc
SF
632 } else
633 write_unlock(&GlobalSMBSeslock);
634 } else
635 write_unlock(&GlobalSMBSeslock);
7ee1af76
JA
636
637 /* Delete any outstanding lock records.
638 We'll lose them when the file is closed anyway. */
796e5661 639 mutex_lock(&pSMBFile->lock_mutex);
7ee1af76
JA
640 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
641 list_del(&li->llist);
642 kfree(li);
643 }
796e5661 644 mutex_unlock(&pSMBFile->lock_mutex);
7ee1af76 645
cbe0476f 646 write_lock(&GlobalSMBSeslock);
1da177e4
LT
647 list_del(&pSMBFile->flist);
648 list_del(&pSMBFile->tlist);
cbe0476f 649 write_unlock(&GlobalSMBSeslock);
6ab409b5 650 cifsFileInfo_put(file->private_data);
1da177e4
LT
651 file->private_data = NULL;
652 } else
653 rc = -EBADF;
654
4efa53f0 655 read_lock(&GlobalSMBSeslock);
1da177e4 656 if (list_empty(&(CIFS_I(inode)->openFileList))) {
b6b38f70 657 cFYI(1, "closing last open instance for inode %p", inode);
1da177e4
LT
658 /* if the file is not open we do not know if we can cache info
659 on this inode, much less write behind and read ahead */
4b18f2a9
SF
660 CIFS_I(inode)->clientCanCacheRead = false;
661 CIFS_I(inode)->clientCanCacheAll = false;
1da177e4 662 }
4efa53f0 663 read_unlock(&GlobalSMBSeslock);
fb8c4b14 664 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
1da177e4
LT
665 rc = CIFS_I(inode)->write_behind_rc;
666 FreeXid(xid);
667 return rc;
668}
669
670int cifs_closedir(struct inode *inode, struct file *file)
671{
672 int rc = 0;
673 int xid;
674 struct cifsFileInfo *pCFileStruct =
675 (struct cifsFileInfo *)file->private_data;
676 char *ptmp;
677
b6b38f70 678 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4
LT
679
680 xid = GetXid();
681
682 if (pCFileStruct) {
683 struct cifsTconInfo *pTcon;
fb8c4b14
SF
684 struct cifs_sb_info *cifs_sb =
685 CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
686
687 pTcon = cifs_sb->tcon;
688
b6b38f70 689 cFYI(1, "Freeing private data in close dir");
ddb4cbfc 690 write_lock(&GlobalSMBSeslock);
4b18f2a9
SF
691 if (!pCFileStruct->srch_inf.endOfSearch &&
692 !pCFileStruct->invalidHandle) {
693 pCFileStruct->invalidHandle = true;
ddb4cbfc 694 write_unlock(&GlobalSMBSeslock);
1da177e4 695 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
b6b38f70
JP
696 cFYI(1, "Closing uncompleted readdir with rc %d",
697 rc);
1da177e4
LT
698 /* not much we can do if it fails anyway, ignore rc */
699 rc = 0;
ddb4cbfc
SF
700 } else
701 write_unlock(&GlobalSMBSeslock);
1da177e4
LT
702 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
703 if (ptmp) {
b6b38f70 704 cFYI(1, "closedir free smb buf in srch struct");
1da177e4 705 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
fb8c4b14 706 if (pCFileStruct->srch_inf.smallBuf)
d47d7c1a
SF
707 cifs_small_buf_release(ptmp);
708 else
709 cifs_buf_release(ptmp);
1da177e4 710 }
1da177e4
LT
711 kfree(file->private_data);
712 file->private_data = NULL;
713 }
714 /* BB can we lock the filestruct while this is going on? */
715 FreeXid(xid);
716 return rc;
717}
718
7ee1af76
JA
719static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
720 __u64 offset, __u8 lockType)
721{
fb8c4b14
SF
722 struct cifsLockInfo *li =
723 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
7ee1af76
JA
724 if (li == NULL)
725 return -ENOMEM;
726 li->offset = offset;
727 li->length = len;
728 li->type = lockType;
796e5661 729 mutex_lock(&fid->lock_mutex);
7ee1af76 730 list_add(&li->llist, &fid->llist);
796e5661 731 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
732 return 0;
733}
734
1da177e4
LT
735int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
736{
737 int rc, xid;
1da177e4
LT
738 __u32 numLock = 0;
739 __u32 numUnlock = 0;
740 __u64 length;
4b18f2a9 741 bool wait_flag = false;
1da177e4 742 struct cifs_sb_info *cifs_sb;
13a6e42a 743 struct cifsTconInfo *tcon;
08547b03
SF
744 __u16 netfid;
745 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
13a6e42a 746 bool posix_locking = 0;
1da177e4
LT
747
748 length = 1 + pfLock->fl_end - pfLock->fl_start;
749 rc = -EACCES;
750 xid = GetXid();
751
b6b38f70 752 cFYI(1, "Lock parm: 0x%x flockflags: "
1da177e4 753 "0x%x flocktype: 0x%x start: %lld end: %lld",
fb8c4b14 754 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
b6b38f70 755 pfLock->fl_end);
1da177e4
LT
756
757 if (pfLock->fl_flags & FL_POSIX)
b6b38f70 758 cFYI(1, "Posix");
1da177e4 759 if (pfLock->fl_flags & FL_FLOCK)
b6b38f70 760 cFYI(1, "Flock");
1da177e4 761 if (pfLock->fl_flags & FL_SLEEP) {
b6b38f70 762 cFYI(1, "Blocking lock");
4b18f2a9 763 wait_flag = true;
1da177e4
LT
764 }
765 if (pfLock->fl_flags & FL_ACCESS)
b6b38f70
JP
766 cFYI(1, "Process suspended by mandatory locking - "
767 "not implemented yet");
1da177e4 768 if (pfLock->fl_flags & FL_LEASE)
b6b38f70 769 cFYI(1, "Lease on file - not implemented yet");
fb8c4b14 770 if (pfLock->fl_flags &
1da177e4 771 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
b6b38f70 772 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
1da177e4
LT
773
774 if (pfLock->fl_type == F_WRLCK) {
b6b38f70 775 cFYI(1, "F_WRLCK ");
1da177e4
LT
776 numLock = 1;
777 } else if (pfLock->fl_type == F_UNLCK) {
b6b38f70 778 cFYI(1, "F_UNLCK");
1da177e4 779 numUnlock = 1;
d47d7c1a
SF
780 /* Check if unlock includes more than
781 one lock range */
1da177e4 782 } else if (pfLock->fl_type == F_RDLCK) {
b6b38f70 783 cFYI(1, "F_RDLCK");
1da177e4
LT
784 lockType |= LOCKING_ANDX_SHARED_LOCK;
785 numLock = 1;
786 } else if (pfLock->fl_type == F_EXLCK) {
b6b38f70 787 cFYI(1, "F_EXLCK");
1da177e4
LT
788 numLock = 1;
789 } else if (pfLock->fl_type == F_SHLCK) {
b6b38f70 790 cFYI(1, "F_SHLCK");
1da177e4
LT
791 lockType |= LOCKING_ANDX_SHARED_LOCK;
792 numLock = 1;
793 } else
b6b38f70 794 cFYI(1, "Unknown type of lock");
1da177e4 795
e6a00296 796 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13a6e42a 797 tcon = cifs_sb->tcon;
1da177e4
LT
798
799 if (file->private_data == NULL) {
0f3bc09e 800 rc = -EBADF;
1da177e4 801 FreeXid(xid);
0f3bc09e 802 return rc;
1da177e4 803 }
08547b03
SF
804 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
805
13a6e42a
SF
806 if ((tcon->ses->capabilities & CAP_UNIX) &&
807 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
acc18aa1 808 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
13a6e42a 809 posix_locking = 1;
08547b03
SF
810 /* BB add code here to normalize offset and length to
811 account for negative length which we can not accept over the
812 wire */
1da177e4 813 if (IS_GETLK(cmd)) {
fb8c4b14 814 if (posix_locking) {
08547b03 815 int posix_lock_type;
fb8c4b14 816 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
817 posix_lock_type = CIFS_RDLCK;
818 else
819 posix_lock_type = CIFS_WRLCK;
13a6e42a 820 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
fc94cdb9 821 length, pfLock,
08547b03
SF
822 posix_lock_type, wait_flag);
823 FreeXid(xid);
824 return rc;
825 }
826
827 /* BB we could chain these into one lock request BB */
13a6e42a 828 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
08547b03 829 0, 1, lockType, 0 /* wait flag */ );
1da177e4 830 if (rc == 0) {
13a6e42a 831 rc = CIFSSMBLock(xid, tcon, netfid, length,
1da177e4
LT
832 pfLock->fl_start, 1 /* numUnlock */ ,
833 0 /* numLock */ , lockType,
834 0 /* wait flag */ );
835 pfLock->fl_type = F_UNLCK;
836 if (rc != 0)
b6b38f70
JP
837 cERROR(1, "Error unlocking previously locked "
838 "range %d during test of lock", rc);
1da177e4
LT
839 rc = 0;
840
841 } else {
842 /* if rc == ERR_SHARING_VIOLATION ? */
f05337c6
PS
843 rc = 0;
844
845 if (lockType & LOCKING_ANDX_SHARED_LOCK) {
846 pfLock->fl_type = F_WRLCK;
847 } else {
848 rc = CIFSSMBLock(xid, tcon, netfid, length,
849 pfLock->fl_start, 0, 1,
850 lockType | LOCKING_ANDX_SHARED_LOCK,
851 0 /* wait flag */);
852 if (rc == 0) {
853 rc = CIFSSMBLock(xid, tcon, netfid,
854 length, pfLock->fl_start, 1, 0,
855 lockType |
856 LOCKING_ANDX_SHARED_LOCK,
857 0 /* wait flag */);
858 pfLock->fl_type = F_RDLCK;
859 if (rc != 0)
f19159dc 860 cERROR(1, "Error unlocking "
f05337c6 861 "previously locked range %d "
f19159dc 862 "during test of lock", rc);
f05337c6
PS
863 rc = 0;
864 } else {
865 pfLock->fl_type = F_WRLCK;
866 rc = 0;
867 }
868 }
1da177e4
LT
869 }
870
871 FreeXid(xid);
872 return rc;
873 }
7ee1af76
JA
874
875 if (!numLock && !numUnlock) {
876 /* if no lock or unlock then nothing
877 to do since we do not know what it is */
878 FreeXid(xid);
879 return -EOPNOTSUPP;
880 }
881
882 if (posix_locking) {
08547b03 883 int posix_lock_type;
fb8c4b14 884 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
885 posix_lock_type = CIFS_RDLCK;
886 else
887 posix_lock_type = CIFS_WRLCK;
50c2f753 888
fb8c4b14 889 if (numUnlock == 1)
beb84dc8 890 posix_lock_type = CIFS_UNLCK;
7ee1af76 891
13a6e42a 892 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
fc94cdb9 893 length, pfLock,
08547b03 894 posix_lock_type, wait_flag);
7ee1af76 895 } else {
fb8c4b14
SF
896 struct cifsFileInfo *fid =
897 (struct cifsFileInfo *)file->private_data;
7ee1af76
JA
898
899 if (numLock) {
13a6e42a 900 rc = CIFSSMBLock(xid, tcon, netfid, length,
fb8c4b14 901 pfLock->fl_start,
7ee1af76
JA
902 0, numLock, lockType, wait_flag);
903
904 if (rc == 0) {
905 /* For Windows locks we must store them. */
906 rc = store_file_lock(fid, length,
907 pfLock->fl_start, lockType);
908 }
909 } else if (numUnlock) {
910 /* For each stored lock that this unlock overlaps
911 completely, unlock it. */
912 int stored_rc = 0;
913 struct cifsLockInfo *li, *tmp;
914
6b70c955 915 rc = 0;
796e5661 916 mutex_lock(&fid->lock_mutex);
7ee1af76
JA
917 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
918 if (pfLock->fl_start <= li->offset &&
c19eb710 919 (pfLock->fl_start + length) >=
39db810c 920 (li->offset + li->length)) {
13a6e42a 921 stored_rc = CIFSSMBLock(xid, tcon,
fb8c4b14 922 netfid,
7ee1af76 923 li->length, li->offset,
4b18f2a9 924 1, 0, li->type, false);
7ee1af76
JA
925 if (stored_rc)
926 rc = stored_rc;
2c964d1f
PS
927 else {
928 list_del(&li->llist);
929 kfree(li);
930 }
7ee1af76
JA
931 }
932 }
796e5661 933 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
934 }
935 }
936
d634cc15 937 if (pfLock->fl_flags & FL_POSIX)
1da177e4
LT
938 posix_lock_file_wait(file, pfLock);
939 FreeXid(xid);
940 return rc;
941}
942
fbec9ab9
JL
943/*
944 * Set the timeout on write requests past EOF. For some servers (Windows)
945 * these calls can be very long.
946 *
947 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
948 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
949 * The 10M cutoff is totally arbitrary. A better scheme for this would be
950 * welcome if someone wants to suggest one.
951 *
952 * We may be able to do a better job with this if there were some way to
953 * declare that a file should be sparse.
954 */
955static int
956cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
957{
958 if (offset <= cifsi->server_eof)
959 return CIFS_STD_OP;
960 else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
961 return CIFS_VLONG_OP;
962 else
963 return CIFS_LONG_OP;
964}
965
966/* update the file size (if needed) after a write */
967static void
968cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
969 unsigned int bytes_written)
970{
971 loff_t end_of_write = offset + bytes_written;
972
973 if (end_of_write > cifsi->server_eof)
974 cifsi->server_eof = end_of_write;
975}
976
1da177e4
LT
977ssize_t cifs_user_write(struct file *file, const char __user *write_data,
978 size_t write_size, loff_t *poffset)
979{
980 int rc = 0;
981 unsigned int bytes_written = 0;
982 unsigned int total_written;
983 struct cifs_sb_info *cifs_sb;
984 struct cifsTconInfo *pTcon;
985 int xid, long_op;
986 struct cifsFileInfo *open_file;
fbec9ab9 987 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 988
e6a00296 989 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
990
991 pTcon = cifs_sb->tcon;
992
b6b38f70
JP
993 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
994 *poffset, file->f_path.dentry->d_name.name); */
1da177e4
LT
995
996 if (file->private_data == NULL)
997 return -EBADF;
c33f8d32 998 open_file = (struct cifsFileInfo *) file->private_data;
50c2f753 999
838726c4
JL
1000 rc = generic_write_checks(file, poffset, &write_size, 0);
1001 if (rc)
1002 return rc;
1003
1da177e4 1004 xid = GetXid();
1da177e4 1005
fbec9ab9 1006 long_op = cifs_write_timeout(cifsi, *poffset);
1da177e4
LT
1007 for (total_written = 0; write_size > total_written;
1008 total_written += bytes_written) {
1009 rc = -EAGAIN;
1010 while (rc == -EAGAIN) {
1011 if (file->private_data == NULL) {
1012 /* file has been closed on us */
1013 FreeXid(xid);
1014 /* if we have gotten here we have written some data
1015 and blocked, and the file has been freed on us while
1016 we blocked so return what we managed to write */
1017 return total_written;
fb8c4b14 1018 }
1da177e4
LT
1019 if (open_file->closePend) {
1020 FreeXid(xid);
1021 if (total_written)
1022 return total_written;
1023 else
1024 return -EBADF;
1025 }
1026 if (open_file->invalidHandle) {
1da177e4
LT
1027 /* we could deadlock if we called
1028 filemap_fdatawait from here so tell
1029 reopen_file not to flush data to server
1030 now */
4b18f2a9 1031 rc = cifs_reopen_file(file, false);
1da177e4
LT
1032 if (rc != 0)
1033 break;
1034 }
1035
1036 rc = CIFSSMBWrite(xid, pTcon,
1037 open_file->netfid,
1038 min_t(const int, cifs_sb->wsize,
1039 write_size - total_written),
1040 *poffset, &bytes_written,
1041 NULL, write_data + total_written, long_op);
1042 }
1043 if (rc || (bytes_written == 0)) {
1044 if (total_written)
1045 break;
1046 else {
1047 FreeXid(xid);
1048 return rc;
1049 }
fbec9ab9
JL
1050 } else {
1051 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1052 *poffset += bytes_written;
fbec9ab9 1053 }
133672ef 1054 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1055 15 seconds is plenty */
1056 }
1057
a4544347 1058 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1059
1060 /* since the write may have blocked check these pointers again */
3677db10
SF
1061 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1062 struct inode *inode = file->f_path.dentry->d_inode;
fb8c4b14
SF
1063/* Do not update local mtime - server will set its actual value on write
1064 * inode->i_ctime = inode->i_mtime =
3677db10
SF
1065 * current_fs_time(inode->i_sb);*/
1066 if (total_written > 0) {
1067 spin_lock(&inode->i_lock);
1068 if (*poffset > file->f_path.dentry->d_inode->i_size)
1069 i_size_write(file->f_path.dentry->d_inode,
1da177e4 1070 *poffset);
3677db10 1071 spin_unlock(&inode->i_lock);
1da177e4 1072 }
fb8c4b14 1073 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1074 }
1075 FreeXid(xid);
1076 return total_written;
1077}
1078
1079static ssize_t cifs_write(struct file *file, const char *write_data,
d9414774 1080 size_t write_size, loff_t *poffset)
1da177e4
LT
1081{
1082 int rc = 0;
1083 unsigned int bytes_written = 0;
1084 unsigned int total_written;
1085 struct cifs_sb_info *cifs_sb;
1086 struct cifsTconInfo *pTcon;
1087 int xid, long_op;
1088 struct cifsFileInfo *open_file;
fbec9ab9 1089 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 1090
e6a00296 1091 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1092
1093 pTcon = cifs_sb->tcon;
1094
b6b38f70
JP
1095 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1096 *poffset, file->f_path.dentry->d_name.name);
1da177e4
LT
1097
1098 if (file->private_data == NULL)
1099 return -EBADF;
c33f8d32 1100 open_file = (struct cifsFileInfo *)file->private_data;
50c2f753 1101
1da177e4 1102 xid = GetXid();
1da177e4 1103
fbec9ab9 1104 long_op = cifs_write_timeout(cifsi, *poffset);
1da177e4
LT
1105 for (total_written = 0; write_size > total_written;
1106 total_written += bytes_written) {
1107 rc = -EAGAIN;
1108 while (rc == -EAGAIN) {
1109 if (file->private_data == NULL) {
1110 /* file has been closed on us */
1111 FreeXid(xid);
1112 /* if we have gotten here we have written some data
1113 and blocked, and the file has been freed on us
fb8c4b14 1114 while we blocked so return what we managed to
1da177e4
LT
1115 write */
1116 return total_written;
fb8c4b14 1117 }
1da177e4
LT
1118 if (open_file->closePend) {
1119 FreeXid(xid);
1120 if (total_written)
1121 return total_written;
1122 else
1123 return -EBADF;
1124 }
1125 if (open_file->invalidHandle) {
1da177e4
LT
1126 /* we could deadlock if we called
1127 filemap_fdatawait from here so tell
fb8c4b14 1128 reopen_file not to flush data to
1da177e4 1129 server now */
4b18f2a9 1130 rc = cifs_reopen_file(file, false);
1da177e4
LT
1131 if (rc != 0)
1132 break;
1133 }
fb8c4b14
SF
1134 if (experimEnabled || (pTcon->ses->server &&
1135 ((pTcon->ses->server->secMode &
08775834 1136 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
c01f36a8 1137 == 0))) {
3e84469d
SF
1138 struct kvec iov[2];
1139 unsigned int len;
1140
0ae0efad 1141 len = min((size_t)cifs_sb->wsize,
3e84469d
SF
1142 write_size - total_written);
1143 /* iov[0] is reserved for smb header */
1144 iov[1].iov_base = (char *)write_data +
1145 total_written;
1146 iov[1].iov_len = len;
d6e04ae6 1147 rc = CIFSSMBWrite2(xid, pTcon,
3e84469d 1148 open_file->netfid, len,
d6e04ae6 1149 *poffset, &bytes_written,
3e84469d 1150 iov, 1, long_op);
d6e04ae6 1151 } else
60808233
SF
1152 rc = CIFSSMBWrite(xid, pTcon,
1153 open_file->netfid,
1154 min_t(const int, cifs_sb->wsize,
1155 write_size - total_written),
1156 *poffset, &bytes_written,
1157 write_data + total_written,
1158 NULL, long_op);
1da177e4
LT
1159 }
1160 if (rc || (bytes_written == 0)) {
1161 if (total_written)
1162 break;
1163 else {
1164 FreeXid(xid);
1165 return rc;
1166 }
fbec9ab9
JL
1167 } else {
1168 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1169 *poffset += bytes_written;
fbec9ab9 1170 }
133672ef 1171 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1172 15 seconds is plenty */
1173 }
1174
a4544347 1175 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1176
1177 /* since the write may have blocked check these pointers again */
3677db10 1178 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
004c46b9 1179/*BB We could make this contingent on superblock ATIME flag too */
3677db10
SF
1180/* file->f_path.dentry->d_inode->i_ctime =
1181 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1182 if (total_written > 0) {
1183 spin_lock(&file->f_path.dentry->d_inode->i_lock);
1184 if (*poffset > file->f_path.dentry->d_inode->i_size)
1185 i_size_write(file->f_path.dentry->d_inode,
1186 *poffset);
1187 spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1da177e4 1188 }
3677db10 1189 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1190 }
1191 FreeXid(xid);
1192 return total_written;
1193}
1194
630f3f0c
SF
1195#ifdef CONFIG_CIFS_EXPERIMENTAL
1196struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
1197{
1198 struct cifsFileInfo *open_file = NULL;
1199
1200 read_lock(&GlobalSMBSeslock);
1201 /* we could simply get the first_list_entry since write-only entries
1202 are always at the end of the list but since the first entry might
1203 have a close pending, we go through the whole list */
1204 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1205 if (open_file->closePend)
1206 continue;
1207 if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
1208 (open_file->pfile->f_flags & O_RDONLY))) {
1209 if (!open_file->invalidHandle) {
1210 /* found a good file */
1211 /* lock it so it will not be closed on us */
6ab409b5 1212 cifsFileInfo_get(open_file);
630f3f0c
SF
1213 read_unlock(&GlobalSMBSeslock);
1214 return open_file;
1215 } /* else might as well continue, and look for
1216 another, or simply have the caller reopen it
1217 again rather than trying to fix this handle */
1218 } else /* write only file */
1219 break; /* write only files are last so must be done */
1220 }
1221 read_unlock(&GlobalSMBSeslock);
1222 return NULL;
1223}
1224#endif
1225
dd99cd80 1226struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
6148a742
SF
1227{
1228 struct cifsFileInfo *open_file;
2846d386 1229 bool any_available = false;
dd99cd80 1230 int rc;
6148a742 1231
60808233
SF
1232 /* Having a null inode here (because mapping->host was set to zero by
1233 the VFS or MM) should not happen but we had reports of on oops (due to
1234 it being zero) during stress testcases so we need to check for it */
1235
fb8c4b14 1236 if (cifs_inode == NULL) {
b6b38f70 1237 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1238 dump_stack();
1239 return NULL;
1240 }
1241
6148a742 1242 read_lock(&GlobalSMBSeslock);
9b22b0b7 1243refind_writable:
6148a742 1244 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2846d386
JL
1245 if (open_file->closePend ||
1246 (!any_available && open_file->pid != current->tgid))
6148a742 1247 continue;
2846d386 1248
6148a742
SF
1249 if (open_file->pfile &&
1250 ((open_file->pfile->f_flags & O_RDWR) ||
1251 (open_file->pfile->f_flags & O_WRONLY))) {
6ab409b5 1252 cifsFileInfo_get(open_file);
9b22b0b7
SF
1253
1254 if (!open_file->invalidHandle) {
1255 /* found a good writable file */
1256 read_unlock(&GlobalSMBSeslock);
1257 return open_file;
1258 }
8840dee9 1259
6148a742 1260 read_unlock(&GlobalSMBSeslock);
9b22b0b7 1261 /* Had to unlock since following call can block */
4b18f2a9 1262 rc = cifs_reopen_file(open_file->pfile, false);
8840dee9 1263 if (!rc) {
9b22b0b7
SF
1264 if (!open_file->closePend)
1265 return open_file;
1266 else { /* start over in case this was deleted */
1267 /* since the list could be modified */
37c0eb46 1268 read_lock(&GlobalSMBSeslock);
6ab409b5 1269 cifsFileInfo_put(open_file);
9b22b0b7 1270 goto refind_writable;
37c0eb46
SF
1271 }
1272 }
9b22b0b7
SF
1273
1274 /* if it fails, try another handle if possible -
1275 (we can not do this if closePending since
1276 loop could be modified - in which case we
1277 have to start at the beginning of the list
1278 again. Note that it would be bad
1279 to hold up writepages here (rather than
1280 in caller) with continuous retries */
b6b38f70 1281 cFYI(1, "wp failed on reopen file");
9b22b0b7
SF
1282 read_lock(&GlobalSMBSeslock);
1283 /* can not use this handle, no write
1284 pending on this one after all */
6ab409b5 1285 cifsFileInfo_put(open_file);
8840dee9 1286
9b22b0b7
SF
1287 if (open_file->closePend) /* list could have changed */
1288 goto refind_writable;
1289 /* else we simply continue to the next entry. Thus
1290 we do not loop on reopen errors. If we
1291 can not reopen the file, for example if we
1292 reconnected to a server with another client
1293 racing to delete or lock the file we would not
1294 make progress if we restarted before the beginning
1295 of the loop here. */
6148a742
SF
1296 }
1297 }
2846d386
JL
1298 /* couldn't find useable FH with same pid, try any available */
1299 if (!any_available) {
1300 any_available = true;
1301 goto refind_writable;
1302 }
6148a742
SF
1303 read_unlock(&GlobalSMBSeslock);
1304 return NULL;
1305}
1306
1da177e4
LT
1307static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1308{
1309 struct address_space *mapping = page->mapping;
1310 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1311 char *write_data;
1312 int rc = -EFAULT;
1313 int bytes_written = 0;
1314 struct cifs_sb_info *cifs_sb;
1315 struct cifsTconInfo *pTcon;
1316 struct inode *inode;
6148a742 1317 struct cifsFileInfo *open_file;
1da177e4
LT
1318
1319 if (!mapping || !mapping->host)
1320 return -EFAULT;
1321
1322 inode = page->mapping->host;
1323 cifs_sb = CIFS_SB(inode->i_sb);
1324 pTcon = cifs_sb->tcon;
1325
1326 offset += (loff_t)from;
1327 write_data = kmap(page);
1328 write_data += from;
1329
1330 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1331 kunmap(page);
1332 return -EIO;
1333 }
1334
1335 /* racing with truncate? */
1336 if (offset > mapping->host->i_size) {
1337 kunmap(page);
1338 return 0; /* don't care */
1339 }
1340
1341 /* check to make sure that we are not extending the file */
1342 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1343 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1344
6148a742
SF
1345 open_file = find_writable_file(CIFS_I(mapping->host));
1346 if (open_file) {
1347 bytes_written = cifs_write(open_file->pfile, write_data,
1348 to-from, &offset);
6ab409b5 1349 cifsFileInfo_put(open_file);
1da177e4 1350 /* Does mm or vfs already set times? */
6148a742 1351 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1352 if ((bytes_written > 0) && (offset))
6148a742 1353 rc = 0;
bb5a9a04
SF
1354 else if (bytes_written < 0)
1355 rc = bytes_written;
6148a742 1356 } else {
b6b38f70 1357 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1358 rc = -EIO;
1359 }
1360
1361 kunmap(page);
1362 return rc;
1363}
1364
1da177e4 1365static int cifs_writepages(struct address_space *mapping,
37c0eb46 1366 struct writeback_control *wbc)
1da177e4 1367{
37c0eb46
SF
1368 struct backing_dev_info *bdi = mapping->backing_dev_info;
1369 unsigned int bytes_to_write;
1370 unsigned int bytes_written;
1371 struct cifs_sb_info *cifs_sb;
1372 int done = 0;
111ebb6e 1373 pgoff_t end;
37c0eb46 1374 pgoff_t index;
fb8c4b14
SF
1375 int range_whole = 0;
1376 struct kvec *iov;
84d2f07e 1377 int len;
37c0eb46
SF
1378 int n_iov = 0;
1379 pgoff_t next;
1380 int nr_pages;
1381 __u64 offset = 0;
23e7dd7d 1382 struct cifsFileInfo *open_file;
fbec9ab9 1383 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
37c0eb46
SF
1384 struct page *page;
1385 struct pagevec pvec;
1386 int rc = 0;
1387 int scanned = 0;
fbec9ab9 1388 int xid, long_op;
1da177e4 1389
37c0eb46 1390 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1391
37c0eb46
SF
1392 /*
1393 * If wsize is smaller that the page cache size, default to writing
1394 * one page at a time via cifs_writepage
1395 */
1396 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1397 return generic_writepages(mapping, wbc);
1398
fb8c4b14
SF
1399 if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1400 if (cifs_sb->tcon->ses->server->secMode &
1401 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1402 if (!experimEnabled)
60808233 1403 return generic_writepages(mapping, wbc);
4a77118c 1404
9a0c8230 1405 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1406 if (iov == NULL)
9a0c8230
SF
1407 return generic_writepages(mapping, wbc);
1408
1409
37c0eb46
SF
1410 /*
1411 * BB: Is this meaningful for a non-block-device file system?
1412 * If it is, we should test it again after we do I/O
1413 */
1414 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1415 wbc->encountered_congestion = 1;
9a0c8230 1416 kfree(iov);
37c0eb46
SF
1417 return 0;
1418 }
1419
1da177e4
LT
1420 xid = GetXid();
1421
37c0eb46 1422 pagevec_init(&pvec, 0);
111ebb6e 1423 if (wbc->range_cyclic) {
37c0eb46 1424 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1425 end = -1;
1426 } else {
1427 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1428 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1429 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1430 range_whole = 1;
37c0eb46
SF
1431 scanned = 1;
1432 }
1433retry:
1434 while (!done && (index <= end) &&
1435 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1436 PAGECACHE_TAG_DIRTY,
1437 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1438 int first;
1439 unsigned int i;
1440
37c0eb46
SF
1441 first = -1;
1442 next = 0;
1443 n_iov = 0;
1444 bytes_to_write = 0;
1445
1446 for (i = 0; i < nr_pages; i++) {
1447 page = pvec.pages[i];
1448 /*
1449 * At this point we hold neither mapping->tree_lock nor
1450 * lock on the page itself: the page may be truncated or
1451 * invalidated (changing page->mapping to NULL), or even
1452 * swizzled back from swapper_space to tmpfs file
1453 * mapping
1454 */
1455
1456 if (first < 0)
1457 lock_page(page);
529ae9aa 1458 else if (!trylock_page(page))
37c0eb46
SF
1459 break;
1460
1461 if (unlikely(page->mapping != mapping)) {
1462 unlock_page(page);
1463 break;
1464 }
1465
111ebb6e 1466 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1467 done = 1;
1468 unlock_page(page);
1469 break;
1470 }
1471
1472 if (next && (page->index != next)) {
1473 /* Not next consecutive page */
1474 unlock_page(page);
1475 break;
1476 }
1477
1478 if (wbc->sync_mode != WB_SYNC_NONE)
1479 wait_on_page_writeback(page);
1480
1481 if (PageWriteback(page) ||
cb876f45 1482 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1483 unlock_page(page);
1484 break;
1485 }
84d2f07e 1486
cb876f45
LT
1487 /*
1488 * This actually clears the dirty bit in the radix tree.
1489 * See cifs_writepage() for more commentary.
1490 */
1491 set_page_writeback(page);
1492
84d2f07e
SF
1493 if (page_offset(page) >= mapping->host->i_size) {
1494 done = 1;
1495 unlock_page(page);
cb876f45 1496 end_page_writeback(page);
84d2f07e
SF
1497 break;
1498 }
1499
37c0eb46
SF
1500 /*
1501 * BB can we get rid of this? pages are held by pvec
1502 */
1503 page_cache_get(page);
1504
84d2f07e
SF
1505 len = min(mapping->host->i_size - page_offset(page),
1506 (loff_t)PAGE_CACHE_SIZE);
1507
37c0eb46
SF
1508 /* reserve iov[0] for the smb header */
1509 n_iov++;
1510 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1511 iov[n_iov].iov_len = len;
1512 bytes_to_write += len;
37c0eb46
SF
1513
1514 if (first < 0) {
1515 first = i;
1516 offset = page_offset(page);
1517 }
1518 next = page->index + 1;
1519 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1520 break;
1521 }
1522 if (n_iov) {
23e7dd7d
SF
1523 /* Search for a writable handle every time we call
1524 * CIFSSMBWrite2. We can't rely on the last handle
1525 * we used to still be valid
1526 */
1527 open_file = find_writable_file(CIFS_I(mapping->host));
1528 if (!open_file) {
b6b38f70 1529 cERROR(1, "No writable handles for inode");
23e7dd7d 1530 rc = -EBADF;
1047abc1 1531 } else {
fbec9ab9 1532 long_op = cifs_write_timeout(cifsi, offset);
23e7dd7d
SF
1533 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1534 open_file->netfid,
1535 bytes_to_write, offset,
1536 &bytes_written, iov, n_iov,
fbec9ab9 1537 long_op);
6ab409b5 1538 cifsFileInfo_put(open_file);
fbec9ab9
JL
1539 cifs_update_eof(cifsi, offset, bytes_written);
1540
23e7dd7d 1541 if (rc || bytes_written < bytes_to_write) {
b6b38f70
JP
1542 cERROR(1, "Write2 ret %d, wrote %d",
1543 rc, bytes_written);
23e7dd7d
SF
1544 /* BB what if continued retry is
1545 requested via mount flags? */
cea21805
JL
1546 if (rc == -ENOSPC)
1547 set_bit(AS_ENOSPC, &mapping->flags);
1548 else
1549 set_bit(AS_EIO, &mapping->flags);
23e7dd7d
SF
1550 } else {
1551 cifs_stats_bytes_written(cifs_sb->tcon,
1552 bytes_written);
1553 }
37c0eb46
SF
1554 }
1555 for (i = 0; i < n_iov; i++) {
1556 page = pvec.pages[first + i];
eb9bdaa3
SF
1557 /* Should we also set page error on
1558 success rc but too little data written? */
1559 /* BB investigate retry logic on temporary
1560 server crash cases and how recovery works
fb8c4b14
SF
1561 when page marked as error */
1562 if (rc)
eb9bdaa3 1563 SetPageError(page);
37c0eb46
SF
1564 kunmap(page);
1565 unlock_page(page);
cb876f45 1566 end_page_writeback(page);
37c0eb46
SF
1567 page_cache_release(page);
1568 }
1569 if ((wbc->nr_to_write -= n_iov) <= 0)
1570 done = 1;
1571 index = next;
b066a48c
DK
1572 } else
1573 /* Need to re-find the pages we skipped */
1574 index = pvec.pages[0]->index + 1;
1575
37c0eb46
SF
1576 pagevec_release(&pvec);
1577 }
1578 if (!scanned && !done) {
1579 /*
1580 * We hit the last page and there is more work to be done: wrap
1581 * back to the start of the file
1582 */
1583 scanned = 1;
1584 index = 0;
1585 goto retry;
1586 }
111ebb6e 1587 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1588 mapping->writeback_index = index;
1589
1da177e4 1590 FreeXid(xid);
9a0c8230 1591 kfree(iov);
1da177e4
LT
1592 return rc;
1593}
1da177e4 1594
fb8c4b14 1595static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1da177e4
LT
1596{
1597 int rc = -EFAULT;
1598 int xid;
1599
1600 xid = GetXid();
1601/* BB add check for wbc flags */
1602 page_cache_get(page);
ad7a2926 1603 if (!PageUptodate(page))
b6b38f70 1604 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1605
1606 /*
1607 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1608 *
1609 * A writepage() implementation always needs to do either this,
1610 * or re-dirty the page with "redirty_page_for_writepage()" in
1611 * the case of a failure.
1612 *
1613 * Just unlocking the page will cause the radix tree tag-bits
1614 * to fail to update with the state of the page correctly.
1615 */
fb8c4b14 1616 set_page_writeback(page);
1da177e4
LT
1617 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1618 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1619 unlock_page(page);
cb876f45
LT
1620 end_page_writeback(page);
1621 page_cache_release(page);
1da177e4
LT
1622 FreeXid(xid);
1623 return rc;
1624}
1625
d9414774
NP
1626static int cifs_write_end(struct file *file, struct address_space *mapping,
1627 loff_t pos, unsigned len, unsigned copied,
1628 struct page *page, void *fsdata)
1da177e4 1629{
d9414774
NP
1630 int rc;
1631 struct inode *inode = mapping->host;
1da177e4 1632
b6b38f70
JP
1633 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1634 page, pos, copied);
d9414774 1635
a98ee8c1
JL
1636 if (PageChecked(page)) {
1637 if (copied == len)
1638 SetPageUptodate(page);
1639 ClearPageChecked(page);
1640 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1641 SetPageUptodate(page);
ad7a2926 1642
1da177e4 1643 if (!PageUptodate(page)) {
d9414774
NP
1644 char *page_data;
1645 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1646 int xid;
1647
1648 xid = GetXid();
1da177e4
LT
1649		/* this is probably better than directly calling
1650		   partialpage_write since in this function the file handle is
1651		   known, which we might as well leverage */
1652		/* BB check if anything else is missing from ppw,
1653		   such as updating the last write time */
1654 page_data = kmap(page);
d9414774
NP
1655 rc = cifs_write(file, page_data + offset, copied, &pos);
1656 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1657 kunmap(page);
d9414774
NP
1658
1659 FreeXid(xid);
fb8c4b14 1660 } else {
d9414774
NP
1661 rc = copied;
1662 pos += copied;
1da177e4
LT
1663 set_page_dirty(page);
1664 }
1665
d9414774
NP
1666 if (rc > 0) {
1667 spin_lock(&inode->i_lock);
1668 if (pos > inode->i_size)
1669 i_size_write(inode, pos);
1670 spin_unlock(&inode->i_lock);
1671 }
1672
1673 unlock_page(page);
1674 page_cache_release(page);
1675
1da177e4
LT
1676 return rc;
1677}
1678
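/*
 * Flush dirty pages for the inode, report any stored write behind
 * error, then ask the server to flush the file handle unless the
 * mount disabled that (CIFS_MOUNT_NOSSYNC).
 */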
7ea80859 1679int cifs_fsync(struct file *file, int datasync)
1da177e4
LT
1680{
1681 int xid;
1682 int rc = 0;
b298f223
SF
1683 struct cifsTconInfo *tcon;
1684 struct cifsFileInfo *smbfile =
1685 (struct cifsFileInfo *)file->private_data;
e6a00296 1686 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1687
1688 xid = GetXid();
1689
b6b38f70 1690 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1691 file->f_path.dentry->d_name.name, datasync);
50c2f753 1692
cea21805
JL
1693 rc = filemap_write_and_wait(inode->i_mapping);
1694 if (rc == 0) {
1695 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1696 CIFS_I(inode)->write_behind_rc = 0;
b298f223 1697 tcon = CIFS_SB(inode->i_sb)->tcon;
be652445 1698 if (!rc && tcon && smbfile &&
4717bed6 1699 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
b298f223 1700 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
cea21805 1701 }
b298f223 1702
1da177e4
LT
1703 FreeXid(xid);
1704 return rc;
1705}
1706
3978d717 1707/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1708{
1709 struct address_space *mapping;
1710 struct inode *inode;
1711 unsigned long index = page->index;
1712 unsigned int rpages = 0;
1713 int rc = 0;
1714
f19159dc 1715 cFYI(1, "sync page %p", page);
1da177e4
LT
1716 mapping = page->mapping;
1717 if (!mapping)
1718 return 0;
1719 inode = mapping->host;
1720 if (!inode)
3978d717 1721 return; */
1da177e4 1722
fb8c4b14 1723/* fill in rpages then
1da177e4
LT
1724 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1725
b6b38f70 1726/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1727
3978d717 1728#if 0
1da177e4
LT
1729 if (rc < 0)
1730 return rc;
1731 return 0;
3978d717 1732#endif
1da177e4
LT
1733} */
1734
1735/*
1736 * As file closes, flush all cached write data for this inode checking
1737 * for write behind errors.
1738 */
75e1fcc0 1739int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1740{
fb8c4b14 1741 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1742 int rc = 0;
1743
1744 /* Rather than do the steps manually:
1745 lock the inode for writing
1746 loop through pages looking for write behind data (dirty pages)
1747 coalesce into contiguous 16K (or smaller) chunks to write to server
1748 send to server (prefer in parallel)
1749 deal with writebehind errors
1750 unlock inode for writing
1751	   filemap_fdatawrite appears easier for the time being */
1752
1753 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1754 /* reset wb rc if we were able to write out dirty pages */
1755 if (!rc) {
1756 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1757 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1758 }
50c2f753 1759
b6b38f70 1760 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1761
1762 return rc;
1763}
1764
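/*
 * Read into a userspace buffer.  Each loop iteration issues an SMB read
 * of at most rsize bytes, reopening the handle first if it has gone
 * stale, and copies the returned payload to the user buffer.
 */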
1765ssize_t cifs_user_read(struct file *file, char __user *read_data,
1766 size_t read_size, loff_t *poffset)
1767{
1768 int rc = -EACCES;
1769 unsigned int bytes_read = 0;
1770 unsigned int total_read = 0;
1771 unsigned int current_read_size;
1772 struct cifs_sb_info *cifs_sb;
1773 struct cifsTconInfo *pTcon;
1774 int xid;
1775 struct cifsFileInfo *open_file;
1776 char *smb_read_data;
1777 char __user *current_offset;
1778 struct smb_com_read_rsp *pSMBr;
1779
1780 xid = GetXid();
e6a00296 1781 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1782 pTcon = cifs_sb->tcon;
1783
1784 if (file->private_data == NULL) {
0f3bc09e 1785 rc = -EBADF;
1da177e4 1786 FreeXid(xid);
0f3bc09e 1787 return rc;
1da177e4
LT
1788 }
1789 open_file = (struct cifsFileInfo *)file->private_data;
1790
ad7a2926 1791 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1792 cFYI(1, "attempting read on write only file instance");
ad7a2926 1793
1da177e4
LT
1794 for (total_read = 0, current_offset = read_data;
1795 read_size > total_read;
1796 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1797 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1798 cifs_sb->rsize);
1799 rc = -EAGAIN;
1800 smb_read_data = NULL;
1801 while (rc == -EAGAIN) {
ec637e3f 1802 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1803 if ((open_file->invalidHandle) &&
1da177e4 1804 (!open_file->closePend)) {
4b18f2a9 1805 rc = cifs_reopen_file(file, true);
1da177e4
LT
1806 if (rc != 0)
1807 break;
1808 }
bfa0d75a 1809 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1810 open_file->netfid,
1811 current_read_size, *poffset,
1812 &bytes_read, &smb_read_data,
1813 &buf_type);
1da177e4 1814 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1815 if (smb_read_data) {
93544cc6
SF
1816 if (copy_to_user(current_offset,
1817 smb_read_data +
1818 4 /* RFC1001 length field */ +
1819 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1820 bytes_read))
93544cc6 1821 rc = -EFAULT;
93544cc6 1822
fb8c4b14 1823 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1824 cifs_small_buf_release(smb_read_data);
fb8c4b14 1825 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1826 cifs_buf_release(smb_read_data);
1da177e4
LT
1827 smb_read_data = NULL;
1828 }
1829 }
1830 if (rc || (bytes_read == 0)) {
1831 if (total_read) {
1832 break;
1833 } else {
1834 FreeXid(xid);
1835 return rc;
1836 }
1837 } else {
a4544347 1838 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1839 *poffset += bytes_read;
1840 }
1841 }
1842 FreeXid(xid);
1843 return total_read;
1844}
1845
1846
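/*
 * Same loop as cifs_user_read, but the destination is a kernel buffer
 * so no copy_to_user is needed; used by the readpage paths below.
 */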
1847static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1848 loff_t *poffset)
1849{
1850 int rc = -EACCES;
1851 unsigned int bytes_read = 0;
1852 unsigned int total_read;
1853 unsigned int current_read_size;
1854 struct cifs_sb_info *cifs_sb;
1855 struct cifsTconInfo *pTcon;
1856 int xid;
1857 char *current_offset;
1858 struct cifsFileInfo *open_file;
ec637e3f 1859 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1860
1861 xid = GetXid();
e6a00296 1862 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1863 pTcon = cifs_sb->tcon;
1864
1865 if (file->private_data == NULL) {
0f3bc09e 1866 rc = -EBADF;
1da177e4 1867 FreeXid(xid);
0f3bc09e 1868 return rc;
1da177e4
LT
1869 }
1870 open_file = (struct cifsFileInfo *)file->private_data;
1871
1872 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1873 cFYI(1, "attempting read on write only file instance");
1da177e4 1874
fb8c4b14 1875 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1876 read_size > total_read;
1877 total_read += bytes_read, current_offset += bytes_read) {
1878 current_read_size = min_t(const int, read_size - total_read,
1879 cifs_sb->rsize);
f9f5c817
SF
1880		/* For Windows ME and 9x we do not want to request more
1881		   than the server negotiated, since it will refuse the read otherwise */
fb8c4b14 1882 if ((pTcon->ses) &&
f9f5c817
SF
1883 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1884 current_read_size = min_t(const int, current_read_size,
1885 pTcon->ses->server->maxBuf - 128);
1886 }
1da177e4
LT
1887 rc = -EAGAIN;
1888 while (rc == -EAGAIN) {
fb8c4b14 1889 if ((open_file->invalidHandle) &&
1da177e4 1890 (!open_file->closePend)) {
4b18f2a9 1891 rc = cifs_reopen_file(file, true);
1da177e4
LT
1892 if (rc != 0)
1893 break;
1894 }
bfa0d75a 1895 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1896 open_file->netfid,
1897 current_read_size, *poffset,
1898 &bytes_read, &current_offset,
1899 &buf_type);
1da177e4
LT
1900 }
1901 if (rc || (bytes_read == 0)) {
1902 if (total_read) {
1903 break;
1904 } else {
1905 FreeXid(xid);
1906 return rc;
1907 }
1908 } else {
a4544347 1909			cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1910 *poffset += bytes_read;
1911 }
1912 }
1913 FreeXid(xid);
1914 return total_read;
1915}
1916
1917int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1918{
1da177e4
LT
1919 int rc, xid;
1920
1921 xid = GetXid();
abab095d 1922 rc = cifs_revalidate_file(file);
1da177e4 1923 if (rc) {
b6b38f70 1924 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1925 FreeXid(xid);
1926 return rc;
1927 }
1928 rc = generic_file_mmap(file, vma);
1929 FreeXid(xid);
1930 return rc;
1931}
1932
1933
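/*
 * Copy data returned by the server into the pages still queued on the
 * readahead list, adding each one to the page cache.  A short final
 * page is zero filled beyond the bytes actually read.
 */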
fb8c4b14 1934static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1935 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1936{
1937 struct page *page;
1938 char *target;
1939
1940 while (bytes_read > 0) {
1941 if (list_empty(pages))
1942 break;
1943
1944 page = list_entry(pages->prev, struct page, lru);
1945 list_del(&page->lru);
1946
315e995c 1947 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1948 GFP_KERNEL)) {
1949 page_cache_release(page);
b6b38f70 1950 cFYI(1, "Add page cache failed");
3079ca62
SF
1951 data += PAGE_CACHE_SIZE;
1952 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1953 continue;
1954 }
06b43672 1955 page_cache_release(page);
1da177e4 1956
fb8c4b14 1957 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1958
1959 if (PAGE_CACHE_SIZE > bytes_read) {
1960 memcpy(target, data, bytes_read);
1961 /* zero the tail end of this partial page */
fb8c4b14 1962 memset(target + bytes_read, 0,
1da177e4
LT
1963 PAGE_CACHE_SIZE - bytes_read);
1964 bytes_read = 0;
1965 } else {
1966 memcpy(target, data, PAGE_CACHE_SIZE);
1967 bytes_read -= PAGE_CACHE_SIZE;
1968 }
1969 kunmap_atomic(target, KM_USER0);
1970
1971 flush_dcache_page(page);
1972 SetPageUptodate(page);
1973 unlock_page(page);
1da177e4
LT
1974 data += PAGE_CACHE_SIZE;
1975 }
1976 return;
1977}
1978
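/*
 * Readahead: pull runs of contiguous pages off the readahead list and
 * satisfy each run with a single SMB read of up to rsize bytes, letting
 * cifs_copy_cache_pages spread the returned data across the pages.
 */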
1979static int cifs_readpages(struct file *file, struct address_space *mapping,
1980 struct list_head *page_list, unsigned num_pages)
1981{
1982 int rc = -EACCES;
1983 int xid;
1984 loff_t offset;
1985 struct page *page;
1986 struct cifs_sb_info *cifs_sb;
1987 struct cifsTconInfo *pTcon;
2c2130e1 1988 unsigned int bytes_read = 0;
fb8c4b14 1989 unsigned int read_size, i;
1da177e4
LT
1990 char *smb_read_data = NULL;
1991 struct smb_com_read_rsp *pSMBr;
1da177e4 1992 struct cifsFileInfo *open_file;
ec637e3f 1993 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1994
1995 xid = GetXid();
1996 if (file->private_data == NULL) {
0f3bc09e 1997 rc = -EBADF;
1da177e4 1998 FreeXid(xid);
0f3bc09e 1999 return rc;
1da177e4
LT
2000 }
2001 open_file = (struct cifsFileInfo *)file->private_data;
e6a00296 2002 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 2003 pTcon = cifs_sb->tcon;
bfa0d75a 2004
f19159dc 2005 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
2006 for (i = 0; i < num_pages; ) {
2007 unsigned contig_pages;
2008 struct page *tmp_page;
2009 unsigned long expected_index;
2010
2011 if (list_empty(page_list))
2012 break;
2013
2014 page = list_entry(page_list->prev, struct page, lru);
2015 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2016
2017 /* count adjacent pages that we will read into */
2018 contig_pages = 0;
fb8c4b14 2019 expected_index =
1da177e4 2020 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 2021 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
2022 if (tmp_page->index == expected_index) {
2023 contig_pages++;
2024 expected_index++;
2025 } else
fb8c4b14 2026 break;
1da177e4
LT
2027 }
2028 if (contig_pages + i > num_pages)
2029 contig_pages = num_pages - i;
2030
2031 /* for reads over a certain size could initiate async
2032 read ahead */
2033
2034 read_size = contig_pages * PAGE_CACHE_SIZE;
2035		/* Read size needs to be a multiple of the page size */
2036 read_size = min_t(const unsigned int, read_size,
2037 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2038 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2039 read_size, contig_pages);
1da177e4
LT
2040 rc = -EAGAIN;
2041 while (rc == -EAGAIN) {
fb8c4b14 2042 if ((open_file->invalidHandle) &&
1da177e4 2043 (!open_file->closePend)) {
4b18f2a9 2044 rc = cifs_reopen_file(file, true);
1da177e4
LT
2045 if (rc != 0)
2046 break;
2047 }
2048
bfa0d75a 2049 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2050 open_file->netfid,
2051 read_size, offset,
2052 &bytes_read, &smb_read_data,
2053 &buf_type);
a9d02ad4 2054 /* BB more RC checks ? */
fb8c4b14 2055 if (rc == -EAGAIN) {
1da177e4 2056 if (smb_read_data) {
fb8c4b14 2057 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2058 cifs_small_buf_release(smb_read_data);
fb8c4b14 2059 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2060 cifs_buf_release(smb_read_data);
1da177e4
LT
2061 smb_read_data = NULL;
2062 }
2063 }
2064 }
2065 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2066 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2067 break;
2068 } else if (bytes_read > 0) {
6f88cc2e 2069 task_io_account_read(bytes_read);
1da177e4
LT
2070 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2071 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2072 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2073 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2074
2075 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2076 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2077 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2078 i++; /* account for partial page */
2079
fb8c4b14 2080 /* server copy of file can have smaller size
1da177e4 2081 than client */
fb8c4b14
SF
2082				/* BB do we need to verify this common case?
2083 this case is ok - if we are at server EOF
1da177e4
LT
2084 we will hit it on next read */
2085
05ac9d4b 2086 /* break; */
1da177e4
LT
2087 }
2088 } else {
b6b38f70 2089			cFYI(1, "No bytes read (%d) at offset %lld. "
f19159dc 2090 "Cleaning remaining pages from readahead list",
b6b38f70 2091 bytes_read, offset);
fb8c4b14 2092 /* BB turn off caching and do new lookup on
1da177e4 2093 file size at server? */
1da177e4
LT
2094 break;
2095 }
2096 if (smb_read_data) {
fb8c4b14 2097 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2098 cifs_small_buf_release(smb_read_data);
fb8c4b14 2099 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2100 cifs_buf_release(smb_read_data);
1da177e4
LT
2101 smb_read_data = NULL;
2102 }
2103 bytes_read = 0;
2104 }
2105
1da177e4
LT
2106/* need to free smb_read_data buf before exit */
2107 if (smb_read_data) {
fb8c4b14 2108 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2109 cifs_small_buf_release(smb_read_data);
fb8c4b14 2110 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2111 cifs_buf_release(smb_read_data);
1da177e4 2112 smb_read_data = NULL;
fb8c4b14 2113 }
1da177e4
LT
2114
2115 FreeXid(xid);
2116 return rc;
2117}
2118
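/*
 * Synchronously read one page of data from the server into the given
 * page and mark it up to date; a short read is zero filled past the
 * data received.
 */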
2119static int cifs_readpage_worker(struct file *file, struct page *page,
2120 loff_t *poffset)
2121{
2122 char *read_data;
2123 int rc;
2124
2125 page_cache_get(page);
2126 read_data = kmap(page);
2127 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2128
1da177e4 2129 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2130
1da177e4
LT
2131 if (rc < 0)
2132 goto io_error;
2133 else
b6b38f70 2134 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2135
e6a00296
JJS
2136 file->f_path.dentry->d_inode->i_atime =
2137 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2138
1da177e4
LT
2139 if (PAGE_CACHE_SIZE > rc)
2140 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2141
2142 flush_dcache_page(page);
2143 SetPageUptodate(page);
2144 rc = 0;
fb8c4b14 2145
1da177e4 2146io_error:
fb8c4b14 2147 kunmap(page);
1da177e4
LT
2148 page_cache_release(page);
2149 return rc;
2150}
2151
2152static int cifs_readpage(struct file *file, struct page *page)
2153{
2154 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2155 int rc = -EACCES;
2156 int xid;
2157
2158 xid = GetXid();
2159
2160 if (file->private_data == NULL) {
0f3bc09e 2161 rc = -EBADF;
1da177e4 2162 FreeXid(xid);
0f3bc09e 2163 return rc;
1da177e4
LT
2164 }
2165
b6b38f70
JP
2166 cFYI(1, "readpage %p at offset %d 0x%x\n",
2167 page, (int)offset, (int)offset);
1da177e4
LT
2168
2169 rc = cifs_readpage_worker(file, page, &offset);
2170
2171 unlock_page(page);
2172
2173 FreeXid(xid);
2174 return rc;
2175}
2176
a403a0a3
SF
2177static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2178{
2179 struct cifsFileInfo *open_file;
2180
2181 read_lock(&GlobalSMBSeslock);
2182 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2183 if (open_file->closePend)
2184 continue;
2185 if (open_file->pfile &&
2186 ((open_file->pfile->f_flags & O_RDWR) ||
2187 (open_file->pfile->f_flags & O_WRONLY))) {
2188 read_unlock(&GlobalSMBSeslock);
2189 return 1;
2190 }
2191 }
2192 read_unlock(&GlobalSMBSeslock);
2193 return 0;
2194}
2195
1da177e4
LT
2196/* We do not want to update the file size from the server for inodes
2197   open for write, to avoid races with writepage extending
2198   the file.  In the future we could consider allowing
fb8c4b14 2199   refreshing the inode only on increases in the file size,
1da177e4
LT
2200   but this is tricky to do without racing with writebehind
2201   page caching in the current Linux kernel design */
4b18f2a9 2202bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2203{
a403a0a3 2204 if (!cifsInode)
4b18f2a9 2205 return true;
50c2f753 2206
a403a0a3
SF
2207 if (is_inode_writable(cifsInode)) {
2208 /* This inode is open for write at least once */
c32a0b68
SF
2209 struct cifs_sb_info *cifs_sb;
2210
c32a0b68 2211 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2212 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2213 /* since no page cache to corrupt on directio
c32a0b68 2214 we can change size safely */
4b18f2a9 2215 return true;
c32a0b68
SF
2216 }
2217
fb8c4b14 2218 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2219 return true;
7ba52631 2220
4b18f2a9 2221 return false;
23e7dd7d 2222 } else
4b18f2a9 2223 return true;
1da177e4
LT
2224}
2225
d9414774
NP
2226static int cifs_write_begin(struct file *file, struct address_space *mapping,
2227 loff_t pos, unsigned len, unsigned flags,
2228 struct page **pagep, void **fsdata)
1da177e4 2229{
d9414774
NP
2230 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2231 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2232 loff_t page_start = pos & PAGE_MASK;
2233 loff_t i_size;
2234 struct page *page;
2235 int rc = 0;
d9414774 2236
b6b38f70 2237 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2238
54566b2c 2239 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2240 if (!page) {
2241 rc = -ENOMEM;
2242 goto out;
2243 }
8a236264 2244
a98ee8c1
JL
2245 if (PageUptodate(page))
2246 goto out;
8a236264 2247
a98ee8c1
JL
2248 /*
2249 * If we write a full page it will be up to date, no need to read from
2250 * the server. If the write is short, we'll end up doing a sync write
2251 * instead.
2252 */
2253 if (len == PAGE_CACHE_SIZE)
2254 goto out;
8a236264 2255
a98ee8c1
JL
2256 /*
2257 * optimize away the read when we have an oplock, and we're not
2258 * expecting to use any of the data we'd be reading in. That
2259 * is, when the page lies beyond the EOF, or straddles the EOF
2260 * and the write will cover all of the existing data.
2261 */
2262 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2263 i_size = i_size_read(mapping->host);
2264 if (page_start >= i_size ||
2265 (offset == 0 && (pos + len) >= i_size)) {
2266 zero_user_segments(page, 0, offset,
2267 offset + len,
2268 PAGE_CACHE_SIZE);
2269 /*
2270 * PageChecked means that the parts of the page
2271 * to which we're not writing are considered up
2272 * to date. Once the data is copied to the
2273 * page, it can be set uptodate.
2274 */
2275 SetPageChecked(page);
2276 goto out;
2277 }
2278 }
d9414774 2279
a98ee8c1
JL
2280 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2281 /*
2282 * might as well read a page, it is fast enough. If we get
2283 * an error, we don't need to return it. cifs_write_end will
2284 * do a sync write instead since PG_uptodate isn't set.
2285 */
2286 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2287 } else {
2288 /* we could try using another file handle if there is one -
2289 but how would we lock it to prevent close of that handle
2290 racing with this read? In any case
d9414774 2291 this will be written out by write_end so is fine */
1da177e4 2292 }
a98ee8c1
JL
2293out:
2294 *pagep = page;
2295 return rc;
1da177e4
LT
2296}
2297
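/*
 * Process an oplock break from the server: flush cached writes, wait
 * for writeback and invalidate cached pages when read caching is being
 * revoked, then acknowledge the break to the server unless the handle
 * is closing or the break was cancelled.
 */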
3bc303c2
JL
2298static void
2299cifs_oplock_break(struct slow_work *work)
2300{
2301 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2302 oplock_break);
2303 struct inode *inode = cfile->pInode;
2304 struct cifsInodeInfo *cinode = CIFS_I(inode);
2305 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
2306 int rc, waitrc = 0;
2307
2308 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2309 if (cinode->clientCanCacheRead)
8737c930 2310 break_lease(inode, O_RDONLY);
d54ff732 2311 else
8737c930 2312 break_lease(inode, O_WRONLY);
3bc303c2
JL
2313 rc = filemap_fdatawrite(inode->i_mapping);
2314 if (cinode->clientCanCacheRead == 0) {
2315 waitrc = filemap_fdatawait(inode->i_mapping);
2316 invalidate_remote_inode(inode);
2317 }
2318 if (!rc)
2319 rc = waitrc;
2320 if (rc)
2321 cinode->write_behind_rc = rc;
b6b38f70 2322 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2323 }
2324
2325 /*
2326	 * releasing a stale oplock after a recent reconnect of the smb session,
2327	 * using a now incorrect file handle, is not a data integrity issue; but do
2328	 * not bother sending an oplock release if the session to the server is still
2329	 * disconnected, since the oplock has already been released by the server
2330 */
2331 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
2332 rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
2333 LOCKING_ANDX_OPLOCK_RELEASE, false);
b6b38f70 2334 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2
JL
2335 }
2336}
2337
2338static int
2339cifs_oplock_break_get(struct slow_work *work)
2340{
2341 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2342 oplock_break);
2343 mntget(cfile->mnt);
2344 cifsFileInfo_get(cfile);
2345 return 0;
2346}
2347
2348static void
2349cifs_oplock_break_put(struct slow_work *work)
2350{
2351 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2352 oplock_break);
2353 mntput(cfile->mnt);
2354 cifsFileInfo_put(cfile);
2355}
2356
2357const struct slow_work_ops cifs_oplock_break_ops = {
2358 .get_ref = cifs_oplock_break_get,
2359 .put_ref = cifs_oplock_break_put,
2360 .execute = cifs_oplock_break,
2361};
2362
f5e54d6e 2363const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2364 .readpage = cifs_readpage,
2365 .readpages = cifs_readpages,
2366 .writepage = cifs_writepage,
37c0eb46 2367 .writepages = cifs_writepages,
d9414774
NP
2368 .write_begin = cifs_write_begin,
2369 .write_end = cifs_write_end,
1da177e4
LT
2370 .set_page_dirty = __set_page_dirty_nobuffers,
2371 /* .sync_page = cifs_sync_page, */
2372 /* .direct_IO = */
2373};
273d81d6
DK
2374
2375/*
2376 * cifs_readpages requires the server to support a buffer large enough to
2377 * contain the header plus one complete page of data. Otherwise, we need
2378 * to leave cifs_readpages out of the address space operations.
2379 */
f5e54d6e 2380const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2381 .readpage = cifs_readpage,
2382 .writepage = cifs_writepage,
2383 .writepages = cifs_writepages,
d9414774
NP
2384 .write_begin = cifs_write_begin,
2385 .write_end = cifs_write_end,
273d81d6
DK
2386 .set_page_dirty = __set_page_dirty_nobuffers,
2387 /* .sync_page = cifs_sync_page, */
2388 /* .direct_IO = */
2389};