/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
{
	fmode_t posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = FMODE_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = FMODE_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		posix_flags = FMODE_READ | FMODE_WRITE;
	}
	/* can not map O_CREAT or O_EXCL or O_TRUNC flags when
	   reopening a file.  They had their effect on the original open */
	if (flags & O_APPEND)
		posix_flags |= (fmode_t)O_APPEND;
	if (flags & O_DSYNC)
		posix_flags |= (fmode_t)O_DSYNC;
	if (flags & __O_SYNC)
		posix_flags |= (fmode_t)__O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= (fmode_t)O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= (fmode_t)O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= (fmode_t)O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

/* all arguments to this function must be checked for validity in caller */
static inline int
cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
			     struct cifsInodeInfo *pCifsInode, __u32 oplock,
			     u16 netfid)
{

	write_lock(&GlobalSMBSeslock);

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
	if (pCifsInode == NULL) {
		write_unlock(&GlobalSMBSeslock);
		return -EINVAL;
	}

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto psx_client_can_cache;
	}

	/* BB FIXME need to fix this check to move it earlier into posix_open
	   BB fIX following section BB FIXME */

	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
/*	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
	if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
			   (file->f_path.dentry->d_inode->i_size ==
			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, "inode unchanged on server");
	} else {
		if (file->f_path.dentry->d_inode->i_mapping) {
			rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
			if (rc != 0)
				CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
		}
		cFYI(1, "invalidating remote inode since open detected it "
			 "changed");
		invalidate_remote_inode(file->f_path.dentry->d_inode);
	} */

psx_client_can_cache:
	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p",
			 file->f_path.dentry->d_inode);
	} else if ((oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	/* will have to change the unlock if we reenable the
	   filemap_fdatawrite (which does not seem necessary */
	write_unlock(&GlobalSMBSeslock);
	return 0;
}

/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode,
	struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
	char *full_path, int xid)
{
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct timespec temp;
	int rc;

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto client_can_cache;
	}

	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(buf->LastWriteTime);
	if (timespec_equal(&inode->i_mtime, &temp) &&
			   (inode->i_size ==
			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, "inode unchanged on server");
	} else {
		if (inode->i_mapping) {
			/* BB no need to lock inode until after invalidate
			   since namei code should already have it locked? */
			rc = filemap_write_and_wait(inode->i_mapping);
			if (rc != 0)
				pCifsInode->write_behind_rc = rc;
		}
		cFYI(1, "invalidating remote inode since open detected it "
			 "changed");
		invalidate_remote_inode(inode);
	}

client_can_cache:
	if (pTcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, NULL);

	if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p", inode);
	} else if ((*oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	return rc;
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile = NULL;
	struct cifsInodeInfo *pCifsInode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition;
	__u16 netfid;
	FILE_ALL_INFO *buf = NULL;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = cifs_sb->tcon;

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
		oflags |= SMB_O_CREAT;
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			/* no need for special case handling of setting mode
			   on read only files needed here */

			rc = cifs_posix_open_inode_helper(inode, file,
						pCifsInode, oplock, netfid);
			if (rc != 0) {
				CIFSSMBClose(xid, tcon, netfid);
				goto out;
			}

			pCifsFile = cifs_new_fileinfo(inode, netfid, file,
							file->f_path.mnt,
							oflags);
			if (pCifsFile == NULL) {
				CIFSSMBClose(xid, tcon, netfid);
				rc = -ENOMEM;
			}

			cifs_fscache_set_inode_cookie(inode, file);

			goto out;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(file->f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* Also refresh inode by passing in file_info buf returned by SMBOpen
	   and calling get_inode_info with returned buf (at least helps
	   non-Unix server case) */

	/* BB we can not do this if this is the second open of a file
	   and the first handle has writebehind data, we might be
	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = -EIO; /* no NT SMB support fall into legacy open below */

	if (rc == -EIO) {
		/* Old server, try legacy style OpenX */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	}
	if (rc) {
		cFYI(1, "cifs_open returned 0x%x", rc);
		goto out;
	}

	rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid);
	if (rc != 0)
		goto out;

	pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
					file->f_flags);
	if (pCifsFile == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if (oplock & CIFS_CREATE_ACTION) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		if (tcon->unix_ext) {
			struct cifs_unix_set_info_args args = {
				.mode	= inode->i_mode,
				.uid	= NO_CHANGE_64,
				.gid	= NO_CHANGE_64,
				.ctime	= NO_CHANGE_64,
				.atime	= NO_CHANGE_64,
				.mtime	= NO_CHANGE_64,
				.device	= 0,
			};
			CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
						cifs_sb->local_nls,
						cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		}
	}

out:
	kfree(buf);
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

static int cifs_reopen_file(struct file *file, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	if (file->private_data)
		pCifsFile = file->private_data;
	else
		return -EBADF;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	if (file->f_path.dentry == NULL) {
		cERROR(1, "no valid name if dentry freed");
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	inode = file->f_path.dentry->d_inode;
	if (inode == NULL) {
		cERROR(1, "inode not valid");
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = cifs_sb->tcon;

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
reopen_error_exit:
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
	} else {
reopen_success:
		pCifsFile->netfid = netfid;
		pCifsFile->invalidHandle = false;
		mutex_unlock(&pCifsFile->fh_mutex);
		pCifsInode = CIFS_I(inode);
		if (pCifsInode) {
			if (can_flush) {
				rc = filemap_write_and_wait(inode->i_mapping);
				if (rc != 0)
					CIFS_I(inode)->write_behind_rc = rc;
			/* temporarily disable caching while we
			   go to server to get inode info */
				pCifsInode->clientCanCacheAll = false;
				pCifsInode->clientCanCacheRead = false;
				if (tcon->unix_ext)
					rc = cifs_get_inode_info_unix(&inode,
						full_path, inode->i_sb, xid);
				else
					rc = cifs_get_inode_info(&inode,
						full_path, NULL, inode->i_sb,
						xid, NULL);
			} /* else we are writing out data to server already
			     and could deadlock if we tried to flush data, and
			     since we do not know if we have data that would
			     invalidate the current end of file on the server
			     we can not go to the server to get the new inod
			     info */
			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
				pCifsInode->clientCanCacheAll = true;
				pCifsInode->clientCanCacheRead = true;
				cFYI(1, "Exclusive Oplock granted on inode %p",
					 file->f_path.dentry->d_inode);
			} else if ((oplock & 0xF) == OPLOCK_READ) {
				pCifsInode->clientCanCacheRead = true;
				pCifsInode->clientCanCacheAll = false;
			} else {
				pCifsInode->clientCanCacheRead = false;
				pCifsInode->clientCanCacheAll = false;
			}
			cifs_relock_file(pCifsFile);
		}
	}
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid, timeout;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pSMBFile = file->private_data;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;
	if (pSMBFile) {
		struct cifsLockInfo *li, *tmp;
		write_lock(&GlobalSMBSeslock);
		pSMBFile->closePend = true;
		if (pTcon) {
			/* no sense reconnecting to close a file that is
			   already closed */
			if (!pTcon->need_reconnect) {
				write_unlock(&GlobalSMBSeslock);
				timeout = 2;
				while ((atomic_read(&pSMBFile->count) != 1)
					&& (timeout <= 2048)) {
					/* Give write a better chance to get to
					server ahead of the close.  We do not
					want to add a wait_q here as it would
					increase the memory utilization as
					the struct would be in each open file,
					but this should give enough time to
					clear the socket */
					cFYI(DBG2, "close delay, write pending");
					msleep(timeout);
					timeout *= 4;
				}
				if (!pTcon->need_reconnect &&
				    !pSMBFile->invalidHandle)
					rc = CIFSSMBClose(xid, pTcon,
						  pSMBFile->netfid);
			} else
				write_unlock(&GlobalSMBSeslock);
		} else
			write_unlock(&GlobalSMBSeslock);

		/* Delete any outstanding lock records.
		   We'll lose them when the file is closed anyway. */
		mutex_lock(&pSMBFile->lock_mutex);
		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
			list_del(&li->llist);
			kfree(li);
		}
		mutex_unlock(&pSMBFile->lock_mutex);

		write_lock(&GlobalSMBSeslock);
		list_del(&pSMBFile->flist);
		list_del(&pSMBFile->tlist);
		write_unlock(&GlobalSMBSeslock);
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	} else
		rc = -EBADF;

	read_lock(&GlobalSMBSeslock);
	if (list_empty(&(CIFS_I(inode)->openFileList))) {
		cFYI(1, "closing last open instance for inode %p", inode);
		/* if the file is not open we do not know if we can cache info
		   on this inode, much less write behind and read ahead */
		CIFS_I(inode)->clientCanCacheRead = false;
		CIFS_I(inode)->clientCanCacheAll  = false;
	}
	read_unlock(&GlobalSMBSeslock);
	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
		rc = CIFS_I(inode)->write_behind_rc;
	FreeXid(xid);
	return rc;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon;
		struct cifs_sb_info *cifs_sb =
			CIFS_SB(file->f_path.dentry->d_sb);

		pTcon = cifs_sb->tcon;

		cFYI(1, "Freeing private data in close dir");
		write_lock(&GlobalSMBSeslock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			write_unlock(&GlobalSMBSeslock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			write_unlock(&GlobalSMBSeslock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
				__u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	account for negative length which we can not accept over the
	wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */ );
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */ );
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start,
					0, numLock, lockType, wait_flag);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid,
							li->length, li->offset,
							1, 0, li->type, false);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}

/*
 * Set the timeout on write requests past EOF. For some servers (Windows)
 * these calls can be very long.
 *
 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
 * The 10M cutoff is totally arbitrary. A better scheme for this would be
 * welcome if someone wants to suggest one.
 *
 * We may be able to do a better job with this if there were some way to
 * declare that a file should be sparse.
 */
static int
cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
{
	if (offset <= cifsi->server_eof)
		return CIFS_STD_OP;
	else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
		return CIFS_VLONG_OP;
	else
		return CIFS_LONG_OP;
}

/* update the file size (if needed) after a write */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	pTcon = cifs_sb->tcon;

	/* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name); */

	if (file->private_data == NULL)
		return -EBADF;
	open_file = file->private_data;

	rc = generic_write_checks(file, poffset, &write_size, 0);
	if (rc)
		return rc;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us while
			   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
				open_file->netfid,
				min_t(const int, cifs_sb->wsize,
				      write_size - total_written),
				*poffset, &bytes_written,
				NULL, write_data + total_written, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
				    15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
		struct inode *inode = file->f_path.dentry->d_inode;
/* Do not update local mtime - server will set its actual value on write
 *		inode->i_ctime = inode->i_mtime =
 * 			current_fs_time(inode->i_sb);*/
		if (total_written > 0) {
			spin_lock(&inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					*poffset);
			spin_unlock(&inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

static ssize_t cifs_write(struct file *file, const char *write_data,
			  size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	pTcon = cifs_sb->tcon;

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name);

	if (file->private_data == NULL)
		return -EBADF;
	open_file = file->private_data;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us
			   while we blocked so return what we managed to
			   write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}
			if (experimEnabled || (pTcon->ses->server &&
				((pTcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				== 0))) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						open_file->netfid, len,
						*poffset, &bytes_written,
						iov, 1, long_op);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					 open_file->netfid,
					 min_t(const int, cifs_sb->wsize,
					       write_size - total_written),
					 *poffset, &bytes_written,
					 write_data + total_written,
					 NULL, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
				    15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
/*BB We could make this contingent on superblock ATIME flag too */
/*		file->f_path.dentry->d_inode->i_ctime =
		file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
		if (total_written > 0) {
			spin_lock(&file->f_path.dentry->d_inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&file->f_path.dentry->d_inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

#ifdef CONFIG_CIFS_EXPERIMENTAL
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file = NULL;

	read_lock(&GlobalSMBSeslock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
		    (open_file->pfile->f_flags & O_RDONLY))) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
#endif

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	read_lock(&GlobalSMBSeslock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend ||
		    (!any_available && open_file->pid != current->tgid))
			continue;

		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			}

			read_unlock(&GlobalSMBSeslock);
			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file->pfile, false);
			if (!rc) {
				if (!open_file->closePend)
					return open_file;
				else { /* start over in case this was deleted */
				       /* since the list could be modified */
					read_lock(&GlobalSMBSeslock);
					cifsFileInfo_put(open_file);
					goto refind_writable;
				}
			}

			/* if it fails, try another handle if possible -
			(we can not do this if closePending since
			loop could be modified - in which case we
			have to start at the beginning of the list
			again. Note that it would be bad
			to hold up writepages here (rather than
			in caller) with continuous retries */
			cFYI(1, "wp failed on reopen file");
			read_lock(&GlobalSMBSeslock);
			/* can not use this handle, no write
			   pending on this one after all */
			cifsFileInfo_put(open_file);

			if (open_file->closePend) /* list could have changed */
				goto refind_writable;
			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors.  If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;
	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host));
	if (open_file) {
		bytes_written = cifs_write(open_file->pfile, write_data,
					   to-from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0;
	pgoff_t end;
	pgoff_t index;
	int range_whole = 0;
	struct kvec *iov;
	int len;
	int n_iov = 0;
	pgoff_t next;
	int nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int scanned = 0;
	int xid, long_op;

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller that the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
		if (cifs_sb->tcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
			if (!experimEnabled)
				return generic_writepages(mapping, wbc);

	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
	if (iov == NULL)
		return generic_writepages(mapping, wbc);


	/*
	 * BB: Is this meaningful for a non-block-device file system?
	 * If it is, we should test it again after we do I/O
	 */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		kfree(iov);
		return 0;
	}

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (first < 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this?  pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
			/* Search for a writable handle every time we call
			 * CIFSSMBWrite2.  We can't rely on the last handle
			 * we used to still be valid
			 */
			open_file = find_writable_file(CIFS_I(mapping->host));
			if (!open_file) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
			} else {
				long_op = cifs_write_timeout(cifsi, offset);
				rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
						   open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   long_op);
				cifsFileInfo_put(open_file);
				cifs_update_eof(cifsi, offset, bytes_written);

				if (rc || bytes_written < bytes_to_write) {
					cERROR(1, "Write2 ret %d, wrote %d",
						  rc, bytes_written);
					/* BB what if continued retry is
					   requested via mount flags? */
					if (rc == -ENOSPC)
						set_bit(AS_ENOSPC, &mapping->flags);
					else
						set_bit(AS_EIO, &mapping->flags);
				} else {
					cifs_stats_bytes_written(cifs_sb->tcon,
								 bytes_written);
				}
			}
			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* Should we also set page error on
				success rc but too little data written? */
				/* BB investigate retry logic on temporary
				server crash cases and how recovery works
				when page marked as error */
				if (rc)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}
			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		} else
			/* Need to re-find the pages we skipped */
			index = pvec.pages[0]->index + 1;

		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = -EFAULT;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
	unlock_page(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

int cifs_fsync(struct file *file, int datasync)
{
	int xid;
	int rc = 0;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	rc = filemap_write_and_wait(inode->i_mapping);
	if (rc == 0) {
		rc = CIFS_I(inode)->write_behind_rc;
		CIFS_I(inode)->write_behind_rc = 0;
		tcon = CIFS_SB(inode->i_sb)->tcon;
		if (!rc && tcon && smbfile &&
		   !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
			rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
	}

	FreeXid(xid);
	return rc;
}

3978d717 1679/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1680{
1681 struct address_space *mapping;
1682 struct inode *inode;
1683 unsigned long index = page->index;
1684 unsigned int rpages = 0;
1685 int rc = 0;
1686
f19159dc 1687 cFYI(1, "sync page %p", page);
1da177e4
LT
1688 mapping = page->mapping;
1689 if (!mapping)
1690 return 0;
1691 inode = mapping->host;
1692 if (!inode)
3978d717 1693 return; */
1da177e4 1694
fb8c4b14 1695/* fill in rpages then
1da177e4
LT
1696 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1697
b6b38f70 1698/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1699
3978d717 1700#if 0
1da177e4
LT
1701 if (rc < 0)
1702 return rc;
1703 return 0;
3978d717 1704#endif
1da177e4
LT
1705} */
1706
1707/*
1708 * As file closes, flush all cached write data for this inode checking
1709 * for write behind errors.
1710 */
75e1fcc0 1711int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1712{
fb8c4b14 1713 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1714 int rc = 0;
1715
1716 /* Rather than do the steps manually:
1717 lock the inode for writing
1718 loop through pages looking for write behind data (dirty pages)
1719 coalesce into contiguous 16K (or smaller) chunks to write to server
1720 send to server (prefer in parallel)
1721 deal with writebehind errors
1722 unlock inode for writing
1723 filemap_fdatawrite appears easier for the time being */
1724
1725 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1726 /* reset wb rc if we were able to write out dirty pages */
1727 if (!rc) {
1728 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1729 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1730 }
50c2f753 1731
b6b38f70 1732 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1733
1734 return rc;
1735}
1736
1737ssize_t cifs_user_read(struct file *file, char __user *read_data,
1738 size_t read_size, loff_t *poffset)
1739{
1740 int rc = -EACCES;
1741 unsigned int bytes_read = 0;
1742 unsigned int total_read = 0;
1743 unsigned int current_read_size;
1744 struct cifs_sb_info *cifs_sb;
1745 struct cifsTconInfo *pTcon;
1746 int xid;
1747 struct cifsFileInfo *open_file;
1748 char *smb_read_data;
1749 char __user *current_offset;
1750 struct smb_com_read_rsp *pSMBr;
1751
1752 xid = GetXid();
e6a00296 1753 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1754 pTcon = cifs_sb->tcon;
1755
1756 if (file->private_data == NULL) {
0f3bc09e 1757 rc = -EBADF;
1da177e4 1758 FreeXid(xid);
0f3bc09e 1759 return rc;
1da177e4 1760 }
c21dfb69 1761 open_file = file->private_data;
1da177e4 1762
ad7a2926 1763 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1764 cFYI(1, "attempting read on write only file instance");
ad7a2926 1765
1da177e4
LT
1766 for (total_read = 0, current_offset = read_data;
1767 read_size > total_read;
1768 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1769 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1770 cifs_sb->rsize);
1771 rc = -EAGAIN;
1772 smb_read_data = NULL;
1773 while (rc == -EAGAIN) {
ec637e3f 1774 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1775 if ((open_file->invalidHandle) &&
1da177e4 1776 (!open_file->closePend)) {
4b18f2a9 1777 rc = cifs_reopen_file(file, true);
1da177e4
LT
1778 if (rc != 0)
1779 break;
1780 }
bfa0d75a 1781 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1782 open_file->netfid,
1783 current_read_size, *poffset,
1784 &bytes_read, &smb_read_data,
1785 &buf_type);
1da177e4 1786 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1787 if (smb_read_data) {
93544cc6
SF
1788 if (copy_to_user(current_offset,
1789 smb_read_data +
1790 4 /* RFC1001 length field */ +
1791 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1792 bytes_read))
93544cc6 1793 rc = -EFAULT;
93544cc6 1794
fb8c4b14 1795 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1796 cifs_small_buf_release(smb_read_data);
fb8c4b14 1797 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1798 cifs_buf_release(smb_read_data);
1da177e4
LT
1799 smb_read_data = NULL;
1800 }
1801 }
1802 if (rc || (bytes_read == 0)) {
1803 if (total_read) {
1804 break;
1805 } else {
1806 FreeXid(xid);
1807 return rc;
1808 }
1809 } else {
a4544347 1810 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1811 *poffset += bytes_read;
1812 }
1813 }
1814 FreeXid(xid);
1815 return total_read;
1816}
1817
1818
1819static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1820 loff_t *poffset)
1821{
1822 int rc = -EACCES;
1823 unsigned int bytes_read = 0;
1824 unsigned int total_read;
1825 unsigned int current_read_size;
1826 struct cifs_sb_info *cifs_sb;
1827 struct cifsTconInfo *pTcon;
1828 int xid;
1829 char *current_offset;
1830 struct cifsFileInfo *open_file;
ec637e3f 1831 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1832
1833 xid = GetXid();
e6a00296 1834 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1835 pTcon = cifs_sb->tcon;
1836
1837 if (file->private_data == NULL) {
0f3bc09e 1838 rc = -EBADF;
1da177e4 1839 FreeXid(xid);
0f3bc09e 1840 return rc;
1da177e4 1841 }
c21dfb69 1842 open_file = file->private_data;
1da177e4
LT
1843
1844 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1845 cFYI(1, "attempting read on write only file instance");
1da177e4 1846
fb8c4b14 1847 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1848 read_size > total_read;
1849 total_read += bytes_read, current_offset += bytes_read) {
1850 current_read_size = min_t(const int, read_size - total_read,
1851 cifs_sb->rsize);
f9f5c817
SF
1852 /* For Windows ME and 9x we do not want to request more
1853 than the server negotiated since it would refuse the read otherwise */
fb8c4b14 1854 if ((pTcon->ses) &&
f9f5c817
SF
1855 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1856 current_read_size = min_t(const int, current_read_size,
1857 pTcon->ses->server->maxBuf - 128);
1858 }
1da177e4
LT
1859 rc = -EAGAIN;
1860 while (rc == -EAGAIN) {
fb8c4b14 1861 if ((open_file->invalidHandle) &&
1da177e4 1862 (!open_file->closePend)) {
4b18f2a9 1863 rc = cifs_reopen_file(file, true);
1da177e4
LT
1864 if (rc != 0)
1865 break;
1866 }
bfa0d75a 1867 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1868 open_file->netfid,
1869 current_read_size, *poffset,
1870 &bytes_read, &current_offset,
1871 &buf_type);
1da177e4
LT
1872 }
1873 if (rc || (bytes_read == 0)) {
1874 if (total_read) {
1875 break;
1876 } else {
1877 FreeXid(xid);
1878 return rc;
1879 }
1880 } else {
a4544347 1881 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1882 *poffset += bytes_read;
1883 }
1884 }
1885 FreeXid(xid);
1886 return total_read;
1887}
1888
1889int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1890{
1da177e4
LT
1891 int rc, xid;
1892
1893 xid = GetXid();
abab095d 1894 rc = cifs_revalidate_file(file);
1da177e4 1895 if (rc) {
b6b38f70 1896 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1897 FreeXid(xid);
1898 return rc;
1899 }
1900 rc = generic_file_mmap(file, vma);
1901 FreeXid(xid);
1902 return rc;
1903}
1904
1905
fb8c4b14 1906static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1907 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1908{
1909 struct page *page;
1910 char *target;
1911
1912 while (bytes_read > 0) {
1913 if (list_empty(pages))
1914 break;
1915
1916 page = list_entry(pages->prev, struct page, lru);
1917 list_del(&page->lru);
1918
315e995c 1919 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1920 GFP_KERNEL)) {
1921 page_cache_release(page);
b6b38f70 1922 cFYI(1, "Add page cache failed");
3079ca62
SF
1923 data += PAGE_CACHE_SIZE;
1924 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1925 continue;
1926 }
06b43672 1927 page_cache_release(page);
1da177e4 1928
fb8c4b14 1929 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1930
1931 if (PAGE_CACHE_SIZE > bytes_read) {
1932 memcpy(target, data, bytes_read);
1933 /* zero the tail end of this partial page */
fb8c4b14 1934 memset(target + bytes_read, 0,
1da177e4
LT
1935 PAGE_CACHE_SIZE - bytes_read);
1936 bytes_read = 0;
1937 } else {
1938 memcpy(target, data, PAGE_CACHE_SIZE);
1939 bytes_read -= PAGE_CACHE_SIZE;
1940 }
1941 kunmap_atomic(target, KM_USER0);
1942
1943 flush_dcache_page(page);
1944 SetPageUptodate(page);
1945 unlock_page(page);
1da177e4
LT
1946 data += PAGE_CACHE_SIZE;
1947 }
1948 return;
1949}
1950
1951static int cifs_readpages(struct file *file, struct address_space *mapping,
1952 struct list_head *page_list, unsigned num_pages)
1953{
1954 int rc = -EACCES;
1955 int xid;
1956 loff_t offset;
1957 struct page *page;
1958 struct cifs_sb_info *cifs_sb;
1959 struct cifsTconInfo *pTcon;
2c2130e1 1960 unsigned int bytes_read = 0;
fb8c4b14 1961 unsigned int read_size, i;
1da177e4
LT
1962 char *smb_read_data = NULL;
1963 struct smb_com_read_rsp *pSMBr;
1da177e4 1964 struct cifsFileInfo *open_file;
ec637e3f 1965 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1966
1967 xid = GetXid();
1968 if (file->private_data == NULL) {
0f3bc09e 1969 rc = -EBADF;
1da177e4 1970 FreeXid(xid);
0f3bc09e 1971 return rc;
1da177e4 1972 }
c21dfb69 1973 open_file = file->private_data;
e6a00296 1974 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1975 pTcon = cifs_sb->tcon;
bfa0d75a 1976
f19159dc 1977 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
1978 for (i = 0; i < num_pages; ) {
1979 unsigned contig_pages;
1980 struct page *tmp_page;
1981 unsigned long expected_index;
1982
1983 if (list_empty(page_list))
1984 break;
1985
1986 page = list_entry(page_list->prev, struct page, lru);
1987 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1988
1989 /* count adjacent pages that we will read into */
1990 contig_pages = 0;
fb8c4b14 1991 expected_index =
1da177e4 1992 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 1993 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
1994 if (tmp_page->index == expected_index) {
1995 contig_pages++;
1996 expected_index++;
1997 } else
fb8c4b14 1998 break;
1da177e4
LT
1999 }
2000 if (contig_pages + i > num_pages)
2001 contig_pages = num_pages - i;
2002
2003 /* for reads over a certain size could initiate async
2004 read ahead */
2005
2006 read_size = contig_pages * PAGE_CACHE_SIZE;
2007 /* Read size needs to be a multiple of the page size */
2008 read_size = min_t(const unsigned int, read_size,
2009 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2010 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2011 read_size, contig_pages);
1da177e4
LT
2012 rc = -EAGAIN;
2013 while (rc == -EAGAIN) {
fb8c4b14 2014 if ((open_file->invalidHandle) &&
1da177e4 2015 (!open_file->closePend)) {
4b18f2a9 2016 rc = cifs_reopen_file(file, true);
1da177e4
LT
2017 if (rc != 0)
2018 break;
2019 }
2020
bfa0d75a 2021 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2022 open_file->netfid,
2023 read_size, offset,
2024 &bytes_read, &smb_read_data,
2025 &buf_type);
a9d02ad4 2026 /* BB more RC checks ? */
fb8c4b14 2027 if (rc == -EAGAIN) {
1da177e4 2028 if (smb_read_data) {
fb8c4b14 2029 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2030 cifs_small_buf_release(smb_read_data);
fb8c4b14 2031 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2032 cifs_buf_release(smb_read_data);
1da177e4
LT
2033 smb_read_data = NULL;
2034 }
2035 }
2036 }
2037 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2038 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2039 break;
2040 } else if (bytes_read > 0) {
6f88cc2e 2041 task_io_account_read(bytes_read);
1da177e4
LT
2042 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2043 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2044 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2045 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2046
2047 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2048 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2049 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2050 i++; /* account for partial page */
2051
fb8c4b14 2052 /* server copy of file can have smaller size
1da177e4 2053 than client */
fb8c4b14
SF
2054 /* BB do we need to verify this common case ?
2055 this case is ok - if we are at server EOF
1da177e4
LT
2056 we will hit it on next read */
2057
05ac9d4b 2058 /* break; */
1da177e4
LT
2059 }
2060 } else {
b6b38f70 2061 cFYI(1, "No bytes read (%d) at offset %lld. "
f19159dc 2062 "Cleaning remaining pages from readahead list",
b6b38f70 2063 bytes_read, offset);
fb8c4b14 2064 /* BB turn off caching and do new lookup on
1da177e4 2065 file size at server? */
1da177e4
LT
2066 break;
2067 }
2068 if (smb_read_data) {
fb8c4b14 2069 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2070 cifs_small_buf_release(smb_read_data);
fb8c4b14 2071 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2072 cifs_buf_release(smb_read_data);
1da177e4
LT
2073 smb_read_data = NULL;
2074 }
2075 bytes_read = 0;
2076 }
2077
1da177e4
LT
2078/* need to free smb_read_data buf before exit */
2079 if (smb_read_data) {
fb8c4b14 2080 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2081 cifs_small_buf_release(smb_read_data);
fb8c4b14 2082 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2083 cifs_buf_release(smb_read_data);
1da177e4 2084 smb_read_data = NULL;
fb8c4b14 2085 }
1da177e4
LT
2086
2087 FreeXid(xid);
2088 return rc;
2089}
2090
2091static int cifs_readpage_worker(struct file *file, struct page *page,
2092 loff_t *poffset)
2093{
2094 char *read_data;
2095 int rc;
2096
2097 page_cache_get(page);
2098 read_data = kmap(page);
2099 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2100
1da177e4 2101 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2102
1da177e4
LT
2103 if (rc < 0)
2104 goto io_error;
2105 else
b6b38f70 2106 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2107
e6a00296
JJS
2108 file->f_path.dentry->d_inode->i_atime =
2109 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2110
1da177e4
LT
2111 if (PAGE_CACHE_SIZE > rc)
2112 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2113
2114 flush_dcache_page(page);
2115 SetPageUptodate(page);
2116 rc = 0;
fb8c4b14 2117
1da177e4 2118io_error:
fb8c4b14 2119 kunmap(page);
1da177e4
LT
2120 page_cache_release(page);
2121 return rc;
2122}
2123
2124static int cifs_readpage(struct file *file, struct page *page)
2125{
2126 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2127 int rc = -EACCES;
2128 int xid;
2129
2130 xid = GetXid();
2131
2132 if (file->private_data == NULL) {
0f3bc09e 2133 rc = -EBADF;
1da177e4 2134 FreeXid(xid);
0f3bc09e 2135 return rc;
1da177e4
LT
2136 }
2137
b6b38f70
JP
2138 cFYI(1, "readpage %p at offset %d 0x%x",
2139 page, (int)offset, (int)offset);
1da177e4
LT
2140
2141 rc = cifs_readpage_worker(file, page, &offset);
2142
2143 unlock_page(page);
2144
2145 FreeXid(xid);
2146 return rc;
2147}
2148
a403a0a3
SF
2149static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2150{
2151 struct cifsFileInfo *open_file;
2152
2153 read_lock(&GlobalSMBSeslock);
2154 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2155 if (open_file->closePend)
2156 continue;
2157 if (open_file->pfile &&
2158 ((open_file->pfile->f_flags & O_RDWR) ||
2159 (open_file->pfile->f_flags & O_WRONLY))) {
2160 read_unlock(&GlobalSMBSeslock);
2161 return 1;
2162 }
2163 }
2164 read_unlock(&GlobalSMBSeslock);
2165 return 0;
2166}
2167
1da177e4
LT
2168/* We do not want to update the file size from the server for inodes
2169 open for write, to avoid races with writepage extending
2170 the file. In the future we could consider allowing
fb8c4b14 2171 refreshing the inode only on increases in the file size,
1da177e4
LT
2172 but this is tricky to do without racing with writebehind
2173 page caching in the current Linux kernel design */
4b18f2a9 2174bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2175{
a403a0a3 2176 if (!cifsInode)
4b18f2a9 2177 return true;
50c2f753 2178
a403a0a3
SF
2179 if (is_inode_writable(cifsInode)) {
2180 /* This inode is open for write at least once */
c32a0b68
SF
2181 struct cifs_sb_info *cifs_sb;
2182
c32a0b68 2183 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2184 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2185 /* since no page cache to corrupt on directio
c32a0b68 2186 we can change size safely */
4b18f2a9 2187 return true;
c32a0b68
SF
2188 }
2189
fb8c4b14 2190 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2191 return true;
7ba52631 2192
4b18f2a9 2193 return false;
23e7dd7d 2194 } else
4b18f2a9 2195 return true;
1da177e4
LT
2196}
2197
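/*
 * A minimal sketch of how a caller is expected to use the helper above when
 * applying server-reported metadata (the attribute-to-inode path in inode.c
 * does roughly this; the exact call site and names may differ):
 *
 *	spin_lock(&inode->i_lock);
 *	if (is_size_safe_to_change(cifs_i, fattr->cf_eof))
 *		i_size_write(inode, fattr->cf_eof);
 *	spin_unlock(&inode->i_lock);
 */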
d9414774
NP
2198static int cifs_write_begin(struct file *file, struct address_space *mapping,
2199 loff_t pos, unsigned len, unsigned flags,
2200 struct page **pagep, void **fsdata)
1da177e4 2201{
d9414774
NP
2202 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2203 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2204 loff_t page_start = pos & PAGE_MASK;
2205 loff_t i_size;
2206 struct page *page;
2207 int rc = 0;
d9414774 2208
b6b38f70 2209 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2210
54566b2c 2211 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2212 if (!page) {
2213 rc = -ENOMEM;
2214 goto out;
2215 }
8a236264 2216
a98ee8c1
JL
2217 if (PageUptodate(page))
2218 goto out;
8a236264 2219
a98ee8c1
JL
2220 /*
2221 * If we write a full page it will be up to date, no need to read from
2222 * the server. If the write is short, we'll end up doing a sync write
2223 * instead.
2224 */
2225 if (len == PAGE_CACHE_SIZE)
2226 goto out;
8a236264 2227
a98ee8c1
JL
2228 /*
2229 * optimize away the read when we have an oplock, and we're not
2230 * expecting to use any of the data we'd be reading in. That
2231 * is, when the page lies beyond the EOF, or straddles the EOF
2232 * and the write will cover all of the existing data.
2233 */
2234 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2235 i_size = i_size_read(mapping->host);
2236 if (page_start >= i_size ||
2237 (offset == 0 && (pos + len) >= i_size)) {
2238 zero_user_segments(page, 0, offset,
2239 offset + len,
2240 PAGE_CACHE_SIZE);
2241 /*
2242 * PageChecked means that the parts of the page
2243 * to which we're not writing are considered up
2244 * to date. Once the data is copied to the
2245 * page, it can be set uptodate.
2246 */
2247 SetPageChecked(page);
2248 goto out;
2249 }
2250 }
d9414774 2251
a98ee8c1
JL
2252 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2253 /*
2254 * might as well read a page, it is fast enough. If we get
2255 * an error, we don't need to return it. cifs_write_end will
2256 * do a sync write instead since PG_uptodate isn't set.
2257 */
2258 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2259 } else {
2260 /* we could try using another file handle if there is one -
2261 but how would we lock it to prevent close of that handle
2262 racing with this read? In any case
d9414774 2263 this will be written out by write_end so is fine */
1da177e4 2264 }
a98ee8c1
JL
2265out:
2266 *pagep = page;
2267 return rc;
1da177e4
LT
2268}
2269
85f2d6b4
SJ
2270static int cifs_release_page(struct page *page, gfp_t gfp)
2271{
2272 if (PagePrivate(page))
2273 return 0;
2274
2275 return cifs_fscache_release_page(page, gfp);
2276}
2277
2278static void cifs_invalidate_page(struct page *page, unsigned long offset)
2279{
2280 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2281
2282 if (offset == 0)
2283 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2284}
2285
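/*
 * cifs_fscache_release_page() and cifs_fscache_invalidate_page() live in
 * fscache.c. A minimal sketch of what the release hook is expected to do,
 * assuming the per-inode FS-Cache cookie is cifsi->fscache (illustrative,
 * not the verbatim implementation):
 *
 *	int cifs_fscache_release_page(struct page *page, gfp_t gfp)
 *	{
 *		if (PageFsCache(page)) {
 *			struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
 *
 *			if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
 *				return 0;	(page still busy, cannot release)
 *		}
 *		return 1;
 *	}
 */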
3bc303c2
JL
2286static void
2287cifs_oplock_break(struct slow_work *work)
2288{
2289 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2290 oplock_break);
2291 struct inode *inode = cfile->pInode;
2292 struct cifsInodeInfo *cinode = CIFS_I(inode);
2293 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
2294 int rc, waitrc = 0;
2295
2296 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2297 if (cinode->clientCanCacheRead)
8737c930 2298 break_lease(inode, O_RDONLY);
d54ff732 2299 else
8737c930 2300 break_lease(inode, O_WRONLY);
3bc303c2
JL
2301 rc = filemap_fdatawrite(inode->i_mapping);
2302 if (cinode->clientCanCacheRead == 0) {
2303 waitrc = filemap_fdatawait(inode->i_mapping);
2304 invalidate_remote_inode(inode);
2305 }
2306 if (!rc)
2307 rc = waitrc;
2308 if (rc)
2309 cinode->write_behind_rc = rc;
b6b38f70 2310 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2311 }
2312
2313 /*
2314 * releasing stale oplock after recent reconnect of smb session using
2315 * a now incorrect file handle is not a data integrity issue but do
2316 * not bother sending an oplock release if session to server still is
2317 * disconnected since oplock already released by the server
2318 */
2319 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
2320 rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
2321 LOCKING_ANDX_OPLOCK_RELEASE, false);
b6b38f70 2322 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2
JL
2323 }
2324}
2325
2326static int
2327cifs_oplock_break_get(struct slow_work *work)
2328{
2329 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2330 oplock_break);
2331 mntget(cfile->mnt);
2332 cifsFileInfo_get(cfile);
2333 return 0;
2334}
2335
2336static void
2337cifs_oplock_break_put(struct slow_work *work)
2338{
2339 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2340 oplock_break);
2341 mntput(cfile->mnt);
2342 cifsFileInfo_put(cfile);
2343}
2344
2345const struct slow_work_ops cifs_oplock_break_ops = {
2346 .get_ref = cifs_oplock_break_get,
2347 .put_ref = cifs_oplock_break_put,
2348 .execute = cifs_oplock_break,
2349};
2350
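/*
 * The ops table above is wired up outside this file: the oplock_break
 * slow_work embedded in each cifsFileInfo is initialized when the file
 * handle is created and queued from the oplock-break demultiplex path.
 * A minimal sketch of both ends, assuming the field is named
 * cfile->oplock_break (pattern only, not the exact call sites):
 *
 *	slow_work_init(&cfile->oplock_break, &cifs_oplock_break_ops);
 *	...
 *	slow_work_enqueue(&cfile->oplock_break);
 */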
f5e54d6e 2351const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2352 .readpage = cifs_readpage,
2353 .readpages = cifs_readpages,
2354 .writepage = cifs_writepage,
37c0eb46 2355 .writepages = cifs_writepages,
d9414774
NP
2356 .write_begin = cifs_write_begin,
2357 .write_end = cifs_write_end,
1da177e4 2358 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2359 .releasepage = cifs_release_page,
2360 .invalidatepage = cifs_invalidate_page,
1da177e4
LT
2361 /* .sync_page = cifs_sync_page, */
2362 /* .direct_IO = */
2363};
273d81d6
DK
2364
2365/*
2366 * cifs_readpages requires the server to support a buffer large enough to
2367 * contain the header plus one complete page of data. Otherwise, we need
2368 * to leave cifs_readpages out of the address space operations.
2369 */
f5e54d6e 2370const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2371 .readpage = cifs_readpage,
2372 .writepage = cifs_writepage,
2373 .writepages = cifs_writepages,
d9414774
NP
2374 .write_begin = cifs_write_begin,
2375 .write_end = cifs_write_end,
273d81d6 2376 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2377 .releasepage = cifs_release_page,
2378 .invalidatepage = cifs_invalidate_page,
273d81d6
DK
2379 /* .sync_page = cifs_sync_page, */
2380 /* .direct_IO = */
2381};
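/*
 * Which of the two address_space_operations tables an inode gets is decided
 * when its ops are set up (in inode.c), based on whether the negotiated
 * server buffer can hold an SMB header plus one full page. A minimal sketch
 * of that check (illustrative; the exact condition lives outside this file):
 *
 *	if (cifs_sb->tcon->ses->server->maxBuf <
 *			PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 */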