]> bbs.cooldavid.org Git - net-next-2.6.git/blame - fs/cifs/file.c
cifs: clean up cifs_reopen_file
[net-next-2.6.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
1da177e4
LT
35#include <asm/div64.h>
36#include "cifsfs.h"
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_unicode.h"
41#include "cifs_debug.h"
42#include "cifs_fs_sb.h"
9451a9a5 43#include "fscache.h"
1da177e4 44
1da177e4
LT
45static inline int cifs_convert_flags(unsigned int flags)
46{
47 if ((flags & O_ACCMODE) == O_RDONLY)
48 return GENERIC_READ;
49 else if ((flags & O_ACCMODE) == O_WRONLY)
50 return GENERIC_WRITE;
51 else if ((flags & O_ACCMODE) == O_RDWR) {
52 /* GENERIC_ALL is too much permission to request
53 can cause unnecessary access denied on create */
54 /* return GENERIC_ALL; */
55 return (GENERIC_READ | GENERIC_WRITE);
56 }
57
e10f7b55
JL
58 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60 FILE_READ_DATA);
7fc8f4e9 61}
e10f7b55 62
608712fe 63static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 64{
608712fe 65 u32 posix_flags = 0;
e10f7b55 66
7fc8f4e9 67 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 68 posix_flags = SMB_O_RDONLY;
7fc8f4e9 69 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
70 posix_flags = SMB_O_WRONLY;
71 else if ((flags & O_ACCMODE) == O_RDWR)
72 posix_flags = SMB_O_RDWR;
73
74 if (flags & O_CREAT)
75 posix_flags |= SMB_O_CREAT;
76 if (flags & O_EXCL)
77 posix_flags |= SMB_O_EXCL;
78 if (flags & O_TRUNC)
79 posix_flags |= SMB_O_TRUNC;
80 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 81 if (flags & O_DSYNC)
608712fe 82 posix_flags |= SMB_O_SYNC;
7fc8f4e9 83 if (flags & O_DIRECTORY)
608712fe 84 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 85 if (flags & O_NOFOLLOW)
608712fe 86 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 87 if (flags & O_DIRECT)
608712fe 88 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
89
90 return posix_flags;
1da177e4
LT
91}
92
93static inline int cifs_get_disposition(unsigned int flags)
94{
95 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96 return FILE_CREATE;
97 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98 return FILE_OVERWRITE_IF;
99 else if ((flags & O_CREAT) == O_CREAT)
100 return FILE_OPEN_IF;
55aa2e09
SF
101 else if ((flags & O_TRUNC) == O_TRUNC)
102 return FILE_OVERWRITE;
1da177e4
LT
103 else
104 return FILE_OPEN;
105}
106
db460242 107static inline int cifs_open_inode_helper(struct inode *inode,
a347ecb2 108 struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
1da177e4
LT
109 char *full_path, int xid)
110{
db460242 111 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
1da177e4
LT
112 struct timespec temp;
113 int rc;
114
1da177e4
LT
115 if (pCifsInode->clientCanCacheRead) {
116 /* we have the inode open somewhere else
117 no need to discard cache data */
118 goto client_can_cache;
119 }
120
121 /* BB need same check in cifs_create too? */
122 /* if not oplocked, invalidate inode pages if mtime or file
123 size changed */
07119a4d 124 temp = cifs_NTtimeToUnix(buf->LastWriteTime);
db460242
JL
125 if (timespec_equal(&inode->i_mtime, &temp) &&
126 (inode->i_size ==
1da177e4 127 (loff_t)le64_to_cpu(buf->EndOfFile))) {
b6b38f70 128 cFYI(1, "inode unchanged on server");
1da177e4 129 } else {
db460242 130 if (inode->i_mapping) {
ff215713
SF
131 /* BB no need to lock inode until after invalidate
132 since namei code should already have it locked? */
db460242 133 rc = filemap_write_and_wait(inode->i_mapping);
cea21805 134 if (rc != 0)
db460242 135 pCifsInode->write_behind_rc = rc;
1da177e4 136 }
b6b38f70
JP
137 cFYI(1, "invalidating remote inode since open detected it "
138 "changed");
db460242 139 invalidate_remote_inode(inode);
1da177e4
LT
140 }
141
142client_can_cache:
c18c842b 143 if (pTcon->unix_ext)
db460242
JL
144 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
145 xid);
1da177e4 146 else
db460242
JL
147 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
148 xid, NULL);
1da177e4 149
a347ecb2 150 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
4b18f2a9
SF
151 pCifsInode->clientCanCacheAll = true;
152 pCifsInode->clientCanCacheRead = true;
db460242 153 cFYI(1, "Exclusive Oplock granted on inode %p", inode);
a347ecb2 154 } else if ((oplock & 0xF) == OPLOCK_READ)
4b18f2a9 155 pCifsInode->clientCanCacheRead = true;
1da177e4
LT
156
157 return rc;
158}
159
608712fe
JL
160int cifs_posix_open(char *full_path, struct inode **pinode,
161 struct super_block *sb, int mode, unsigned int f_flags,
162 __u32 *poplock, __u16 *pnetfid, int xid)
163{
164 int rc;
165 FILE_UNIX_BASIC_INFO *presp_data;
166 __u32 posix_flags = 0;
167 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
168 struct cifs_fattr fattr;
169 struct tcon_link *tlink;
170 struct cifsTconInfo *tcon;
171
172 cFYI(1, "posix open %s", full_path);
173
174 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
175 if (presp_data == NULL)
176 return -ENOMEM;
177
178 tlink = cifs_sb_tlink(cifs_sb);
179 if (IS_ERR(tlink)) {
180 rc = PTR_ERR(tlink);
181 goto posix_open_ret;
182 }
183
184 tcon = tlink_tcon(tlink);
185 mode &= ~current_umask();
186
187 posix_flags = cifs_posix_convert_flags(f_flags);
188 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
189 poplock, full_path, cifs_sb->local_nls,
190 cifs_sb->mnt_cifs_flags &
191 CIFS_MOUNT_MAP_SPECIAL_CHR);
192 cifs_put_tlink(tlink);
193
194 if (rc)
195 goto posix_open_ret;
196
197 if (presp_data->Type == cpu_to_le32(-1))
198 goto posix_open_ret; /* open ok, caller does qpathinfo */
199
200 if (!pinode)
201 goto posix_open_ret; /* caller does not need info */
202
203 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
204
205 /* get new inode and set it up */
206 if (*pinode == NULL) {
207 cifs_fill_uniqueid(sb, &fattr);
208 *pinode = cifs_iget(sb, &fattr);
209 if (!*pinode) {
210 rc = -ENOMEM;
211 goto posix_open_ret;
212 }
213 } else {
214 cifs_fattr_to_inode(*pinode, &fattr);
215 }
216
217posix_open_ret:
218 kfree(presp_data);
219 return rc;
220}
221
1da177e4
LT
222int cifs_open(struct inode *inode, struct file *file)
223{
224 int rc = -EACCES;
590a3fe0
JL
225 int xid;
226 __u32 oplock;
1da177e4 227 struct cifs_sb_info *cifs_sb;
276a74a4 228 struct cifsTconInfo *tcon;
7ffec372 229 struct tcon_link *tlink;
6ca9f3ba 230 struct cifsFileInfo *pCifsFile = NULL;
1da177e4 231 struct cifsInodeInfo *pCifsInode;
1da177e4
LT
232 char *full_path = NULL;
233 int desiredAccess;
234 int disposition;
235 __u16 netfid;
236 FILE_ALL_INFO *buf = NULL;
237
238 xid = GetXid();
239
240 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
241 tlink = cifs_sb_tlink(cifs_sb);
242 if (IS_ERR(tlink)) {
243 FreeXid(xid);
244 return PTR_ERR(tlink);
245 }
246 tcon = tlink_tcon(tlink);
1da177e4 247
a6ce4932 248 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 249
e6a00296 250 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 251 if (full_path == NULL) {
0f3bc09e 252 rc = -ENOMEM;
232341ba 253 goto out;
1da177e4
LT
254 }
255
b6b38f70
JP
256 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
257 inode, file->f_flags, full_path);
276a74a4
SF
258
259 if (oplockEnabled)
260 oplock = REQ_OPLOCK;
261 else
262 oplock = 0;
263
64cc2c63
SF
264 if (!tcon->broken_posix_open && tcon->unix_ext &&
265 (tcon->ses->capabilities & CAP_UNIX) &&
276a74a4
SF
266 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
267 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 268 /* can not refresh inode info since size could be stale */
2422f676 269 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c 270 cifs_sb->mnt_file_mode /* ignored */,
608712fe 271 file->f_flags, &oplock, &netfid, xid);
276a74a4 272 if (rc == 0) {
b6b38f70 273 cFYI(1, "posix open succeeded");
47c78b7f 274
abfe1eed
JL
275 pCifsFile = cifs_new_fileinfo(netfid, file, tlink,
276 oplock);
2422f676
JL
277 if (pCifsFile == NULL) {
278 CIFSSMBClose(xid, tcon, netfid);
279 rc = -ENOMEM;
2422f676 280 }
9451a9a5
SJ
281
282 cifs_fscache_set_inode_cookie(inode, file);
283
276a74a4 284 goto out;
64cc2c63
SF
285 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
286 if (tcon->ses->serverNOS)
b6b38f70 287 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
288 " unexpected error on SMB posix open"
289 ", disabling posix open support."
290 " Check if server update available.",
291 tcon->ses->serverName,
b6b38f70 292 tcon->ses->serverNOS);
64cc2c63 293 tcon->broken_posix_open = true;
276a74a4
SF
294 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
295 (rc != -EOPNOTSUPP)) /* path not found or net err */
296 goto out;
64cc2c63
SF
297 /* else fallthrough to retry open the old way on network i/o
298 or DFS errors */
276a74a4
SF
299 }
300
1da177e4
LT
301 desiredAccess = cifs_convert_flags(file->f_flags);
302
303/*********************************************************************
304 * open flag mapping table:
fb8c4b14 305 *
1da177e4 306 * POSIX Flag CIFS Disposition
fb8c4b14 307 * ---------- ----------------
1da177e4
LT
308 * O_CREAT FILE_OPEN_IF
309 * O_CREAT | O_EXCL FILE_CREATE
310 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
311 * O_TRUNC FILE_OVERWRITE
312 * none of the above FILE_OPEN
313 *
314 * Note that there is not a direct match between disposition
fb8c4b14 315 * FILE_SUPERSEDE (ie create whether or not file exists although
1da177e4
LT
316 * O_CREAT | O_TRUNC is similar but truncates the existing
317 * file rather than creating a new file as FILE_SUPERSEDE does
318 * (which uses the attributes / metadata passed in on open call)
319 *?
fb8c4b14 320 *? O_SYNC is a reasonable match to CIFS writethrough flag
1da177e4
LT
321 *? and the read write flags match reasonably. O_LARGEFILE
322 *? is irrelevant because largefile support is always used
323 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
324 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
325 *********************************************************************/
326
327 disposition = cifs_get_disposition(file->f_flags);
328
1da177e4
LT
329 /* BB pass O_SYNC flag through on file attributes .. BB */
330
331 /* Also refresh inode by passing in file_info buf returned by SMBOpen
332 and calling get_inode_info with returned buf (at least helps
333 non-Unix server case) */
334
fb8c4b14
SF
335 /* BB we can not do this if this is the second open of a file
336 and the first handle has writebehind data, we might be
1da177e4
LT
337 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
338 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
339 if (!buf) {
340 rc = -ENOMEM;
341 goto out;
342 }
5bafd765 343
a6e8a845 344 if (tcon->ses->capabilities & CAP_NT_SMBS)
276a74a4 345 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
5bafd765 346 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
737b758c
SF
347 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
348 & CIFS_MOUNT_MAP_SPECIAL_CHR);
5bafd765
SF
349 else
350 rc = -EIO; /* no NT SMB support fall into legacy open below */
351
a9d02ad4
SF
352 if (rc == -EIO) {
353 /* Old server, try legacy style OpenX */
276a74a4 354 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
a9d02ad4
SF
355 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
356 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
357 & CIFS_MOUNT_MAP_SPECIAL_CHR);
358 }
1da177e4 359 if (rc) {
b6b38f70 360 cFYI(1, "cifs_open returned 0x%x", rc);
1da177e4
LT
361 goto out;
362 }
3321b791 363
a347ecb2 364 rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
47c78b7f
JL
365 if (rc != 0)
366 goto out;
367
abfe1eed 368 pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
6ca9f3ba 369 if (pCifsFile == NULL) {
1da177e4
LT
370 rc = -ENOMEM;
371 goto out;
372 }
1da177e4 373
9451a9a5
SJ
374 cifs_fscache_set_inode_cookie(inode, file);
375
fb8c4b14 376 if (oplock & CIFS_CREATE_ACTION) {
1da177e4
LT
377 /* time to set mode which we can not set earlier due to
378 problems creating new read-only files */
276a74a4 379 if (tcon->unix_ext) {
4e1e7fb9
JL
380 struct cifs_unix_set_info_args args = {
381 .mode = inode->i_mode,
382 .uid = NO_CHANGE_64,
383 .gid = NO_CHANGE_64,
384 .ctime = NO_CHANGE_64,
385 .atime = NO_CHANGE_64,
386 .mtime = NO_CHANGE_64,
387 .device = 0,
388 };
01ea95e3
JL
389 CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
390 cifs_sb->local_nls,
391 cifs_sb->mnt_cifs_flags &
737b758c 392 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4
LT
393 }
394 }
395
396out:
397 kfree(buf);
398 kfree(full_path);
399 FreeXid(xid);
7ffec372 400 cifs_put_tlink(tlink);
1da177e4
LT
401 return rc;
402}
403
0418726b 404/* Try to reacquire byte range locks that were released when session */
1da177e4
LT
405/* to server was lost */
406static int cifs_relock_file(struct cifsFileInfo *cifsFile)
407{
408 int rc = 0;
409
410/* BB list all locks open on this file and relock */
411
412 return rc;
413}
414
15886177 415static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
1da177e4
LT
416{
417 int rc = -EACCES;
590a3fe0
JL
418 int xid;
419 __u32 oplock;
1da177e4 420 struct cifs_sb_info *cifs_sb;
7fc8f4e9 421 struct cifsTconInfo *tcon;
1da177e4 422 struct cifsInodeInfo *pCifsInode;
fb8c4b14 423 struct inode *inode;
1da177e4
LT
424 char *full_path = NULL;
425 int desiredAccess;
426 int disposition = FILE_OPEN;
427 __u16 netfid;
428
1da177e4 429 xid = GetXid();
f0a71eb8 430 mutex_lock(&pCifsFile->fh_mutex);
4b18f2a9 431 if (!pCifsFile->invalidHandle) {
f0a71eb8 432 mutex_unlock(&pCifsFile->fh_mutex);
0f3bc09e 433 rc = 0;
1da177e4 434 FreeXid(xid);
0f3bc09e 435 return rc;
1da177e4
LT
436 }
437
15886177 438 inode = pCifsFile->dentry->d_inode;
1da177e4 439 cifs_sb = CIFS_SB(inode->i_sb);
13cfb733 440 tcon = tlink_tcon(pCifsFile->tlink);
3a9f462f 441
1da177e4
LT
442/* can not grab rename sem here because various ops, including
443 those that already have the rename sem can end up causing writepage
444 to get called and if the server was down that means we end up here,
445 and we can never tell if the caller already has the rename_sem */
15886177 446 full_path = build_path_from_dentry(pCifsFile->dentry);
1da177e4 447 if (full_path == NULL) {
3a9f462f 448 rc = -ENOMEM;
f0a71eb8 449 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4 450 FreeXid(xid);
3a9f462f 451 return rc;
1da177e4
LT
452 }
453
b6b38f70 454 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
15886177 455 inode, pCifsFile->f_flags, full_path);
1da177e4
LT
456
457 if (oplockEnabled)
458 oplock = REQ_OPLOCK;
459 else
4b18f2a9 460 oplock = 0;
1da177e4 461
7fc8f4e9
SF
462 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
463 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
464 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
608712fe
JL
465
466 /*
467 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
468 * original open. Must mask them off for a reopen.
469 */
15886177
JL
470 unsigned int oflags = pCifsFile->f_flags &
471 ~(O_CREAT | O_EXCL | O_TRUNC);
608712fe 472
2422f676 473 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
fa588e0c
SF
474 cifs_sb->mnt_file_mode /* ignored */,
475 oflags, &oplock, &netfid, xid);
7fc8f4e9 476 if (rc == 0) {
b6b38f70 477 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
478 goto reopen_success;
479 }
480 /* fallthrough to retry open the old way on errors, especially
481 in the reconnect path it is important to retry hard */
482 }
483
15886177 484 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
7fc8f4e9 485
1da177e4 486 /* Can not refresh inode by passing in file_info buf to be returned
fb8c4b14
SF
487 by SMBOpen and then calling get_inode_info with returned buf
488 since file might have write behind data that needs to be flushed
1da177e4
LT
489 and server version of file size can be stale. If we knew for sure
490 that inode was not dirty locally we could do this */
491
7fc8f4e9 492 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
1da177e4 493 CREATE_NOT_DIR, &netfid, &oplock, NULL,
fb8c4b14 494 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
737b758c 495 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4 496 if (rc) {
f0a71eb8 497 mutex_unlock(&pCifsFile->fh_mutex);
b6b38f70
JP
498 cFYI(1, "cifs_open returned 0x%x", rc);
499 cFYI(1, "oplock: %d", oplock);
15886177
JL
500 goto reopen_error_exit;
501 }
502
7fc8f4e9 503reopen_success:
15886177
JL
504 pCifsFile->netfid = netfid;
505 pCifsFile->invalidHandle = false;
506 mutex_unlock(&pCifsFile->fh_mutex);
507 pCifsInode = CIFS_I(inode);
508
509 if (can_flush) {
510 rc = filemap_write_and_wait(inode->i_mapping);
511 if (rc != 0)
512 CIFS_I(inode)->write_behind_rc = rc;
513
514 pCifsInode->clientCanCacheAll = false;
515 pCifsInode->clientCanCacheRead = false;
516 if (tcon->unix_ext)
517 rc = cifs_get_inode_info_unix(&inode,
518 full_path, inode->i_sb, xid);
519 else
520 rc = cifs_get_inode_info(&inode,
521 full_path, NULL, inode->i_sb,
522 xid, NULL);
523 } /* else we are writing out data to server already
524 and could deadlock if we tried to flush data, and
525 since we do not know if we have data that would
526 invalidate the current end of file on the server
527 we can not go to the server to get the new inod
528 info */
529 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
530 pCifsInode->clientCanCacheAll = true;
531 pCifsInode->clientCanCacheRead = true;
532 cFYI(1, "Exclusive Oplock granted on inode %p",
533 pCifsFile->dentry->d_inode);
534 } else if ((oplock & 0xF) == OPLOCK_READ) {
535 pCifsInode->clientCanCacheRead = true;
536 pCifsInode->clientCanCacheAll = false;
537 } else {
538 pCifsInode->clientCanCacheRead = false;
539 pCifsInode->clientCanCacheAll = false;
1da177e4 540 }
15886177
JL
541 cifs_relock_file(pCifsFile);
542
543reopen_error_exit:
1da177e4
LT
544 kfree(full_path);
545 FreeXid(xid);
546 return rc;
547}
548
549int cifs_close(struct inode *inode, struct file *file)
550{
551 int rc = 0;
15745320 552 int xid, timeout;
1da177e4
LT
553 struct cifs_sb_info *cifs_sb;
554 struct cifsTconInfo *pTcon;
c21dfb69 555 struct cifsFileInfo *pSMBFile = file->private_data;
1da177e4
LT
556
557 xid = GetXid();
558
559 cifs_sb = CIFS_SB(inode->i_sb);
13cfb733 560 pTcon = tlink_tcon(pSMBFile->tlink);
1da177e4 561 if (pSMBFile) {
7ee1af76 562 struct cifsLockInfo *li, *tmp;
ddb4cbfc 563 write_lock(&GlobalSMBSeslock);
4b18f2a9 564 pSMBFile->closePend = true;
1da177e4
LT
565 if (pTcon) {
566 /* no sense reconnecting to close a file that is
567 already closed */
3b795210 568 if (!pTcon->need_reconnect) {
ddb4cbfc 569 write_unlock(&GlobalSMBSeslock);
15745320 570 timeout = 2;
6ab409b5 571 while ((atomic_read(&pSMBFile->count) != 1)
15745320 572 && (timeout <= 2048)) {
23e7dd7d
SF
573 /* Give write a better chance to get to
574 server ahead of the close. We do not
575 want to add a wait_q here as it would
576 increase the memory utilization as
577 the struct would be in each open file,
fb8c4b14 578 but this should give enough time to
23e7dd7d 579 clear the socket */
b6b38f70 580 cFYI(DBG2, "close delay, write pending");
23e7dd7d
SF
581 msleep(timeout);
582 timeout *= 4;
4891d539 583 }
ddb4cbfc
SF
584 if (!pTcon->need_reconnect &&
585 !pSMBFile->invalidHandle)
586 rc = CIFSSMBClose(xid, pTcon,
1da177e4 587 pSMBFile->netfid);
ddb4cbfc
SF
588 } else
589 write_unlock(&GlobalSMBSeslock);
590 } else
591 write_unlock(&GlobalSMBSeslock);
7ee1af76
JA
592
593 /* Delete any outstanding lock records.
594 We'll lose them when the file is closed anyway. */
796e5661 595 mutex_lock(&pSMBFile->lock_mutex);
7ee1af76
JA
596 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
597 list_del(&li->llist);
598 kfree(li);
599 }
796e5661 600 mutex_unlock(&pSMBFile->lock_mutex);
7ee1af76 601
cbe0476f 602 write_lock(&GlobalSMBSeslock);
1da177e4
LT
603 list_del(&pSMBFile->flist);
604 list_del(&pSMBFile->tlist);
cbe0476f 605 write_unlock(&GlobalSMBSeslock);
6ab409b5 606 cifsFileInfo_put(file->private_data);
1da177e4
LT
607 file->private_data = NULL;
608 } else
609 rc = -EBADF;
610
4efa53f0 611 read_lock(&GlobalSMBSeslock);
1da177e4 612 if (list_empty(&(CIFS_I(inode)->openFileList))) {
b6b38f70 613 cFYI(1, "closing last open instance for inode %p", inode);
1da177e4
LT
614 /* if the file is not open we do not know if we can cache info
615 on this inode, much less write behind and read ahead */
4b18f2a9
SF
616 CIFS_I(inode)->clientCanCacheRead = false;
617 CIFS_I(inode)->clientCanCacheAll = false;
1da177e4 618 }
4efa53f0 619 read_unlock(&GlobalSMBSeslock);
fb8c4b14 620 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
1da177e4
LT
621 rc = CIFS_I(inode)->write_behind_rc;
622 FreeXid(xid);
623 return rc;
624}
625
626int cifs_closedir(struct inode *inode, struct file *file)
627{
628 int rc = 0;
629 int xid;
c21dfb69 630 struct cifsFileInfo *pCFileStruct = file->private_data;
1da177e4
LT
631 char *ptmp;
632
b6b38f70 633 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4
LT
634
635 xid = GetXid();
636
637 if (pCFileStruct) {
13cfb733 638 struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
1da177e4 639
b6b38f70 640 cFYI(1, "Freeing private data in close dir");
ddb4cbfc 641 write_lock(&GlobalSMBSeslock);
4b18f2a9
SF
642 if (!pCFileStruct->srch_inf.endOfSearch &&
643 !pCFileStruct->invalidHandle) {
644 pCFileStruct->invalidHandle = true;
ddb4cbfc 645 write_unlock(&GlobalSMBSeslock);
1da177e4 646 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
b6b38f70
JP
647 cFYI(1, "Closing uncompleted readdir with rc %d",
648 rc);
1da177e4
LT
649 /* not much we can do if it fails anyway, ignore rc */
650 rc = 0;
ddb4cbfc
SF
651 } else
652 write_unlock(&GlobalSMBSeslock);
1da177e4
LT
653 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
654 if (ptmp) {
b6b38f70 655 cFYI(1, "closedir free smb buf in srch struct");
1da177e4 656 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
fb8c4b14 657 if (pCFileStruct->srch_inf.smallBuf)
d47d7c1a
SF
658 cifs_small_buf_release(ptmp);
659 else
660 cifs_buf_release(ptmp);
1da177e4 661 }
13cfb733 662 cifs_put_tlink(pCFileStruct->tlink);
1da177e4
LT
663 kfree(file->private_data);
664 file->private_data = NULL;
665 }
666 /* BB can we lock the filestruct while this is going on? */
667 FreeXid(xid);
668 return rc;
669}
670
7ee1af76
JA
671static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
672 __u64 offset, __u8 lockType)
673{
fb8c4b14
SF
674 struct cifsLockInfo *li =
675 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
7ee1af76
JA
676 if (li == NULL)
677 return -ENOMEM;
678 li->offset = offset;
679 li->length = len;
680 li->type = lockType;
796e5661 681 mutex_lock(&fid->lock_mutex);
7ee1af76 682 list_add(&li->llist, &fid->llist);
796e5661 683 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
684 return 0;
685}
686
1da177e4
LT
687int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
688{
689 int rc, xid;
1da177e4
LT
690 __u32 numLock = 0;
691 __u32 numUnlock = 0;
692 __u64 length;
4b18f2a9 693 bool wait_flag = false;
1da177e4 694 struct cifs_sb_info *cifs_sb;
13a6e42a 695 struct cifsTconInfo *tcon;
08547b03
SF
696 __u16 netfid;
697 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
13a6e42a 698 bool posix_locking = 0;
1da177e4
LT
699
700 length = 1 + pfLock->fl_end - pfLock->fl_start;
701 rc = -EACCES;
702 xid = GetXid();
703
b6b38f70 704 cFYI(1, "Lock parm: 0x%x flockflags: "
1da177e4 705 "0x%x flocktype: 0x%x start: %lld end: %lld",
fb8c4b14 706 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
b6b38f70 707 pfLock->fl_end);
1da177e4
LT
708
709 if (pfLock->fl_flags & FL_POSIX)
b6b38f70 710 cFYI(1, "Posix");
1da177e4 711 if (pfLock->fl_flags & FL_FLOCK)
b6b38f70 712 cFYI(1, "Flock");
1da177e4 713 if (pfLock->fl_flags & FL_SLEEP) {
b6b38f70 714 cFYI(1, "Blocking lock");
4b18f2a9 715 wait_flag = true;
1da177e4
LT
716 }
717 if (pfLock->fl_flags & FL_ACCESS)
b6b38f70
JP
718 cFYI(1, "Process suspended by mandatory locking - "
719 "not implemented yet");
1da177e4 720 if (pfLock->fl_flags & FL_LEASE)
b6b38f70 721 cFYI(1, "Lease on file - not implemented yet");
fb8c4b14 722 if (pfLock->fl_flags &
1da177e4 723 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
b6b38f70 724 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
1da177e4
LT
725
726 if (pfLock->fl_type == F_WRLCK) {
b6b38f70 727 cFYI(1, "F_WRLCK ");
1da177e4
LT
728 numLock = 1;
729 } else if (pfLock->fl_type == F_UNLCK) {
b6b38f70 730 cFYI(1, "F_UNLCK");
1da177e4 731 numUnlock = 1;
d47d7c1a
SF
732 /* Check if unlock includes more than
733 one lock range */
1da177e4 734 } else if (pfLock->fl_type == F_RDLCK) {
b6b38f70 735 cFYI(1, "F_RDLCK");
1da177e4
LT
736 lockType |= LOCKING_ANDX_SHARED_LOCK;
737 numLock = 1;
738 } else if (pfLock->fl_type == F_EXLCK) {
b6b38f70 739 cFYI(1, "F_EXLCK");
1da177e4
LT
740 numLock = 1;
741 } else if (pfLock->fl_type == F_SHLCK) {
b6b38f70 742 cFYI(1, "F_SHLCK");
1da177e4
LT
743 lockType |= LOCKING_ANDX_SHARED_LOCK;
744 numLock = 1;
745 } else
b6b38f70 746 cFYI(1, "Unknown type of lock");
1da177e4 747
e6a00296 748 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13cfb733 749 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
1da177e4
LT
750
751 if (file->private_data == NULL) {
0f3bc09e 752 rc = -EBADF;
1da177e4 753 FreeXid(xid);
0f3bc09e 754 return rc;
1da177e4 755 }
08547b03
SF
756 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
757
13a6e42a
SF
758 if ((tcon->ses->capabilities & CAP_UNIX) &&
759 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
acc18aa1 760 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
13a6e42a 761 posix_locking = 1;
08547b03
SF
762 /* BB add code here to normalize offset and length to
763 account for negative length which we can not accept over the
764 wire */
1da177e4 765 if (IS_GETLK(cmd)) {
fb8c4b14 766 if (posix_locking) {
08547b03 767 int posix_lock_type;
fb8c4b14 768 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
769 posix_lock_type = CIFS_RDLCK;
770 else
771 posix_lock_type = CIFS_WRLCK;
13a6e42a 772 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
fc94cdb9 773 length, pfLock,
08547b03
SF
774 posix_lock_type, wait_flag);
775 FreeXid(xid);
776 return rc;
777 }
778
779 /* BB we could chain these into one lock request BB */
13a6e42a 780 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
08547b03 781 0, 1, lockType, 0 /* wait flag */ );
1da177e4 782 if (rc == 0) {
13a6e42a 783 rc = CIFSSMBLock(xid, tcon, netfid, length,
1da177e4
LT
784 pfLock->fl_start, 1 /* numUnlock */ ,
785 0 /* numLock */ , lockType,
786 0 /* wait flag */ );
787 pfLock->fl_type = F_UNLCK;
788 if (rc != 0)
b6b38f70
JP
789 cERROR(1, "Error unlocking previously locked "
790 "range %d during test of lock", rc);
1da177e4
LT
791 rc = 0;
792
793 } else {
794 /* if rc == ERR_SHARING_VIOLATION ? */
f05337c6
PS
795 rc = 0;
796
797 if (lockType & LOCKING_ANDX_SHARED_LOCK) {
798 pfLock->fl_type = F_WRLCK;
799 } else {
800 rc = CIFSSMBLock(xid, tcon, netfid, length,
801 pfLock->fl_start, 0, 1,
802 lockType | LOCKING_ANDX_SHARED_LOCK,
803 0 /* wait flag */);
804 if (rc == 0) {
805 rc = CIFSSMBLock(xid, tcon, netfid,
806 length, pfLock->fl_start, 1, 0,
807 lockType |
808 LOCKING_ANDX_SHARED_LOCK,
809 0 /* wait flag */);
810 pfLock->fl_type = F_RDLCK;
811 if (rc != 0)
f19159dc 812 cERROR(1, "Error unlocking "
f05337c6 813 "previously locked range %d "
f19159dc 814 "during test of lock", rc);
f05337c6
PS
815 rc = 0;
816 } else {
817 pfLock->fl_type = F_WRLCK;
818 rc = 0;
819 }
820 }
1da177e4
LT
821 }
822
823 FreeXid(xid);
824 return rc;
825 }
7ee1af76
JA
826
827 if (!numLock && !numUnlock) {
828 /* if no lock or unlock then nothing
829 to do since we do not know what it is */
830 FreeXid(xid);
831 return -EOPNOTSUPP;
832 }
833
834 if (posix_locking) {
08547b03 835 int posix_lock_type;
fb8c4b14 836 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
837 posix_lock_type = CIFS_RDLCK;
838 else
839 posix_lock_type = CIFS_WRLCK;
50c2f753 840
fb8c4b14 841 if (numUnlock == 1)
beb84dc8 842 posix_lock_type = CIFS_UNLCK;
7ee1af76 843
13a6e42a 844 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
fc94cdb9 845 length, pfLock,
08547b03 846 posix_lock_type, wait_flag);
7ee1af76 847 } else {
c21dfb69 848 struct cifsFileInfo *fid = file->private_data;
7ee1af76
JA
849
850 if (numLock) {
13a6e42a 851 rc = CIFSSMBLock(xid, tcon, netfid, length,
fb8c4b14 852 pfLock->fl_start,
7ee1af76
JA
853 0, numLock, lockType, wait_flag);
854
855 if (rc == 0) {
856 /* For Windows locks we must store them. */
857 rc = store_file_lock(fid, length,
858 pfLock->fl_start, lockType);
859 }
860 } else if (numUnlock) {
861 /* For each stored lock that this unlock overlaps
862 completely, unlock it. */
863 int stored_rc = 0;
864 struct cifsLockInfo *li, *tmp;
865
6b70c955 866 rc = 0;
796e5661 867 mutex_lock(&fid->lock_mutex);
7ee1af76
JA
868 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
869 if (pfLock->fl_start <= li->offset &&
c19eb710 870 (pfLock->fl_start + length) >=
39db810c 871 (li->offset + li->length)) {
13a6e42a 872 stored_rc = CIFSSMBLock(xid, tcon,
fb8c4b14 873 netfid,
7ee1af76 874 li->length, li->offset,
4b18f2a9 875 1, 0, li->type, false);
7ee1af76
JA
876 if (stored_rc)
877 rc = stored_rc;
2c964d1f
PS
878 else {
879 list_del(&li->llist);
880 kfree(li);
881 }
7ee1af76
JA
882 }
883 }
796e5661 884 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
885 }
886 }
887
d634cc15 888 if (pfLock->fl_flags & FL_POSIX)
1da177e4
LT
889 posix_lock_file_wait(file, pfLock);
890 FreeXid(xid);
891 return rc;
892}
893
fbec9ab9
JL
894/*
895 * Set the timeout on write requests past EOF. For some servers (Windows)
896 * these calls can be very long.
897 *
898 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
899 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
900 * The 10M cutoff is totally arbitrary. A better scheme for this would be
901 * welcome if someone wants to suggest one.
902 *
903 * We may be able to do a better job with this if there were some way to
904 * declare that a file should be sparse.
905 */
906static int
907cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
908{
909 if (offset <= cifsi->server_eof)
910 return CIFS_STD_OP;
911 else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
912 return CIFS_VLONG_OP;
913 else
914 return CIFS_LONG_OP;
915}
916
917/* update the file size (if needed) after a write */
918static void
919cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
920 unsigned int bytes_written)
921{
922 loff_t end_of_write = offset + bytes_written;
923
924 if (end_of_write > cifsi->server_eof)
925 cifsi->server_eof = end_of_write;
926}
927
1da177e4
LT
928ssize_t cifs_user_write(struct file *file, const char __user *write_data,
929 size_t write_size, loff_t *poffset)
930{
931 int rc = 0;
932 unsigned int bytes_written = 0;
933 unsigned int total_written;
934 struct cifs_sb_info *cifs_sb;
935 struct cifsTconInfo *pTcon;
936 int xid, long_op;
937 struct cifsFileInfo *open_file;
fbec9ab9 938 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 939
e6a00296 940 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 941
b6b38f70
JP
942 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
943 *poffset, file->f_path.dentry->d_name.name); */
1da177e4
LT
944
945 if (file->private_data == NULL)
946 return -EBADF;
ba00ba64 947
c21dfb69 948 open_file = file->private_data;
13cfb733 949 pTcon = tlink_tcon(open_file->tlink);
50c2f753 950
838726c4
JL
951 rc = generic_write_checks(file, poffset, &write_size, 0);
952 if (rc)
953 return rc;
954
1da177e4 955 xid = GetXid();
1da177e4 956
fbec9ab9 957 long_op = cifs_write_timeout(cifsi, *poffset);
1da177e4
LT
958 for (total_written = 0; write_size > total_written;
959 total_written += bytes_written) {
960 rc = -EAGAIN;
961 while (rc == -EAGAIN) {
962 if (file->private_data == NULL) {
963 /* file has been closed on us */
964 FreeXid(xid);
965 /* if we have gotten here we have written some data
966 and blocked, and the file has been freed on us while
967 we blocked so return what we managed to write */
968 return total_written;
fb8c4b14 969 }
1da177e4
LT
970 if (open_file->closePend) {
971 FreeXid(xid);
972 if (total_written)
973 return total_written;
974 else
975 return -EBADF;
976 }
977 if (open_file->invalidHandle) {
1da177e4
LT
978 /* we could deadlock if we called
979 filemap_fdatawait from here so tell
980 reopen_file not to flush data to server
981 now */
15886177 982 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
983 if (rc != 0)
984 break;
985 }
986
987 rc = CIFSSMBWrite(xid, pTcon,
988 open_file->netfid,
989 min_t(const int, cifs_sb->wsize,
990 write_size - total_written),
991 *poffset, &bytes_written,
992 NULL, write_data + total_written, long_op);
993 }
994 if (rc || (bytes_written == 0)) {
995 if (total_written)
996 break;
997 else {
998 FreeXid(xid);
999 return rc;
1000 }
fbec9ab9
JL
1001 } else {
1002 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1003 *poffset += bytes_written;
fbec9ab9 1004 }
133672ef 1005 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1006 15 seconds is plenty */
1007 }
1008
a4544347 1009 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1010
1011 /* since the write may have blocked check these pointers again */
3677db10
SF
1012 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1013 struct inode *inode = file->f_path.dentry->d_inode;
fb8c4b14
SF
1014/* Do not update local mtime - server will set its actual value on write
1015 * inode->i_ctime = inode->i_mtime =
3677db10
SF
1016 * current_fs_time(inode->i_sb);*/
1017 if (total_written > 0) {
1018 spin_lock(&inode->i_lock);
1019 if (*poffset > file->f_path.dentry->d_inode->i_size)
1020 i_size_write(file->f_path.dentry->d_inode,
1da177e4 1021 *poffset);
3677db10 1022 spin_unlock(&inode->i_lock);
1da177e4 1023 }
fb8c4b14 1024 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1025 }
1026 FreeXid(xid);
1027 return total_written;
1028}
1029
1030static ssize_t cifs_write(struct file *file, const char *write_data,
d9414774 1031 size_t write_size, loff_t *poffset)
1da177e4
LT
1032{
1033 int rc = 0;
1034 unsigned int bytes_written = 0;
1035 unsigned int total_written;
1036 struct cifs_sb_info *cifs_sb;
1037 struct cifsTconInfo *pTcon;
1038 int xid, long_op;
1039 struct cifsFileInfo *open_file;
fbec9ab9 1040 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 1041
e6a00296 1042 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1043
b6b38f70
JP
1044 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1045 *poffset, file->f_path.dentry->d_name.name);
1da177e4
LT
1046
1047 if (file->private_data == NULL)
1048 return -EBADF;
c21dfb69 1049 open_file = file->private_data;
13cfb733 1050 pTcon = tlink_tcon(open_file->tlink);
50c2f753 1051
1da177e4 1052 xid = GetXid();
1da177e4 1053
fbec9ab9 1054 long_op = cifs_write_timeout(cifsi, *poffset);
1da177e4
LT
1055 for (total_written = 0; write_size > total_written;
1056 total_written += bytes_written) {
1057 rc = -EAGAIN;
1058 while (rc == -EAGAIN) {
1059 if (file->private_data == NULL) {
1060 /* file has been closed on us */
1061 FreeXid(xid);
1062 /* if we have gotten here we have written some data
1063 and blocked, and the file has been freed on us
fb8c4b14 1064 while we blocked so return what we managed to
1da177e4
LT
1065 write */
1066 return total_written;
fb8c4b14 1067 }
1da177e4
LT
1068 if (open_file->closePend) {
1069 FreeXid(xid);
1070 if (total_written)
1071 return total_written;
1072 else
1073 return -EBADF;
1074 }
1075 if (open_file->invalidHandle) {
1da177e4
LT
1076 /* we could deadlock if we called
1077 filemap_fdatawait from here so tell
fb8c4b14 1078 reopen_file not to flush data to
1da177e4 1079 server now */
15886177 1080 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
1081 if (rc != 0)
1082 break;
1083 }
fb8c4b14
SF
1084 if (experimEnabled || (pTcon->ses->server &&
1085 ((pTcon->ses->server->secMode &
08775834 1086 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
c01f36a8 1087 == 0))) {
3e84469d
SF
1088 struct kvec iov[2];
1089 unsigned int len;
1090
0ae0efad 1091 len = min((size_t)cifs_sb->wsize,
3e84469d
SF
1092 write_size - total_written);
1093 /* iov[0] is reserved for smb header */
1094 iov[1].iov_base = (char *)write_data +
1095 total_written;
1096 iov[1].iov_len = len;
d6e04ae6 1097 rc = CIFSSMBWrite2(xid, pTcon,
3e84469d 1098 open_file->netfid, len,
d6e04ae6 1099 *poffset, &bytes_written,
3e84469d 1100 iov, 1, long_op);
d6e04ae6 1101 } else
60808233
SF
1102 rc = CIFSSMBWrite(xid, pTcon,
1103 open_file->netfid,
1104 min_t(const int, cifs_sb->wsize,
1105 write_size - total_written),
1106 *poffset, &bytes_written,
1107 write_data + total_written,
1108 NULL, long_op);
1da177e4
LT
1109 }
1110 if (rc || (bytes_written == 0)) {
1111 if (total_written)
1112 break;
1113 else {
1114 FreeXid(xid);
1115 return rc;
1116 }
fbec9ab9
JL
1117 } else {
1118 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1119 *poffset += bytes_written;
fbec9ab9 1120 }
133672ef 1121 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1122 15 seconds is plenty */
1123 }
1124
a4544347 1125 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1126
1127 /* since the write may have blocked check these pointers again */
3677db10 1128 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
004c46b9 1129/*BB We could make this contingent on superblock ATIME flag too */
3677db10
SF
1130/* file->f_path.dentry->d_inode->i_ctime =
1131 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1132 if (total_written > 0) {
1133 spin_lock(&file->f_path.dentry->d_inode->i_lock);
1134 if (*poffset > file->f_path.dentry->d_inode->i_size)
1135 i_size_write(file->f_path.dentry->d_inode,
1136 *poffset);
1137 spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1da177e4 1138 }
3677db10 1139 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1140 }
1141 FreeXid(xid);
1142 return total_written;
1143}
1144
630f3f0c 1145#ifdef CONFIG_CIFS_EXPERIMENTAL
6508d904
JL
1146struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1147 bool fsuid_only)
630f3f0c
SF
1148{
1149 struct cifsFileInfo *open_file = NULL;
6508d904
JL
1150 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1151
1152 /* only filter by fsuid on multiuser mounts */
1153 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1154 fsuid_only = false;
630f3f0c
SF
1155
1156 read_lock(&GlobalSMBSeslock);
1157 /* we could simply get the first_list_entry since write-only entries
1158 are always at the end of the list but since the first entry might
1159 have a close pending, we go through the whole list */
1160 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1161 if (open_file->closePend)
1162 continue;
6508d904
JL
1163 if (fsuid_only && open_file->uid != current_fsuid())
1164 continue;
630f3f0c
SF
1165 if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
1166 (open_file->pfile->f_flags & O_RDONLY))) {
1167 if (!open_file->invalidHandle) {
1168 /* found a good file */
1169 /* lock it so it will not be closed on us */
6ab409b5 1170 cifsFileInfo_get(open_file);
630f3f0c
SF
1171 read_unlock(&GlobalSMBSeslock);
1172 return open_file;
1173 } /* else might as well continue, and look for
1174 another, or simply have the caller reopen it
1175 again rather than trying to fix this handle */
1176 } else /* write only file */
1177 break; /* write only files are last so must be done */
1178 }
1179 read_unlock(&GlobalSMBSeslock);
1180 return NULL;
1181}
1182#endif
1183
6508d904
JL
1184struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1185 bool fsuid_only)
6148a742
SF
1186{
1187 struct cifsFileInfo *open_file;
6508d904 1188 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
2846d386 1189 bool any_available = false;
dd99cd80 1190 int rc;
6148a742 1191
60808233
SF
1192 /* Having a null inode here (because mapping->host was set to zero by
1193 the VFS or MM) should not happen but we had reports of on oops (due to
1194 it being zero) during stress testcases so we need to check for it */
1195
fb8c4b14 1196 if (cifs_inode == NULL) {
b6b38f70 1197 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1198 dump_stack();
1199 return NULL;
1200 }
1201
6508d904
JL
1202 /* only filter by fsuid on multiuser mounts */
1203 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1204 fsuid_only = false;
1205
6148a742 1206 read_lock(&GlobalSMBSeslock);
9b22b0b7 1207refind_writable:
6148a742 1208 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1209 if (open_file->closePend)
1210 continue;
1211 if (!any_available && open_file->pid != current->tgid)
1212 continue;
1213 if (fsuid_only && open_file->uid != current_fsuid())
6148a742
SF
1214 continue;
1215 if (open_file->pfile &&
1216 ((open_file->pfile->f_flags & O_RDWR) ||
1217 (open_file->pfile->f_flags & O_WRONLY))) {
6ab409b5 1218 cifsFileInfo_get(open_file);
9b22b0b7
SF
1219
1220 if (!open_file->invalidHandle) {
1221 /* found a good writable file */
1222 read_unlock(&GlobalSMBSeslock);
1223 return open_file;
1224 }
8840dee9 1225
6148a742 1226 read_unlock(&GlobalSMBSeslock);
9b22b0b7 1227 /* Had to unlock since following call can block */
15886177 1228 rc = cifs_reopen_file(open_file, false);
8840dee9 1229 if (!rc) {
9b22b0b7
SF
1230 if (!open_file->closePend)
1231 return open_file;
1232 else { /* start over in case this was deleted */
1233 /* since the list could be modified */
37c0eb46 1234 read_lock(&GlobalSMBSeslock);
6ab409b5 1235 cifsFileInfo_put(open_file);
9b22b0b7 1236 goto refind_writable;
37c0eb46
SF
1237 }
1238 }
9b22b0b7
SF
1239
1240 /* if it fails, try another handle if possible -
1241 (we can not do this if closePending since
1242 loop could be modified - in which case we
1243 have to start at the beginning of the list
1244 again. Note that it would be bad
1245 to hold up writepages here (rather than
1246 in caller) with continuous retries */
b6b38f70 1247 cFYI(1, "wp failed on reopen file");
9b22b0b7
SF
1248 read_lock(&GlobalSMBSeslock);
1249 /* can not use this handle, no write
1250 pending on this one after all */
6ab409b5 1251 cifsFileInfo_put(open_file);
8840dee9 1252
9b22b0b7
SF
1253 if (open_file->closePend) /* list could have changed */
1254 goto refind_writable;
1255 /* else we simply continue to the next entry. Thus
1256 we do not loop on reopen errors. If we
1257 can not reopen the file, for example if we
1258 reconnected to a server with another client
1259 racing to delete or lock the file we would not
1260 make progress if we restarted before the beginning
1261 of the loop here. */
6148a742
SF
1262 }
1263 }
2846d386
JL
1264 /* couldn't find useable FH with same pid, try any available */
1265 if (!any_available) {
1266 any_available = true;
1267 goto refind_writable;
1268 }
6148a742
SF
1269 read_unlock(&GlobalSMBSeslock);
1270 return NULL;
1271}
1272
1da177e4
LT
1273static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1274{
1275 struct address_space *mapping = page->mapping;
1276 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1277 char *write_data;
1278 int rc = -EFAULT;
1279 int bytes_written = 0;
1280 struct cifs_sb_info *cifs_sb;
1da177e4 1281 struct inode *inode;
6148a742 1282 struct cifsFileInfo *open_file;
1da177e4
LT
1283
1284 if (!mapping || !mapping->host)
1285 return -EFAULT;
1286
1287 inode = page->mapping->host;
1288 cifs_sb = CIFS_SB(inode->i_sb);
1da177e4
LT
1289
1290 offset += (loff_t)from;
1291 write_data = kmap(page);
1292 write_data += from;
1293
1294 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1295 kunmap(page);
1296 return -EIO;
1297 }
1298
1299 /* racing with truncate? */
1300 if (offset > mapping->host->i_size) {
1301 kunmap(page);
1302 return 0; /* don't care */
1303 }
1304
1305 /* check to make sure that we are not extending the file */
1306 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1307 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1308
6508d904 1309 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742
SF
1310 if (open_file) {
1311 bytes_written = cifs_write(open_file->pfile, write_data,
1312 to-from, &offset);
6ab409b5 1313 cifsFileInfo_put(open_file);
1da177e4 1314 /* Does mm or vfs already set times? */
6148a742 1315 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1316 if ((bytes_written > 0) && (offset))
6148a742 1317 rc = 0;
bb5a9a04
SF
1318 else if (bytes_written < 0)
1319 rc = bytes_written;
6148a742 1320 } else {
b6b38f70 1321 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1322 rc = -EIO;
1323 }
1324
1325 kunmap(page);
1326 return rc;
1327}
1328
1da177e4 1329static int cifs_writepages(struct address_space *mapping,
37c0eb46 1330 struct writeback_control *wbc)
1da177e4 1331{
37c0eb46
SF
1332 struct backing_dev_info *bdi = mapping->backing_dev_info;
1333 unsigned int bytes_to_write;
1334 unsigned int bytes_written;
1335 struct cifs_sb_info *cifs_sb;
1336 int done = 0;
111ebb6e 1337 pgoff_t end;
37c0eb46 1338 pgoff_t index;
fb8c4b14
SF
1339 int range_whole = 0;
1340 struct kvec *iov;
84d2f07e 1341 int len;
37c0eb46
SF
1342 int n_iov = 0;
1343 pgoff_t next;
1344 int nr_pages;
1345 __u64 offset = 0;
23e7dd7d 1346 struct cifsFileInfo *open_file;
ba00ba64 1347 struct cifsTconInfo *tcon;
fbec9ab9 1348 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
37c0eb46
SF
1349 struct page *page;
1350 struct pagevec pvec;
1351 int rc = 0;
1352 int scanned = 0;
fbec9ab9 1353 int xid, long_op;
1da177e4 1354
f3983c21
JL
1355 /*
1356 * BB: Is this meaningful for a non-block-device file system?
1357 * If it is, we should test it again after we do I/O
1358 */
1359 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1360 wbc->encountered_congestion = 1;
1361 return 0;
1362 }
1363
37c0eb46 1364 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1365
37c0eb46
SF
1366 /*
1367 * If wsize is smaller that the page cache size, default to writing
1368 * one page at a time via cifs_writepage
1369 */
1370 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1371 return generic_writepages(mapping, wbc);
1372
9a0c8230 1373 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1374 if (iov == NULL)
9a0c8230
SF
1375 return generic_writepages(mapping, wbc);
1376
37c0eb46 1377 /*
f3983c21
JL
1378 * if there's no open file, then this is likely to fail too,
1379 * but it'll at least handle the return. Maybe it should be
1380 * a BUG() instead?
37c0eb46 1381 */
6508d904 1382 open_file = find_writable_file(CIFS_I(mapping->host), false);
f3983c21 1383 if (!open_file) {
9a0c8230 1384 kfree(iov);
f3983c21
JL
1385 return generic_writepages(mapping, wbc);
1386 }
1387
13cfb733 1388 tcon = tlink_tcon(open_file->tlink);
f3983c21
JL
1389 if (!experimEnabled && tcon->ses->server->secMode &
1390 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1391 cifsFileInfo_put(open_file);
1392 return generic_writepages(mapping, wbc);
37c0eb46 1393 }
f3983c21 1394 cifsFileInfo_put(open_file);
37c0eb46 1395
1da177e4
LT
1396 xid = GetXid();
1397
37c0eb46 1398 pagevec_init(&pvec, 0);
111ebb6e 1399 if (wbc->range_cyclic) {
37c0eb46 1400 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1401 end = -1;
1402 } else {
1403 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1404 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1405 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1406 range_whole = 1;
37c0eb46
SF
1407 scanned = 1;
1408 }
1409retry:
1410 while (!done && (index <= end) &&
1411 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1412 PAGECACHE_TAG_DIRTY,
1413 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1414 int first;
1415 unsigned int i;
1416
37c0eb46
SF
1417 first = -1;
1418 next = 0;
1419 n_iov = 0;
1420 bytes_to_write = 0;
1421
1422 for (i = 0; i < nr_pages; i++) {
1423 page = pvec.pages[i];
1424 /*
1425 * At this point we hold neither mapping->tree_lock nor
1426 * lock on the page itself: the page may be truncated or
1427 * invalidated (changing page->mapping to NULL), or even
1428 * swizzled back from swapper_space to tmpfs file
1429 * mapping
1430 */
1431
1432 if (first < 0)
1433 lock_page(page);
529ae9aa 1434 else if (!trylock_page(page))
37c0eb46
SF
1435 break;
1436
1437 if (unlikely(page->mapping != mapping)) {
1438 unlock_page(page);
1439 break;
1440 }
1441
111ebb6e 1442 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1443 done = 1;
1444 unlock_page(page);
1445 break;
1446 }
1447
1448 if (next && (page->index != next)) {
1449 /* Not next consecutive page */
1450 unlock_page(page);
1451 break;
1452 }
1453
1454 if (wbc->sync_mode != WB_SYNC_NONE)
1455 wait_on_page_writeback(page);
1456
1457 if (PageWriteback(page) ||
cb876f45 1458 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1459 unlock_page(page);
1460 break;
1461 }
84d2f07e 1462
cb876f45
LT
1463 /*
1464 * This actually clears the dirty bit in the radix tree.
1465 * See cifs_writepage() for more commentary.
1466 */
1467 set_page_writeback(page);
1468
84d2f07e
SF
1469 if (page_offset(page) >= mapping->host->i_size) {
1470 done = 1;
1471 unlock_page(page);
cb876f45 1472 end_page_writeback(page);
84d2f07e
SF
1473 break;
1474 }
1475
37c0eb46
SF
1476 /*
1477 * BB can we get rid of this? pages are held by pvec
1478 */
1479 page_cache_get(page);
1480
84d2f07e
SF
1481 len = min(mapping->host->i_size - page_offset(page),
1482 (loff_t)PAGE_CACHE_SIZE);
1483
37c0eb46
SF
1484 /* reserve iov[0] for the smb header */
1485 n_iov++;
1486 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1487 iov[n_iov].iov_len = len;
1488 bytes_to_write += len;
37c0eb46
SF
1489
1490 if (first < 0) {
1491 first = i;
1492 offset = page_offset(page);
1493 }
1494 next = page->index + 1;
1495 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1496 break;
1497 }
1498 if (n_iov) {
6508d904
JL
1499 open_file = find_writable_file(CIFS_I(mapping->host),
1500 false);
23e7dd7d 1501 if (!open_file) {
b6b38f70 1502 cERROR(1, "No writable handles for inode");
23e7dd7d 1503 rc = -EBADF;
1047abc1 1504 } else {
fbec9ab9 1505 long_op = cifs_write_timeout(cifsi, offset);
f3983c21 1506 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
23e7dd7d
SF
1507 bytes_to_write, offset,
1508 &bytes_written, iov, n_iov,
fbec9ab9 1509 long_op);
6ab409b5 1510 cifsFileInfo_put(open_file);
fbec9ab9 1511 cifs_update_eof(cifsi, offset, bytes_written);
f3983c21 1512 }
fbec9ab9 1513
f3983c21
JL
1514 if (rc || bytes_written < bytes_to_write) {
1515 cERROR(1, "Write2 ret %d, wrote %d",
1516 rc, bytes_written);
1517 /* BB what if continued retry is
1518 requested via mount flags? */
1519 if (rc == -ENOSPC)
1520 set_bit(AS_ENOSPC, &mapping->flags);
1521 else
1522 set_bit(AS_EIO, &mapping->flags);
1523 } else {
1524 cifs_stats_bytes_written(tcon, bytes_written);
37c0eb46 1525 }
f3983c21 1526
37c0eb46
SF
1527 for (i = 0; i < n_iov; i++) {
1528 page = pvec.pages[first + i];
eb9bdaa3
SF
1529 /* Should we also set page error on
1530 success rc but too little data written? */
1531 /* BB investigate retry logic on temporary
1532 server crash cases and how recovery works
fb8c4b14
SF
1533 when page marked as error */
1534 if (rc)
eb9bdaa3 1535 SetPageError(page);
37c0eb46
SF
1536 kunmap(page);
1537 unlock_page(page);
cb876f45 1538 end_page_writeback(page);
37c0eb46
SF
1539 page_cache_release(page);
1540 }
1541 if ((wbc->nr_to_write -= n_iov) <= 0)
1542 done = 1;
1543 index = next;
b066a48c
DK
1544 } else
1545 /* Need to re-find the pages we skipped */
1546 index = pvec.pages[0]->index + 1;
1547
37c0eb46
SF
1548 pagevec_release(&pvec);
1549 }
1550 if (!scanned && !done) {
1551 /*
1552 * We hit the last page and there is more work to be done: wrap
1553 * back to the start of the file
1554 */
1555 scanned = 1;
1556 index = 0;
1557 goto retry;
1558 }
111ebb6e 1559 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1560 mapping->writeback_index = index;
1561
1da177e4 1562 FreeXid(xid);
9a0c8230 1563 kfree(iov);
1da177e4
LT
1564 return rc;
1565}
1da177e4 1566
fb8c4b14 1567static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1da177e4
LT
1568{
1569 int rc = -EFAULT;
1570 int xid;
1571
1572 xid = GetXid();
1573/* BB add check for wbc flags */
1574 page_cache_get(page);
ad7a2926 1575 if (!PageUptodate(page))
b6b38f70 1576 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1577
1578 /*
1579 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1580 *
1581 * A writepage() implementation always needs to do either this,
1582 * or re-dirty the page with "redirty_page_for_writepage()" in
1583 * the case of a failure.
1584 *
1585 * Just unlocking the page will cause the radix tree tag-bits
1586 * to fail to update with the state of the page correctly.
1587 */
fb8c4b14 1588 set_page_writeback(page);
1da177e4
LT
1589 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1590 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1591 unlock_page(page);
cb876f45
LT
1592 end_page_writeback(page);
1593 page_cache_release(page);
1da177e4
LT
1594 FreeXid(xid);
1595 return rc;
1596}
1597
d9414774
NP
1598static int cifs_write_end(struct file *file, struct address_space *mapping,
1599 loff_t pos, unsigned len, unsigned copied,
1600 struct page *page, void *fsdata)
1da177e4 1601{
d9414774
NP
1602 int rc;
1603 struct inode *inode = mapping->host;
1da177e4 1604
b6b38f70
JP
1605 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1606 page, pos, copied);
d9414774 1607
a98ee8c1
JL
1608 if (PageChecked(page)) {
1609 if (copied == len)
1610 SetPageUptodate(page);
1611 ClearPageChecked(page);
1612 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1613 SetPageUptodate(page);
ad7a2926 1614
1da177e4 1615 if (!PageUptodate(page)) {
d9414774
NP
1616 char *page_data;
1617 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1618 int xid;
1619
1620 xid = GetXid();
1da177e4
LT
1621 /* this is probably better than directly calling
1622 partialpage_write since in this function the file handle is
1623 known which we might as well leverage */
1624 /* BB check if anything else missing out of ppw
1625 such as updating last write time */
1626 page_data = kmap(page);
d9414774
NP
1627 rc = cifs_write(file, page_data + offset, copied, &pos);
1628 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1629 kunmap(page);
d9414774
NP
1630
1631 FreeXid(xid);
fb8c4b14 1632 } else {
d9414774
NP
1633 rc = copied;
1634 pos += copied;
1da177e4
LT
1635 set_page_dirty(page);
1636 }
1637
d9414774
NP
1638 if (rc > 0) {
1639 spin_lock(&inode->i_lock);
1640 if (pos > inode->i_size)
1641 i_size_write(inode, pos);
1642 spin_unlock(&inode->i_lock);
1643 }
1644
1645 unlock_page(page);
1646 page_cache_release(page);
1647
1da177e4
LT
1648 return rc;
1649}
1650
7ea80859 1651int cifs_fsync(struct file *file, int datasync)
1da177e4
LT
1652{
1653 int xid;
1654 int rc = 0;
b298f223 1655 struct cifsTconInfo *tcon;
c21dfb69 1656 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1657 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1658
1659 xid = GetXid();
1660
b6b38f70 1661 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1662 file->f_path.dentry->d_name.name, datasync);
50c2f753 1663
cea21805
JL
1664 rc = filemap_write_and_wait(inode->i_mapping);
1665 if (rc == 0) {
1666 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1667 CIFS_I(inode)->write_behind_rc = 0;
13cfb733 1668 tcon = tlink_tcon(smbfile->tlink);
be652445 1669 if (!rc && tcon && smbfile &&
4717bed6 1670 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
b298f223 1671 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
cea21805 1672 }
b298f223 1673
1da177e4
LT
1674 FreeXid(xid);
1675 return rc;
1676}
1677
3978d717 1678/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1679{
1680 struct address_space *mapping;
1681 struct inode *inode;
1682 unsigned long index = page->index;
1683 unsigned int rpages = 0;
1684 int rc = 0;
1685
f19159dc 1686 cFYI(1, "sync page %p", page);
1da177e4
LT
1687 mapping = page->mapping;
1688 if (!mapping)
1689 return 0;
1690 inode = mapping->host;
1691 if (!inode)
3978d717 1692 return; */
1da177e4 1693
fb8c4b14 1694/* fill in rpages then
1da177e4
LT
1695 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1696
b6b38f70 1697/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1698
3978d717 1699#if 0
1da177e4
LT
1700 if (rc < 0)
1701 return rc;
1702 return 0;
3978d717 1703#endif
1da177e4
LT
1704} */
1705
1706/*
1707 * As file closes, flush all cached write data for this inode checking
1708 * for write behind errors.
1709 */
75e1fcc0 1710int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1711{
fb8c4b14 1712 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1713 int rc = 0;
1714
1715 /* Rather than do the steps manually:
1716 lock the inode for writing
1717 loop through pages looking for write behind data (dirty pages)
1718 coalesce into contiguous 16K (or smaller) chunks to write to server
1719 send to server (prefer in parallel)
1720 deal with writebehind errors
1721 unlock inode for writing
1722 filemapfdatawrite appears easier for the time being */
1723
1724 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1725 /* reset wb rc if we were able to write out dirty pages */
1726 if (!rc) {
1727 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1728 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1729 }
50c2f753 1730
b6b38f70 1731 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1732
1733 return rc;
1734}
1735
1736ssize_t cifs_user_read(struct file *file, char __user *read_data,
1737 size_t read_size, loff_t *poffset)
1738{
1739 int rc = -EACCES;
1740 unsigned int bytes_read = 0;
1741 unsigned int total_read = 0;
1742 unsigned int current_read_size;
1743 struct cifs_sb_info *cifs_sb;
1744 struct cifsTconInfo *pTcon;
1745 int xid;
1746 struct cifsFileInfo *open_file;
1747 char *smb_read_data;
1748 char __user *current_offset;
1749 struct smb_com_read_rsp *pSMBr;
1750
1751 xid = GetXid();
e6a00296 1752 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1753
1754 if (file->private_data == NULL) {
0f3bc09e 1755 rc = -EBADF;
1da177e4 1756 FreeXid(xid);
0f3bc09e 1757 return rc;
1da177e4 1758 }
c21dfb69 1759 open_file = file->private_data;
13cfb733 1760 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1761
ad7a2926 1762 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1763 cFYI(1, "attempting read on write only file instance");
ad7a2926 1764
1da177e4
LT
1765 for (total_read = 0, current_offset = read_data;
1766 read_size > total_read;
1767 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1768 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1769 cifs_sb->rsize);
1770 rc = -EAGAIN;
1771 smb_read_data = NULL;
1772 while (rc == -EAGAIN) {
ec637e3f 1773 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1774 if ((open_file->invalidHandle) &&
1da177e4 1775 (!open_file->closePend)) {
15886177 1776 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1777 if (rc != 0)
1778 break;
1779 }
bfa0d75a 1780 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1781 open_file->netfid,
1782 current_read_size, *poffset,
1783 &bytes_read, &smb_read_data,
1784 &buf_type);
1da177e4 1785 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1786 if (smb_read_data) {
93544cc6
SF
1787 if (copy_to_user(current_offset,
1788 smb_read_data +
1789 4 /* RFC1001 length field */ +
1790 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1791 bytes_read))
93544cc6 1792 rc = -EFAULT;
93544cc6 1793
fb8c4b14 1794 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1795 cifs_small_buf_release(smb_read_data);
fb8c4b14 1796 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1797 cifs_buf_release(smb_read_data);
1da177e4
LT
1798 smb_read_data = NULL;
1799 }
1800 }
1801 if (rc || (bytes_read == 0)) {
1802 if (total_read) {
1803 break;
1804 } else {
1805 FreeXid(xid);
1806 return rc;
1807 }
1808 } else {
a4544347 1809 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1810 *poffset += bytes_read;
1811 }
1812 }
1813 FreeXid(xid);
1814 return total_read;
1815}
1816
1817
1818static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1819 loff_t *poffset)
1820{
1821 int rc = -EACCES;
1822 unsigned int bytes_read = 0;
1823 unsigned int total_read;
1824 unsigned int current_read_size;
1825 struct cifs_sb_info *cifs_sb;
1826 struct cifsTconInfo *pTcon;
1827 int xid;
1828 char *current_offset;
1829 struct cifsFileInfo *open_file;
ec637e3f 1830 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1831
1832 xid = GetXid();
e6a00296 1833 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1834
1835 if (file->private_data == NULL) {
0f3bc09e 1836 rc = -EBADF;
1da177e4 1837 FreeXid(xid);
0f3bc09e 1838 return rc;
1da177e4 1839 }
c21dfb69 1840 open_file = file->private_data;
13cfb733 1841 pTcon = tlink_tcon(open_file->tlink);
1da177e4
LT
1842
1843 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1844 cFYI(1, "attempting read on write only file instance");
1da177e4 1845
fb8c4b14 1846 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1847 read_size > total_read;
1848 total_read += bytes_read, current_offset += bytes_read) {
1849 current_read_size = min_t(const int, read_size - total_read,
1850 cifs_sb->rsize);
f9f5c817
SF
1851 /* For windows me and 9x we do not want to request more
1852 than it negotiated since it will refuse the read then */
fb8c4b14 1853 if ((pTcon->ses) &&
f9f5c817
SF
1854 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1855 current_read_size = min_t(const int, current_read_size,
1856 pTcon->ses->server->maxBuf - 128);
1857 }
1da177e4
LT
1858 rc = -EAGAIN;
1859 while (rc == -EAGAIN) {
fb8c4b14 1860 if ((open_file->invalidHandle) &&
1da177e4 1861 (!open_file->closePend)) {
15886177 1862 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1863 if (rc != 0)
1864 break;
1865 }
bfa0d75a 1866 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1867 open_file->netfid,
1868 current_read_size, *poffset,
1869 &bytes_read, &current_offset,
1870 &buf_type);
1da177e4
LT
1871 }
1872 if (rc || (bytes_read == 0)) {
1873 if (total_read) {
1874 break;
1875 } else {
1876 FreeXid(xid);
1877 return rc;
1878 }
1879 } else {
a4544347 1880 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1881 *poffset += bytes_read;
1882 }
1883 }
1884 FreeXid(xid);
1885 return total_read;
1886}
1887
1888int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1889{
1da177e4
LT
1890 int rc, xid;
1891
1892 xid = GetXid();
abab095d 1893 rc = cifs_revalidate_file(file);
1da177e4 1894 if (rc) {
b6b38f70 1895 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1896 FreeXid(xid);
1897 return rc;
1898 }
1899 rc = generic_file_mmap(file, vma);
1900 FreeXid(xid);
1901 return rc;
1902}
1903
1904
fb8c4b14 1905static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1906 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1907{
1908 struct page *page;
1909 char *target;
1910
1911 while (bytes_read > 0) {
1912 if (list_empty(pages))
1913 break;
1914
1915 page = list_entry(pages->prev, struct page, lru);
1916 list_del(&page->lru);
1917
315e995c 1918 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1919 GFP_KERNEL)) {
1920 page_cache_release(page);
b6b38f70 1921 cFYI(1, "Add page cache failed");
3079ca62
SF
1922 data += PAGE_CACHE_SIZE;
1923 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1924 continue;
1925 }
06b43672 1926 page_cache_release(page);
1da177e4 1927
fb8c4b14 1928 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1929
1930 if (PAGE_CACHE_SIZE > bytes_read) {
1931 memcpy(target, data, bytes_read);
1932 /* zero the tail end of this partial page */
fb8c4b14 1933 memset(target + bytes_read, 0,
1da177e4
LT
1934 PAGE_CACHE_SIZE - bytes_read);
1935 bytes_read = 0;
1936 } else {
1937 memcpy(target, data, PAGE_CACHE_SIZE);
1938 bytes_read -= PAGE_CACHE_SIZE;
1939 }
1940 kunmap_atomic(target, KM_USER0);
1941
1942 flush_dcache_page(page);
1943 SetPageUptodate(page);
1944 unlock_page(page);
1da177e4 1945 data += PAGE_CACHE_SIZE;
9dc06558
SJ
1946
1947 /* add page to FS-Cache */
1948 cifs_readpage_to_fscache(mapping->host, page);
1da177e4
LT
1949 }
1950 return;
1951}
1952
1953static int cifs_readpages(struct file *file, struct address_space *mapping,
1954 struct list_head *page_list, unsigned num_pages)
1955{
1956 int rc = -EACCES;
1957 int xid;
1958 loff_t offset;
1959 struct page *page;
1960 struct cifs_sb_info *cifs_sb;
1961 struct cifsTconInfo *pTcon;
2c2130e1 1962 unsigned int bytes_read = 0;
fb8c4b14 1963 unsigned int read_size, i;
1da177e4
LT
1964 char *smb_read_data = NULL;
1965 struct smb_com_read_rsp *pSMBr;
1da177e4 1966 struct cifsFileInfo *open_file;
ec637e3f 1967 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1968
1969 xid = GetXid();
1970 if (file->private_data == NULL) {
0f3bc09e 1971 rc = -EBADF;
1da177e4 1972 FreeXid(xid);
0f3bc09e 1973 return rc;
1da177e4 1974 }
c21dfb69 1975 open_file = file->private_data;
e6a00296 1976 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13cfb733 1977 pTcon = tlink_tcon(open_file->tlink);
bfa0d75a 1978
56698236
SJ
1979 /*
1980 * Reads as many pages as possible from fscache. Returns -ENOBUFS
1981 * immediately if the cookie is negative
1982 */
1983 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
1984 &num_pages);
1985 if (rc == 0)
1986 goto read_complete;
1987
f19159dc 1988 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
1989 for (i = 0; i < num_pages; ) {
1990 unsigned contig_pages;
1991 struct page *tmp_page;
1992 unsigned long expected_index;
1993
1994 if (list_empty(page_list))
1995 break;
1996
1997 page = list_entry(page_list->prev, struct page, lru);
1998 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1999
2000 /* count adjacent pages that we will read into */
2001 contig_pages = 0;
fb8c4b14 2002 expected_index =
1da177e4 2003 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 2004 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
2005 if (tmp_page->index == expected_index) {
2006 contig_pages++;
2007 expected_index++;
2008 } else
fb8c4b14 2009 break;
1da177e4
LT
2010 }
2011 if (contig_pages + i > num_pages)
2012 contig_pages = num_pages - i;
2013
2014 /* for reads over a certain size could initiate async
2015 read ahead */
2016
2017 read_size = contig_pages * PAGE_CACHE_SIZE;
2018 /* Read size needs to be in multiples of one page */
2019 read_size = min_t(const unsigned int, read_size,
2020 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2021 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2022 read_size, contig_pages);
1da177e4
LT
2023 rc = -EAGAIN;
2024 while (rc == -EAGAIN) {
fb8c4b14 2025 if ((open_file->invalidHandle) &&
1da177e4 2026 (!open_file->closePend)) {
15886177 2027 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
2028 if (rc != 0)
2029 break;
2030 }
2031
bfa0d75a 2032 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2033 open_file->netfid,
2034 read_size, offset,
2035 &bytes_read, &smb_read_data,
2036 &buf_type);
a9d02ad4 2037 /* BB more RC checks ? */
fb8c4b14 2038 if (rc == -EAGAIN) {
1da177e4 2039 if (smb_read_data) {
fb8c4b14 2040 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2041 cifs_small_buf_release(smb_read_data);
fb8c4b14 2042 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2043 cifs_buf_release(smb_read_data);
1da177e4
LT
2044 smb_read_data = NULL;
2045 }
2046 }
2047 }
2048 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2049 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2050 break;
2051 } else if (bytes_read > 0) {
6f88cc2e 2052 task_io_account_read(bytes_read);
1da177e4
LT
2053 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2054 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2055 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2056 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2057
2058 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2059 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2060 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2061 i++; /* account for partial page */
2062
fb8c4b14 2063 /* server copy of file can have smaller size
1da177e4 2064 than client */
fb8c4b14
SF
2065 /* BB do we need to verify this common case ?
2066 this case is ok - if we are at server EOF
1da177e4
LT
2067 we will hit it on next read */
2068
05ac9d4b 2069 /* break; */
1da177e4
LT
2070 }
2071 } else {
b6b38f70 2072 cFYI(1, "No bytes read (%d) at offset %lld . "
f19159dc 2073 "Cleaning remaining pages from readahead list",
b6b38f70 2074 bytes_read, offset);
fb8c4b14 2075 /* BB turn off caching and do new lookup on
1da177e4 2076 file size at server? */
1da177e4
LT
2077 break;
2078 }
2079 if (smb_read_data) {
fb8c4b14 2080 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2081 cifs_small_buf_release(smb_read_data);
fb8c4b14 2082 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2083 cifs_buf_release(smb_read_data);
1da177e4
LT
2084 smb_read_data = NULL;
2085 }
2086 bytes_read = 0;
2087 }
2088
1da177e4
LT
2089/* need to free smb_read_data buf before exit */
2090 if (smb_read_data) {
fb8c4b14 2091 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2092 cifs_small_buf_release(smb_read_data);
fb8c4b14 2093 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2094 cifs_buf_release(smb_read_data);
1da177e4 2095 smb_read_data = NULL;
fb8c4b14 2096 }
1da177e4 2097
56698236 2098read_complete:
1da177e4
LT
2099 FreeXid(xid);
2100 return rc;
2101}
2102
2103static int cifs_readpage_worker(struct file *file, struct page *page,
2104 loff_t *poffset)
2105{
2106 char *read_data;
2107 int rc;
2108
56698236
SJ
2109 /* Is the page cached? */
2110 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2111 if (rc == 0)
2112 goto read_complete;
2113
1da177e4
LT
2114 page_cache_get(page);
2115 read_data = kmap(page);
2116 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2117
1da177e4 2118 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2119
1da177e4
LT
2120 if (rc < 0)
2121 goto io_error;
2122 else
b6b38f70 2123 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2124
e6a00296
JJS
2125 file->f_path.dentry->d_inode->i_atime =
2126 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2127
1da177e4
LT
2128 if (PAGE_CACHE_SIZE > rc)
2129 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2130
2131 flush_dcache_page(page);
2132 SetPageUptodate(page);
9dc06558
SJ
2133
2134 /* send this page to the cache */
2135 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2136
1da177e4 2137 rc = 0;
fb8c4b14 2138
1da177e4 2139io_error:
fb8c4b14 2140 kunmap(page);
1da177e4 2141 page_cache_release(page);
56698236
SJ
2142
2143read_complete:
1da177e4
LT
2144 return rc;
2145}
2146
2147static int cifs_readpage(struct file *file, struct page *page)
2148{
2149 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2150 int rc = -EACCES;
2151 int xid;
2152
2153 xid = GetXid();
2154
2155 if (file->private_data == NULL) {
0f3bc09e 2156 rc = -EBADF;
1da177e4 2157 FreeXid(xid);
0f3bc09e 2158 return rc;
1da177e4
LT
2159 }
2160
b6b38f70
JP
2161 cFYI(1, "readpage %p at offset %d 0x%x\n",
2162 page, (int)offset, (int)offset);
1da177e4
LT
2163
2164 rc = cifs_readpage_worker(file, page, &offset);
2165
2166 unlock_page(page);
2167
2168 FreeXid(xid);
2169 return rc;
2170}
2171
a403a0a3
SF
2172static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2173{
2174 struct cifsFileInfo *open_file;
2175
2176 read_lock(&GlobalSMBSeslock);
2177 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2178 if (open_file->closePend)
2179 continue;
2180 if (open_file->pfile &&
2181 ((open_file->pfile->f_flags & O_RDWR) ||
2182 (open_file->pfile->f_flags & O_WRONLY))) {
2183 read_unlock(&GlobalSMBSeslock);
2184 return 1;
2185 }
2186 }
2187 read_unlock(&GlobalSMBSeslock);
2188 return 0;
2189}
2190
1da177e4
LT
2191/* We do not want to update the file size from server for inodes
2192 open for write - to avoid races with writepage extending
2193 the file - in the future we could consider allowing
fb8c4b14 2194 refreshing the inode only on increases in the file size
1da177e4
LT
2195 but this is tricky to do without racing with writebehind
2196 page caching in the current Linux kernel design */
4b18f2a9 2197bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2198{
a403a0a3 2199 if (!cifsInode)
4b18f2a9 2200 return true;
50c2f753 2201
a403a0a3
SF
2202 if (is_inode_writable(cifsInode)) {
2203 /* This inode is open for write at least once */
c32a0b68
SF
2204 struct cifs_sb_info *cifs_sb;
2205
c32a0b68 2206 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2207 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2208 /* since no page cache to corrupt on directio
c32a0b68 2209 we can change size safely */
4b18f2a9 2210 return true;
c32a0b68
SF
2211 }
2212
fb8c4b14 2213 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2214 return true;
7ba52631 2215
4b18f2a9 2216 return false;
23e7dd7d 2217 } else
4b18f2a9 2218 return true;
1da177e4
LT
2219}
2220
d9414774
NP
2221static int cifs_write_begin(struct file *file, struct address_space *mapping,
2222 loff_t pos, unsigned len, unsigned flags,
2223 struct page **pagep, void **fsdata)
1da177e4 2224{
d9414774
NP
2225 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2226 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2227 loff_t page_start = pos & PAGE_MASK;
2228 loff_t i_size;
2229 struct page *page;
2230 int rc = 0;
d9414774 2231
b6b38f70 2232 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2233
54566b2c 2234 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2235 if (!page) {
2236 rc = -ENOMEM;
2237 goto out;
2238 }
8a236264 2239
a98ee8c1
JL
2240 if (PageUptodate(page))
2241 goto out;
8a236264 2242
a98ee8c1
JL
2243 /*
2244 * If we write a full page it will be up to date, no need to read from
2245 * the server. If the write is short, we'll end up doing a sync write
2246 * instead.
2247 */
2248 if (len == PAGE_CACHE_SIZE)
2249 goto out;
8a236264 2250
a98ee8c1
JL
2251 /*
2252 * optimize away the read when we have an oplock, and we're not
2253 * expecting to use any of the data we'd be reading in. That
2254 * is, when the page lies beyond the EOF, or straddles the EOF
2255 * and the write will cover all of the existing data.
2256 */
2257 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2258 i_size = i_size_read(mapping->host);
2259 if (page_start >= i_size ||
2260 (offset == 0 && (pos + len) >= i_size)) {
2261 zero_user_segments(page, 0, offset,
2262 offset + len,
2263 PAGE_CACHE_SIZE);
2264 /*
2265 * PageChecked means that the parts of the page
2266 * to which we're not writing are considered up
2267 * to date. Once the data is copied to the
2268 * page, it can be set uptodate.
2269 */
2270 SetPageChecked(page);
2271 goto out;
2272 }
2273 }
d9414774 2274
a98ee8c1
JL
2275 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2276 /*
2277 * might as well read a page, it is fast enough. If we get
2278 * an error, we don't need to return it. cifs_write_end will
2279 * do a sync write instead since PG_uptodate isn't set.
2280 */
2281 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2282 } else {
2283 /* we could try using another file handle if there is one -
2284 but how would we lock it to prevent close of that handle
2285 racing with this read? In any case
d9414774 2286 this will be written out by write_end so is fine */
1da177e4 2287 }
a98ee8c1
JL
2288out:
2289 *pagep = page;
2290 return rc;
1da177e4
LT
2291}
2292
85f2d6b4
SJ
2293static int cifs_release_page(struct page *page, gfp_t gfp)
2294{
2295 if (PagePrivate(page))
2296 return 0;
2297
2298 return cifs_fscache_release_page(page, gfp);
2299}
2300
2301static void cifs_invalidate_page(struct page *page, unsigned long offset)
2302{
2303 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2304
2305 if (offset == 0)
2306 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2307}
2308
9b646972 2309void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
2310{
2311 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2312 oplock_break);
a5e18bc3 2313 struct inode *inode = cfile->dentry->d_inode;
3bc303c2 2314 struct cifsInodeInfo *cinode = CIFS_I(inode);
3bc303c2
JL
2315 int rc, waitrc = 0;
2316
2317 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2318 if (cinode->clientCanCacheRead)
8737c930 2319 break_lease(inode, O_RDONLY);
d54ff732 2320 else
8737c930 2321 break_lease(inode, O_WRONLY);
3bc303c2
JL
2322 rc = filemap_fdatawrite(inode->i_mapping);
2323 if (cinode->clientCanCacheRead == 0) {
2324 waitrc = filemap_fdatawait(inode->i_mapping);
2325 invalidate_remote_inode(inode);
2326 }
2327 if (!rc)
2328 rc = waitrc;
2329 if (rc)
2330 cinode->write_behind_rc = rc;
b6b38f70 2331 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2332 }
2333
2334 /*
2335 * releasing stale oplock after recent reconnect of smb session using
2336 * a now incorrect file handle is not a data integrity issue but do
2337 * not bother sending an oplock release if session to server still is
2338 * disconnected since oplock already released by the server
2339 */
2340 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
13cfb733
JL
2341 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
2342 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
b6b38f70 2343 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 2344 }
9b646972
TH
2345
2346 /*
2347 * We might have kicked in before is_valid_oplock_break()
2348 * finished grabbing reference for us. Make sure it's done by
2349 * waiting for GlobalSMSSeslock.
2350 */
2351 write_lock(&GlobalSMBSeslock);
2352 write_unlock(&GlobalSMBSeslock);
2353
2354 cifs_oplock_break_put(cfile);
3bc303c2
JL
2355}
2356
9b646972 2357void cifs_oplock_break_get(struct cifsFileInfo *cfile)
3bc303c2 2358{
d7c86ff8 2359 cifs_sb_active(cfile->dentry->d_sb);
3bc303c2 2360 cifsFileInfo_get(cfile);
3bc303c2
JL
2361}
2362
9b646972 2363void cifs_oplock_break_put(struct cifsFileInfo *cfile)
3bc303c2 2364{
3bc303c2 2365 cifsFileInfo_put(cfile);
d7c86ff8 2366 cifs_sb_deactive(cfile->dentry->d_sb);
3bc303c2
JL
2367}
2368
f5e54d6e 2369const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2370 .readpage = cifs_readpage,
2371 .readpages = cifs_readpages,
2372 .writepage = cifs_writepage,
37c0eb46 2373 .writepages = cifs_writepages,
d9414774
NP
2374 .write_begin = cifs_write_begin,
2375 .write_end = cifs_write_end,
1da177e4 2376 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2377 .releasepage = cifs_release_page,
2378 .invalidatepage = cifs_invalidate_page,
1da177e4
LT
2379 /* .sync_page = cifs_sync_page, */
2380 /* .direct_IO = */
2381};
273d81d6
DK
2382
2383/*
2384 * cifs_readpages requires the server to support a buffer large enough to
2385 * contain the header plus one complete page of data. Otherwise, we need
2386 * to leave cifs_readpages out of the address space operations.
2387 */
f5e54d6e 2388const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2389 .readpage = cifs_readpage,
2390 .writepage = cifs_writepage,
2391 .writepages = cifs_writepages,
d9414774
NP
2392 .write_begin = cifs_write_begin,
2393 .write_end = cifs_write_end,
273d81d6 2394 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2395 .releasepage = cifs_release_page,
2396 .invalidatepage = cifs_invalidate_page,
273d81d6
DK
2397 /* .sync_page = cifs_sync_page, */
2398 /* .direct_IO = */
2399};