/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

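/*
 * Map the POSIX open flags in f_flags to the NT-style desired access
 * bits sent in an SMB open request.
 */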
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

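/*
 * Map VFS open flags to the SMB_O_* flags used by the POSIX open/create
 * call offered by servers with the CIFS Unix extensions.
 */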
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

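/*
 * Pick the CIFS create disposition matching the O_CREAT/O_EXCL/O_TRUNC
 * combination requested by the caller (see the mapping table in cifs_open).
 */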
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

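/*
 * Post-open bookkeeping: unless we already hold a read oplock, flush and
 * invalidate cached pages if the server copy changed, then refresh the
 * inode info and record the caching rights granted by the oplock.
 */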
static inline int cifs_open_inode_helper(struct inode *inode,
	struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
	char *full_path, int xid)
{
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct timespec temp;
	int rc;

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto client_can_cache;
	}

	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(buf->LastWriteTime);
	if (timespec_equal(&inode->i_mtime, &temp) &&
			   (inode->i_size ==
			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, "inode unchanged on server");
	} else {
		if (inode->i_mapping) {
			/* BB no need to lock inode until after invalidate
			   since namei code should already have it locked? */
			rc = filemap_write_and_wait(inode->i_mapping);
			if (rc != 0)
				pCifsInode->write_behind_rc = rc;
		}
		cFYI(1, "invalidating remote inode since open detected it "
			 "changed");
		invalidate_remote_inode(inode);
	}

client_can_cache:
	if (pTcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, NULL);

	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p", inode);
	} else if ((oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	return rc;
}

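/*
 * Open (or create) a file via the POSIX open call provided by the CIFS
 * Unix extensions, and optionally instantiate or update the inode from
 * the FILE_UNIX_BASIC_INFO returned by the server.
 */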
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifsTconInfo *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

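/*
 * ->open() for files on a cifs mount: prefer the POSIX open where the
 * server supports it, otherwise fall back to NT or legacy SMB opens,
 * then attach a cifsFileInfo for the new handle.
 */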
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	struct cifsInodeInfo *pCifsInode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition;
	__u16 netfid;
	FILE_ALL_INFO *buf = NULL;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");

			pCifsFile = cifs_new_fileinfo(netfid, file, tlink,
						      oplock);
			if (pCifsFile == NULL) {
				CIFSSMBClose(xid, tcon, netfid);
				rc = -ENOMEM;
			}

			cifs_fscache_set_inode_cookie(inode, file);

			goto out;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match for disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar, but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(file->f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* Also refresh inode by passing in file_info buf returned by SMBOpen
	   and calling get_inode_info with returned buf (at least helps
	   non-Unix server case) */

	/* BB we can not do this if this is the second open of a file
	   and the first handle has writebehind data, we might be
	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = -EIO; /* no NT SMB support fall into legacy open below */

	if (rc == -EIO) {
		/* Old server, try legacy style OpenX */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	}
	if (rc) {
		cFYI(1, "cifs_open returned 0x%x", rc);
		goto out;
	}

	rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
	if (rc != 0)
		goto out;

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if (oplock & CIFS_CREATE_ACTION) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		if (tcon->unix_ext) {
			struct cifs_unix_set_info_args args = {
				.mode	= inode->i_mode,
				.uid	= NO_CHANGE_64,
				.gid	= NO_CHANGE_64,
				.ctime	= NO_CHANGE_64,
				.atime	= NO_CHANGE_64,
				.mtime	= NO_CHANGE_64,
				.device	= 0,
			};
			CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
					       cifs_sb->local_nls,
					       cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		}
	}

out:
	kfree(buf);
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Try to reacquire byte range locks that were released when session
   to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

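/*
 * Reopen a file handle that was marked invalid after a reconnect. When
 * can_flush is set, write back dirty pages and refresh the inode info
 * before resuming cached I/O on the new handle.
 */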
static int cifs_reopen_file(struct file *file, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	if (file->private_data)
		pCifsFile = file->private_data;
	else
		return -EBADF;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	if (file->f_path.dentry == NULL) {
		cERROR(1, "no valid name if dentry freed");
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	inode = file->f_path.dentry->d_inode;
	if (inode == NULL) {
		cERROR(1, "inode not valid");
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
reopen_error_exit:
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
	} else {
reopen_success:
		pCifsFile->netfid = netfid;
		pCifsFile->invalidHandle = false;
		mutex_unlock(&pCifsFile->fh_mutex);
		pCifsInode = CIFS_I(inode);
		if (pCifsInode) {
			if (can_flush) {
				rc = filemap_write_and_wait(inode->i_mapping);
				if (rc != 0)
					CIFS_I(inode)->write_behind_rc = rc;
			/* temporarily disable caching while we
			   go to server to get inode info */
				pCifsInode->clientCanCacheAll = false;
				pCifsInode->clientCanCacheRead = false;
				if (tcon->unix_ext)
					rc = cifs_get_inode_info_unix(&inode,
						full_path, inode->i_sb, xid);
				else
					rc = cifs_get_inode_info(&inode,
						full_path, NULL, inode->i_sb,
						xid, NULL);
			} /* else we are writing out data to server already
			     and could deadlock if we tried to flush data, and
			     since we do not know if we have data that would
			     invalidate the current end of file on the server
			     we can not go to the server to get the new inode
			     info */
			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
				pCifsInode->clientCanCacheAll = true;
				pCifsInode->clientCanCacheRead = true;
				cFYI(1, "Exclusive Oplock granted on inode %p",
					 file->f_path.dentry->d_inode);
			} else if ((oplock & 0xF) == OPLOCK_READ) {
				pCifsInode->clientCanCacheRead = true;
				pCifsInode->clientCanCacheAll = false;
			} else {
				pCifsInode->clientCanCacheRead = false;
				pCifsInode->clientCanCacheAll = false;
			}
			cifs_relock_file(pCifsFile);
		}
	}
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

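/*
 * ->release() for cifs files: wait briefly for pending writers, close the
 * handle on the server, free any stored byte-range locks and drop our
 * reference to the cifsFileInfo.
 */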
int cifs_close(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid, timeout;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pSMBFile = file->private_data;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = tlink_tcon(pSMBFile->tlink);
	if (pSMBFile) {
		struct cifsLockInfo *li, *tmp;
		write_lock(&GlobalSMBSeslock);
		pSMBFile->closePend = true;
		if (pTcon) {
			/* no sense reconnecting to close a file that is
			   already closed */
			if (!pTcon->need_reconnect) {
				write_unlock(&GlobalSMBSeslock);
				timeout = 2;
				while ((atomic_read(&pSMBFile->count) != 1)
					&& (timeout <= 2048)) {
					/* Give write a better chance to get to
					server ahead of the close.  We do not
					want to add a wait_q here as it would
					increase the memory utilization as
					the struct would be in each open file,
					but this should give enough time to
					clear the socket */
					cFYI(DBG2, "close delay, write pending");
					msleep(timeout);
					timeout *= 4;
				}
				if (!pTcon->need_reconnect &&
				    !pSMBFile->invalidHandle)
					rc = CIFSSMBClose(xid, pTcon,
						  pSMBFile->netfid);
			} else
				write_unlock(&GlobalSMBSeslock);
		} else
			write_unlock(&GlobalSMBSeslock);

		/* Delete any outstanding lock records.
		   We'll lose them when the file is closed anyway. */
		mutex_lock(&pSMBFile->lock_mutex);
		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
			list_del(&li->llist);
			kfree(li);
		}
		mutex_unlock(&pSMBFile->lock_mutex);

		write_lock(&GlobalSMBSeslock);
		list_del(&pSMBFile->flist);
		list_del(&pSMBFile->tlist);
		write_unlock(&GlobalSMBSeslock);
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	} else
		rc = -EBADF;

	read_lock(&GlobalSMBSeslock);
	if (list_empty(&(CIFS_I(inode)->openFileList))) {
		cFYI(1, "closing last open instance for inode %p", inode);
		/* if the file is not open we do not know if we can cache info
		   on this inode, much less write behind and read ahead */
		CIFS_I(inode)->clientCanCacheRead = false;
		CIFS_I(inode)->clientCanCacheAll  = false;
	}
	read_unlock(&GlobalSMBSeslock);
	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
		rc = CIFS_I(inode)->write_behind_rc;
	FreeXid(xid);
	return rc;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		write_lock(&GlobalSMBSeslock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			write_unlock(&GlobalSMBSeslock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			write_unlock(&GlobalSMBSeslock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

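/*
 * Remember a byte-range lock that was granted by the server so that it
 * can be matched up and released on a later unlock request.
 */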
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
				__u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}

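/*
 * ->lock() for cifs files: use POSIX byte-range locks when the Unix
 * extensions allow it, otherwise emulate fcntl semantics with Windows
 * LockingAndX requests and a local list of granted locks.
 */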
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	   account for negative length which we can not accept over the
	   wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */ );
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */ );
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		   to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start,
					0, numLock, lockType, wait_flag);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid,
							li->length, li->offset,
							1, 0, li->type, false);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}

/*
 * Set the timeout on write requests past EOF. For some servers (Windows)
 * these calls can be very long.
 *
 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
 * The 10M cutoff is totally arbitrary. A better scheme for this would be
 * welcome if someone wants to suggest one.
 *
 * We may be able to do a better job with this if there were some way to
 * declare that a file should be sparse.
 */
static int
cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
{
	if (offset <= cifsi->server_eof)
		return CIFS_STD_OP;
	else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
		return CIFS_VLONG_OP;
	else
		return CIFS_LONG_OP;
}

/* update the file size (if needed) after a write */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

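/*
 * Write data from a userspace buffer via the SMB write call, retrying with
 * a reopened handle if the session was reconnected, and updating the cached
 * end-of-file and inode size as data goes out.
 */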
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name); */

	if (file->private_data == NULL)
		return -EBADF;

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	rc = generic_write_checks(file, poffset, &write_size, 0);
	if (rc)
		return rc;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us while
			   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
				open_file->netfid,
				min_t(const int, cifs_sb->wsize,
				      write_size - total_written),
				*poffset, &bytes_written,
				NULL, write_data + total_written, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
					  15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
		struct inode *inode = file->f_path.dentry->d_inode;
/* Do not update local mtime - server will set its actual value on write
 *		inode->i_ctime = inode->i_mtime =
 *			current_fs_time(inode->i_sb);*/
		if (total_written > 0) {
			spin_lock(&inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					*poffset);
			spin_unlock(&inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

static ssize_t cifs_write(struct file *file, const char *write_data,
			  size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name);

	if (file->private_data == NULL)
		return -EBADF;
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us
			   while we blocked so return what we managed to
			   write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}
			if (experimEnabled || (pTcon->ses->server &&
				((pTcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				== 0))) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						open_file->netfid, len,
						*poffset, &bytes_written,
						iov, 1, long_op);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					 open_file->netfid,
					 min_t(const int, cifs_sb->wsize,
					       write_size - total_written),
					 *poffset, &bytes_written,
					 write_data + total_written,
					 NULL, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
					  15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
/*BB We could make this contingent on superblock ATIME flag too */
/*		file->f_path.dentry->d_inode->i_ctime =
		file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
		if (total_written > 0) {
			spin_lock(&file->f_path.dentry->d_inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&file->f_path.dentry->d_inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

#ifdef CONFIG_CIFS_EXPERIMENTAL
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	read_lock(&GlobalSMBSeslock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
		    (open_file->pfile->f_flags & O_RDONLY))) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
#endif

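/*
 * Find an open handle on this inode that was opened for writing, preferring
 * one owned by the current task, and take a reference on it; invalid handles
 * are reopened here when possible.
 */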
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	read_lock(&GlobalSMBSeslock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			}

			read_unlock(&GlobalSMBSeslock);
			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file->pfile, false);
			if (!rc) {
				if (!open_file->closePend)
					return open_file;
				else { /* start over in case this was deleted */
				       /* since the list could be modified */
					read_lock(&GlobalSMBSeslock);
					cifsFileInfo_put(open_file);
					goto refind_writable;
				}
			}

			/* if it fails, try another handle if possible -
			   (we can not do this if closePending since
			   loop could be modified - in which case we
			   have to start at the beginning of the list
			   again. Note that it would be bad
			   to hold up writepages here (rather than
			   in caller) with continuous retries */
			cFYI(1, "wp failed on reopen file");
			read_lock(&GlobalSMBSeslock);
			/* can not use this handle, no write
			   pending on this one after all */
			cifsFileInfo_put(open_file);

			if (open_file->closePend) /* list could have changed */
				goto refind_writable;
			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors.  If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find a usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}

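/*
 * Write the byte range [from, to) of a cached page back to the server
 * using any available writable handle for the inode.
 */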
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct cifs_sb_info *cifs_sb;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;
	cifs_sb = CIFS_SB(inode->i_sb);

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file->pfile, write_data,
					   to-from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0;
	pgoff_t end;
	pgoff_t index;
	int range_whole = 0;
	struct kvec *iov;
	int len;
	int n_iov = 0;
	pgoff_t next;
	int nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int scanned = 0;
	int xid, long_op;

	/*
	 * BB: Is this meaningful for a non-block-device file system?
	 * If it is, we should test it again after we do I/O
	 */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
	if (iov == NULL)
		return generic_writepages(mapping, wbc);

	/*
	 * if there's no open file, then this is likely to fail too,
	 * but it'll at least handle the return. Maybe it should be
	 * a BUG() instead?
	 */
	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (!open_file) {
		kfree(iov);
		return generic_writepages(mapping, wbc);
	}

	tcon = tlink_tcon(open_file->tlink);
	if (!experimEnabled && tcon->ses->server->secMode &
			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		cifsFileInfo_put(open_file);
		return generic_writepages(mapping, wbc);
	}
	cifsFileInfo_put(open_file);

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (first < 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this?  pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
			open_file = find_writable_file(CIFS_I(mapping->host),
						       false);
			if (!open_file) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
			} else {
				long_op = cifs_write_timeout(cifsi, offset);
				rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   long_op);
				cifsFileInfo_put(open_file);
				cifs_update_eof(cifsi, offset, bytes_written);
			}

			if (rc || bytes_written < bytes_to_write) {
				cERROR(1, "Write2 ret %d, wrote %d",
					  rc, bytes_written);
				/* BB what if continued retry is
				   requested via mount flags? */
				if (rc == -ENOSPC)
					set_bit(AS_ENOSPC, &mapping->flags);
				else
					set_bit(AS_EIO, &mapping->flags);
			} else {
				cifs_stats_bytes_written(tcon, bytes_written);
			}

			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* Should we also set page error on
				   success rc but too little data written? */
				/* BB investigate retry logic on temporary
				   server crash cases and how recovery works
				   when page marked as error */
				if (rc)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}
			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		} else
			/* Need to re-find the pages we skipped */
			index = pvec.pages[0]->index + 1;

		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = -EFAULT;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
	unlock_page(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

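/*
 * ->write_end() for the cifs address space: push the copied data straight
 * to the server if the page is not uptodate, otherwise just dirty the page,
 * and extend i_size when the write went past the old end of file.
 */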
d9414774
NP
1617static int cifs_write_end(struct file *file, struct address_space *mapping,
1618 loff_t pos, unsigned len, unsigned copied,
1619 struct page *page, void *fsdata)
1da177e4 1620{
d9414774
NP
1621 int rc;
1622 struct inode *inode = mapping->host;
1da177e4 1623
b6b38f70
JP
1624 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1625 page, pos, copied);
d9414774 1626
a98ee8c1
JL
1627 if (PageChecked(page)) {
1628 if (copied == len)
1629 SetPageUptodate(page);
1630 ClearPageChecked(page);
1631 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1632 SetPageUptodate(page);
ad7a2926 1633
1da177e4 1634 if (!PageUptodate(page)) {
d9414774
NP
1635 char *page_data;
1636 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1637 int xid;
1638
1639 xid = GetXid();
1da177e4
LT
1640 /* this is probably better than directly calling
1641 partialpage_write since in this function the file handle is
1642 known which we might as well leverage */
1643 /* BB check if anything else missing out of ppw
1644 such as updating last write time */
1645 page_data = kmap(page);
d9414774
NP
1646 rc = cifs_write(file, page_data + offset, copied, &pos);
1647 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1648 kunmap(page);
d9414774
NP
1649
1650 FreeXid(xid);
fb8c4b14 1651 } else {
d9414774
NP
1652 rc = copied;
1653 pos += copied;
1da177e4
LT
1654 set_page_dirty(page);
1655 }
1656
d9414774
NP
1657 if (rc > 0) {
1658 spin_lock(&inode->i_lock);
1659 if (pos > inode->i_size)
1660 i_size_write(inode, pos);
1661 spin_unlock(&inode->i_lock);
1662 }
1663
1664 unlock_page(page);
1665 page_cache_release(page);
1666
1da177e4
LT
1667 return rc;
1668}
1669
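The i_size update at the end of cifs_write_end is the usual pattern for growing a file from a write path: take i_lock, compare, and only ever extend. A compact restatement with a hypothetical helper name (the code above compares against inode->i_size directly; i_size_read is the equivalent accessor):

static void example_extend_isize(struct inode *inode, loff_t new_end)
{
	/* i_lock serializes against concurrent size updates; never shrink here */
	spin_lock(&inode->i_lock);
	if (new_end > i_size_read(inode))
		i_size_write(inode, new_end);
	spin_unlock(&inode->i_lock);
}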
7ea80859 1670int cifs_fsync(struct file *file, int datasync)
1da177e4
LT
1671{
1672 int xid;
1673 int rc = 0;
b298f223 1674 struct cifsTconInfo *tcon;
c21dfb69 1675 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1676 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1677
1678 xid = GetXid();
1679
b6b38f70 1680 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1681 file->f_path.dentry->d_name.name, datasync);
50c2f753 1682
cea21805
JL
1683 rc = filemap_write_and_wait(inode->i_mapping);
1684 if (rc == 0) {
1685 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1686 CIFS_I(inode)->write_behind_rc = 0;
13cfb733 1687 tcon = tlink_tcon(smbfile->tlink);
be652445 1688 if (!rc && tcon && smbfile &&
4717bed6 1689 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
b298f223 1690 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
cea21805 1691 }
b298f223 1692
1da177e4
LT
1693 FreeXid(xid);
1694 return rc;
1695}
1696
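cifs_fsync orders its work so that client-side dirty pages are written (and any saved write-behind error is reported) before the server is asked to flush its own caches. A stripped-down sketch of that ordering, with example_server_flush standing in for the SMB Flush request:

static int example_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc;

	rc = filemap_write_and_wait(inode->i_mapping);	/* push dirty pages first */
	if (rc == 0)
		rc = example_server_flush(file);	/* assumed stand-in for the
							   server-side flush call */
	return rc;
}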
3978d717 1697/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1698{
1699 struct address_space *mapping;
1700 struct inode *inode;
1701 unsigned long index = page->index;
1702 unsigned int rpages = 0;
1703 int rc = 0;
1704
f19159dc 1705 cFYI(1, "sync page %p", page);
1da177e4
LT
1706 mapping = page->mapping;
1707 if (!mapping)
1708 return 0;
1709 inode = mapping->host;
1710 if (!inode)
3978d717 1711 return; */
1da177e4 1712
fb8c4b14 1713/* fill in rpages then
1da177e4
LT
1714 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1715
b6b38f70 1716/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1717
3978d717 1718#if 0
1da177e4
LT
1719 if (rc < 0)
1720 return rc;
1721 return 0;
3978d717 1722#endif
1da177e4
LT
1723} */
1724
1725/*
1726 * As file closes, flush all cached write data for this inode checking
1727 * for write behind errors.
1728 */
75e1fcc0 1729int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1730{
fb8c4b14 1731 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1732 int rc = 0;
1733
1734 /* Rather than do the steps manually:
1735 lock the inode for writing
1736 loop through pages looking for write behind data (dirty pages)
1737 coalesce into contiguous 16K (or smaller) chunks to write to server
1738 send to server (prefer in parallel)
1739 deal with writebehind errors
1740 unlock inode for writing
 1741 filemap_fdatawrite appears easier for the time being */
1742
1743 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1744 /* reset wb rc if we were able to write out dirty pages */
1745 if (!rc) {
1746 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1747 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1748 }
50c2f753 1749
b6b38f70 1750 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1751
1752 return rc;
1753}
1754
1755ssize_t cifs_user_read(struct file *file, char __user *read_data,
1756 size_t read_size, loff_t *poffset)
1757{
1758 int rc = -EACCES;
1759 unsigned int bytes_read = 0;
1760 unsigned int total_read = 0;
1761 unsigned int current_read_size;
1762 struct cifs_sb_info *cifs_sb;
1763 struct cifsTconInfo *pTcon;
1764 int xid;
1765 struct cifsFileInfo *open_file;
1766 char *smb_read_data;
1767 char __user *current_offset;
1768 struct smb_com_read_rsp *pSMBr;
1769
1770 xid = GetXid();
e6a00296 1771 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1772
1773 if (file->private_data == NULL) {
0f3bc09e 1774 rc = -EBADF;
1da177e4 1775 FreeXid(xid);
0f3bc09e 1776 return rc;
1da177e4 1777 }
c21dfb69 1778 open_file = file->private_data;
13cfb733 1779 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1780
ad7a2926 1781 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1782 cFYI(1, "attempting read on write only file instance");
ad7a2926 1783
1da177e4
LT
1784 for (total_read = 0, current_offset = read_data;
1785 read_size > total_read;
1786 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1787 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1788 cifs_sb->rsize);
1789 rc = -EAGAIN;
1790 smb_read_data = NULL;
1791 while (rc == -EAGAIN) {
ec637e3f 1792 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1793 if ((open_file->invalidHandle) &&
1da177e4 1794 (!open_file->closePend)) {
4b18f2a9 1795 rc = cifs_reopen_file(file, true);
1da177e4
LT
1796 if (rc != 0)
1797 break;
1798 }
bfa0d75a 1799 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1800 open_file->netfid,
1801 current_read_size, *poffset,
1802 &bytes_read, &smb_read_data,
1803 &buf_type);
1da177e4 1804 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1805 if (smb_read_data) {
93544cc6
SF
1806 if (copy_to_user(current_offset,
1807 smb_read_data +
1808 4 /* RFC1001 length field */ +
1809 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1810 bytes_read))
93544cc6 1811 rc = -EFAULT;
93544cc6 1812
fb8c4b14 1813 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1814 cifs_small_buf_release(smb_read_data);
fb8c4b14 1815 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1816 cifs_buf_release(smb_read_data);
1da177e4
LT
1817 smb_read_data = NULL;
1818 }
1819 }
1820 if (rc || (bytes_read == 0)) {
1821 if (total_read) {
1822 break;
1823 } else {
1824 FreeXid(xid);
1825 return rc;
1826 }
1827 } else {
a4544347 1828 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1829 *poffset += bytes_read;
1830 }
1831 }
1832 FreeXid(xid);
1833 return total_read;
1834}
1835
1836
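The "+ 4" in the copy_to_user() call above skips the RFC1001 length field that precedes every SMB packet; the read payload then starts DataOffset bytes into the SMB itself. A worked sketch of the pointer arithmetic, using the same names as the code above:

/* smb_read_data points at the raw response buffer:
 *   [4-byte RFC1001 length][SMB header + parameters][read payload]
 * so the payload handed to copy_to_user() starts at: */
char *payload = smb_read_data + 4 /* RFC1001 length field */ +
		le16_to_cpu(pSMBr->DataOffset);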
1837static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1838 loff_t *poffset)
1839{
1840 int rc = -EACCES;
1841 unsigned int bytes_read = 0;
1842 unsigned int total_read;
1843 unsigned int current_read_size;
1844 struct cifs_sb_info *cifs_sb;
1845 struct cifsTconInfo *pTcon;
1846 int xid;
1847 char *current_offset;
1848 struct cifsFileInfo *open_file;
ec637e3f 1849 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1850
1851 xid = GetXid();
e6a00296 1852 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1853
1854 if (file->private_data == NULL) {
0f3bc09e 1855 rc = -EBADF;
1da177e4 1856 FreeXid(xid);
0f3bc09e 1857 return rc;
1da177e4 1858 }
c21dfb69 1859 open_file = file->private_data;
13cfb733 1860 pTcon = tlink_tcon(open_file->tlink);
1da177e4
LT
1861
1862 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1863 cFYI(1, "attempting read on write only file instance");
1da177e4 1864
fb8c4b14 1865 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1866 read_size > total_read;
1867 total_read += bytes_read, current_offset += bytes_read) {
1868 current_read_size = min_t(const int, read_size - total_read,
1869 cifs_sb->rsize);
f9f5c817
SF
1870 /* For windows me and 9x we do not want to request more
1871 than it negotiated since it will refuse the read then */
fb8c4b14 1872 if ((pTcon->ses) &&
f9f5c817
SF
1873 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1874 current_read_size = min_t(const int, current_read_size,
1875 pTcon->ses->server->maxBuf - 128);
1876 }
1da177e4
LT
1877 rc = -EAGAIN;
1878 while (rc == -EAGAIN) {
fb8c4b14 1879 if ((open_file->invalidHandle) &&
1da177e4 1880 (!open_file->closePend)) {
4b18f2a9 1881 rc = cifs_reopen_file(file, true);
1da177e4
LT
1882 if (rc != 0)
1883 break;
1884 }
bfa0d75a 1885 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1886 open_file->netfid,
1887 current_read_size, *poffset,
1888 &bytes_read, &current_offset,
1889 &buf_type);
1da177e4
LT
1890 }
1891 if (rc || (bytes_read == 0)) {
1892 if (total_read) {
1893 break;
1894 } else {
1895 FreeXid(xid);
1896 return rc;
1897 }
1898 } else {
a4544347 1899 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1900 *poffset += bytes_read;
1901 }
1902 }
1903 FreeXid(xid);
1904 return total_read;
1905}
1906
1907int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1908{
1da177e4
LT
1909 int rc, xid;
1910
1911 xid = GetXid();
abab095d 1912 rc = cifs_revalidate_file(file);
1da177e4 1913 if (rc) {
b6b38f70 1914 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1915 FreeXid(xid);
1916 return rc;
1917 }
1918 rc = generic_file_mmap(file, vma);
1919 FreeXid(xid);
1920 return rc;
1921}
1922
1923
fb8c4b14 1924static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1925 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1926{
1927 struct page *page;
1928 char *target;
1929
1930 while (bytes_read > 0) {
1931 if (list_empty(pages))
1932 break;
1933
1934 page = list_entry(pages->prev, struct page, lru);
1935 list_del(&page->lru);
1936
315e995c 1937 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1938 GFP_KERNEL)) {
1939 page_cache_release(page);
b6b38f70 1940 cFYI(1, "Add page cache failed");
3079ca62
SF
1941 data += PAGE_CACHE_SIZE;
1942 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1943 continue;
1944 }
06b43672 1945 page_cache_release(page);
1da177e4 1946
fb8c4b14 1947 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1948
1949 if (PAGE_CACHE_SIZE > bytes_read) {
1950 memcpy(target, data, bytes_read);
1951 /* zero the tail end of this partial page */
fb8c4b14 1952 memset(target + bytes_read, 0,
1da177e4
LT
1953 PAGE_CACHE_SIZE - bytes_read);
1954 bytes_read = 0;
1955 } else {
1956 memcpy(target, data, PAGE_CACHE_SIZE);
1957 bytes_read -= PAGE_CACHE_SIZE;
1958 }
1959 kunmap_atomic(target, KM_USER0);
1960
1961 flush_dcache_page(page);
1962 SetPageUptodate(page);
1963 unlock_page(page);
1da177e4 1964 data += PAGE_CACHE_SIZE;
9dc06558
SJ
1965
1966 /* add page to FS-Cache */
1967 cifs_readpage_to_fscache(mapping->host, page);
1da177e4
LT
1968 }
1969 return;
1970}
1971
1972static int cifs_readpages(struct file *file, struct address_space *mapping,
1973 struct list_head *page_list, unsigned num_pages)
1974{
1975 int rc = -EACCES;
1976 int xid;
1977 loff_t offset;
1978 struct page *page;
1979 struct cifs_sb_info *cifs_sb;
1980 struct cifsTconInfo *pTcon;
2c2130e1 1981 unsigned int bytes_read = 0;
fb8c4b14 1982 unsigned int read_size, i;
1da177e4
LT
1983 char *smb_read_data = NULL;
1984 struct smb_com_read_rsp *pSMBr;
1da177e4 1985 struct cifsFileInfo *open_file;
ec637e3f 1986 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1987
1988 xid = GetXid();
1989 if (file->private_data == NULL) {
0f3bc09e 1990 rc = -EBADF;
1da177e4 1991 FreeXid(xid);
0f3bc09e 1992 return rc;
1da177e4 1993 }
c21dfb69 1994 open_file = file->private_data;
e6a00296 1995 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13cfb733 1996 pTcon = tlink_tcon(open_file->tlink);
bfa0d75a 1997
56698236
SJ
1998 /*
1999 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2000 * immediately if the cookie is negative
2001 */
2002 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2003 &num_pages);
2004 if (rc == 0)
2005 goto read_complete;
2006
f19159dc 2007 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
2008 for (i = 0; i < num_pages; ) {
2009 unsigned contig_pages;
2010 struct page *tmp_page;
2011 unsigned long expected_index;
2012
2013 if (list_empty(page_list))
2014 break;
2015
2016 page = list_entry(page_list->prev, struct page, lru);
2017 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2018
2019 /* count adjacent pages that we will read into */
2020 contig_pages = 0;
fb8c4b14 2021 expected_index =
1da177e4 2022 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 2023 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
2024 if (tmp_page->index == expected_index) {
2025 contig_pages++;
2026 expected_index++;
2027 } else
fb8c4b14 2028 break;
1da177e4
LT
2029 }
2030 if (contig_pages + i > num_pages)
2031 contig_pages = num_pages - i;
2032
2033 /* for reads over a certain size could initiate async
2034 read ahead */
2035
2036 read_size = contig_pages * PAGE_CACHE_SIZE;
2037 /* Read size needs to be in multiples of one page */
2038 read_size = min_t(const unsigned int, read_size,
2039 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2040 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2041 read_size, contig_pages);
1da177e4
LT
2042 rc = -EAGAIN;
2043 while (rc == -EAGAIN) {
fb8c4b14 2044 if ((open_file->invalidHandle) &&
1da177e4 2045 (!open_file->closePend)) {
4b18f2a9 2046 rc = cifs_reopen_file(file, true);
1da177e4
LT
2047 if (rc != 0)
2048 break;
2049 }
2050
bfa0d75a 2051 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2052 open_file->netfid,
2053 read_size, offset,
2054 &bytes_read, &smb_read_data,
2055 &buf_type);
a9d02ad4 2056 /* BB more RC checks ? */
fb8c4b14 2057 if (rc == -EAGAIN) {
1da177e4 2058 if (smb_read_data) {
fb8c4b14 2059 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2060 cifs_small_buf_release(smb_read_data);
fb8c4b14 2061 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2062 cifs_buf_release(smb_read_data);
1da177e4
LT
2063 smb_read_data = NULL;
2064 }
2065 }
2066 }
2067 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2068 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2069 break;
2070 } else if (bytes_read > 0) {
6f88cc2e 2071 task_io_account_read(bytes_read);
1da177e4
LT
2072 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2073 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2074 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2075 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2076
2077 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2078 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2079 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2080 i++; /* account for partial page */
2081
fb8c4b14 2082 /* server copy of file can have smaller size
1da177e4 2083 than client */
fb8c4b14
SF
2084 /* BB do we need to verify this common case ?
2085 this case is ok - if we are at server EOF
1da177e4
LT
2086 we will hit it on next read */
2087
05ac9d4b 2088 /* break; */
1da177e4
LT
2089 }
2090 } else {
b6b38f70 2091 cFYI(1, "No bytes read (%d) at offset %lld . "
f19159dc 2092 "Cleaning remaining pages from readahead list",
b6b38f70 2093 bytes_read, offset);
fb8c4b14 2094 /* BB turn off caching and do new lookup on
1da177e4 2095 file size at server? */
1da177e4
LT
2096 break;
2097 }
2098 if (smb_read_data) {
fb8c4b14 2099 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2100 cifs_small_buf_release(smb_read_data);
fb8c4b14 2101 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2102 cifs_buf_release(smb_read_data);
1da177e4
LT
2103 smb_read_data = NULL;
2104 }
2105 bytes_read = 0;
2106 }
2107
1da177e4
LT
2108/* need to free smb_read_data buf before exit */
2109 if (smb_read_data) {
fb8c4b14 2110 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2111 cifs_small_buf_release(smb_read_data);
fb8c4b14 2112 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2113 cifs_buf_release(smb_read_data);
1da177e4 2114 smb_read_data = NULL;
fb8c4b14 2115 }
1da177e4 2116
56698236 2117read_complete:
1da177e4
LT
2118 FreeXid(xid);
2119 return rc;
2120}
2121
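The readahead loop above folds its "count adjacent pages" step into cifs_readpages itself. Pulled out as a standalone helper (hypothetical name, same logic), the contiguity scan reads:

/* count how many pages at the tail of the readahead list have consecutive
   indices, so they can be fetched with a single wire read */
static unsigned int example_count_contig_pages(struct list_head *page_list)
{
	struct page *tmp_page;
	unsigned int contig = 0;
	unsigned long expected_index =
		list_entry(page_list->prev, struct page, lru)->index;

	list_for_each_entry_reverse(tmp_page, page_list, lru) {
		if (tmp_page->index != expected_index)
			break;
		contig++;
		expected_index++;
	}
	return contig;
}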
2122static int cifs_readpage_worker(struct file *file, struct page *page,
2123 loff_t *poffset)
2124{
2125 char *read_data;
2126 int rc;
2127
56698236
SJ
2128 /* Is the page cached? */
2129 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2130 if (rc == 0)
2131 goto read_complete;
2132
1da177e4
LT
2133 page_cache_get(page);
2134 read_data = kmap(page);
2135 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2136
1da177e4 2137 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2138
1da177e4
LT
2139 if (rc < 0)
2140 goto io_error;
2141 else
b6b38f70 2142 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2143
e6a00296
JJS
2144 file->f_path.dentry->d_inode->i_atime =
2145 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2146
1da177e4
LT
2147 if (PAGE_CACHE_SIZE > rc)
2148 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2149
2150 flush_dcache_page(page);
2151 SetPageUptodate(page);
9dc06558
SJ
2152
2153 /* send this page to the cache */
2154 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2155
1da177e4 2156 rc = 0;
fb8c4b14 2157
1da177e4 2158io_error:
fb8c4b14 2159 kunmap(page);
1da177e4 2160 page_cache_release(page);
56698236
SJ
2161
2162read_complete:
1da177e4
LT
2163 return rc;
2164}
2165
2166static int cifs_readpage(struct file *file, struct page *page)
2167{
2168 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2169 int rc = -EACCES;
2170 int xid;
2171
2172 xid = GetXid();
2173
2174 if (file->private_data == NULL) {
0f3bc09e 2175 rc = -EBADF;
1da177e4 2176 FreeXid(xid);
0f3bc09e 2177 return rc;
1da177e4
LT
2178 }
2179
b6b38f70
JP
2180 cFYI(1, "readpage %p at offset %d 0x%x\n",
2181 page, (int)offset, (int)offset);
1da177e4
LT
2182
2183 rc = cifs_readpage_worker(file, page, &offset);
2184
2185 unlock_page(page);
2186
2187 FreeXid(xid);
2188 return rc;
2189}
2190
a403a0a3
SF
2191static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2192{
2193 struct cifsFileInfo *open_file;
2194
2195 read_lock(&GlobalSMBSeslock);
2196 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2197 if (open_file->closePend)
2198 continue;
2199 if (open_file->pfile &&
2200 ((open_file->pfile->f_flags & O_RDWR) ||
2201 (open_file->pfile->f_flags & O_WRONLY))) {
2202 read_unlock(&GlobalSMBSeslock);
2203 return 1;
2204 }
2205 }
2206 read_unlock(&GlobalSMBSeslock);
2207 return 0;
2208}
2209
1da177e4
LT
2210/* We do not want to update the file size from server for inodes
2211 open for write - to avoid races with writepage extending
2212 the file - in the future we could consider allowing
fb8c4b14 2213 refreshing the inode only on increases in the file size
1da177e4
LT
2214 but this is tricky to do without racing with writebehind
2215 page caching in the current Linux kernel design */
4b18f2a9 2216bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2217{
a403a0a3 2218 if (!cifsInode)
4b18f2a9 2219 return true;
50c2f753 2220
a403a0a3
SF
2221 if (is_inode_writable(cifsInode)) {
2222 /* This inode is open for write at least once */
c32a0b68
SF
2223 struct cifs_sb_info *cifs_sb;
2224
c32a0b68 2225 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2226 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2227 /* since no page cache to corrupt on directio
c32a0b68 2228 we can change size safely */
4b18f2a9 2229 return true;
c32a0b68
SF
2230 }
2231
fb8c4b14 2232 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2233 return true;
7ba52631 2234
4b18f2a9 2235 return false;
23e7dd7d 2236 } else
4b18f2a9 2237 return true;
1da177e4
LT
2238}
2239
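is_size_safe_to_change() is consumed by the attribute-refresh paths; a hedged sketch of the typical call site (the actual caller lives in fs/cifs/inode.c and is not shown here):

/* only let server-reported metadata change i_size when no local writer
   could be racing with writeback */
if (is_size_safe_to_change(cifsInode, end_of_file))
	i_size_write(inode, end_of_file);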
d9414774
NP
2240static int cifs_write_begin(struct file *file, struct address_space *mapping,
2241 loff_t pos, unsigned len, unsigned flags,
2242 struct page **pagep, void **fsdata)
1da177e4 2243{
d9414774
NP
2244 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2245 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2246 loff_t page_start = pos & PAGE_MASK;
2247 loff_t i_size;
2248 struct page *page;
2249 int rc = 0;
d9414774 2250
b6b38f70 2251 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2252
54566b2c 2253 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2254 if (!page) {
2255 rc = -ENOMEM;
2256 goto out;
2257 }
8a236264 2258
a98ee8c1
JL
2259 if (PageUptodate(page))
2260 goto out;
8a236264 2261
a98ee8c1
JL
2262 /*
2263 * If we write a full page it will be up to date, no need to read from
2264 * the server. If the write is short, we'll end up doing a sync write
2265 * instead.
2266 */
2267 if (len == PAGE_CACHE_SIZE)
2268 goto out;
8a236264 2269
a98ee8c1
JL
2270 /*
2271 * optimize away the read when we have an oplock, and we're not
2272 * expecting to use any of the data we'd be reading in. That
2273 * is, when the page lies beyond the EOF, or straddles the EOF
2274 * and the write will cover all of the existing data.
2275 */
2276 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2277 i_size = i_size_read(mapping->host);
2278 if (page_start >= i_size ||
2279 (offset == 0 && (pos + len) >= i_size)) {
2280 zero_user_segments(page, 0, offset,
2281 offset + len,
2282 PAGE_CACHE_SIZE);
2283 /*
2284 * PageChecked means that the parts of the page
2285 * to which we're not writing are considered up
2286 * to date. Once the data is copied to the
2287 * page, it can be set uptodate.
2288 */
2289 SetPageChecked(page);
2290 goto out;
2291 }
2292 }
d9414774 2293
a98ee8c1
JL
2294 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2295 /*
2296 * might as well read a page, it is fast enough. If we get
2297 * an error, we don't need to return it. cifs_write_end will
2298 * do a sync write instead since PG_uptodate isn't set.
2299 */
2300 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2301 } else {
2302 /* we could try using another file handle if there is one -
2303 but how would we lock it to prevent close of that handle
2304 racing with this read? In any case
d9414774 2305 this will be written out by write_end so is fine */
1da177e4 2306 }
a98ee8c1
JL
2307out:
2308 *pagep = page;
2309 return rc;
1da177e4
LT
2310}
2311
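The zero_user_segments() call in cifs_write_begin clears the two ranges of the page that the pending copy will not touch. For example, writing len = 200 bytes at offset = 100 into a 4096-byte page zeroes bytes [0, 100) and [300, 4096):

/* zero everything before and after the region being written:
 *   first range:  [0, offset)                     -> bytes 0..99
 *   second range: [offset + len, PAGE_CACHE_SIZE) -> bytes 300..4095 */
zero_user_segments(page, 0, 100, 300, PAGE_CACHE_SIZE);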
85f2d6b4
SJ
2312static int cifs_release_page(struct page *page, gfp_t gfp)
2313{
2314 if (PagePrivate(page))
2315 return 0;
2316
2317 return cifs_fscache_release_page(page, gfp);
2318}
2319
2320static void cifs_invalidate_page(struct page *page, unsigned long offset)
2321{
2322 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2323
2324 if (offset == 0)
2325 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2326}
2327
9b646972 2328void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
2329{
2330 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2331 oplock_break);
a5e18bc3 2332 struct inode *inode = cfile->dentry->d_inode;
3bc303c2 2333 struct cifsInodeInfo *cinode = CIFS_I(inode);
3bc303c2
JL
2334 int rc, waitrc = 0;
2335
2336 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2337 if (cinode->clientCanCacheRead)
8737c930 2338 break_lease(inode, O_RDONLY);
d54ff732 2339 else
8737c930 2340 break_lease(inode, O_WRONLY);
3bc303c2
JL
2341 rc = filemap_fdatawrite(inode->i_mapping);
2342 if (cinode->clientCanCacheRead == 0) {
2343 waitrc = filemap_fdatawait(inode->i_mapping);
2344 invalidate_remote_inode(inode);
2345 }
2346 if (!rc)
2347 rc = waitrc;
2348 if (rc)
2349 cinode->write_behind_rc = rc;
b6b38f70 2350 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2351 }
2352
2353 /*
2354 * releasing stale oplock after recent reconnect of smb session using
2355 * a now incorrect file handle is not a data integrity issue but do
2356 * not bother sending an oplock release if session to server still is
2357 * disconnected since oplock already released by the server
2358 */
2359 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
13cfb733
JL
2360 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
2361 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
b6b38f70 2362 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 2363 }
9b646972
TH
2364
2365 /*
2366 * We might have kicked in before is_valid_oplock_break()
 2367 * finished grabbing a reference for us. Make sure it's done by
 2368 * waiting for GlobalSMBSeslock.
2369 */
2370 write_lock(&GlobalSMBSeslock);
2371 write_unlock(&GlobalSMBSeslock);
2372
2373 cifs_oplock_break_put(cfile);
3bc303c2
JL
2374}
2375
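cifs_oplock_break runs as a work item; the producer side (in fs/cifs/misc.c, not shown here) takes the extra reference via cifs_oplock_break_get() before queueing. A hedged sketch of that handoff, with example_wq standing in for whatever workqueue the build actually uses:

/* at open time the work item is bound to its handler ... */
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);

/* ... and when the server sends an oplock break: */
cifs_oplock_break_get(cfile);			/* pin the file and its superblock */
queue_work(example_wq, &cfile->oplock_break);	/* example_wq: assumed workqueue */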
9b646972 2376void cifs_oplock_break_get(struct cifsFileInfo *cfile)
3bc303c2 2377{
d7c86ff8 2378 cifs_sb_active(cfile->dentry->d_sb);
3bc303c2 2379 cifsFileInfo_get(cfile);
3bc303c2
JL
2380}
2381
9b646972 2382void cifs_oplock_break_put(struct cifsFileInfo *cfile)
3bc303c2 2383{
3bc303c2 2384 cifsFileInfo_put(cfile);
d7c86ff8 2385 cifs_sb_deactive(cfile->dentry->d_sb);
3bc303c2
JL
2386}
2387
f5e54d6e 2388const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2389 .readpage = cifs_readpage,
2390 .readpages = cifs_readpages,
2391 .writepage = cifs_writepage,
37c0eb46 2392 .writepages = cifs_writepages,
d9414774
NP
2393 .write_begin = cifs_write_begin,
2394 .write_end = cifs_write_end,
1da177e4 2395 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2396 .releasepage = cifs_release_page,
2397 .invalidatepage = cifs_invalidate_page,
1da177e4
LT
2398 /* .sync_page = cifs_sync_page, */
2399 /* .direct_IO = */
2400};
273d81d6
DK
2401
2402/*
2403 * cifs_readpages requires the server to support a buffer large enough to
2404 * contain the header plus one complete page of data. Otherwise, we need
2405 * to leave cifs_readpages out of the address space operations.
2406 */
f5e54d6e 2407const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2408 .readpage = cifs_readpage,
2409 .writepage = cifs_writepage,
2410 .writepages = cifs_writepages,
d9414774
NP
2411 .write_begin = cifs_write_begin,
2412 .write_end = cifs_write_end,
273d81d6 2413 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2414 .releasepage = cifs_release_page,
2415 .invalidatepage = cifs_invalidate_page,
273d81d6
DK
2416 /* .sync_page = cifs_sync_page, */
2417 /* .direct_IO = */
2418};
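Which of the two operation tables gets installed is decided when the inode is set up, based on whether the negotiated server buffer can hold a whole page of data plus the CIFS header, as the comment above describes. A sketch of that selection (the real check lives in fs/cifs/inode.c; the server pointer and field names here are approximate):

/* choose the readpages-capable table only when the server's negotiated
   buffer fits a full page of data plus protocol headers */
if (server->maxBuf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
	inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
	inode->i_data.a_ops = &cifs_addr_ops;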