/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
{
	fmode_t posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = FMODE_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = FMODE_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		posix_flags = FMODE_READ | FMODE_WRITE;
	}
	/* can not map O_CREAT or O_EXCL or O_TRUNC flags when
	   reopening a file.  They had their effect on the original open */
	if (flags & O_APPEND)
		posix_flags |= (fmode_t)O_APPEND;
	if (flags & O_DSYNC)
		posix_flags |= (fmode_t)O_DSYNC;
	if (flags & __O_SYNC)
		posix_flags |= (fmode_t)__O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= (fmode_t)O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= (fmode_t)O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= (fmode_t)O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

/* all arguments to this function must be checked for validity in caller */
static inline int
cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
			     struct cifsInodeInfo *pCifsInode, __u32 oplock,
			     u16 netfid)
{

	write_lock(&GlobalSMBSeslock);

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
	if (pCifsInode == NULL) {
		write_unlock(&GlobalSMBSeslock);
		return -EINVAL;
	}

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto psx_client_can_cache;
	}

	/* BB FIXME need to fix this check to move it earlier into posix_open
	   BB fix following section BB FIXME */

	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
/*	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
	if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
			   (file->f_path.dentry->d_inode->i_size ==
			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, "inode unchanged on server");
	} else {
		if (file->f_path.dentry->d_inode->i_mapping) {
			rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
			if (rc != 0)
				CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
		}
		cFYI(1, "invalidating remote inode since open detected it "
			 "changed");
		invalidate_remote_inode(file->f_path.dentry->d_inode);
	} */

psx_client_can_cache:
	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p",
			 file->f_path.dentry->d_inode);
	} else if ((oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	/* will have to change the unlock if we reenable the
	   filemap_fdatawrite (which does not seem necessary) */
	write_unlock(&GlobalSMBSeslock);
	return 0;
}

/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode,
	struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
	char *full_path, int xid)
{
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct timespec temp;
	int rc;

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto client_can_cache;
	}

	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(buf->LastWriteTime);
	if (timespec_equal(&inode->i_mtime, &temp) &&
	    (inode->i_size == (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, "inode unchanged on server");
	} else {
		if (inode->i_mapping) {
			/* BB no need to lock inode until after invalidate
			   since namei code should already have it locked? */
			rc = filemap_write_and_wait(inode->i_mapping);
			if (rc != 0)
				pCifsInode->write_behind_rc = rc;
		}
		cFYI(1, "invalidating remote inode since open detected it "
			 "changed");
		invalidate_remote_inode(inode);
	}

client_can_cache:
	if (pTcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, NULL);

	if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p", inode);
	} else if ((*oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	return rc;
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile = NULL;
	struct cifsInodeInfo *pCifsInode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition;
	__u16 netfid;
	FILE_ALL_INFO *buf = NULL;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = cifs_sb->tcon;

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
		oflags |= SMB_O_CREAT;
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			/* no need for special case handling of setting mode
			   on read only files needed here */

			rc = cifs_posix_open_inode_helper(inode, file,
						pCifsInode, oplock, netfid);
			if (rc != 0) {
				CIFSSMBClose(xid, tcon, netfid);
				goto out;
			}

			pCifsFile = cifs_new_fileinfo(inode, netfid, file,
							file->f_path.mnt,
							oflags);
			if (pCifsFile == NULL) {
				CIFSSMBClose(xid, tcon, netfid);
				rc = -ENOMEM;
			}
			goto out;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			   (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(file->f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* Also refresh inode by passing in file_info buf returned by SMBOpen
	   and calling get_inode_info with returned buf (at least helps
	   non-Unix server case) */

	/* BB we can not do this if this is the second open of a file
	   and the first handle has writebehind data, we might be
	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = -EIO; /* no NT SMB support fall into legacy open below */

	if (rc == -EIO) {
		/* Old server, try legacy style OpenX */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	}
	if (rc) {
		cFYI(1, "cifs_open returned 0x%x", rc);
		goto out;
	}

	rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid);
	if (rc != 0)
		goto out;

	pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
					file->f_flags);
	if (pCifsFile == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	if (oplock & CIFS_CREATE_ACTION) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		if (tcon->unix_ext) {
			struct cifs_unix_set_info_args args = {
				.mode	= inode->i_mode,
				.uid	= NO_CHANGE_64,
				.gid	= NO_CHANGE_64,
				.ctime	= NO_CHANGE_64,
				.atime	= NO_CHANGE_64,
				.mtime	= NO_CHANGE_64,
				.device	= 0,
			};
			CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
					cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		}
	}

out:
	kfree(buf);
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

static int cifs_reopen_file(struct file *file, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	if (file->private_data)
		pCifsFile = (struct cifsFileInfo *)file->private_data;
	else
		return -EBADF;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	if (file->f_path.dentry == NULL) {
		cERROR(1, "no valid name if dentry freed");
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	inode = file->f_path.dentry->d_inode;
	if (inode == NULL) {
		cERROR(1, "inode not valid");
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = cifs_sb->tcon;

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
reopen_error_exit:
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
	} else {
reopen_success:
		pCifsFile->netfid = netfid;
		pCifsFile->invalidHandle = false;
		mutex_unlock(&pCifsFile->fh_mutex);
		pCifsInode = CIFS_I(inode);
		if (pCifsInode) {
			if (can_flush) {
				rc = filemap_write_and_wait(inode->i_mapping);
				if (rc != 0)
					CIFS_I(inode)->write_behind_rc = rc;
				/* temporarily disable caching while we
				   go to server to get inode info */
				pCifsInode->clientCanCacheAll = false;
				pCifsInode->clientCanCacheRead = false;
				if (tcon->unix_ext)
					rc = cifs_get_inode_info_unix(&inode,
						full_path, inode->i_sb, xid);
				else
					rc = cifs_get_inode_info(&inode,
						full_path, NULL, inode->i_sb,
						xid, NULL);
			} /* else we are writing out data to server already
			     and could deadlock if we tried to flush data, and
			     since we do not know if we have data that would
			     invalidate the current end of file on the server
			     we can not go to the server to get the new inode
			     info */
			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
				pCifsInode->clientCanCacheAll = true;
				pCifsInode->clientCanCacheRead = true;
				cFYI(1, "Exclusive Oplock granted on inode %p",
					 file->f_path.dentry->d_inode);
			} else if ((oplock & 0xF) == OPLOCK_READ) {
				pCifsInode->clientCanCacheRead = true;
				pCifsInode->clientCanCacheAll = false;
			} else {
				pCifsInode->clientCanCacheRead = false;
				pCifsInode->clientCanCacheAll = false;
			}
			cifs_relock_file(pCifsFile);
		}
	}
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid, timeout;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pSMBFile =
		(struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;
	if (pSMBFile) {
		struct cifsLockInfo *li, *tmp;
		write_lock(&GlobalSMBSeslock);
		pSMBFile->closePend = true;
		if (pTcon) {
			/* no sense reconnecting to close a file that is
			   already closed */
			if (!pTcon->need_reconnect) {
				write_unlock(&GlobalSMBSeslock);
				timeout = 2;
				while ((atomic_read(&pSMBFile->count) != 1)
					&& (timeout <= 2048)) {
					/* Give write a better chance to get to
					   server ahead of the close.  We do not
					   want to add a wait_q here as it would
					   increase the memory utilization as
					   the struct would be in each open file,
					   but this should give enough time to
					   clear the socket */
					cFYI(DBG2, "close delay, write pending");
					msleep(timeout);
					timeout *= 4;
				}
				if (!pTcon->need_reconnect &&
				    !pSMBFile->invalidHandle)
					rc = CIFSSMBClose(xid, pTcon,
							  pSMBFile->netfid);
			} else
				write_unlock(&GlobalSMBSeslock);
		} else
			write_unlock(&GlobalSMBSeslock);

		/* Delete any outstanding lock records.
		   We'll lose them when the file is closed anyway. */
		mutex_lock(&pSMBFile->lock_mutex);
		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
			list_del(&li->llist);
			kfree(li);
		}
		mutex_unlock(&pSMBFile->lock_mutex);

		write_lock(&GlobalSMBSeslock);
		list_del(&pSMBFile->flist);
		list_del(&pSMBFile->tlist);
		write_unlock(&GlobalSMBSeslock);
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	} else
		rc = -EBADF;

	read_lock(&GlobalSMBSeslock);
	if (list_empty(&(CIFS_I(inode)->openFileList))) {
		cFYI(1, "closing last open instance for inode %p", inode);
		/* if the file is not open we do not know if we can cache info
		   on this inode, much less write behind and read ahead */
		CIFS_I(inode)->clientCanCacheRead = false;
		CIFS_I(inode)->clientCanCacheAll  = false;
	}
	read_unlock(&GlobalSMBSeslock);
	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
		rc = CIFS_I(inode)->write_behind_rc;
	FreeXid(xid);
	return rc;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct =
		(struct cifsFileInfo *)file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon;
		struct cifs_sb_info *cifs_sb =
			CIFS_SB(file->f_path.dentry->d_sb);

		pTcon = cifs_sb->tcon;

		cFYI(1, "Freeing private data in close dir");
		write_lock(&GlobalSMBSeslock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			write_unlock(&GlobalSMBSeslock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			write_unlock(&GlobalSMBSeslock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

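/* Remember a byte-range lock granted by the server on this handle so that a
   later unlock (cifs_lock) or close (cifs_close) can find and free it; the
   list is protected by the handle's lock_mutex. */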
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
				__u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	   account for negative length which we can not accept over the
	   wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */ );
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */ );
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		   to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid =
			(struct cifsFileInfo *)file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start,
					0, numLock, lockType, wait_flag);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid,
							li->length, li->offset,
							1, 0, li->type, false);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}

/*
 * Set the timeout on write requests past EOF. For some servers (Windows)
 * these calls can be very long.
 *
 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
 * The 10M cutoff is totally arbitrary. A better scheme for this would be
 * welcome if someone wants to suggest one.
 *
 * We may be able to do a better job with this if there were some way to
 * declare that a file should be sparse.
 */
static int
cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
{
	if (offset <= cifsi->server_eof)
		return CIFS_STD_OP;
	else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
		return CIFS_VLONG_OP;
	else
		return CIFS_LONG_OP;
}

/* update the file size (if needed) after a write */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

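/* write(2) path for user-space buffers: send the data in chunks of at most
   wsize with CIFSSMBWrite, reopening the handle and retrying on -EAGAIN,
   then update i_size if the write extended the file. */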
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	pTcon = cifs_sb->tcon;

	/* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name); */

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *) file->private_data;

	rc = generic_write_checks(file, poffset, &write_size, 0);
	if (rc)
		return rc;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us while
			   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
				open_file->netfid,
				min_t(const int, cifs_sb->wsize,
				      write_size - total_written),
				*poffset, &bytes_written,
				NULL, write_data + total_written, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
					  15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
		struct inode *inode = file->f_path.dentry->d_inode;
/* Do not update local mtime - server will set its actual value on write
 *		inode->i_ctime = inode->i_mtime =
 *			current_fs_time(inode->i_sb);*/
		if (total_written > 0) {
			spin_lock(&inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

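/* Kernel-buffer write helper used by the writeback paths below; when packet
   signing is not required (or experimental support is enabled) it sends the
   data with the vectored CIFSSMBWrite2, otherwise it falls back to the plain
   CIFSSMBWrite call. */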
static ssize_t cifs_write(struct file *file, const char *write_data,
			  size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	pTcon = cifs_sb->tcon;

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
		 *poffset, file->f_path.dentry->d_name.name);

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us
			   while we blocked so return what we managed to
			   write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}
			if (experimEnabled || (pTcon->ses->server &&
				((pTcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				== 0))) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						open_file->netfid, len,
						*poffset, &bytes_written,
						iov, 1, long_op);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					open_file->netfid,
					min_t(const int, cifs_sb->wsize,
					      write_size - total_written),
					*poffset, &bytes_written,
					write_data + total_written,
					NULL, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
					  15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
/*BB We could make this contingent on superblock ATIME flag too */
/*		file->f_path.dentry->d_inode->i_ctime =
		file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
		if (total_written > 0) {
			spin_lock(&file->f_path.dentry->d_inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&file->f_path.dentry->d_inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

#ifdef CONFIG_CIFS_EXPERIMENTAL
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file = NULL;

	read_lock(&GlobalSMBSeslock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
		    (open_file->pfile->f_flags & O_RDONLY))) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
#endif

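/* Walk the inode's list of open handles looking for one opened for writing,
   preferring a handle owned by the calling process; invalid handles are
   reopened here since callers such as writepages cannot do it themselves.
   Returns a referenced handle or NULL. */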
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	read_lock(&GlobalSMBSeslock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend ||
		    (!any_available && open_file->pid != current->tgid))
			continue;

		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			}

			read_unlock(&GlobalSMBSeslock);
			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file->pfile, false);
			if (!rc) {
				if (!open_file->closePend)
					return open_file;
				else { /* start over in case this was deleted */
				       /* since the list could be modified */
					read_lock(&GlobalSMBSeslock);
					cifsFileInfo_put(open_file);
					goto refind_writable;
				}
			}

			/* if it fails, try another handle if possible -
			   (we can not do this if closePending since
			   loop could be modified - in which case we
			   have to start at the beginning of the list
			   again. Note that it would be bad
			   to hold up writepages here (rather than
			   in caller) with continuous retries */
			cFYI(1, "wp failed on reopen file");
			read_lock(&GlobalSMBSeslock);
			/* can not use this handle, no write
			   pending on this one after all */
			cifsFileInfo_put(open_file);

			if (open_file->closePend) /* list could have changed */
				goto refind_writable;
			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}

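/* Write the byte range [from, to) of one page back to the server using any
   writable handle for the inode; called from cifs_writepage() and the
   write_end path. */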
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;
	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host));
	if (open_file) {
		bytes_written = cifs_write(open_file->pfile, write_data,
					   to-from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

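/* ->writepages address_space operation: collect runs of contiguous dirty
   pages into an iovec (bounded by wsize) and send each run to the server
   with a single CIFSSMBWrite2 call, falling back to generic_writepages()
   when wsize is too small or packet signing rules out the vectored path. */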
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0;
	pgoff_t end;
	pgoff_t index;
	int range_whole = 0;
	struct kvec *iov;
	int len;
	int n_iov = 0;
	pgoff_t next;
	int nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int scanned = 0;
	int xid, long_op;

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
		if (cifs_sb->tcon->ses->server->secMode &
			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
			if (!experimEnabled)
				return generic_writepages(mapping, wbc);

	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
	if (iov == NULL)
		return generic_writepages(mapping, wbc);


	/*
	 * BB: Is this meaningful for a non-block-device file system?
	 * If it is, we should test it again after we do I/O
	 */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		kfree(iov);
		return 0;
	}

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (first < 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this? pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
			/* Search for a writable handle every time we call
			 * CIFSSMBWrite2.  We can't rely on the last handle
			 * we used to still be valid
			 */
			open_file = find_writable_file(CIFS_I(mapping->host));
			if (!open_file) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
			} else {
				long_op = cifs_write_timeout(cifsi, offset);
				rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
						   open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   long_op);
				cifsFileInfo_put(open_file);
				cifs_update_eof(cifsi, offset, bytes_written);

				if (rc || bytes_written < bytes_to_write) {
					cERROR(1, "Write2 ret %d, wrote %d",
						  rc, bytes_written);
					/* BB what if continued retry is
					   requested via mount flags? */
					if (rc == -ENOSPC)
						set_bit(AS_ENOSPC, &mapping->flags);
					else
						set_bit(AS_EIO, &mapping->flags);
				} else {
					cifs_stats_bytes_written(cifs_sb->tcon,
								 bytes_written);
				}
			}
			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* Should we also set page error on
				   success rc but too little data written? */
				/* BB investigate retry logic on temporary
				   server crash cases and how recovery works
				   when page marked as error */
				if (rc)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}
			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		} else
			/* Need to re-find the pages we skipped */
			index = pvec.pages[0]->index + 1;

		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = -EFAULT;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
	unlock_page(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

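/* ->write_end address_space operation: if the page is not uptodate, push just
   the copied bytes to the server synchronously via cifs_write(); otherwise
   mark the page dirty and let writeback handle it. Updates i_size when the
   write extends the file. */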
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

int cifs_fsync(struct file *file, int datasync)
{
	int xid;
	int rc = 0;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *smbfile =
		(struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	rc = filemap_write_and_wait(inode->i_mapping);
	if (rc == 0) {
		rc = CIFS_I(inode)->write_behind_rc;
		CIFS_I(inode)->write_behind_rc = 0;
		tcon = CIFS_SB(inode->i_sb)->tcon;
		if (!rc && tcon && smbfile &&
		   !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
			rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
	}

	FreeXid(xid);
	return rc;
}

3978d717 1677/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1678{
1679 struct address_space *mapping;
1680 struct inode *inode;
1681 unsigned long index = page->index;
1682 unsigned int rpages = 0;
1683 int rc = 0;
1684
f19159dc 1685 cFYI(1, "sync page %p", page);
1da177e4
LT
1686 mapping = page->mapping;
1687 if (!mapping)
1688 return 0;
1689 inode = mapping->host;
1690 if (!inode)
3978d717 1691 return; */
1da177e4 1692
fb8c4b14 1693/* fill in rpages then
1da177e4
LT
1694 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1695
b6b38f70 1696/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1697
3978d717 1698#if 0
1da177e4
LT
1699 if (rc < 0)
1700 return rc;
1701 return 0;
3978d717 1702#endif
1da177e4
LT
1703} */
1704
1705/*
 1706 * As the file closes, flush all cached write data for this inode,
 1707 * checking for write-behind errors.
1708 */
75e1fcc0 1709int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1710{
fb8c4b14 1711 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1712 int rc = 0;
1713
1714 /* Rather than do the steps manually:
1715 lock the inode for writing
1716 loop through pages looking for write behind data (dirty pages)
1717 coalesce into contiguous 16K (or smaller) chunks to write to server
1718 send to server (prefer in parallel)
1719 deal with writebehind errors
1720 unlock inode for writing
 1721 filemap_fdatawrite appears easier for the time being */
1722
1723 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1724 /* reset wb rc if we were able to write out dirty pages */
1725 if (!rc) {
1726 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1727 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1728 }
50c2f753 1729
b6b38f70 1730 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1731
1732 return rc;
1733}
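/*
 * Descriptive note on the two flush paths above: cifs_flush, called on every
 * close of a file descriptor, only starts writeback with filemap_fdatawrite()
 * and picks up any stored write_behind_rc, while cifs_fsync waits for the
 * writes with filemap_write_and_wait() and, unless the CIFS_MOUNT_NOSSYNC
 * mount flag is set, also asks the server to flush the open handle with
 * CIFSSMBFlush.
 */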
1734
1735ssize_t cifs_user_read(struct file *file, char __user *read_data,
1736 size_t read_size, loff_t *poffset)
1737{
1738 int rc = -EACCES;
1739 unsigned int bytes_read = 0;
1740 unsigned int total_read = 0;
1741 unsigned int current_read_size;
1742 struct cifs_sb_info *cifs_sb;
1743 struct cifsTconInfo *pTcon;
1744 int xid;
1745 struct cifsFileInfo *open_file;
1746 char *smb_read_data;
1747 char __user *current_offset;
1748 struct smb_com_read_rsp *pSMBr;
1749
1750 xid = GetXid();
e6a00296 1751 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1752 pTcon = cifs_sb->tcon;
1753
1754 if (file->private_data == NULL) {
0f3bc09e 1755 rc = -EBADF;
1da177e4 1756 FreeXid(xid);
0f3bc09e 1757 return rc;
1da177e4
LT
1758 }
1759 open_file = (struct cifsFileInfo *)file->private_data;
1760
ad7a2926 1761 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1762 cFYI(1, "attempting read on write only file instance");
ad7a2926 1763
1da177e4
LT
1764 for (total_read = 0, current_offset = read_data;
1765 read_size > total_read;
1766 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1767 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1768 cifs_sb->rsize);
1769 rc = -EAGAIN;
1770 smb_read_data = NULL;
1771 while (rc == -EAGAIN) {
ec637e3f 1772 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1773 if ((open_file->invalidHandle) &&
1da177e4 1774 (!open_file->closePend)) {
4b18f2a9 1775 rc = cifs_reopen_file(file, true);
1da177e4
LT
1776 if (rc != 0)
1777 break;
1778 }
bfa0d75a 1779 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1780 open_file->netfid,
1781 current_read_size, *poffset,
1782 &bytes_read, &smb_read_data,
1783 &buf_type);
1da177e4 1784 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1785 if (smb_read_data) {
93544cc6
SF
1786 if (copy_to_user(current_offset,
1787 smb_read_data +
1788 4 /* RFC1001 length field */ +
1789 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1790 bytes_read))
93544cc6 1791 rc = -EFAULT;
93544cc6 1792
fb8c4b14 1793 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1794 cifs_small_buf_release(smb_read_data);
fb8c4b14 1795 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1796 cifs_buf_release(smb_read_data);
1da177e4
LT
1797 smb_read_data = NULL;
1798 }
1799 }
1800 if (rc || (bytes_read == 0)) {
1801 if (total_read) {
1802 break;
1803 } else {
1804 FreeXid(xid);
1805 return rc;
1806 }
1807 } else {
a4544347 1808 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1809 *poffset += bytes_read;
1810 }
1811 }
1812 FreeXid(xid);
1813 return total_read;
1814}
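/*
 * Descriptive note on the copy above: the data handed to copy_to_user() is
 * the file data embedded in the server's read reply, located at
 *
 *	smb_read_data + 4 (RFC1001 length field)
 *	              + le16_to_cpu(pSMBr->DataOffset)
 *
 * and at most bytes_read bytes are copied per CIFSSMBRead call, bounded by
 * the negotiated cifs_sb->rsize.
 */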
1815
1816
1817static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1818 loff_t *poffset)
1819{
1820 int rc = -EACCES;
1821 unsigned int bytes_read = 0;
1822 unsigned int total_read;
1823 unsigned int current_read_size;
1824 struct cifs_sb_info *cifs_sb;
1825 struct cifsTconInfo *pTcon;
1826 int xid;
1827 char *current_offset;
1828 struct cifsFileInfo *open_file;
ec637e3f 1829 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1830
1831 xid = GetXid();
e6a00296 1832 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1833 pTcon = cifs_sb->tcon;
1834
1835 if (file->private_data == NULL) {
0f3bc09e 1836 rc = -EBADF;
1da177e4 1837 FreeXid(xid);
0f3bc09e 1838 return rc;
1da177e4
LT
1839 }
1840 open_file = (struct cifsFileInfo *)file->private_data;
1841
1842 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1843 cFYI(1, "attempting read on write only file instance");
1da177e4 1844
fb8c4b14 1845 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1846 read_size > total_read;
1847 total_read += bytes_read, current_offset += bytes_read) {
1848 current_read_size = min_t(const int, read_size - total_read,
1849 cifs_sb->rsize);
f9f5c817
SF
 1850 /* For Windows ME and 9x we do not want to request more
 1851 than it negotiated, since it would otherwise refuse the read */
fb8c4b14 1852 if ((pTcon->ses) &&
f9f5c817
SF
1853 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1854 current_read_size = min_t(const int, current_read_size,
1855 pTcon->ses->server->maxBuf - 128);
1856 }
1da177e4
LT
1857 rc = -EAGAIN;
1858 while (rc == -EAGAIN) {
fb8c4b14 1859 if ((open_file->invalidHandle) &&
1da177e4 1860 (!open_file->closePend)) {
4b18f2a9 1861 rc = cifs_reopen_file(file, true);
1da177e4
LT
1862 if (rc != 0)
1863 break;
1864 }
bfa0d75a 1865 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1866 open_file->netfid,
1867 current_read_size, *poffset,
1868 &bytes_read, &current_offset,
1869 &buf_type);
1da177e4
LT
1870 }
1871 if (rc || (bytes_read == 0)) {
1872 if (total_read) {
1873 break;
1874 } else {
1875 FreeXid(xid);
1876 return rc;
1877 }
1878 } else {
a4544347 1879 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1880 *poffset += bytes_read;
1881 }
1882 }
1883 FreeXid(xid);
1884 return total_read;
1885}
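/*
 * Descriptive note on the clamp above: when the session did not negotiate
 * CAP_LARGE_FILES, each request is limited to the server's negotiated maxBuf
 * minus 128 bytes of header slack.  For example, with a maxBuf of 4356 (an
 * assumed value), current_read_size is capped at 4228 bytes per call even if
 * cifs_sb->rsize is larger.
 */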
1886
1887int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1888{
1da177e4
LT
1889 int rc, xid;
1890
1891 xid = GetXid();
abab095d 1892 rc = cifs_revalidate_file(file);
1da177e4 1893 if (rc) {
b6b38f70 1894 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1895 FreeXid(xid);
1896 return rc;
1897 }
1898 rc = generic_file_mmap(file, vma);
1899 FreeXid(xid);
1900 return rc;
1901}
1902
1903
fb8c4b14 1904static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1905 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1906{
1907 struct page *page;
1908 char *target;
1909
1910 while (bytes_read > 0) {
1911 if (list_empty(pages))
1912 break;
1913
1914 page = list_entry(pages->prev, struct page, lru);
1915 list_del(&page->lru);
1916
315e995c 1917 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1918 GFP_KERNEL)) {
1919 page_cache_release(page);
b6b38f70 1920 cFYI(1, "Add page cache failed");
3079ca62
SF
1921 data += PAGE_CACHE_SIZE;
1922 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1923 continue;
1924 }
06b43672 1925 page_cache_release(page);
1da177e4 1926
fb8c4b14 1927 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1928
1929 if (PAGE_CACHE_SIZE > bytes_read) {
1930 memcpy(target, data, bytes_read);
1931 /* zero the tail end of this partial page */
fb8c4b14 1932 memset(target + bytes_read, 0,
1da177e4
LT
1933 PAGE_CACHE_SIZE - bytes_read);
1934 bytes_read = 0;
1935 } else {
1936 memcpy(target, data, PAGE_CACHE_SIZE);
1937 bytes_read -= PAGE_CACHE_SIZE;
1938 }
1939 kunmap_atomic(target, KM_USER0);
1940
1941 flush_dcache_page(page);
1942 SetPageUptodate(page);
1943 unlock_page(page);
1da177e4
LT
1944 data += PAGE_CACHE_SIZE;
1945 }
1946 return;
1947}
1948
1949static int cifs_readpages(struct file *file, struct address_space *mapping,
1950 struct list_head *page_list, unsigned num_pages)
1951{
1952 int rc = -EACCES;
1953 int xid;
1954 loff_t offset;
1955 struct page *page;
1956 struct cifs_sb_info *cifs_sb;
1957 struct cifsTconInfo *pTcon;
2c2130e1 1958 unsigned int bytes_read = 0;
fb8c4b14 1959 unsigned int read_size, i;
1da177e4
LT
1960 char *smb_read_data = NULL;
1961 struct smb_com_read_rsp *pSMBr;
1da177e4 1962 struct cifsFileInfo *open_file;
ec637e3f 1963 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1964
1965 xid = GetXid();
1966 if (file->private_data == NULL) {
0f3bc09e 1967 rc = -EBADF;
1da177e4 1968 FreeXid(xid);
0f3bc09e 1969 return rc;
1da177e4
LT
1970 }
1971 open_file = (struct cifsFileInfo *)file->private_data;
e6a00296 1972 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1973 pTcon = cifs_sb->tcon;
bfa0d75a 1974
f19159dc 1975 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
1976 for (i = 0; i < num_pages; ) {
1977 unsigned contig_pages;
1978 struct page *tmp_page;
1979 unsigned long expected_index;
1980
1981 if (list_empty(page_list))
1982 break;
1983
1984 page = list_entry(page_list->prev, struct page, lru);
1985 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1986
1987 /* count adjacent pages that we will read into */
1988 contig_pages = 0;
fb8c4b14 1989 expected_index =
1da177e4 1990 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 1991 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
1992 if (tmp_page->index == expected_index) {
1993 contig_pages++;
1994 expected_index++;
1995 } else
fb8c4b14 1996 break;
1da177e4
LT
1997 }
1998 if (contig_pages + i > num_pages)
1999 contig_pages = num_pages - i;
2000
2001 /* for reads over a certain size could initiate async
2002 read ahead */
2003
2004 read_size = contig_pages * PAGE_CACHE_SIZE;
2005 /* Read size needs to be in multiples of one page */
2006 read_size = min_t(const unsigned int, read_size,
2007 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2008 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2009 read_size, contig_pages);
1da177e4
LT
2010 rc = -EAGAIN;
2011 while (rc == -EAGAIN) {
fb8c4b14 2012 if ((open_file->invalidHandle) &&
1da177e4 2013 (!open_file->closePend)) {
4b18f2a9 2014 rc = cifs_reopen_file(file, true);
1da177e4
LT
2015 if (rc != 0)
2016 break;
2017 }
2018
bfa0d75a 2019 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2020 open_file->netfid,
2021 read_size, offset,
2022 &bytes_read, &smb_read_data,
2023 &buf_type);
a9d02ad4 2024 /* BB more RC checks ? */
fb8c4b14 2025 if (rc == -EAGAIN) {
1da177e4 2026 if (smb_read_data) {
fb8c4b14 2027 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2028 cifs_small_buf_release(smb_read_data);
fb8c4b14 2029 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2030 cifs_buf_release(smb_read_data);
1da177e4
LT
2031 smb_read_data = NULL;
2032 }
2033 }
2034 }
2035 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2036 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2037 break;
2038 } else if (bytes_read > 0) {
6f88cc2e 2039 task_io_account_read(bytes_read);
1da177e4
LT
2040 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2041 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2042 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2043 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2044
2045 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2046 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2047 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2048 i++; /* account for partial page */
2049
fb8c4b14 2050 /* the server copy of the file can have a smaller size
1da177e4 2051 than the client's */
fb8c4b14
SF
2052 /* BB do we need to verify this common case ?
2053 this case is ok - if we are at server EOF
1da177e4
LT
2054 we will hit it on next read */
2055
05ac9d4b 2056 /* break; */
1da177e4
LT
2057 }
2058 } else {
b6b38f70 2059 cFYI(1, "No bytes read (%d) at offset %lld . "
f19159dc 2060 "Cleaning remaining pages from readahead list",
b6b38f70 2061 bytes_read, offset);
fb8c4b14 2062 /* BB turn off caching and do new lookup on
1da177e4 2063 file size at server? */
1da177e4
LT
2064 break;
2065 }
2066 if (smb_read_data) {
fb8c4b14 2067 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2068 cifs_small_buf_release(smb_read_data);
fb8c4b14 2069 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2070 cifs_buf_release(smb_read_data);
1da177e4
LT
2071 smb_read_data = NULL;
2072 }
2073 bytes_read = 0;
2074 }
2075
1da177e4
LT
2076/* need to free smb_read_data buf before exit */
2077 if (smb_read_data) {
fb8c4b14 2078 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2079 cifs_small_buf_release(smb_read_data);
fb8c4b14 2080 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2081 cifs_buf_release(smb_read_data);
1da177e4 2082 smb_read_data = NULL;
fb8c4b14 2083 }
1da177e4
LT
2084
2085 FreeXid(xid);
2086 return rc;
2087}
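/*
 * Example of the read sizing above (assumed numbers, for illustration only):
 * with a PAGE_CACHE_SIZE of 4096 and a negotiated rsize of 16384, at most
 * four contiguous pages are read per CIFSSMBRead call, since read_size is
 * clamped to cifs_sb->rsize & PAGE_CACHE_MASK.  A short final read of, say,
 * 6000 bytes advances i by one full page (6000 >> 12 == 1) plus one more for
 * the partial trailing page.
 */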
2088
2089static int cifs_readpage_worker(struct file *file, struct page *page,
2090 loff_t *poffset)
2091{
2092 char *read_data;
2093 int rc;
2094
2095 page_cache_get(page);
2096 read_data = kmap(page);
2097 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2098
1da177e4 2099 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2100
1da177e4
LT
2101 if (rc < 0)
2102 goto io_error;
2103 else
b6b38f70 2104 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2105
e6a00296
JJS
2106 file->f_path.dentry->d_inode->i_atime =
2107 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2108
1da177e4
LT
2109 if (PAGE_CACHE_SIZE > rc)
2110 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2111
2112 flush_dcache_page(page);
2113 SetPageUptodate(page);
2114 rc = 0;
fb8c4b14 2115
1da177e4 2116io_error:
fb8c4b14 2117 kunmap(page);
1da177e4
LT
2118 page_cache_release(page);
2119 return rc;
2120}
2121
2122static int cifs_readpage(struct file *file, struct page *page)
2123{
2124 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2125 int rc = -EACCES;
2126 int xid;
2127
2128 xid = GetXid();
2129
2130 if (file->private_data == NULL) {
0f3bc09e 2131 rc = -EBADF;
1da177e4 2132 FreeXid(xid);
0f3bc09e 2133 return rc;
1da177e4
LT
2134 }
2135
b6b38f70
JP
2136 cFYI(1, "readpage %p at offset %d 0x%x\n",
2137 page, (int)offset, (int)offset);
1da177e4
LT
2138
2139 rc = cifs_readpage_worker(file, page, &offset);
2140
2141 unlock_page(page);
2142
2143 FreeXid(xid);
2144 return rc;
2145}
2146
a403a0a3
SF
2147static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2148{
2149 struct cifsFileInfo *open_file;
2150
2151 read_lock(&GlobalSMBSeslock);
2152 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2153 if (open_file->closePend)
2154 continue;
2155 if (open_file->pfile &&
2156 ((open_file->pfile->f_flags & O_RDWR) ||
2157 (open_file->pfile->f_flags & O_WRONLY))) {
2158 read_unlock(&GlobalSMBSeslock);
2159 return 1;
2160 }
2161 }
2162 read_unlock(&GlobalSMBSeslock);
2163 return 0;
2164}
2165
1da177e4
LT
 2166/* We do not want to update the file size from the server for inodes
 2167 open for write, to avoid races with writepage extending
 2168 the file.  In the future we could consider allowing the inode
fb8c4b14 2169 to be refreshed only on increases in the file size,
1da177e4
LT
 2170 but this is tricky to do without racing with write-behind
 2171 page caching in the current Linux kernel design */
4b18f2a9 2172bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2173{
a403a0a3 2174 if (!cifsInode)
4b18f2a9 2175 return true;
50c2f753 2176
a403a0a3
SF
2177 if (is_inode_writable(cifsInode)) {
2178 /* This inode is open for write at least once */
c32a0b68
SF
2179 struct cifs_sb_info *cifs_sb;
2180
c32a0b68 2181 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2182 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2183 /* since no page cache to corrupt on directio
c32a0b68 2184 we can change size safely */
4b18f2a9 2185 return true;
c32a0b68
SF
2186 }
2187
fb8c4b14 2188 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2189 return true;
7ba52631 2190
4b18f2a9 2191 return false;
23e7dd7d 2192 } else
4b18f2a9 2193 return true;
1da177e4
LT
2194}
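/*
 * Summary of the decision above (descriptive only):
 *   - inode not open for write anywhere          -> safe to change
 *   - open for write, mount uses direct I/O      -> safe (no page cache)
 *   - open for write, new EOF > cached i_size    -> safe (size only grows)
 *   - open for write, new EOF <= cached i_size   -> not safe
 */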
2195
d9414774
NP
2196static int cifs_write_begin(struct file *file, struct address_space *mapping,
2197 loff_t pos, unsigned len, unsigned flags,
2198 struct page **pagep, void **fsdata)
1da177e4 2199{
d9414774
NP
2200 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2201 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2202 loff_t page_start = pos & PAGE_MASK;
2203 loff_t i_size;
2204 struct page *page;
2205 int rc = 0;
d9414774 2206
b6b38f70 2207 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2208
54566b2c 2209 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2210 if (!page) {
2211 rc = -ENOMEM;
2212 goto out;
2213 }
8a236264 2214
a98ee8c1
JL
2215 if (PageUptodate(page))
2216 goto out;
8a236264 2217
a98ee8c1
JL
2218 /*
2219 * If we write a full page it will be up to date, no need to read from
2220 * the server. If the write is short, we'll end up doing a sync write
2221 * instead.
2222 */
2223 if (len == PAGE_CACHE_SIZE)
2224 goto out;
8a236264 2225
a98ee8c1
JL
2226 /*
2227 * optimize away the read when we have an oplock, and we're not
2228 * expecting to use any of the data we'd be reading in. That
2229 * is, when the page lies beyond the EOF, or straddles the EOF
2230 * and the write will cover all of the existing data.
2231 */
2232 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2233 i_size = i_size_read(mapping->host);
2234 if (page_start >= i_size ||
2235 (offset == 0 && (pos + len) >= i_size)) {
2236 zero_user_segments(page, 0, offset,
2237 offset + len,
2238 PAGE_CACHE_SIZE);
2239 /*
2240 * PageChecked means that the parts of the page
2241 * to which we're not writing are considered up
2242 * to date. Once the data is copied to the
2243 * page, it can be set uptodate.
2244 */
2245 SetPageChecked(page);
2246 goto out;
2247 }
2248 }
d9414774 2249
a98ee8c1
JL
2250 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2251 /*
2252 * might as well read a page, it is fast enough. If we get
2253 * an error, we don't need to return it. cifs_write_end will
2254 * do a sync write instead since PG_uptodate isn't set.
2255 */
2256 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2257 } else {
2258 /* we could try using another file handle if there is one -
2259 but how would we lock it to prevent close of that handle
2260 racing with this read? In any case
d9414774 2261 this will be written out by write_end so is fine */
1da177e4 2262 }
a98ee8c1
JL
2263out:
2264 *pagep = page;
2265 return rc;
1da177e4
LT
2266}
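/*
 * Illustration of the read-avoidance above (assumed values, for illustration
 * only): with an oplock granting read caching, an i_size of 10000 and a
 * PAGE_CACHE_SIZE of 4096, a write of len 100 at pos 20480 starts on a page
 * entirely beyond EOF (page_start 20480 >= 10000), so the page is zeroed
 * around the write range and marked Checked instead of being read from the
 * server.  A write starting at pos 8192 (offset 0) that extends to at least
 * byte 10000 is treated the same way, since it overwrites all existing data
 * on that page.
 */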
2267
3bc303c2
JL
2268static void
2269cifs_oplock_break(struct slow_work *work)
2270{
2271 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2272 oplock_break);
2273 struct inode *inode = cfile->pInode;
2274 struct cifsInodeInfo *cinode = CIFS_I(inode);
2275 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
2276 int rc, waitrc = 0;
2277
2278 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2279 if (cinode->clientCanCacheRead)
8737c930 2280 break_lease(inode, O_RDONLY);
d54ff732 2281 else
8737c930 2282 break_lease(inode, O_WRONLY);
3bc303c2
JL
2283 rc = filemap_fdatawrite(inode->i_mapping);
2284 if (cinode->clientCanCacheRead == 0) {
2285 waitrc = filemap_fdatawait(inode->i_mapping);
2286 invalidate_remote_inode(inode);
2287 }
2288 if (!rc)
2289 rc = waitrc;
2290 if (rc)
2291 cinode->write_behind_rc = rc;
b6b38f70 2292 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2293 }
2294
2295 /*
 2296 * releasing a stale oplock after a recent reconnect of the SMB session,
 2297 * using a now-incorrect file handle, is not a data integrity issue; but
 2298 * do not bother sending an oplock release if the session to the server
 2299 * is still disconnected, since the server has already released the oplock
2300 */
2301 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
2302 rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
2303 LOCKING_ANDX_OPLOCK_RELEASE, false);
b6b38f70 2304 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2
JL
2305 }
2306}
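/*
 * Descriptive note on the flush above: when the client keeps read caching
 * (a downgrade), dirty pages are pushed with filemap_fdatawrite() but the
 * cache is kept; when read caching is lost (clientCanCacheRead == 0) the
 * client also waits for the writes with filemap_fdatawait() and then
 * invalidates its cached pages, since the server's copy may now change
 * under other clients.
 */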
2307
2308static int
2309cifs_oplock_break_get(struct slow_work *work)
2310{
2311 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2312 oplock_break);
2313 mntget(cfile->mnt);
2314 cifsFileInfo_get(cfile);
2315 return 0;
2316}
2317
2318static void
2319cifs_oplock_break_put(struct slow_work *work)
2320{
2321 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2322 oplock_break);
2323 mntput(cfile->mnt);
2324 cifsFileInfo_put(cfile);
2325}
2326
2327const struct slow_work_ops cifs_oplock_break_ops = {
2328 .get_ref = cifs_oplock_break_get,
2329 .put_ref = cifs_oplock_break_put,
2330 .execute = cifs_oplock_break,
2331};
2332
f5e54d6e 2333const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2334 .readpage = cifs_readpage,
2335 .readpages = cifs_readpages,
2336 .writepage = cifs_writepage,
37c0eb46 2337 .writepages = cifs_writepages,
d9414774
NP
2338 .write_begin = cifs_write_begin,
2339 .write_end = cifs_write_end,
1da177e4
LT
2340 .set_page_dirty = __set_page_dirty_nobuffers,
2341 /* .sync_page = cifs_sync_page, */
2342 /* .direct_IO = */
2343};
273d81d6
DK
2344
2345/*
2346 * cifs_readpages requires the server to support a buffer large enough to
2347 * contain the header plus one complete page of data. Otherwise, we need
2348 * to leave cifs_readpages out of the address space operations.
2349 */
f5e54d6e 2350const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2351 .readpage = cifs_readpage,
2352 .writepage = cifs_writepage,
2353 .writepages = cifs_writepages,
d9414774
NP
2354 .write_begin = cifs_write_begin,
2355 .write_end = cifs_write_end,
273d81d6
DK
2356 .set_page_dirty = __set_page_dirty_nobuffers,
2357 /* .sync_page = cifs_sync_page, */
2358 /* .direct_IO = */
2359};