1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/fs.h>
24#include <linux/backing-dev.h>
25#include <linux/stat.h>
26#include <linux/fcntl.h>
27#include <linux/mpage.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
30#include <linux/smp_lock.h>
31#include <linux/writeback.h>
32#include <linux/delay.h>
33#include <asm/div64.h>
34#include "cifsfs.h"
35#include "cifspdu.h"
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_unicode.h"
39#include "cifs_debug.h"
40#include "cifs_fs_sb.h"
41
42static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
45{
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem);
50 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE;
53 private_data->closePend = FALSE;
54 /* we have to track num writers to the inode, since writepages
55 does not tell us which handle the write is for so there can
56 be a close (overlapping with write) of the filehandle that
57 cifs_writepages chose to use */
58 atomic_set(&private_data->wrtPending,0);
59
60 return private_data;
61}
62
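/* Map the POSIX O_ACCMODE bits from the open call to the SMB desired
   access flags (GENERIC_READ / GENERIC_WRITE) requested from the server */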
63static inline int cifs_convert_flags(unsigned int flags)
64{
65 if ((flags & O_ACCMODE) == O_RDONLY)
66 return GENERIC_READ;
67 else if ((flags & O_ACCMODE) == O_WRONLY)
68 return GENERIC_WRITE;
69 else if ((flags & O_ACCMODE) == O_RDWR) {
70 /* GENERIC_ALL is too much permission to request
71 can cause unnecessary access denied on create */
72 /* return GENERIC_ALL; */
73 return (GENERIC_READ | GENERIC_WRITE);
74 }
75
76 return 0x20197;
77}
78
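/* Map POSIX create/truncate flags to the SMB create disposition
   (see the open flag mapping table in cifs_open below) */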
79static inline int cifs_get_disposition(unsigned int flags)
80{
81 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
82 return FILE_CREATE;
83 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
84 return FILE_OVERWRITE_IF;
85 else if ((flags & O_CREAT) == O_CREAT)
86 return FILE_OPEN_IF;
87 else if ((flags & O_TRUNC) == O_TRUNC)
88 return FILE_OVERWRITE;
89 else
90 return FILE_OPEN;
91}
92
93/* all arguments to this function must be checked for validity in caller */
94static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
95 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
96 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
97 char *full_path, int xid)
98{
99 struct timespec temp;
100 int rc;
101
102 /* want handles we can use to read with first
103 in the list so we do not have to walk the
104 list to search for one in prepare_write */
105 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
106 list_add_tail(&pCifsFile->flist,
107 &pCifsInode->openFileList);
108 } else {
109 list_add(&pCifsFile->flist,
110 &pCifsInode->openFileList);
111 }
112 write_unlock(&GlobalSMBSeslock);
113 if (pCifsInode->clientCanCacheRead) {
114 /* we have the inode open somewhere else
115 no need to discard cache data */
116 goto client_can_cache;
117 }
118
119 /* BB need same check in cifs_create too? */
120 /* if not oplocked, invalidate inode pages if mtime or file
121 size changed */
122 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
123 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
124 (file->f_dentry->d_inode->i_size ==
125 (loff_t)le64_to_cpu(buf->EndOfFile))) {
126 cFYI(1, ("inode unchanged on server"));
127 } else {
128 if (file->f_dentry->d_inode->i_mapping) {
129 /* BB no need to lock inode until after invalidate
130 since namei code should already have it locked? */
131			filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
132 }
133 cFYI(1, ("invalidating remote inode since open detected it "
134 "changed"));
135 invalidate_remote_inode(file->f_dentry->d_inode);
136 }
137
138client_can_cache:
139 if (pTcon->ses->capabilities & CAP_UNIX)
140 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
141 full_path, inode->i_sb, xid);
142 else
143 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
144 full_path, buf, inode->i_sb, xid);
145
146 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
147 pCifsInode->clientCanCacheAll = TRUE;
148 pCifsInode->clientCanCacheRead = TRUE;
149 cFYI(1, ("Exclusive Oplock granted on inode %p",
150 file->f_dentry->d_inode));
151 } else if ((*oplock & 0xF) == OPLOCK_READ)
152 pCifsInode->clientCanCacheRead = TRUE;
153
154 return rc;
155}
156
157int cifs_open(struct inode *inode, struct file *file)
158{
159 int rc = -EACCES;
160 int xid, oplock;
161 struct cifs_sb_info *cifs_sb;
162 struct cifsTconInfo *pTcon;
163 struct cifsFileInfo *pCifsFile;
164 struct cifsInodeInfo *pCifsInode;
165 struct list_head *tmp;
166 char *full_path = NULL;
167 int desiredAccess;
168 int disposition;
169 __u16 netfid;
170 FILE_ALL_INFO *buf = NULL;
171
172 xid = GetXid();
173
174 cifs_sb = CIFS_SB(inode->i_sb);
175 pTcon = cifs_sb->tcon;
176
177 if (file->f_flags & O_CREAT) {
178 /* search inode for this file and fill in file->private_data */
179 pCifsInode = CIFS_I(file->f_dentry->d_inode);
180 read_lock(&GlobalSMBSeslock);
181 list_for_each(tmp, &pCifsInode->openFileList) {
182 pCifsFile = list_entry(tmp, struct cifsFileInfo,
183 flist);
184 if ((pCifsFile->pfile == NULL) &&
185 (pCifsFile->pid == current->tgid)) {
186 /* mode set in cifs_create */
187
188 /* needed for writepage */
189 pCifsFile->pfile = file;
190
191 file->private_data = pCifsFile;
192 break;
193 }
194 }
195 read_unlock(&GlobalSMBSeslock);
196 if (file->private_data != NULL) {
197 rc = 0;
198 FreeXid(xid);
199 return rc;
200 } else {
201 if (file->f_flags & O_EXCL)
202 cERROR(1, ("could not find file instance for "
203				"new file %p", file));
204 }
205 }
206
207	full_path = build_path_from_dentry(file->f_dentry);
208 if (full_path == NULL) {
209 FreeXid(xid);
210 return -ENOMEM;
211 }
212
213 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
214 inode, file->f_flags, full_path));
215 desiredAccess = cifs_convert_flags(file->f_flags);
216
217/*********************************************************************
218 * open flag mapping table:
219 *
220 * POSIX Flag CIFS Disposition
221 * ---------- ----------------
222 * O_CREAT FILE_OPEN_IF
223 * O_CREAT | O_EXCL FILE_CREATE
224 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
225 * O_TRUNC FILE_OVERWRITE
226 * none of the above FILE_OPEN
227 *
228 * Note that there is not a direct match between disposition
229 * FILE_SUPERSEDE (ie create whether or not the file exists), although
230 * O_CREAT | O_TRUNC is similar but truncates the existing
231 * file rather than creating a new file as FILE_SUPERSEDE does
232 * (which uses the attributes / metadata passed in on the open call)
233 *?
234 *? O_SYNC is a reasonable match to CIFS writethrough flag
235 *? and the read write flags match reasonably. O_LARGEFILE
236 *? is irrelevant because largefile support is always used
237 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
238 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
239 *********************************************************************/
240
241 disposition = cifs_get_disposition(file->f_flags);
242
243 if (oplockEnabled)
244 oplock = REQ_OPLOCK;
245 else
246 oplock = FALSE;
247
248 /* BB pass O_SYNC flag through on file attributes .. BB */
249
250 /* Also refresh inode by passing in file_info buf returned by SMBOpen
251 and calling get_inode_info with returned buf (at least helps
252 non-Unix server case) */
253
254 /* BB we can not do this if this is the second open of a file
255 and the first handle has writebehind data, we might be
256 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
257 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
258 if (!buf) {
259 rc = -ENOMEM;
260 goto out;
261 }
262
263 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
264 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
265 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
266 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
267 & CIFS_MOUNT_MAP_SPECIAL_CHR);
268 else
269 rc = -EIO; /* no NT SMB support fall into legacy open below */
270
271 if (rc == -EIO) {
272 /* Old server, try legacy style OpenX */
273 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
274 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
275 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
276 & CIFS_MOUNT_MAP_SPECIAL_CHR);
277 }
278	if (rc) {
279		cFYI(1, ("cifs_open returned 0x%x", rc));
280 goto out;
281 }
282 file->private_data =
283 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
284 if (file->private_data == NULL) {
285 rc = -ENOMEM;
286 goto out;
287 }
288 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
289 write_lock(&GlobalSMBSeslock);
290 list_add(&pCifsFile->tlist, &pTcon->openFileList);
291
292 pCifsInode = CIFS_I(file->f_dentry->d_inode);
293 if (pCifsInode) {
294 rc = cifs_open_inode_helper(inode, file, pCifsInode,
295 pCifsFile, pTcon,
296 &oplock, buf, full_path, xid);
297 } else {
298 write_unlock(&GlobalSMBSeslock);
299 }
300
301 if (oplock & CIFS_CREATE_ACTION) {
302 /* time to set mode which we can not set earlier due to
303 problems creating new read-only files */
304 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
305 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
306 inode->i_mode,
307 (__u64)-1, (__u64)-1, 0 /* dev */,
308 cifs_sb->local_nls,
309 cifs_sb->mnt_cifs_flags &
310 CIFS_MOUNT_MAP_SPECIAL_CHR);
311 } else {
312 /* BB implement via Windows security descriptors eg
313 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
314 -1, -1, local_nls);
315 in the meantime could set r/o dos attribute when
316 perms are eg: mode & 0222 == 0 */
317 }
318 }
319
320out:
321 kfree(buf);
322 kfree(full_path);
323 FreeXid(xid);
324 return rc;
325}
326
327/* Try to reacquire byte range locks that were released when session */
328/* to server was lost */
329static int cifs_relock_file(struct cifsFileInfo *cifsFile)
330{
331 int rc = 0;
332
333/* BB list all locks open on this file and relock */
334
335 return rc;
336}
337
338static int cifs_reopen_file(struct inode *inode, struct file *file,
339 int can_flush)
340{
341 int rc = -EACCES;
342 int xid, oplock;
343 struct cifs_sb_info *cifs_sb;
344 struct cifsTconInfo *pTcon;
345 struct cifsFileInfo *pCifsFile;
346 struct cifsInodeInfo *pCifsInode;
347 char *full_path = NULL;
348 int desiredAccess;
349 int disposition = FILE_OPEN;
350 __u16 netfid;
351
352 if (inode == NULL)
353 return -EBADF;
354 if (file->private_data) {
355 pCifsFile = (struct cifsFileInfo *)file->private_data;
356 } else
357 return -EBADF;
358
359 xid = GetXid();
360 down(&pCifsFile->fh_sem);
361 if (pCifsFile->invalidHandle == FALSE) {
362 up(&pCifsFile->fh_sem);
363 FreeXid(xid);
364 return 0;
365 }
366
367 if (file->f_dentry == NULL) {
368 up(&pCifsFile->fh_sem);
369 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
370 FreeXid(xid);
371 return -EBADF;
372 }
373 cifs_sb = CIFS_SB(inode->i_sb);
374 pTcon = cifs_sb->tcon;
375/* can not grab rename sem here because various ops, including
376 those that already have the rename sem can end up causing writepage
377 to get called and if the server was down that means we end up here,
378 and we can never tell if the caller already has the rename_sem */
379	full_path = build_path_from_dentry(file->f_dentry);
380 if (full_path == NULL) {
381 up(&pCifsFile->fh_sem);
382 FreeXid(xid);
383 return -ENOMEM;
384 }
385
386 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
387 inode, file->f_flags,full_path));
388 desiredAccess = cifs_convert_flags(file->f_flags);
389
390 if (oplockEnabled)
391 oplock = REQ_OPLOCK;
392 else
393 oplock = FALSE;
394
395 /* Can not refresh inode by passing in file_info buf to be returned
396 by SMBOpen and then calling get_inode_info with returned buf
397 since file might have write behind data that needs to be flushed
398 and server version of file size can be stale. If we knew for sure
399 that inode was not dirty locally we could do this */
400
401/* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
402 if (buf == 0) {
403 up(&pCifsFile->fh_sem);
404 kfree(full_path);
405 FreeXid(xid);
406 return -ENOMEM;
407 } */
408 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
409 CREATE_NOT_DIR, &netfid, &oplock, NULL,
410 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
411 CIFS_MOUNT_MAP_SPECIAL_CHR);
412 if (rc) {
413 up(&pCifsFile->fh_sem);
414 cFYI(1, ("cifs_open returned 0x%x", rc));
415 cFYI(1, ("oplock: %d", oplock));
416 } else {
417 pCifsFile->netfid = netfid;
418 pCifsFile->invalidHandle = FALSE;
419 up(&pCifsFile->fh_sem);
420 pCifsInode = CIFS_I(inode);
421 if (pCifsInode) {
422 if (can_flush) {
423				filemap_write_and_wait(inode->i_mapping);
424 /* temporarily disable caching while we
425 go to server to get inode info */
426 pCifsInode->clientCanCacheAll = FALSE;
427 pCifsInode->clientCanCacheRead = FALSE;
428 if (pTcon->ses->capabilities & CAP_UNIX)
429 rc = cifs_get_inode_info_unix(&inode,
430 full_path, inode->i_sb, xid);
431 else
432 rc = cifs_get_inode_info(&inode,
433 full_path, NULL, inode->i_sb,
434 xid);
435 } /* else we are writing out data to server already
436 and could deadlock if we tried to flush data, and
437 since we do not know if we have data that would
438 invalidate the current end of file on the server
439			   we can not go to the server to get the new inode
440 info */
441 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
442 pCifsInode->clientCanCacheAll = TRUE;
443 pCifsInode->clientCanCacheRead = TRUE;
444 cFYI(1, ("Exclusive Oplock granted on inode %p",
445 file->f_dentry->d_inode));
446 } else if ((oplock & 0xF) == OPLOCK_READ) {
447 pCifsInode->clientCanCacheRead = TRUE;
448 pCifsInode->clientCanCacheAll = FALSE;
449 } else {
450 pCifsInode->clientCanCacheRead = FALSE;
451 pCifsInode->clientCanCacheAll = FALSE;
452 }
453 cifs_relock_file(pCifsFile);
454 }
455 }
456
457 kfree(full_path);
458 FreeXid(xid);
459 return rc;
460}
461
462int cifs_close(struct inode *inode, struct file *file)
463{
464 int rc = 0;
465 int xid;
466 struct cifs_sb_info *cifs_sb;
467 struct cifsTconInfo *pTcon;
468 struct cifsFileInfo *pSMBFile =
469 (struct cifsFileInfo *)file->private_data;
470
471 xid = GetXid();
472
473 cifs_sb = CIFS_SB(inode->i_sb);
474 pTcon = cifs_sb->tcon;
475 if (pSMBFile) {
476 pSMBFile->closePend = TRUE;
477 if (pTcon) {
478 /* no sense reconnecting to close a file that is
479 already closed */
480 if (pTcon->tidStatus != CifsNeedReconnect) {
481 int timeout = 2;
482 while((atomic_read(&pSMBFile->wrtPending) != 0)
483 && (timeout < 1000) ) {
484 /* Give write a better chance to get to
485 server ahead of the close. We do not
486 want to add a wait_q here as it would
487 increase the memory utilization as
488 the struct would be in each open file,
489 but this should give enough time to
490 clear the socket */
491 cERROR(1,("close with pending writes"));
492 msleep(timeout);
493 timeout *= 4;
494 }
495 rc = CIFSSMBClose(xid, pTcon,
496 pSMBFile->netfid);
497 }
498 }
499		write_lock(&GlobalSMBSeslock);
500 list_del(&pSMBFile->flist);
501 list_del(&pSMBFile->tlist);
502		write_unlock(&GlobalSMBSeslock);
503 kfree(pSMBFile->search_resume_name);
504 kfree(file->private_data);
505 file->private_data = NULL;
506 } else
507 rc = -EBADF;
508
509 if (list_empty(&(CIFS_I(inode)->openFileList))) {
510 cFYI(1, ("closing last open instance for inode %p", inode));
511 /* if the file is not open we do not know if we can cache info
512 on this inode, much less write behind and read ahead */
513 CIFS_I(inode)->clientCanCacheRead = FALSE;
514 CIFS_I(inode)->clientCanCacheAll = FALSE;
515 }
516 if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
517 rc = CIFS_I(inode)->write_behind_rc;
518 FreeXid(xid);
519 return rc;
520}
521
522int cifs_closedir(struct inode *inode, struct file *file)
523{
524 int rc = 0;
525 int xid;
526 struct cifsFileInfo *pCFileStruct =
527 (struct cifsFileInfo *)file->private_data;
528 char *ptmp;
529
530	cFYI(1, ("Closedir inode = 0x%p", inode));
531
532 xid = GetXid();
533
534 if (pCFileStruct) {
535 struct cifsTconInfo *pTcon;
536 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
537
538 pTcon = cifs_sb->tcon;
539
540 cFYI(1, ("Freeing private data in close dir"));
541 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
542 (pCFileStruct->invalidHandle == FALSE)) {
543 pCFileStruct->invalidHandle = TRUE;
544 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
545 cFYI(1, ("Closing uncompleted readdir with rc %d",
546 rc));
547 /* not much we can do if it fails anyway, ignore rc */
548 rc = 0;
549 }
550 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
551 if (ptmp) {
552			cFYI(1, ("closedir free smb buf in srch struct"));
553			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
554 if(pCFileStruct->srch_inf.smallBuf)
555 cifs_small_buf_release(ptmp);
556 else
557 cifs_buf_release(ptmp);
558 }
559 ptmp = pCFileStruct->search_resume_name;
560 if (ptmp) {
561			cFYI(1, ("closedir free resume name"));
562 pCFileStruct->search_resume_name = NULL;
563 kfree(ptmp);
564 }
565 kfree(file->private_data);
566 file->private_data = NULL;
567 }
568 /* BB can we lock the filestruct while this is going on? */
569 FreeXid(xid);
570 return rc;
571}
572
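/* Handle byte range lock requests.  POSIX (fcntl style) locks are sent
   to the server when it advertises CIFS_UNIX_FCNTL_CAP, otherwise the
   request is mapped to a Windows style LockingAndX call */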
573int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
574{
575 int rc, xid;
576 __u32 numLock = 0;
577 __u32 numUnlock = 0;
578 __u64 length;
579 int wait_flag = FALSE;
580 struct cifs_sb_info *cifs_sb;
581 struct cifsTconInfo *pTcon;
582 __u16 netfid;
583 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
584
585 length = 1 + pfLock->fl_end - pfLock->fl_start;
586 rc = -EACCES;
587 xid = GetXid();
588
589 cFYI(1, ("Lock parm: 0x%x flockflags: "
590 "0x%x flocktype: 0x%x start: %lld end: %lld",
591 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
592 pfLock->fl_end));
593
594 if (pfLock->fl_flags & FL_POSIX)
595		cFYI(1, ("Posix"));
596	if (pfLock->fl_flags & FL_FLOCK)
597		cFYI(1, ("Flock"));
598	if (pfLock->fl_flags & FL_SLEEP) {
599		cFYI(1, ("Blocking lock"));
600 wait_flag = TRUE;
601 }
602 if (pfLock->fl_flags & FL_ACCESS)
603 cFYI(1, ("Process suspended by mandatory locking - "
604			"not implemented yet"));
605 if (pfLock->fl_flags & FL_LEASE)
606 cFYI(1, ("Lease on file - not implemented yet"));
607 if (pfLock->fl_flags &
608 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
609 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
610
611 if (pfLock->fl_type == F_WRLCK) {
612 cFYI(1, ("F_WRLCK "));
613 numLock = 1;
614 } else if (pfLock->fl_type == F_UNLCK) {
615		cFYI(1, ("F_UNLCK"));
616		numUnlock = 1;
617		/* Check if unlock includes more than
618		one lock range */
619	} else if (pfLock->fl_type == F_RDLCK) {
620		cFYI(1, ("F_RDLCK"));
621 lockType |= LOCKING_ANDX_SHARED_LOCK;
622 numLock = 1;
623 } else if (pfLock->fl_type == F_EXLCK) {
624		cFYI(1, ("F_EXLCK"));
625 numLock = 1;
626 } else if (pfLock->fl_type == F_SHLCK) {
627		cFYI(1, ("F_SHLCK"));
628 lockType |= LOCKING_ANDX_SHARED_LOCK;
629 numLock = 1;
630 } else
631		cFYI(1, ("Unknown type of lock"));
632
633 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
634 pTcon = cifs_sb->tcon;
635
636 if (file->private_data == NULL) {
637 FreeXid(xid);
638 return -EBADF;
639 }
640 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
641
642
643 /* BB add code here to normalize offset and length to
644 account for negative length which we can not accept over the
645 wire */
646	if (IS_GETLK(cmd)) {
647		if((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
648 (CIFS_UNIX_FCNTL_CAP &
649 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
650 int posix_lock_type;
651 if(lockType & LOCKING_ANDX_SHARED_LOCK)
652 posix_lock_type = CIFS_RDLCK;
653 else
654 posix_lock_type = CIFS_WRLCK;
655 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
656					length, pfLock,
657 posix_lock_type, wait_flag);
658 FreeXid(xid);
659 return rc;
660 }
661
662 /* BB we could chain these into one lock request BB */
663 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
664 0, 1, lockType, 0 /* wait flag */ );
665		if (rc == 0) {
666			rc = CIFSSMBLock(xid, pTcon, netfid, length,
667 pfLock->fl_start, 1 /* numUnlock */ ,
668 0 /* numLock */ , lockType,
669 0 /* wait flag */ );
670 pfLock->fl_type = F_UNLCK;
671 if (rc != 0)
672 cERROR(1, ("Error unlocking previously locked "
673					"range %d during test of lock", rc));
674 rc = 0;
675
676 } else {
677 /* if rc == ERR_SHARING_VIOLATION ? */
678 rc = 0; /* do not change lock type to unlock
679 since range in use */
680 }
681
682 FreeXid(xid);
683 return rc;
684 }
685	if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
686 (CIFS_UNIX_FCNTL_CAP &
687 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
688 int posix_lock_type;
689 if(lockType & LOCKING_ANDX_SHARED_LOCK)
690 posix_lock_type = CIFS_RDLCK;
691 else
692 posix_lock_type = CIFS_WRLCK;
693
694 if(numUnlock == 1)
695			posix_lock_type = CIFS_UNLCK;
696 else if(numLock == 0) {
697 /* if no lock or unlock then nothing
698 to do since we do not know what it is */
699 FreeXid(xid);
700 return -EOPNOTSUPP;
701 }
702 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
703				length, pfLock,
704 posix_lock_type, wait_flag);
705 } else
706 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
707 numUnlock, numLock, lockType, wait_flag);
708	if (pfLock->fl_flags & FL_POSIX)
709 posix_lock_file_wait(file, pfLock);
710 FreeXid(xid);
711 return rc;
712}
713
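/* Write data from a user buffer to the server in chunks of at most wsize,
   reopening the file handle if the session was reconnected.  Returns the
   number of bytes written, or an error if nothing could be written */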
714ssize_t cifs_user_write(struct file *file, const char __user *write_data,
715 size_t write_size, loff_t *poffset)
716{
717 int rc = 0;
718 unsigned int bytes_written = 0;
719 unsigned int total_written;
720 struct cifs_sb_info *cifs_sb;
721 struct cifsTconInfo *pTcon;
722 int xid, long_op;
723 struct cifsFileInfo *open_file;
724
725 if (file->f_dentry == NULL)
726 return -EBADF;
727
728 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
729 if (cifs_sb == NULL)
730 return -EBADF;
731
732 pTcon = cifs_sb->tcon;
733
734 /* cFYI(1,
735 (" write %d bytes to offset %lld of %s", write_size,
736 *poffset, file->f_dentry->d_name.name)); */
737
738 if (file->private_data == NULL)
739 return -EBADF;
740 else
741 open_file = (struct cifsFileInfo *) file->private_data;
742
743 xid = GetXid();
744 if (file->f_dentry->d_inode == NULL) {
745 FreeXid(xid);
746 return -EBADF;
747 }
748
749 if (*poffset > file->f_dentry->d_inode->i_size)
750 long_op = 2; /* writes past end of file can take a long time */
751 else
752 long_op = 1;
753
754 for (total_written = 0; write_size > total_written;
755 total_written += bytes_written) {
756 rc = -EAGAIN;
757 while (rc == -EAGAIN) {
758 if (file->private_data == NULL) {
759 /* file has been closed on us */
760 FreeXid(xid);
761 /* if we have gotten here we have written some data
762 and blocked, and the file has been freed on us while
763 we blocked so return what we managed to write */
764 return total_written;
765 }
766 if (open_file->closePend) {
767 FreeXid(xid);
768 if (total_written)
769 return total_written;
770 else
771 return -EBADF;
772 }
773 if (open_file->invalidHandle) {
774 if ((file->f_dentry == NULL) ||
775 (file->f_dentry->d_inode == NULL)) {
776 FreeXid(xid);
777 return total_written;
778 }
779 /* we could deadlock if we called
780 filemap_fdatawait from here so tell
781 reopen_file not to flush data to server
782 now */
783 rc = cifs_reopen_file(file->f_dentry->d_inode,
784 file, FALSE);
785 if (rc != 0)
786 break;
787 }
788
789 rc = CIFSSMBWrite(xid, pTcon,
790 open_file->netfid,
791 min_t(const int, cifs_sb->wsize,
792 write_size - total_written),
793 *poffset, &bytes_written,
794 NULL, write_data + total_written, long_op);
795 }
796 if (rc || (bytes_written == 0)) {
797 if (total_written)
798 break;
799 else {
800 FreeXid(xid);
801 return rc;
802 }
803 } else
804 *poffset += bytes_written;
805 long_op = FALSE; /* subsequent writes fast -
806 15 seconds is plenty */
807 }
808
809	cifs_stats_bytes_written(pTcon, total_written);
810
811 /* since the write may have blocked check these pointers again */
812 if (file->f_dentry) {
813 if (file->f_dentry->d_inode) {
814 struct inode *inode = file->f_dentry->d_inode;
815 inode->i_ctime = inode->i_mtime =
816 current_fs_time(inode->i_sb);
817 if (total_written > 0) {
818 if (*poffset > file->f_dentry->d_inode->i_size)
819 i_size_write(file->f_dentry->d_inode,
820 *poffset);
821 }
822 mark_inode_dirty_sync(file->f_dentry->d_inode);
823 }
824 }
825 FreeXid(xid);
826 return total_written;
827}
828
829static ssize_t cifs_write(struct file *file, const char *write_data,
830 size_t write_size, loff_t *poffset)
831{
832 int rc = 0;
833 unsigned int bytes_written = 0;
834 unsigned int total_written;
835 struct cifs_sb_info *cifs_sb;
836 struct cifsTconInfo *pTcon;
837 int xid, long_op;
838 struct cifsFileInfo *open_file;
839
840 if (file->f_dentry == NULL)
841 return -EBADF;
842
843 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
844 if (cifs_sb == NULL)
845 return -EBADF;
846
847 pTcon = cifs_sb->tcon;
848
849 cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
850 *poffset, file->f_dentry->d_name.name));
851
852 if (file->private_data == NULL)
853 return -EBADF;
854 else
855 open_file = (struct cifsFileInfo *)file->private_data;
856
857 xid = GetXid();
858 if (file->f_dentry->d_inode == NULL) {
859 FreeXid(xid);
860 return -EBADF;
861 }
862
863 if (*poffset > file->f_dentry->d_inode->i_size)
864 long_op = 2; /* writes past end of file can take a long time */
865 else
866 long_op = 1;
867
868 for (total_written = 0; write_size > total_written;
869 total_written += bytes_written) {
870 rc = -EAGAIN;
871 while (rc == -EAGAIN) {
872 if (file->private_data == NULL) {
873 /* file has been closed on us */
874 FreeXid(xid);
875 /* if we have gotten here we have written some data
876 and blocked, and the file has been freed on us
877 while we blocked so return what we managed to
878 write */
879 return total_written;
880 }
881 if (open_file->closePend) {
882 FreeXid(xid);
883 if (total_written)
884 return total_written;
885 else
886 return -EBADF;
887 }
888 if (open_file->invalidHandle) {
889 if ((file->f_dentry == NULL) ||
890 (file->f_dentry->d_inode == NULL)) {
891 FreeXid(xid);
892 return total_written;
893 }
894 /* we could deadlock if we called
895 filemap_fdatawait from here so tell
896 reopen_file not to flush data to
897 server now */
898 rc = cifs_reopen_file(file->f_dentry->d_inode,
899 file, FALSE);
900 if (rc != 0)
901 break;
902 }
903			if(experimEnabled || (pTcon->ses->server &&
904 ((pTcon->ses->server->secMode &
905 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
906				== 0))) {
907 struct kvec iov[2];
908 unsigned int len;
909
910				len = min((size_t)cifs_sb->wsize,
911 write_size - total_written);
912 /* iov[0] is reserved for smb header */
913 iov[1].iov_base = (char *)write_data +
914 total_written;
915 iov[1].iov_len = len;
916				rc = CIFSSMBWrite2(xid, pTcon,
917						open_file->netfid, len,
918						*poffset, &bytes_written,
919						iov, 1, long_op);
920			} else
921 rc = CIFSSMBWrite(xid, pTcon,
922 open_file->netfid,
923 min_t(const int, cifs_sb->wsize,
924 write_size - total_written),
925 *poffset, &bytes_written,
926 write_data + total_written,
927 NULL, long_op);
928 }
929 if (rc || (bytes_written == 0)) {
930 if (total_written)
931 break;
932 else {
933 FreeXid(xid);
934 return rc;
935 }
936 } else
937 *poffset += bytes_written;
938 long_op = FALSE; /* subsequent writes fast -
939 15 seconds is plenty */
940 }
941
942	cifs_stats_bytes_written(pTcon, total_written);
943
944 /* since the write may have blocked check these pointers again */
945 if (file->f_dentry) {
946 if (file->f_dentry->d_inode) {
947 file->f_dentry->d_inode->i_ctime =
948 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
949 if (total_written > 0) {
950 if (*poffset > file->f_dentry->d_inode->i_size)
951 i_size_write(file->f_dentry->d_inode,
952 *poffset);
953 }
954 mark_inode_dirty_sync(file->f_dentry->d_inode);
955 }
956 }
957 FreeXid(xid);
958 return total_written;
959}
960
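/* Find an open file handle for this inode that was opened for writing.
   The handle's wrtPending count is incremented so it will not be freed
   while the write is in progress; the caller must decrement it when done */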
961struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
962{
963 struct cifsFileInfo *open_file;
964	int rc;
965
966 /* Having a null inode here (because mapping->host was set to zero by
967	the VFS or MM) should not happen but we had reports of an oops (due to
968 it being zero) during stress testcases so we need to check for it */
969
970 if(cifs_inode == NULL) {
971 cERROR(1,("Null inode passed to cifs_writeable_file"));
972 dump_stack();
973 return NULL;
974 }
975
976 read_lock(&GlobalSMBSeslock);
977 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
978 if (open_file->closePend)
979 continue;
980 if (open_file->pfile &&
981 ((open_file->pfile->f_flags & O_RDWR) ||
982 (open_file->pfile->f_flags & O_WRONLY))) {
983			atomic_inc(&open_file->wrtPending);
984			read_unlock(&GlobalSMBSeslock);
985			if((open_file->invalidHandle) &&
986			   (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
987				rc = cifs_reopen_file(&cifs_inode->vfs_inode,
988 open_file->pfile, FALSE);
989 /* if it fails, try another handle - might be */
990 /* dangerous to hold up writepages with retry */
991 if(rc) {
992					cFYI(1,("failed on reopen file in wp"));
993					read_lock(&GlobalSMBSeslock);
994 /* can not use this handle, no write
995 pending on this one after all */
996 atomic_dec
997 (&open_file->wrtPending);
998 continue;
999 }
1000 }
1001 return open_file;
1002 }
1003 }
1004 read_unlock(&GlobalSMBSeslock);
1005 return NULL;
1006}
1007
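/* Write the byte range [from, to) of a locked page back to the server
   using any writable handle currently open against the inode */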
1008static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1009{
1010 struct address_space *mapping = page->mapping;
1011 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1012 char *write_data;
1013 int rc = -EFAULT;
1014 int bytes_written = 0;
1015 struct cifs_sb_info *cifs_sb;
1016 struct cifsTconInfo *pTcon;
1017 struct inode *inode;
1018	struct cifsFileInfo *open_file;
1019
1020 if (!mapping || !mapping->host)
1021 return -EFAULT;
1022
1023 inode = page->mapping->host;
1024 cifs_sb = CIFS_SB(inode->i_sb);
1025 pTcon = cifs_sb->tcon;
1026
1027 offset += (loff_t)from;
1028 write_data = kmap(page);
1029 write_data += from;
1030
1031 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1032 kunmap(page);
1033 return -EIO;
1034 }
1035
1036 /* racing with truncate? */
1037 if (offset > mapping->host->i_size) {
1038 kunmap(page);
1039 return 0; /* don't care */
1040 }
1041
1042 /* check to make sure that we are not extending the file */
1043 if (mapping->host->i_size - offset < (loff_t)to)
1044 to = (unsigned)(mapping->host->i_size - offset);
1045
1046 open_file = find_writable_file(CIFS_I(mapping->host));
1047 if (open_file) {
1048 bytes_written = cifs_write(open_file->pfile, write_data,
1049 to-from, &offset);
1050		atomic_dec(&open_file->wrtPending);
1051		/* Does mm or vfs already set times? */
1052 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1053 if ((bytes_written > 0) && (offset)) {
1054 rc = 0;
1055 } else if (bytes_written < 0) {
1056 if (rc != -EBADF)
1057 rc = bytes_written;
1058		}
1059	} else {
1060 cFYI(1, ("No writeable filehandles for inode"));
1061 rc = -EIO;
1062 }
1063
1064 kunmap(page);
1065 return rc;
1066}
1067
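/* Write back dirty pages for this mapping, coalescing contiguous dirty
   pages into a single SMB write (up to wsize) sent via CIFSSMBWrite2.
   Falls back to generic_writepages when wsize is smaller than a page or
   when signing is required and the experimental code is disabled */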
1068static int cifs_writepages(struct address_space *mapping,
1069			   struct writeback_control *wbc)
1070{
1071 struct backing_dev_info *bdi = mapping->backing_dev_info;
1072 unsigned int bytes_to_write;
1073 unsigned int bytes_written;
1074 struct cifs_sb_info *cifs_sb;
1075 int done = 0;
1076	pgoff_t end;
1077	pgoff_t index;
1078	int range_whole = 0;
1079	struct kvec iov[32];
1080	int len;
1081 int n_iov = 0;
1082 pgoff_t next;
1083 int nr_pages;
1084 __u64 offset = 0;
1085	struct cifsFileInfo *open_file;
1086 struct page *page;
1087 struct pagevec pvec;
1088 int rc = 0;
1089 int scanned = 0;
1090 int xid;
1091
1092 cifs_sb = CIFS_SB(mapping->host->i_sb);
1093
1094 /*
1095 * If wsize is smaller that the page cache size, default to writing
1096 * one page at a time via cifs_writepage
1097 */
1098 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1099 return generic_writepages(mapping, wbc);
1100
1101 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1102 if(cifs_sb->tcon->ses->server->secMode &
1103 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1104 if(!experimEnabled)
1105 return generic_writepages(mapping, wbc);
1106
1107 /*
1108 * BB: Is this meaningful for a non-block-device file system?
1109 * If it is, we should test it again after we do I/O
1110 */
1111 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1112 wbc->encountered_congestion = 1;
1113 return 0;
1114 }
1115
1116 xid = GetXid();
1117
1118	pagevec_init(&pvec, 0);
1119	if (wbc->range_cyclic) {
1120		index = mapping->writeback_index; /* Start from prev offset */
1121 end = -1;
1122 } else {
1123 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1124 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1125 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1126 range_whole = 1;
1127 scanned = 1;
1128 }
1129retry:
1130 while (!done && (index <= end) &&
1131 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1132 PAGECACHE_TAG_DIRTY,
1133 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1134 int first;
1135 unsigned int i;
1136
1137 first = -1;
1138 next = 0;
1139 n_iov = 0;
1140 bytes_to_write = 0;
1141
1142 for (i = 0; i < nr_pages; i++) {
1143 page = pvec.pages[i];
1144 /*
1145 * At this point we hold neither mapping->tree_lock nor
1146 * lock on the page itself: the page may be truncated or
1147 * invalidated (changing page->mapping to NULL), or even
1148 * swizzled back from swapper_space to tmpfs file
1149 * mapping
1150 */
1151
1152 if (first < 0)
1153 lock_page(page);
1154 else if (TestSetPageLocked(page))
1155 break;
1156
1157 if (unlikely(page->mapping != mapping)) {
1158 unlock_page(page);
1159 break;
1160 }
1161
1162			if (!wbc->range_cyclic && page->index > end) {
1163 done = 1;
1164 unlock_page(page);
1165 break;
1166 }
1167
1168 if (next && (page->index != next)) {
1169 /* Not next consecutive page */
1170 unlock_page(page);
1171 break;
1172 }
1173
1174 if (wbc->sync_mode != WB_SYNC_NONE)
1175 wait_on_page_writeback(page);
1176
1177 if (PageWriteback(page) ||
1178 !test_clear_page_dirty(page)) {
1179 unlock_page(page);
1180 break;
1181 }
1182
1183 if (page_offset(page) >= mapping->host->i_size) {
1184 done = 1;
1185 unlock_page(page);
1186 break;
1187 }
1188
1189 /*
1190 * BB can we get rid of this? pages are held by pvec
1191 */
1192 page_cache_get(page);
1193
1194 len = min(mapping->host->i_size - page_offset(page),
1195 (loff_t)PAGE_CACHE_SIZE);
1196
1197 /* reserve iov[0] for the smb header */
1198 n_iov++;
1199 iov[n_iov].iov_base = kmap(page);
1200 iov[n_iov].iov_len = len;
1201 bytes_to_write += len;
1202
1203 if (first < 0) {
1204 first = i;
1205 offset = page_offset(page);
1206 }
1207 next = page->index + 1;
1208 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1209 break;
1210 }
1211 if (n_iov) {
1212 /* Search for a writable handle every time we call
1213 * CIFSSMBWrite2. We can't rely on the last handle
1214 * we used to still be valid
1215 */
1216 open_file = find_writable_file(CIFS_I(mapping->host));
1217 if (!open_file) {
1218 cERROR(1, ("No writable handles for inode"));
1219 rc = -EBADF;
1220			} else {
1221 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1222 open_file->netfid,
1223 bytes_to_write, offset,
1224 &bytes_written, iov, n_iov,
1225 1);
1226 atomic_dec(&open_file->wrtPending);
1227 if (rc || bytes_written < bytes_to_write) {
1228 cERROR(1,("Write2 ret %d, written = %d",
1229 rc, bytes_written));
1230 /* BB what if continued retry is
1231 requested via mount flags? */
1232 set_bit(AS_EIO, &mapping->flags);
1233 } else {
1234 cifs_stats_bytes_written(cifs_sb->tcon,
1235 bytes_written);
1236 }
1237 }
1238 for (i = 0; i < n_iov; i++) {
1239 page = pvec.pages[first + i];
1240 /* Should we also set page error on
1241 success rc but too little data written? */
1242 /* BB investigate retry logic on temporary
1243 server crash cases and how recovery works
1244 when page marked as error */
1245 if(rc)
1246 SetPageError(page);
1247 kunmap(page);
1248 unlock_page(page);
1249 page_cache_release(page);
1250 }
1251 if ((wbc->nr_to_write -= n_iov) <= 0)
1252 done = 1;
1253 index = next;
1254 }
1255 pagevec_release(&pvec);
1256 }
1257 if (!scanned && !done) {
1258 /*
1259 * We hit the last page and there is more work to be done: wrap
1260 * back to the start of the file
1261 */
1262 scanned = 1;
1263 index = 0;
1264 goto retry;
1265 }
1266	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1267 mapping->writeback_index = index;
1268
1269	FreeXid(xid);
1270
1271 return rc;
1272}
1273
1274static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1275{
1276 int rc = -EFAULT;
1277 int xid;
1278
1279 xid = GetXid();
1280/* BB add check for wbc flags */
1281 page_cache_get(page);
1282 if (!PageUptodate(page)) {
1283 cFYI(1, ("ppw - page not up to date"));
1284 }
1285
1286 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1287 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1288 unlock_page(page);
1289 page_cache_release(page);
1290 FreeXid(xid);
1291 return rc;
1292}
1293
1294static int cifs_commit_write(struct file *file, struct page *page,
1295 unsigned offset, unsigned to)
1296{
1297 int xid;
1298 int rc = 0;
1299 struct inode *inode = page->mapping->host;
1300 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1301 char *page_data;
1302
1303 xid = GetXid();
1304 cFYI(1, ("commit write for page %p up to position %lld for %d",
1305 page, position, to));
1306 if (position > inode->i_size) {
1307 i_size_write(inode, position);
1308 /* if (file->private_data == NULL) {
1309 rc = -EBADF;
1310 } else {
1311 open_file = (struct cifsFileInfo *)file->private_data;
1312 cifs_sb = CIFS_SB(inode->i_sb);
1313 rc = -EAGAIN;
1314 while (rc == -EAGAIN) {
1315 if ((open_file->invalidHandle) &&
1316 (!open_file->closePend)) {
1317 rc = cifs_reopen_file(
1318 file->f_dentry->d_inode, file);
1319 if (rc != 0)
1320 break;
1321 }
1322 if (!open_file->closePend) {
1323 rc = CIFSSMBSetFileSize(xid,
1324 cifs_sb->tcon, position,
1325 open_file->netfid,
1326 open_file->pid, FALSE);
1327 } else {
1328 rc = -EBADF;
1329 break;
1330 }
1331 }
1332 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1333 } */
1334 }
1335 if (!PageUptodate(page)) {
1336 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1337 /* can not rely on (or let) writepage write this data */
1338 if (to < offset) {
1339 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1340 offset, to));
1341 FreeXid(xid);
1342 return rc;
1343 }
1344 /* this is probably better than directly calling
1345 partialpage_write since in this function the file handle is
1346 known which we might as well leverage */
1347 /* BB check if anything else missing out of ppw
1348 such as updating last write time */
1349 page_data = kmap(page);
1350 rc = cifs_write(file, page_data + offset, to-offset,
1351 &position);
1352 if (rc > 0)
1353 rc = 0;
1354 /* else if (rc < 0) should we set writebehind rc? */
1355 kunmap(page);
1356 } else {
1357 set_page_dirty(page);
1358 }
1359
1360 FreeXid(xid);
1361 return rc;
1362}
1363
1364int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1365{
1366 int xid;
1367 int rc = 0;
1368 struct inode *inode = file->f_dentry->d_inode;
1369
1370 xid = GetXid();
1371
1372	cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1373 dentry->d_name.name, datasync));
1374
1375 rc = filemap_fdatawrite(inode->i_mapping);
1376 if (rc == 0)
1377 CIFS_I(inode)->write_behind_rc = 0;
1378 FreeXid(xid);
1379 return rc;
1380}
1381
1382/* static void cifs_sync_page(struct page *page)
1383{
1384 struct address_space *mapping;
1385 struct inode *inode;
1386 unsigned long index = page->index;
1387 unsigned int rpages = 0;
1388 int rc = 0;
1389
1390 cFYI(1, ("sync page %p",page));
1391 mapping = page->mapping;
1392 if (!mapping)
1393 return 0;
1394 inode = mapping->host;
1395 if (!inode)
1396		return; */
1397
1398/* fill in rpages then
1399 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1400
1401/*	cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1402
1403#if 0
1404 if (rc < 0)
1405 return rc;
1406 return 0;
1407#endif
1408} */
1409
1410/*
1411 * As file closes, flush all cached write data for this inode checking
1412 * for write behind errors.
1413 */
1414int cifs_flush(struct file *file, fl_owner_t id)
1415{
1416 struct inode * inode = file->f_dentry->d_inode;
1417 int rc = 0;
1418
1419 /* Rather than do the steps manually:
1420 lock the inode for writing
1421 loop through pages looking for write behind data (dirty pages)
1422 coalesce into contiguous 16K (or smaller) chunks to write to server
1423 send to server (prefer in parallel)
1424 deal with writebehind errors
1425 unlock inode for writing
1426 filemapfdatawrite appears easier for the time being */
1427
1428 rc = filemap_fdatawrite(inode->i_mapping);
1429 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1430 CIFS_I(inode)->write_behind_rc = 0;
1431
1432 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1433
1434 return rc;
1435}
1436
1437ssize_t cifs_user_read(struct file *file, char __user *read_data,
1438 size_t read_size, loff_t *poffset)
1439{
1440 int rc = -EACCES;
1441 unsigned int bytes_read = 0;
1442 unsigned int total_read = 0;
1443 unsigned int current_read_size;
1444 struct cifs_sb_info *cifs_sb;
1445 struct cifsTconInfo *pTcon;
1446 int xid;
1447 struct cifsFileInfo *open_file;
1448 char *smb_read_data;
1449 char __user *current_offset;
1450 struct smb_com_read_rsp *pSMBr;
1451
1452 xid = GetXid();
1453 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1454 pTcon = cifs_sb->tcon;
1455
1456 if (file->private_data == NULL) {
1457 FreeXid(xid);
1458 return -EBADF;
1459 }
1460 open_file = (struct cifsFileInfo *)file->private_data;
1461
1462 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1463 cFYI(1, ("attempting read on write only file instance"));
1464 }
1465 for (total_read = 0, current_offset = read_data;
1466 read_size > total_read;
1467 total_read += bytes_read, current_offset += bytes_read) {
1468 current_read_size = min_t(const int, read_size - total_read,
1469 cifs_sb->rsize);
1470 rc = -EAGAIN;
1471 smb_read_data = NULL;
1472 while (rc == -EAGAIN) {
1473			int buf_type = CIFS_NO_BUFFER;
1474 if ((open_file->invalidHandle) &&
1475 (!open_file->closePend)) {
1476 rc = cifs_reopen_file(file->f_dentry->d_inode,
1477 file, TRUE);
1478 if (rc != 0)
1479 break;
1480 }
1481			rc = CIFSSMBRead(xid, pTcon,
1482 open_file->netfid,
1483 current_read_size, *poffset,
1484 &bytes_read, &smb_read_data,
1485 &buf_type);
1486			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1487			if (smb_read_data) {
1488 if (copy_to_user(current_offset,
1489 smb_read_data +
1490 4 /* RFC1001 length field */ +
1491 le16_to_cpu(pSMBr->DataOffset),
1492 bytes_read)) {
1493 rc = -EFAULT;
1494 }
1495
1496 if(buf_type == CIFS_SMALL_BUFFER)
1497 cifs_small_buf_release(smb_read_data);
1498 else if(buf_type == CIFS_LARGE_BUFFER)
1499 cifs_buf_release(smb_read_data);
1500 smb_read_data = NULL;
1501 }
1502 }
1503 if (rc || (bytes_read == 0)) {
1504 if (total_read) {
1505 break;
1506 } else {
1507 FreeXid(xid);
1508 return rc;
1509 }
1510 } else {
1511			cifs_stats_bytes_read(pTcon, bytes_read);
1512 *poffset += bytes_read;
1513 }
1514 }
1515 FreeXid(xid);
1516 return total_read;
1517}
1518
1519
1520static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1521 loff_t *poffset)
1522{
1523 int rc = -EACCES;
1524 unsigned int bytes_read = 0;
1525 unsigned int total_read;
1526 unsigned int current_read_size;
1527 struct cifs_sb_info *cifs_sb;
1528 struct cifsTconInfo *pTcon;
1529 int xid;
1530 char *current_offset;
1531 struct cifsFileInfo *open_file;
1532	int buf_type = CIFS_NO_BUFFER;
1533
1534 xid = GetXid();
1535 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1536 pTcon = cifs_sb->tcon;
1537
1538 if (file->private_data == NULL) {
1539 FreeXid(xid);
1540 return -EBADF;
1541 }
1542 open_file = (struct cifsFileInfo *)file->private_data;
1543
1544 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1545 cFYI(1, ("attempting read on write only file instance"));
1546
1547 for (total_read = 0, current_offset = read_data;
1548 read_size > total_read;
1549 total_read += bytes_read, current_offset += bytes_read) {
1550 current_read_size = min_t(const int, read_size - total_read,
1551 cifs_sb->rsize);
1552 /* For windows me and 9x we do not want to request more
1553 than it negotiated since it will refuse the read then */
1554 if((pTcon->ses) &&
1555 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1556 current_read_size = min_t(const int, current_read_size,
1557 pTcon->ses->server->maxBuf - 128);
1558 }
1559 rc = -EAGAIN;
1560 while (rc == -EAGAIN) {
1561 if ((open_file->invalidHandle) &&
1562 (!open_file->closePend)) {
1563 rc = cifs_reopen_file(file->f_dentry->d_inode,
1564 file, TRUE);
1565 if (rc != 0)
1566 break;
1567 }
1568			rc = CIFSSMBRead(xid, pTcon,
1569 open_file->netfid,
1570 current_read_size, *poffset,
1571 &bytes_read, &current_offset,
1572 &buf_type);
1573 }
1574 if (rc || (bytes_read == 0)) {
1575 if (total_read) {
1576 break;
1577 } else {
1578 FreeXid(xid);
1579 return rc;
1580 }
1581 } else {
1582			cifs_stats_bytes_read(pTcon, total_read);
1583 *poffset += bytes_read;
1584 }
1585 }
1586 FreeXid(xid);
1587 return total_read;
1588}
1589
1590int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1591{
1592 struct dentry *dentry = file->f_dentry;
1593 int rc, xid;
1594
1595 xid = GetXid();
1596 rc = cifs_revalidate(dentry);
1597 if (rc) {
1598 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1599 FreeXid(xid);
1600 return rc;
1601 }
1602 rc = generic_file_mmap(file, vma);
1603 FreeXid(xid);
1604 return rc;
1605}
1606
1607
1608static void cifs_copy_cache_pages(struct address_space *mapping,
1609 struct list_head *pages, int bytes_read, char *data,
1610 struct pagevec *plru_pvec)
1611{
1612 struct page *page;
1613 char *target;
1614
1615 while (bytes_read > 0) {
1616 if (list_empty(pages))
1617 break;
1618
1619 page = list_entry(pages->prev, struct page, lru);
1620 list_del(&page->lru);
1621
1622 if (add_to_page_cache(page, mapping, page->index,
1623 GFP_KERNEL)) {
1624 page_cache_release(page);
1625 cFYI(1, ("Add page cache failed"));
1626 data += PAGE_CACHE_SIZE;
1627 bytes_read -= PAGE_CACHE_SIZE;
1628 continue;
1629 }
1630
1631 target = kmap_atomic(page,KM_USER0);
1632
1633 if (PAGE_CACHE_SIZE > bytes_read) {
1634 memcpy(target, data, bytes_read);
1635 /* zero the tail end of this partial page */
1636 memset(target + bytes_read, 0,
1637 PAGE_CACHE_SIZE - bytes_read);
1638 bytes_read = 0;
1639 } else {
1640 memcpy(target, data, PAGE_CACHE_SIZE);
1641 bytes_read -= PAGE_CACHE_SIZE;
1642 }
1643 kunmap_atomic(target, KM_USER0);
1644
1645 flush_dcache_page(page);
1646 SetPageUptodate(page);
1647 unlock_page(page);
1648 if (!pagevec_add(plru_pvec, page))
1649 __pagevec_lru_add(plru_pvec);
1650 data += PAGE_CACHE_SIZE;
1651 }
1652 return;
1653}
1654
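/* Readahead: fill the list of pages handed in by the VM with data read
   from the server in rsize-sized requests, adding each page to the page
   cache via cifs_copy_cache_pages */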
1655static int cifs_readpages(struct file *file, struct address_space *mapping,
1656 struct list_head *page_list, unsigned num_pages)
1657{
1658 int rc = -EACCES;
1659 int xid;
1660 loff_t offset;
1661 struct page *page;
1662 struct cifs_sb_info *cifs_sb;
1663 struct cifsTconInfo *pTcon;
1664 int bytes_read = 0;
1665 unsigned int read_size,i;
1666 char *smb_read_data = NULL;
1667 struct smb_com_read_rsp *pSMBr;
1668 struct pagevec lru_pvec;
1669 struct cifsFileInfo *open_file;
1670	int buf_type = CIFS_NO_BUFFER;
1671
1672 xid = GetXid();
1673 if (file->private_data == NULL) {
1674 FreeXid(xid);
1675 return -EBADF;
1676 }
1677 open_file = (struct cifsFileInfo *)file->private_data;
1678 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1679 pTcon = cifs_sb->tcon;
1680
1681 pagevec_init(&lru_pvec, 0);
1682
1683 for (i = 0; i < num_pages; ) {
1684 unsigned contig_pages;
1685 struct page *tmp_page;
1686 unsigned long expected_index;
1687
1688 if (list_empty(page_list))
1689 break;
1690
1691 page = list_entry(page_list->prev, struct page, lru);
1692 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1693
1694 /* count adjacent pages that we will read into */
1695 contig_pages = 0;
1696 expected_index =
1697 list_entry(page_list->prev, struct page, lru)->index;
1698 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1699 if (tmp_page->index == expected_index) {
1700 contig_pages++;
1701 expected_index++;
1702 } else
1703 break;
1704 }
1705 if (contig_pages + i > num_pages)
1706 contig_pages = num_pages - i;
1707
1708 /* for reads over a certain size could initiate async
1709 read ahead */
1710
1711 read_size = contig_pages * PAGE_CACHE_SIZE;
1712 /* Read size needs to be in multiples of one page */
1713 read_size = min_t(const unsigned int, read_size,
1714 cifs_sb->rsize & PAGE_CACHE_MASK);
1715
1716 rc = -EAGAIN;
1717 while (rc == -EAGAIN) {
1718 if ((open_file->invalidHandle) &&
1719 (!open_file->closePend)) {
1720 rc = cifs_reopen_file(file->f_dentry->d_inode,
1721 file, TRUE);
1722 if (rc != 0)
1723 break;
1724 }
1725
1726			rc = CIFSSMBRead(xid, pTcon,
1727 open_file->netfid,
1728 read_size, offset,
1729 &bytes_read, &smb_read_data,
1730 &buf_type);
1731			/* BB more RC checks ? */
1732 if (rc== -EAGAIN) {
1733 if (smb_read_data) {
1734 if(buf_type == CIFS_SMALL_BUFFER)
1735 cifs_small_buf_release(smb_read_data);
1736 else if(buf_type == CIFS_LARGE_BUFFER)
1737 cifs_buf_release(smb_read_data);
1738 smb_read_data = NULL;
1739 }
1740 }
1741 }
1742 if ((rc < 0) || (smb_read_data == NULL)) {
1743 cFYI(1, ("Read error in readpages: %d", rc));
1744			/* clean up remaining pages off list */
1745 while (!list_empty(page_list) && (i < num_pages)) {
1746 page = list_entry(page_list->prev, struct page,
1747 lru);
1748 list_del(&page->lru);
1749 page_cache_release(page);
1750 }
1751 break;
1752 } else if (bytes_read > 0) {
1753 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1754 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1755 smb_read_data + 4 /* RFC1001 hdr */ +
1756 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1757
1758 i += bytes_read >> PAGE_CACHE_SHIFT;
1759			cifs_stats_bytes_read(pTcon, bytes_read);
1760 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1761 i++; /* account for partial page */
1762
1763 /* server copy of file can have smaller size
1764 than client */
1765 /* BB do we need to verify this common case ?
1766 this case is ok - if we are at server EOF
1767 we will hit it on next read */
1768
1769 /* while (!list_empty(page_list) && (i < num_pages)) {
1770 page = list_entry(page_list->prev,
1771 struct page, list);
1772 list_del(&page->list);
1773 page_cache_release(page);
1774 }
1775 break; */
1776 }
1777 } else {
1778 cFYI(1, ("No bytes read (%d) at offset %lld . "
1779 "Cleaning remaining pages from readahead list",
1780 bytes_read, offset));
1781 /* BB turn off caching and do new lookup on
1782 file size at server? */
1783 while (!list_empty(page_list) && (i < num_pages)) {
1784 page = list_entry(page_list->prev, struct page,
1785 lru);
1786 list_del(&page->lru);
1787
1788 /* BB removeme - replace with zero of page? */
1789 page_cache_release(page);
1790 }
1791 break;
1792 }
1793 if (smb_read_data) {
1794 if(buf_type == CIFS_SMALL_BUFFER)
1795 cifs_small_buf_release(smb_read_data);
1796 else if(buf_type == CIFS_LARGE_BUFFER)
1797 cifs_buf_release(smb_read_data);
1798 smb_read_data = NULL;
1799 }
1800 bytes_read = 0;
1801 }
1802
1803 pagevec_lru_add(&lru_pvec);
1804
1805/* need to free smb_read_data buf before exit */
1806 if (smb_read_data) {
1807 if(buf_type == CIFS_SMALL_BUFFER)
1808 cifs_small_buf_release(smb_read_data);
1809 else if(buf_type == CIFS_LARGE_BUFFER)
1810 cifs_buf_release(smb_read_data);
1811 smb_read_data = NULL;
1812 }
1813
1814 FreeXid(xid);
1815 return rc;
1816}
1817
1818static int cifs_readpage_worker(struct file *file, struct page *page,
1819 loff_t *poffset)
1820{
1821 char *read_data;
1822 int rc;
1823
1824 page_cache_get(page);
1825 read_data = kmap(page);
1826 /* for reads over a certain size could initiate async read ahead */
1827
1828 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1829
1830 if (rc < 0)
1831 goto io_error;
1832 else
1833		cFYI(1, ("Bytes read %d",rc));
1834
1835 file->f_dentry->d_inode->i_atime =
1836 current_fs_time(file->f_dentry->d_inode->i_sb);
1837
1838 if (PAGE_CACHE_SIZE > rc)
1839 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1840
1841 flush_dcache_page(page);
1842 SetPageUptodate(page);
1843 rc = 0;
1844
1845io_error:
1846 kunmap(page);
1847 page_cache_release(page);
1848 return rc;
1849}
1850
1851static int cifs_readpage(struct file *file, struct page *page)
1852{
1853 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1854 int rc = -EACCES;
1855 int xid;
1856
1857 xid = GetXid();
1858
1859 if (file->private_data == NULL) {
1860 FreeXid(xid);
1861 return -EBADF;
1862 }
1863
1864 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1865 page, (int)offset, (int)offset));
1866
1867 rc = cifs_readpage_worker(file, page, &offset);
1868
1869 unlock_page(page);
1870
1871 FreeXid(xid);
1872 return rc;
1873}
1874
1875/* We do not want to update the file size from server for inodes
1876 open for write - to avoid races with writepage extending
1877 the file - in the future we could consider allowing
1878 refreshing the inode only on increases in the file size
1879 but this is tricky to do without racing with writebehind
1880 page caching in the current Linux kernel design */
1881int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1882{
1883 struct cifsFileInfo *open_file = NULL;
1884
1885 if (cifsInode)
1886 open_file = find_writable_file(cifsInode);
1887
1888 if(open_file) {
1889 struct cifs_sb_info *cifs_sb;
1890
1891 /* there is not actually a write pending so let
1892 this handle go free and allow it to
1893 be closable if needed */
1894 atomic_dec(&open_file->wrtPending);
1895
1896 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1897 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1898 /* since no page cache to corrupt on directio
1899 we can change size safely */
1900 return 1;
1901 }
1902
1903		return 0;
1904	} else
1905		return 1;
1906}
1907
1908static int cifs_prepare_write(struct file *file, struct page *page,
1909 unsigned from, unsigned to)
1910{
1911 int rc = 0;
1912 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1913 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1914 if (!PageUptodate(page)) {
1915 /* if (to - from != PAGE_CACHE_SIZE) {
1916 void *kaddr = kmap_atomic(page, KM_USER0);
1917 memset(kaddr, 0, from);
1918 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1919 flush_dcache_page(page);
1920 kunmap_atomic(kaddr, KM_USER0);
1921 } */
1922 /* If we are writing a full page it will be up to date,
1923 no need to read from the server */
1924 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1925 SetPageUptodate(page);
1926
1927 /* might as well read a page, it is fast enough */
1928 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1929 rc = cifs_readpage_worker(file, page, &offset);
1930 } else {
1931 /* should we try using another file handle if there is one -
1932 how would we lock it to prevent close of that handle
1933 racing with this read?
1934 In any case this will be written out by commit_write */
1935 }
1936 }
1937
1938 /* BB should we pass any errors back?
1939 e.g. if we do not have read access to the file */
1940 return 0;
1941}
1942
1943const struct address_space_operations cifs_addr_ops = {
1944 .readpage = cifs_readpage,
1945 .readpages = cifs_readpages,
1946 .writepage = cifs_writepage,
1947	.writepages = cifs_writepages,
1948 .prepare_write = cifs_prepare_write,
1949 .commit_write = cifs_commit_write,
1950 .set_page_dirty = __set_page_dirty_nobuffers,
1951 /* .sync_page = cifs_sync_page, */
1952 /* .direct_IO = */
1953};
1954
1955/*
1956 * cifs_readpages requires the server to support a buffer large enough to
1957 * contain the header plus one complete page of data. Otherwise, we need
1958 * to leave cifs_readpages out of the address space operations.
1959 */
1960const struct address_space_operations cifs_addr_ops_smallbuf = {
1961 .readpage = cifs_readpage,
1962 .writepage = cifs_writepage,
1963 .writepages = cifs_writepages,
1964 .prepare_write = cifs_prepare_write,
1965 .commit_write = cifs_commit_write,
1966 .set_page_dirty = __set_page_dirty_nobuffers,
1967 /* .sync_page = cifs_sync_page, */
1968 /* .direct_IO = */
1969};