1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/fs.h>
24#include <linux/backing-dev.h>
25#include <linux/stat.h>
26#include <linux/fcntl.h>
27#include <linux/mpage.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
30#include <linux/smp_lock.h>
31#include <linux/writeback.h>
32#include <linux/delay.h>
33#include <asm/div64.h>
34#include "cifsfs.h"
35#include "cifspdu.h"
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_unicode.h"
39#include "cifs_debug.h"
40#include "cifs_fs_sb.h"
41
42static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
45{
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem);
50 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE;
53 private_data->closePend = FALSE;
54 /* we have to track num writers to the inode, since writepages
55 does not tell us which handle the write is for so there can
56 be a close (overlapping with write) of the filehandle that
57 cifs_writepages chose to use */
58 atomic_set(&private_data->wrtPending,0);
59
60 return private_data;
61}
62
63static inline int cifs_convert_flags(unsigned int flags)
64{
65 if ((flags & O_ACCMODE) == O_RDONLY)
66 return GENERIC_READ;
67 else if ((flags & O_ACCMODE) == O_WRONLY)
68 return GENERIC_WRITE;
69 else if ((flags & O_ACCMODE) == O_RDWR) {
70 /* GENERIC_ALL is too much permission to request
71 can cause unnecessary access denied on create */
72 /* return GENERIC_ALL; */
73 return (GENERIC_READ | GENERIC_WRITE);
74 }
75
76 return 0x20197;
77}
78
79static inline int cifs_get_disposition(unsigned int flags)
80{
81 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
82 return FILE_CREATE;
83 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
84 return FILE_OVERWRITE_IF;
85 else if ((flags & O_CREAT) == O_CREAT)
86 return FILE_OPEN_IF;
87 else if ((flags & O_TRUNC) == O_TRUNC)
88 return FILE_OVERWRITE;
89 else
90 return FILE_OPEN;
91}
92
93/* all arguments to this function must be checked for validity in caller */
94static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
95 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
96 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
97 char *full_path, int xid)
98{
99 struct timespec temp;
100 int rc;
101
102 /* want handles we can use to read with first
103 in the list so we do not have to walk the
104 list to search for one in prepare_write */
105 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
106 list_add_tail(&pCifsFile->flist,
107 &pCifsInode->openFileList);
108 } else {
109 list_add(&pCifsFile->flist,
110 &pCifsInode->openFileList);
111 }
112 write_unlock(&GlobalSMBSeslock);
113 if (pCifsInode->clientCanCacheRead) {
114 /* we have the inode open somewhere else
115 no need to discard cache data */
116 goto client_can_cache;
117 }
118
119 /* BB need same check in cifs_create too? */
120 /* if not oplocked, invalidate inode pages if mtime or file
121 size changed */
122 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
123 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
124 (file->f_dentry->d_inode->i_size ==
125 (loff_t)le64_to_cpu(buf->EndOfFile))) {
126 cFYI(1, ("inode unchanged on server"));
127 } else {
128 if (file->f_dentry->d_inode->i_mapping) {
129 /* BB no need to lock inode until after invalidate
130 since namei code should already have it locked? */
131 filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
132 }
133 cFYI(1, ("invalidating remote inode since open detected it "
134 "changed"));
135 invalidate_remote_inode(file->f_dentry->d_inode);
136 }
137
138client_can_cache:
139 if (pTcon->ses->capabilities & CAP_UNIX)
140 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
141 full_path, inode->i_sb, xid);
142 else
143 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
144 full_path, buf, inode->i_sb, xid);
145
146 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
147 pCifsInode->clientCanCacheAll = TRUE;
148 pCifsInode->clientCanCacheRead = TRUE;
149 cFYI(1, ("Exclusive Oplock granted on inode %p",
150 file->f_dentry->d_inode));
151 } else if ((*oplock & 0xF) == OPLOCK_READ)
152 pCifsInode->clientCanCacheRead = TRUE;
153
154 return rc;
155}
156
157int cifs_open(struct inode *inode, struct file *file)
158{
159 int rc = -EACCES;
160 int xid, oplock;
161 struct cifs_sb_info *cifs_sb;
162 struct cifsTconInfo *pTcon;
163 struct cifsFileInfo *pCifsFile;
164 struct cifsInodeInfo *pCifsInode;
165 struct list_head *tmp;
166 char *full_path = NULL;
167 int desiredAccess;
168 int disposition;
169 __u16 netfid;
170 FILE_ALL_INFO *buf = NULL;
171
172 xid = GetXid();
173
174 cifs_sb = CIFS_SB(inode->i_sb);
175 pTcon = cifs_sb->tcon;
176
177 if (file->f_flags & O_CREAT) {
178 /* search inode for this file and fill in file->private_data */
179 pCifsInode = CIFS_I(file->f_dentry->d_inode);
180 read_lock(&GlobalSMBSeslock);
181 list_for_each(tmp, &pCifsInode->openFileList) {
182 pCifsFile = list_entry(tmp, struct cifsFileInfo,
183 flist);
184 if ((pCifsFile->pfile == NULL) &&
185 (pCifsFile->pid == current->tgid)) {
186 /* mode set in cifs_create */
187
188 /* needed for writepage */
189 pCifsFile->pfile = file;
190
191 file->private_data = pCifsFile;
192 break;
193 }
194 }
195 read_unlock(&GlobalSMBSeslock);
196 if (file->private_data != NULL) {
197 rc = 0;
198 FreeXid(xid);
199 return rc;
200 } else {
201 if (file->f_flags & O_EXCL)
202 cERROR(1, ("could not find file instance for "
203 "new file %p", file));
204 }
205 }
206
207 full_path = build_path_from_dentry(file->f_dentry);
208 if (full_path == NULL) {
209 FreeXid(xid);
210 return -ENOMEM;
211 }
212
213 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
214 inode, file->f_flags, full_path));
215 desiredAccess = cifs_convert_flags(file->f_flags);
216
217/*********************************************************************
218 * open flag mapping table:
219 *
220 * POSIX Flag CIFS Disposition
221 * ---------- ----------------
222 * O_CREAT FILE_OPEN_IF
223 * O_CREAT | O_EXCL FILE_CREATE
224 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
225 * O_TRUNC FILE_OVERWRITE
226 * none of the above FILE_OPEN
227 *
228 * Note that there is not a direct match between disposition
229 * FILE_SUPERSEDE (ie create whether or not file exists although
230 * O_CREAT | O_TRUNC is similar but truncates the existing
231 * file rather than creating a new file as FILE_SUPERSEDE does
232 * (which uses the attributes / metadata passed in on open call)
233 *?
234 *? O_SYNC is a reasonable match to CIFS writethrough flag
235 *? and the read write flags match reasonably. O_LARGEFILE
236 *? is irrelevant because largefile support is always used
237 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
238 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
239 *********************************************************************/
240
241 disposition = cifs_get_disposition(file->f_flags);
242
243 if (oplockEnabled)
244 oplock = REQ_OPLOCK;
245 else
246 oplock = FALSE;
247
248 /* BB pass O_SYNC flag through on file attributes .. BB */
249
250 /* Also refresh inode by passing in file_info buf returned by SMBOpen
251 and calling get_inode_info with returned buf (at least helps
252 non-Unix server case) */
253
254 /* BB we can not do this if this is the second open of a file
255 and the first handle has writebehind data, we might be
256 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
257 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
258 if (!buf) {
259 rc = -ENOMEM;
260 goto out;
261 }
262
263 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
264 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
265 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
266 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
267 & CIFS_MOUNT_MAP_SPECIAL_CHR);
268 else
269 rc = -EIO; /* no NT SMB support fall into legacy open below */
270
271 if (rc == -EIO) {
272 /* Old server, try legacy style OpenX */
273 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
274 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
275 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
276 & CIFS_MOUNT_MAP_SPECIAL_CHR);
277 }
278 if (rc) {
279 cFYI(1, ("cifs_open returned 0x%x", rc));
280 goto out;
281 }
282 file->private_data =
283 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
284 if (file->private_data == NULL) {
285 rc = -ENOMEM;
286 goto out;
287 }
288 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
289 write_lock(&GlobalSMBSeslock);
290 list_add(&pCifsFile->tlist, &pTcon->openFileList);
291
292 pCifsInode = CIFS_I(file->f_dentry->d_inode);
293 if (pCifsInode) {
294 rc = cifs_open_inode_helper(inode, file, pCifsInode,
295 pCifsFile, pTcon,
296 &oplock, buf, full_path, xid);
297 } else {
298 write_unlock(&GlobalSMBSeslock);
299 }
300
301 if (oplock & CIFS_CREATE_ACTION) {
302 /* time to set mode which we can not set earlier due to
303 problems creating new read-only files */
304 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
305 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
306 inode->i_mode,
307 (__u64)-1, (__u64)-1, 0 /* dev */,
308 cifs_sb->local_nls,
309 cifs_sb->mnt_cifs_flags &
310 CIFS_MOUNT_MAP_SPECIAL_CHR);
311 } else {
312 /* BB implement via Windows security descriptors eg
313 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
314 -1, -1, local_nls);
315 in the meantime could set r/o dos attribute when
316 perms are eg: mode & 0222 == 0 */
317 }
318 }
319
320out:
321 kfree(buf);
322 kfree(full_path);
323 FreeXid(xid);
324 return rc;
325}
326
327/* Try to reacquire byte range locks that were released when session */
328/* to server was lost */
329static int cifs_relock_file(struct cifsFileInfo *cifsFile)
330{
331 int rc = 0;
332
333/* BB list all locks open on this file and relock */
334
335 return rc;
336}
337
338static int cifs_reopen_file(struct inode *inode, struct file *file,
339 int can_flush)
340{
341 int rc = -EACCES;
342 int xid, oplock;
343 struct cifs_sb_info *cifs_sb;
344 struct cifsTconInfo *pTcon;
345 struct cifsFileInfo *pCifsFile;
346 struct cifsInodeInfo *pCifsInode;
347 char *full_path = NULL;
348 int desiredAccess;
349 int disposition = FILE_OPEN;
350 __u16 netfid;
351
352 if (inode == NULL)
353 return -EBADF;
354 if (file->private_data) {
355 pCifsFile = (struct cifsFileInfo *)file->private_data;
356 } else
357 return -EBADF;
358
359 xid = GetXid();
360 down(&pCifsFile->fh_sem);
361 if (pCifsFile->invalidHandle == FALSE) {
362 up(&pCifsFile->fh_sem);
363 FreeXid(xid);
364 return 0;
365 }
366
367 if (file->f_dentry == NULL) {
368 up(&pCifsFile->fh_sem);
369 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
370 FreeXid(xid);
371 return -EBADF;
372 }
373 cifs_sb = CIFS_SB(inode->i_sb);
374 pTcon = cifs_sb->tcon;
375/* can not grab rename sem here because various ops, including
376 those that already have the rename sem can end up causing writepage
377 to get called and if the server was down that means we end up here,
378 and we can never tell if the caller already has the rename_sem */
379 full_path = build_path_from_dentry(file->f_dentry);
380 if (full_path == NULL) {
381 up(&pCifsFile->fh_sem);
382 FreeXid(xid);
383 return -ENOMEM;
384 }
385
386 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
387 inode, file->f_flags,full_path));
388 desiredAccess = cifs_convert_flags(file->f_flags);
389
390 if (oplockEnabled)
391 oplock = REQ_OPLOCK;
392 else
393 oplock = FALSE;
394
395 /* Can not refresh inode by passing in file_info buf to be returned
396 by SMBOpen and then calling get_inode_info with returned buf
397 since file might have write behind data that needs to be flushed
398 and server version of file size can be stale. If we knew for sure
399 that inode was not dirty locally we could do this */
400
401/* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
402 if (buf == 0) {
403 up(&pCifsFile->fh_sem);
404 kfree(full_path);
405 FreeXid(xid);
406 return -ENOMEM;
407 } */
408 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
409 CREATE_NOT_DIR, &netfid, &oplock, NULL,
410 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
411 CIFS_MOUNT_MAP_SPECIAL_CHR);
412 if (rc) {
413 up(&pCifsFile->fh_sem);
414 cFYI(1, ("cifs_open returned 0x%x", rc));
415 cFYI(1, ("oplock: %d", oplock));
416 } else {
417 pCifsFile->netfid = netfid;
418 pCifsFile->invalidHandle = FALSE;
419 up(&pCifsFile->fh_sem);
420 pCifsInode = CIFS_I(inode);
421 if (pCifsInode) {
422 if (can_flush) {
423 filemap_write_and_wait(inode->i_mapping);
424 /* temporarily disable caching while we
425 go to server to get inode info */
426 pCifsInode->clientCanCacheAll = FALSE;
427 pCifsInode->clientCanCacheRead = FALSE;
428 if (pTcon->ses->capabilities & CAP_UNIX)
429 rc = cifs_get_inode_info_unix(&inode,
430 full_path, inode->i_sb, xid);
431 else
432 rc = cifs_get_inode_info(&inode,
433 full_path, NULL, inode->i_sb,
434 xid);
435 } /* else we are writing out data to server already
436 and could deadlock if we tried to flush data, and
437 since we do not know if we have data that would
438 invalidate the current end of file on the server
439 we can not go to the server to get the new inode
440 info */
441 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
442 pCifsInode->clientCanCacheAll = TRUE;
443 pCifsInode->clientCanCacheRead = TRUE;
444 cFYI(1, ("Exclusive Oplock granted on inode %p",
445 file->f_dentry->d_inode));
446 } else if ((oplock & 0xF) == OPLOCK_READ) {
447 pCifsInode->clientCanCacheRead = TRUE;
448 pCifsInode->clientCanCacheAll = FALSE;
449 } else {
450 pCifsInode->clientCanCacheRead = FALSE;
451 pCifsInode->clientCanCacheAll = FALSE;
452 }
453 cifs_relock_file(pCifsFile);
454 }
455 }
456
457 kfree(full_path);
458 FreeXid(xid);
459 return rc;
460}
461
462int cifs_close(struct inode *inode, struct file *file)
463{
464 int rc = 0;
465 int xid;
466 struct cifs_sb_info *cifs_sb;
467 struct cifsTconInfo *pTcon;
468 struct cifsFileInfo *pSMBFile =
469 (struct cifsFileInfo *)file->private_data;
470
471 xid = GetXid();
472
473 cifs_sb = CIFS_SB(inode->i_sb);
474 pTcon = cifs_sb->tcon;
475 if (pSMBFile) {
476 pSMBFile->closePend = TRUE;
477 if (pTcon) {
478 /* no sense reconnecting to close a file that is
479 already closed */
480 if (pTcon->tidStatus != CifsNeedReconnect) {
481 int timeout = 2;
482 while((atomic_read(&pSMBFile->wrtPending) != 0)
483 && (timeout < 1000) ) {
484 /* Give write a better chance to get to
485 server ahead of the close. We do not
486 want to add a wait_q here as it would
487 increase the memory utilization as
488 the struct would be in each open file,
489 but this should give enough time to
490 clear the socket */
491 cERROR(1,("close with pending writes"));
492 msleep(timeout);
493 timeout *= 4;
494 }
495 rc = CIFSSMBClose(xid, pTcon,
496 pSMBFile->netfid);
497 }
498 }
499 write_lock(&GlobalSMBSeslock);
500 list_del(&pSMBFile->flist);
501 list_del(&pSMBFile->tlist);
502 write_unlock(&GlobalSMBSeslock);
503 kfree(pSMBFile->search_resume_name);
504 kfree(file->private_data);
505 file->private_data = NULL;
506 } else
507 rc = -EBADF;
508
509 if (list_empty(&(CIFS_I(inode)->openFileList))) {
510 cFYI(1, ("closing last open instance for inode %p", inode));
511 /* if the file is not open we do not know if we can cache info
512 on this inode, much less write behind and read ahead */
513 CIFS_I(inode)->clientCanCacheRead = FALSE;
514 CIFS_I(inode)->clientCanCacheAll = FALSE;
515 }
516 if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
517 rc = CIFS_I(inode)->write_behind_rc;
518 FreeXid(xid);
519 return rc;
520}
521
522int cifs_closedir(struct inode *inode, struct file *file)
523{
524 int rc = 0;
525 int xid;
526 struct cifsFileInfo *pCFileStruct =
527 (struct cifsFileInfo *)file->private_data;
528 char *ptmp;
529
530 cFYI(1, ("Closedir inode = 0x%p", inode));
531
532 xid = GetXid();
533
534 if (pCFileStruct) {
535 struct cifsTconInfo *pTcon;
536 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
537
538 pTcon = cifs_sb->tcon;
539
540 cFYI(1, ("Freeing private data in close dir"));
541 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
542 (pCFileStruct->invalidHandle == FALSE)) {
543 pCFileStruct->invalidHandle = TRUE;
544 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
545 cFYI(1, ("Closing uncompleted readdir with rc %d",
546 rc));
547 /* not much we can do if it fails anyway, ignore rc */
548 rc = 0;
549 }
550 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
551 if (ptmp) {
552 cFYI(1, ("closedir free smb buf in srch struct"));
553 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
554 if(pCFileStruct->srch_inf.smallBuf)
555 cifs_small_buf_release(ptmp);
556 else
557 cifs_buf_release(ptmp);
558 }
559 ptmp = pCFileStruct->search_resume_name;
560 if (ptmp) {
561 cFYI(1, ("closedir free resume name"));
562 pCFileStruct->search_resume_name = NULL;
563 kfree(ptmp);
564 }
565 kfree(file->private_data);
566 file->private_data = NULL;
567 }
568 /* BB can we lock the filestruct while this is going on? */
569 FreeXid(xid);
570 return rc;
571}
572
573int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
574{
575 int rc, xid;
576 __u32 numLock = 0;
577 __u32 numUnlock = 0;
578 __u64 length;
579 int wait_flag = FALSE;
580 struct cifs_sb_info *cifs_sb;
581 struct cifsTconInfo *pTcon;
582 __u16 netfid;
583 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
584
585 length = 1 + pfLock->fl_end - pfLock->fl_start;
586 rc = -EACCES;
587 xid = GetXid();
588
589 cFYI(1, ("Lock parm: 0x%x flockflags: "
590 "0x%x flocktype: 0x%x start: %lld end: %lld",
591 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
592 pfLock->fl_end));
593
594 if (pfLock->fl_flags & FL_POSIX)
595 cFYI(1, ("Posix"));
596 if (pfLock->fl_flags & FL_FLOCK)
597 cFYI(1, ("Flock"));
598 if (pfLock->fl_flags & FL_SLEEP) {
599 cFYI(1, ("Blocking lock"));
600 wait_flag = TRUE;
601 }
602 if (pfLock->fl_flags & FL_ACCESS)
603 cFYI(1, ("Process suspended by mandatory locking - "
604 "not implemented yet"));
605 if (pfLock->fl_flags & FL_LEASE)
606 cFYI(1, ("Lease on file - not implemented yet"));
607 if (pfLock->fl_flags &
608 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
609 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
610
611 if (pfLock->fl_type == F_WRLCK) {
612 cFYI(1, ("F_WRLCK "));
613 numLock = 1;
614 } else if (pfLock->fl_type == F_UNLCK) {
615 cFYI(1, ("F_UNLCK"));
616 numUnlock = 1;
617 /* Check if unlock includes more than
618 one lock range */
619 } else if (pfLock->fl_type == F_RDLCK) {
620 cFYI(1, ("F_RDLCK"));
621 lockType |= LOCKING_ANDX_SHARED_LOCK;
622 numLock = 1;
623 } else if (pfLock->fl_type == F_EXLCK) {
624 cFYI(1, ("F_EXLCK"));
625 numLock = 1;
626 } else if (pfLock->fl_type == F_SHLCK) {
627 cFYI(1, ("F_SHLCK"));
628 lockType |= LOCKING_ANDX_SHARED_LOCK;
629 numLock = 1;
630 } else
631 cFYI(1, ("Unknown type of lock"));
632
633 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
634 pTcon = cifs_sb->tcon;
635
636 if (file->private_data == NULL) {
637 FreeXid(xid);
638 return -EBADF;
639 }
640 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
641
642
643 /* BB add code here to normalize offset and length to
644 account for negative length which we can not accept over the
645 wire */
646 if (IS_GETLK(cmd)) {
647 if(experimEnabled &&
648 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
649 (CIFS_UNIX_FCNTL_CAP &
650 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
651 int posix_lock_type;
652 if(lockType & LOCKING_ANDX_SHARED_LOCK)
653 posix_lock_type = CIFS_RDLCK;
654 else
655 posix_lock_type = CIFS_WRLCK;
656 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
657 length, pfLock,
658 posix_lock_type, wait_flag);
659 FreeXid(xid);
660 return rc;
661 }
662
663 /* BB we could chain these into one lock request BB */
664 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
665 0, 1, lockType, 0 /* wait flag */ );
666 if (rc == 0) {
667 rc = CIFSSMBLock(xid, pTcon, netfid, length,
668 pfLock->fl_start, 1 /* numUnlock */ ,
669 0 /* numLock */ , lockType,
670 0 /* wait flag */ );
671 pfLock->fl_type = F_UNLCK;
672 if (rc != 0)
673 cERROR(1, ("Error unlocking previously locked "
674 "range %d during test of lock", rc));
675 rc = 0;
676
677 } else {
678 /* if rc == ERR_SHARING_VIOLATION ? */
679 rc = 0; /* do not change lock type to unlock
680 since range in use */
681 }
682
683 FreeXid(xid);
684 return rc;
685 }
686 if (experimEnabled &&
687 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
688 (CIFS_UNIX_FCNTL_CAP &
689 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
690 int posix_lock_type;
691 if(lockType & LOCKING_ANDX_SHARED_LOCK)
692 posix_lock_type = CIFS_RDLCK;
693 else
694 posix_lock_type = CIFS_WRLCK;
695
696 if(numUnlock == 1)
697 posix_lock_type = CIFS_UNLCK;
698 else if(numLock == 0) {
699 /* if no lock or unlock then nothing
700 to do since we do not know what it is */
701 FreeXid(xid);
702 return -EOPNOTSUPP;
703 }
704 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
705 length, pfLock,
706 posix_lock_type, wait_flag);
707 } else
708 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
709 numUnlock, numLock, lockType, wait_flag);
710 if (pfLock->fl_flags & FL_POSIX)
711 posix_lock_file_wait(file, pfLock);
712 FreeXid(xid);
713 return rc;
714}
715
716ssize_t cifs_user_write(struct file *file, const char __user *write_data,
717 size_t write_size, loff_t *poffset)
718{
719 int rc = 0;
720 unsigned int bytes_written = 0;
721 unsigned int total_written;
722 struct cifs_sb_info *cifs_sb;
723 struct cifsTconInfo *pTcon;
724 int xid, long_op;
725 struct cifsFileInfo *open_file;
726
727 if (file->f_dentry == NULL)
728 return -EBADF;
729
730 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
731 if (cifs_sb == NULL)
732 return -EBADF;
733
734 pTcon = cifs_sb->tcon;
735
736 /* cFYI(1,
737 (" write %d bytes to offset %lld of %s", write_size,
738 *poffset, file->f_dentry->d_name.name)); */
739
740 if (file->private_data == NULL)
741 return -EBADF;
742 else
743 open_file = (struct cifsFileInfo *) file->private_data;
744
745 xid = GetXid();
746 if (file->f_dentry->d_inode == NULL) {
747 FreeXid(xid);
748 return -EBADF;
749 }
750
751 if (*poffset > file->f_dentry->d_inode->i_size)
752 long_op = 2; /* writes past end of file can take a long time */
753 else
754 long_op = 1;
755
756 for (total_written = 0; write_size > total_written;
757 total_written += bytes_written) {
758 rc = -EAGAIN;
759 while (rc == -EAGAIN) {
760 if (file->private_data == NULL) {
761 /* file has been closed on us */
762 FreeXid(xid);
763 /* if we have gotten here we have written some data
764 and blocked, and the file has been freed on us while
765 we blocked so return what we managed to write */
766 return total_written;
767 }
768 if (open_file->closePend) {
769 FreeXid(xid);
770 if (total_written)
771 return total_written;
772 else
773 return -EBADF;
774 }
775 if (open_file->invalidHandle) {
776 if ((file->f_dentry == NULL) ||
777 (file->f_dentry->d_inode == NULL)) {
778 FreeXid(xid);
779 return total_written;
780 }
781 /* we could deadlock if we called
782 filemap_fdatawait from here so tell
783 reopen_file not to flush data to server
784 now */
785 rc = cifs_reopen_file(file->f_dentry->d_inode,
786 file, FALSE);
787 if (rc != 0)
788 break;
789 }
790
791 rc = CIFSSMBWrite(xid, pTcon,
792 open_file->netfid,
793 min_t(const int, cifs_sb->wsize,
794 write_size - total_written),
795 *poffset, &bytes_written,
796 NULL, write_data + total_written, long_op);
797 }
798 if (rc || (bytes_written == 0)) {
799 if (total_written)
800 break;
801 else {
802 FreeXid(xid);
803 return rc;
804 }
805 } else
806 *poffset += bytes_written;
807 long_op = FALSE; /* subsequent writes fast -
808 15 seconds is plenty */
809 }
810
811 cifs_stats_bytes_written(pTcon, total_written);
812
813 /* since the write may have blocked check these pointers again */
814 if (file->f_dentry) {
815 if (file->f_dentry->d_inode) {
816 struct inode *inode = file->f_dentry->d_inode;
817 inode->i_ctime = inode->i_mtime =
818 current_fs_time(inode->i_sb);
819 if (total_written > 0) {
820 if (*poffset > file->f_dentry->d_inode->i_size)
821 i_size_write(file->f_dentry->d_inode,
822 *poffset);
823 }
824 mark_inode_dirty_sync(file->f_dentry->d_inode);
825 }
826 }
827 FreeXid(xid);
828 return total_written;
829}
830
831static ssize_t cifs_write(struct file *file, const char *write_data,
832 size_t write_size, loff_t *poffset)
833{
834 int rc = 0;
835 unsigned int bytes_written = 0;
836 unsigned int total_written;
837 struct cifs_sb_info *cifs_sb;
838 struct cifsTconInfo *pTcon;
839 int xid, long_op;
840 struct cifsFileInfo *open_file;
841
842 if (file->f_dentry == NULL)
843 return -EBADF;
844
845 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
846 if (cifs_sb == NULL)
847 return -EBADF;
848
849 pTcon = cifs_sb->tcon;
850
851 cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
852 *poffset, file->f_dentry->d_name.name));
853
854 if (file->private_data == NULL)
855 return -EBADF;
856 else
857 open_file = (struct cifsFileInfo *)file->private_data;
858
859 xid = GetXid();
860 if (file->f_dentry->d_inode == NULL) {
861 FreeXid(xid);
862 return -EBADF;
863 }
864
865 if (*poffset > file->f_dentry->d_inode->i_size)
866 long_op = 2; /* writes past end of file can take a long time */
867 else
868 long_op = 1;
869
870 for (total_written = 0; write_size > total_written;
871 total_written += bytes_written) {
872 rc = -EAGAIN;
873 while (rc == -EAGAIN) {
874 if (file->private_data == NULL) {
875 /* file has been closed on us */
876 FreeXid(xid);
877 /* if we have gotten here we have written some data
878 and blocked, and the file has been freed on us
879 while we blocked so return what we managed to
880 write */
881 return total_written;
882 }
883 if (open_file->closePend) {
884 FreeXid(xid);
885 if (total_written)
886 return total_written;
887 else
888 return -EBADF;
889 }
890 if (open_file->invalidHandle) {
891 if ((file->f_dentry == NULL) ||
892 (file->f_dentry->d_inode == NULL)) {
893 FreeXid(xid);
894 return total_written;
895 }
896 /* we could deadlock if we called
897 filemap_fdatawait from here so tell
898 reopen_file not to flush data to
899 server now */
900 rc = cifs_reopen_file(file->f_dentry->d_inode,
901 file, FALSE);
902 if (rc != 0)
903 break;
904 }
905 if(experimEnabled || (pTcon->ses->server &&
906 ((pTcon->ses->server->secMode &
907 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
908 == 0))) {
909 struct kvec iov[2];
910 unsigned int len;
911
912 len = min((size_t)cifs_sb->wsize,
913 write_size - total_written);
914 /* iov[0] is reserved for smb header */
915 iov[1].iov_base = (char *)write_data +
916 total_written;
917 iov[1].iov_len = len;
918 rc = CIFSSMBWrite2(xid, pTcon,
919 open_file->netfid, len,
920 *poffset, &bytes_written,
921 iov, 1, long_op);
922 } else
923 rc = CIFSSMBWrite(xid, pTcon,
924 open_file->netfid,
925 min_t(const int, cifs_sb->wsize,
926 write_size - total_written),
927 *poffset, &bytes_written,
928 write_data + total_written,
929 NULL, long_op);
930 }
931 if (rc || (bytes_written == 0)) {
932 if (total_written)
933 break;
934 else {
935 FreeXid(xid);
936 return rc;
937 }
938 } else
939 *poffset += bytes_written;
940 long_op = FALSE; /* subsequent writes fast -
941 15 seconds is plenty */
942 }
943
944 cifs_stats_bytes_written(pTcon, total_written);
945
946 /* since the write may have blocked check these pointers again */
947 if (file->f_dentry) {
948 if (file->f_dentry->d_inode) {
949 file->f_dentry->d_inode->i_ctime =
950 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
951 if (total_written > 0) {
952 if (*poffset > file->f_dentry->d_inode->i_size)
953 i_size_write(file->f_dentry->d_inode,
954 *poffset);
955 }
956 mark_inode_dirty_sync(file->f_dentry->d_inode);
957 }
958 }
959 FreeXid(xid);
960 return total_written;
961}
962
963struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
964{
965 struct cifsFileInfo *open_file;
966 int rc;
967
968 /* Having a null inode here (because mapping->host was set to zero by
969 the VFS or MM) should not happen but we had reports of an oops (due to
970 it being zero) during stress testcases so we need to check for it */
971
972 if(cifs_inode == NULL) {
973 cERROR(1,("Null inode passed to cifs_writeable_file"));
974 dump_stack();
975 return NULL;
976 }
977
978 read_lock(&GlobalSMBSeslock);
979 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
980 if (open_file->closePend)
981 continue;
982 if (open_file->pfile &&
983 ((open_file->pfile->f_flags & O_RDWR) ||
984 (open_file->pfile->f_flags & O_WRONLY))) {
985 atomic_inc(&open_file->wrtPending);
986 read_unlock(&GlobalSMBSeslock);
987 if((open_file->invalidHandle) &&
988 (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
989 rc = cifs_reopen_file(&cifs_inode->vfs_inode,
990 open_file->pfile, FALSE);
991 /* if it fails, try another handle - might be */
992 /* dangerous to hold up writepages with retry */
993 if(rc) {
994 cFYI(1,("failed on reopen file in wp"));
995 read_lock(&GlobalSMBSeslock);
996 /* can not use this handle, no write
997 pending on this one after all */
998 atomic_dec
999 (&open_file->wrtPending);
1000 continue;
1001 }
1002 }
1003 return open_file;
1004 }
1005 }
1006 read_unlock(&GlobalSMBSeslock);
1007 return NULL;
1008}
1009
1010static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1011{
1012 struct address_space *mapping = page->mapping;
1013 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1014 char *write_data;
1015 int rc = -EFAULT;
1016 int bytes_written = 0;
1017 struct cifs_sb_info *cifs_sb;
1018 struct cifsTconInfo *pTcon;
1019 struct inode *inode;
1020 struct cifsFileInfo *open_file;
1021
1022 if (!mapping || !mapping->host)
1023 return -EFAULT;
1024
1025 inode = page->mapping->host;
1026 cifs_sb = CIFS_SB(inode->i_sb);
1027 pTcon = cifs_sb->tcon;
1028
1029 offset += (loff_t)from;
1030 write_data = kmap(page);
1031 write_data += from;
1032
1033 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1034 kunmap(page);
1035 return -EIO;
1036 }
1037
1038 /* racing with truncate? */
1039 if (offset > mapping->host->i_size) {
1040 kunmap(page);
1041 return 0; /* don't care */
1042 }
1043
1044 /* check to make sure that we are not extending the file */
1045 if (mapping->host->i_size - offset < (loff_t)to)
1046 to = (unsigned)(mapping->host->i_size - offset);
1047
1048 open_file = find_writable_file(CIFS_I(mapping->host));
1049 if (open_file) {
1050 bytes_written = cifs_write(open_file->pfile, write_data,
1051 to-from, &offset);
1052 atomic_dec(&open_file->wrtPending);
1053 /* Does mm or vfs already set times? */
1054 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1055 if ((bytes_written > 0) && (offset)) {
1056 rc = 0;
1057 } else if (bytes_written < 0) {
1058 if (rc != -EBADF)
1059 rc = bytes_written;
1060 }
1061 } else {
1062 cFYI(1, ("No writeable filehandles for inode"));
1063 rc = -EIO;
1064 }
1065
1066 kunmap(page);
1067 return rc;
1068}
1069
1070static int cifs_writepages(struct address_space *mapping,
1071 struct writeback_control *wbc)
1072{
1073 struct backing_dev_info *bdi = mapping->backing_dev_info;
1074 unsigned int bytes_to_write;
1075 unsigned int bytes_written;
1076 struct cifs_sb_info *cifs_sb;
1077 int done = 0;
1078 pgoff_t end;
1079 pgoff_t index;
1080 int range_whole = 0;
1081 struct kvec iov[32];
1082 int len;
1083 int n_iov = 0;
1084 pgoff_t next;
1085 int nr_pages;
1086 __u64 offset = 0;
1087 struct cifsFileInfo *open_file;
1088 struct page *page;
1089 struct pagevec pvec;
1090 int rc = 0;
1091 int scanned = 0;
1092 int xid;
1093
1094 cifs_sb = CIFS_SB(mapping->host->i_sb);
1095
1096 /*
1097 * If wsize is smaller that the page cache size, default to writing
1098 * one page at a time via cifs_writepage
1099 */
1100 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1101 return generic_writepages(mapping, wbc);
1102
1103 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1104 if(cifs_sb->tcon->ses->server->secMode &
1105 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1106 if(!experimEnabled)
1107 return generic_writepages(mapping, wbc);
1108
1109 /*
1110 * BB: Is this meaningful for a non-block-device file system?
1111 * If it is, we should test it again after we do I/O
1112 */
1113 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1114 wbc->encountered_congestion = 1;
1115 return 0;
1116 }
1117
1118 xid = GetXid();
1119
1120 pagevec_init(&pvec, 0);
1121 if (wbc->range_cyclic) {
1122 index = mapping->writeback_index; /* Start from prev offset */
1123 end = -1;
1124 } else {
1125 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1126 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1127 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1128 range_whole = 1;
1129 scanned = 1;
1130 }
1131retry:
1132 while (!done && (index <= end) &&
1133 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1134 PAGECACHE_TAG_DIRTY,
1135 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1136 int first;
1137 unsigned int i;
1138
1139 first = -1;
1140 next = 0;
1141 n_iov = 0;
1142 bytes_to_write = 0;
1143
1144 for (i = 0; i < nr_pages; i++) {
1145 page = pvec.pages[i];
1146 /*
1147 * At this point we hold neither mapping->tree_lock nor
1148 * lock on the page itself: the page may be truncated or
1149 * invalidated (changing page->mapping to NULL), or even
1150 * swizzled back from swapper_space to tmpfs file
1151 * mapping
1152 */
1153
1154 if (first < 0)
1155 lock_page(page);
1156 else if (TestSetPageLocked(page))
1157 break;
1158
1159 if (unlikely(page->mapping != mapping)) {
1160 unlock_page(page);
1161 break;
1162 }
1163
1164 if (!wbc->range_cyclic && page->index > end) {
1165 done = 1;
1166 unlock_page(page);
1167 break;
1168 }
1169
1170 if (next && (page->index != next)) {
1171 /* Not next consecutive page */
1172 unlock_page(page);
1173 break;
1174 }
1175
1176 if (wbc->sync_mode != WB_SYNC_NONE)
1177 wait_on_page_writeback(page);
1178
1179 if (PageWriteback(page) ||
1180 !test_clear_page_dirty(page)) {
1181 unlock_page(page);
1182 break;
1183 }
1184
1185 if (page_offset(page) >= mapping->host->i_size) {
1186 done = 1;
1187 unlock_page(page);
1188 break;
1189 }
1190
1191 /*
1192 * BB can we get rid of this? pages are held by pvec
1193 */
1194 page_cache_get(page);
1195
1196 len = min(mapping->host->i_size - page_offset(page),
1197 (loff_t)PAGE_CACHE_SIZE);
1198
1199 /* reserve iov[0] for the smb header */
1200 n_iov++;
1201 iov[n_iov].iov_base = kmap(page);
1202 iov[n_iov].iov_len = len;
1203 bytes_to_write += len;
1204
1205 if (first < 0) {
1206 first = i;
1207 offset = page_offset(page);
1208 }
1209 next = page->index + 1;
1210 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1211 break;
1212 }
1213 if (n_iov) {
1214 /* Search for a writable handle every time we call
1215 * CIFSSMBWrite2. We can't rely on the last handle
1216 * we used to still be valid
1217 */
1218 open_file = find_writable_file(CIFS_I(mapping->host));
1219 if (!open_file) {
1220 cERROR(1, ("No writable handles for inode"));
1221 rc = -EBADF;
1222 } else {
1223 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1224 open_file->netfid,
1225 bytes_to_write, offset,
1226 &bytes_written, iov, n_iov,
1227 1);
1228 atomic_dec(&open_file->wrtPending);
1229 if (rc || bytes_written < bytes_to_write) {
1230 cERROR(1,("Write2 ret %d, written = %d",
1231 rc, bytes_written));
1232 /* BB what if continued retry is
1233 requested via mount flags? */
1234 set_bit(AS_EIO, &mapping->flags);
1235 } else {
1236 cifs_stats_bytes_written(cifs_sb->tcon,
1237 bytes_written);
1238 }
1239 }
1240 for (i = 0; i < n_iov; i++) {
1241 page = pvec.pages[first + i];
1242 /* Should we also set page error on
1243 success rc but too little data written? */
1244 /* BB investigate retry logic on temporary
1245 server crash cases and how recovery works
1246 when page marked as error */
1247 if(rc)
1248 SetPageError(page);
1249 kunmap(page);
1250 unlock_page(page);
1251 page_cache_release(page);
1252 }
1253 if ((wbc->nr_to_write -= n_iov) <= 0)
1254 done = 1;
1255 index = next;
1256 }
1257 pagevec_release(&pvec);
1258 }
1259 if (!scanned && !done) {
1260 /*
1261 * We hit the last page and there is more work to be done: wrap
1262 * back to the start of the file
1263 */
1264 scanned = 1;
1265 index = 0;
1266 goto retry;
1267 }
1268 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1269 mapping->writeback_index = index;
1270
1271 FreeXid(xid);
1272
1273 return rc;
1274}
1275
1276static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1277{
1278 int rc = -EFAULT;
1279 int xid;
1280
1281 xid = GetXid();
1282/* BB add check for wbc flags */
1283 page_cache_get(page);
1284 if (!PageUptodate(page)) {
1285 cFYI(1, ("ppw - page not up to date"));
1286 }
1287
1288 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1289 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1290 unlock_page(page);
1291 page_cache_release(page);
1292 FreeXid(xid);
1293 return rc;
1294}
1295
1296static int cifs_commit_write(struct file *file, struct page *page,
1297 unsigned offset, unsigned to)
1298{
1299 int xid;
1300 int rc = 0;
1301 struct inode *inode = page->mapping->host;
1302 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1303 char *page_data;
1304
1305 xid = GetXid();
1306 cFYI(1, ("commit write for page %p up to position %lld for %d",
1307 page, position, to));
1308 if (position > inode->i_size) {
1309 i_size_write(inode, position);
1310 /* if (file->private_data == NULL) {
1311 rc = -EBADF;
1312 } else {
1313 open_file = (struct cifsFileInfo *)file->private_data;
1314 cifs_sb = CIFS_SB(inode->i_sb);
1315 rc = -EAGAIN;
1316 while (rc == -EAGAIN) {
1317 if ((open_file->invalidHandle) &&
1318 (!open_file->closePend)) {
1319 rc = cifs_reopen_file(
1320 file->f_dentry->d_inode, file);
1321 if (rc != 0)
1322 break;
1323 }
1324 if (!open_file->closePend) {
1325 rc = CIFSSMBSetFileSize(xid,
1326 cifs_sb->tcon, position,
1327 open_file->netfid,
1328 open_file->pid, FALSE);
1329 } else {
1330 rc = -EBADF;
1331 break;
1332 }
1333 }
1334 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1335 } */
1336 }
1337 if (!PageUptodate(page)) {
1338 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1339 /* can not rely on (or let) writepage write this data */
1340 if (to < offset) {
1341 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1342 offset, to));
1343 FreeXid(xid);
1344 return rc;
1345 }
1346 /* this is probably better than directly calling
1347 partialpage_write since in this function the file handle is
1348 known which we might as well leverage */
1349 /* BB check if anything else missing out of ppw
1350 such as updating last write time */
1351 page_data = kmap(page);
1352 rc = cifs_write(file, page_data + offset, to-offset,
1353 &position);
1354 if (rc > 0)
1355 rc = 0;
1356 /* else if (rc < 0) should we set writebehind rc? */
1357 kunmap(page);
1358 } else {
1359 set_page_dirty(page);
1360 }
1361
1362 FreeXid(xid);
1363 return rc;
1364}
1365
1366int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1367{
1368 int xid;
1369 int rc = 0;
1370 struct inode *inode = file->f_dentry->d_inode;
1371
1372 xid = GetXid();
1373
1374 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1375 dentry->d_name.name, datasync));
1376
1377 rc = filemap_fdatawrite(inode->i_mapping);
1378 if (rc == 0)
1379 CIFS_I(inode)->write_behind_rc = 0;
1380 FreeXid(xid);
1381 return rc;
1382}
1383
1384/* static void cifs_sync_page(struct page *page)
1385{
1386 struct address_space *mapping;
1387 struct inode *inode;
1388 unsigned long index = page->index;
1389 unsigned int rpages = 0;
1390 int rc = 0;
1391
1392 cFYI(1, ("sync page %p",page));
1393 mapping = page->mapping;
1394 if (!mapping)
1395 return 0;
1396 inode = mapping->host;
1397 if (!inode)
1398 return; */
1399
1400/* fill in rpages then
1401 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1402
1403/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1404
1405#if 0
1406 if (rc < 0)
1407 return rc;
1408 return 0;
1409#endif
1410} */
1411
1412/*
1413 * As file closes, flush all cached write data for this inode checking
1414 * for write behind errors.
1415 */
1416int cifs_flush(struct file *file, fl_owner_t id)
1417{
1418 struct inode * inode = file->f_dentry->d_inode;
1419 int rc = 0;
1420
1421 /* Rather than do the steps manually:
1422 lock the inode for writing
1423 loop through pages looking for write behind data (dirty pages)
1424 coalesce into contiguous 16K (or smaller) chunks to write to server
1425 send to server (prefer in parallel)
1426 deal with writebehind errors
1427 unlock inode for writing
1428 filemapfdatawrite appears easier for the time being */
1429
1430 rc = filemap_fdatawrite(inode->i_mapping);
1431 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1432 CIFS_I(inode)->write_behind_rc = 0;
1433
1434 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1435
1436 return rc;
1437}
1438
1439ssize_t cifs_user_read(struct file *file, char __user *read_data,
1440 size_t read_size, loff_t *poffset)
1441{
1442 int rc = -EACCES;
1443 unsigned int bytes_read = 0;
1444 unsigned int total_read = 0;
1445 unsigned int current_read_size;
1446 struct cifs_sb_info *cifs_sb;
1447 struct cifsTconInfo *pTcon;
1448 int xid;
1449 struct cifsFileInfo *open_file;
1450 char *smb_read_data;
1451 char __user *current_offset;
1452 struct smb_com_read_rsp *pSMBr;
1453
1454 xid = GetXid();
1455 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1456 pTcon = cifs_sb->tcon;
1457
1458 if (file->private_data == NULL) {
1459 FreeXid(xid);
1460 return -EBADF;
1461 }
1462 open_file = (struct cifsFileInfo *)file->private_data;
1463
1464 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1465 cFYI(1, ("attempting read on write only file instance"));
1466 }
1467 for (total_read = 0, current_offset = read_data;
1468 read_size > total_read;
1469 total_read += bytes_read, current_offset += bytes_read) {
1470 current_read_size = min_t(const int, read_size - total_read,
1471 cifs_sb->rsize);
1472 rc = -EAGAIN;
1473 smb_read_data = NULL;
1474 while (rc == -EAGAIN) {
1475 int buf_type = CIFS_NO_BUFFER;
1476 if ((open_file->invalidHandle) &&
1477 (!open_file->closePend)) {
1478 rc = cifs_reopen_file(file->f_dentry->d_inode,
1479 file, TRUE);
1480 if (rc != 0)
1481 break;
1482 }
1483 rc = CIFSSMBRead(xid, pTcon,
1484 open_file->netfid,
1485 current_read_size, *poffset,
1486 &bytes_read, &smb_read_data,
1487 &buf_type);
1488 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1489 if (smb_read_data) {
1490 if (copy_to_user(current_offset,
1491 smb_read_data +
1492 4 /* RFC1001 length field */ +
1493 le16_to_cpu(pSMBr->DataOffset),
1494 bytes_read)) {
1495 rc = -EFAULT;
1496 }
1497
1498 if(buf_type == CIFS_SMALL_BUFFER)
1499 cifs_small_buf_release(smb_read_data);
1500 else if(buf_type == CIFS_LARGE_BUFFER)
1501 cifs_buf_release(smb_read_data);
1502 smb_read_data = NULL;
1503 }
1504 }
1505 if (rc || (bytes_read == 0)) {
1506 if (total_read) {
1507 break;
1508 } else {
1509 FreeXid(xid);
1510 return rc;
1511 }
1512 } else {
1513 cifs_stats_bytes_read(pTcon, bytes_read);
1514 *poffset += bytes_read;
1515 }
1516 }
1517 FreeXid(xid);
1518 return total_read;
1519}
1520
1521
1522static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1523 loff_t *poffset)
1524{
1525 int rc = -EACCES;
1526 unsigned int bytes_read = 0;
1527 unsigned int total_read;
1528 unsigned int current_read_size;
1529 struct cifs_sb_info *cifs_sb;
1530 struct cifsTconInfo *pTcon;
1531 int xid;
1532 char *current_offset;
1533 struct cifsFileInfo *open_file;
1534 int buf_type = CIFS_NO_BUFFER;
1535
1536 xid = GetXid();
1537 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1538 pTcon = cifs_sb->tcon;
1539
1540 if (file->private_data == NULL) {
1541 FreeXid(xid);
1542 return -EBADF;
1543 }
1544 open_file = (struct cifsFileInfo *)file->private_data;
1545
1546 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1547 cFYI(1, ("attempting read on write only file instance"));
1548
1549 for (total_read = 0, current_offset = read_data;
1550 read_size > total_read;
1551 total_read += bytes_read, current_offset += bytes_read) {
1552 current_read_size = min_t(const int, read_size - total_read,
1553 cifs_sb->rsize);
1554 /* For windows me and 9x we do not want to request more
1555 than it negotiated since it will refuse the read then */
1556 if((pTcon->ses) &&
1557 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1558 current_read_size = min_t(const int, current_read_size,
1559 pTcon->ses->server->maxBuf - 128);
1560 }
1561 rc = -EAGAIN;
1562 while (rc == -EAGAIN) {
1563 if ((open_file->invalidHandle) &&
1564 (!open_file->closePend)) {
1565 rc = cifs_reopen_file(file->f_dentry->d_inode,
1566 file, TRUE);
1567 if (rc != 0)
1568 break;
1569 }
1570 rc = CIFSSMBRead(xid, pTcon,
1571 open_file->netfid,
1572 current_read_size, *poffset,
1573 &bytes_read, &current_offset,
1574 &buf_type);
1575 }
1576 if (rc || (bytes_read == 0)) {
1577 if (total_read) {
1578 break;
1579 } else {
1580 FreeXid(xid);
1581 return rc;
1582 }
1583 } else {
1584 cifs_stats_bytes_read(pTcon, total_read);
1585 *poffset += bytes_read;
1586 }
1587 }
1588 FreeXid(xid);
1589 return total_read;
1590}
1591
1592int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1593{
1594 struct dentry *dentry = file->f_dentry;
1595 int rc, xid;
1596
1597 xid = GetXid();
1598 rc = cifs_revalidate(dentry);
1599 if (rc) {
1600 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1601 FreeXid(xid);
1602 return rc;
1603 }
1604 rc = generic_file_mmap(file, vma);
1605 FreeXid(xid);
1606 return rc;
1607}
1608
1609
1610static void cifs_copy_cache_pages(struct address_space *mapping,
1611 struct list_head *pages, int bytes_read, char *data,
1612 struct pagevec *plru_pvec)
1613{
1614 struct page *page;
1615 char *target;
1616
1617 while (bytes_read > 0) {
1618 if (list_empty(pages))
1619 break;
1620
1621 page = list_entry(pages->prev, struct page, lru);
1622 list_del(&page->lru);
1623
1624 if (add_to_page_cache(page, mapping, page->index,
1625 GFP_KERNEL)) {
1626 page_cache_release(page);
1627 cFYI(1, ("Add page cache failed"));
1628 data += PAGE_CACHE_SIZE;
1629 bytes_read -= PAGE_CACHE_SIZE;
1630 continue;
1631 }
1632
1633 target = kmap_atomic(page,KM_USER0);
1634
1635 if (PAGE_CACHE_SIZE > bytes_read) {
1636 memcpy(target, data, bytes_read);
1637 /* zero the tail end of this partial page */
1638 memset(target + bytes_read, 0,
1639 PAGE_CACHE_SIZE - bytes_read);
1640 bytes_read = 0;
1641 } else {
1642 memcpy(target, data, PAGE_CACHE_SIZE);
1643 bytes_read -= PAGE_CACHE_SIZE;
1644 }
1645 kunmap_atomic(target, KM_USER0);
1646
1647 flush_dcache_page(page);
1648 SetPageUptodate(page);
1649 unlock_page(page);
1650 if (!pagevec_add(plru_pvec, page))
1651 __pagevec_lru_add(plru_pvec);
1652 data += PAGE_CACHE_SIZE;
1653 }
1654 return;
1655}
1656
1657static int cifs_readpages(struct file *file, struct address_space *mapping,
1658 struct list_head *page_list, unsigned num_pages)
1659{
1660 int rc = -EACCES;
1661 int xid;
1662 loff_t offset;
1663 struct page *page;
1664 struct cifs_sb_info *cifs_sb;
1665 struct cifsTconInfo *pTcon;
1666 int bytes_read = 0;
1667 unsigned int read_size,i;
1668 char *smb_read_data = NULL;
1669 struct smb_com_read_rsp *pSMBr;
1670 struct pagevec lru_pvec;
1671 struct cifsFileInfo *open_file;
1672 int buf_type = CIFS_NO_BUFFER;
1673
1674 xid = GetXid();
1675 if (file->private_data == NULL) {
1676 FreeXid(xid);
1677 return -EBADF;
1678 }
1679 open_file = (struct cifsFileInfo *)file->private_data;
1680 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1681 pTcon = cifs_sb->tcon;
1682
1683 pagevec_init(&lru_pvec, 0);
1684
1685 for (i = 0; i < num_pages; ) {
1686 unsigned contig_pages;
1687 struct page *tmp_page;
1688 unsigned long expected_index;
1689
1690 if (list_empty(page_list))
1691 break;
1692
1693 page = list_entry(page_list->prev, struct page, lru);
1694 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1695
1696 /* count adjacent pages that we will read into */
1697 contig_pages = 0;
1698 expected_index =
1699 list_entry(page_list->prev, struct page, lru)->index;
1700 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1701 if (tmp_page->index == expected_index) {
1702 contig_pages++;
1703 expected_index++;
1704 } else
1705 break;
1706 }
1707 if (contig_pages + i > num_pages)
1708 contig_pages = num_pages - i;
1709
1710 /* for reads over a certain size could initiate async
1711 read ahead */
1712
1713 read_size = contig_pages * PAGE_CACHE_SIZE;
1714 /* Read size needs to be in multiples of one page */
1715 read_size = min_t(const unsigned int, read_size,
1716 cifs_sb->rsize & PAGE_CACHE_MASK);
1717
1718 rc = -EAGAIN;
1719 while (rc == -EAGAIN) {
1720 if ((open_file->invalidHandle) &&
1721 (!open_file->closePend)) {
1722 rc = cifs_reopen_file(file->f_dentry->d_inode,
1723 file, TRUE);
1724 if (rc != 0)
1725 break;
1726 }
1727
1728 rc = CIFSSMBRead(xid, pTcon,
1729 open_file->netfid,
1730 read_size, offset,
1731 &bytes_read, &smb_read_data,
1732 &buf_type);
1733 /* BB more RC checks ? */
1734 if (rc== -EAGAIN) {
1735 if (smb_read_data) {
1736 if(buf_type == CIFS_SMALL_BUFFER)
1737 cifs_small_buf_release(smb_read_data);
1738 else if(buf_type == CIFS_LARGE_BUFFER)
1739 cifs_buf_release(smb_read_data);
1740 smb_read_data = NULL;
1741 }
1742 }
1743 }
1744 if ((rc < 0) || (smb_read_data == NULL)) {
1745 cFYI(1, ("Read error in readpages: %d", rc));
1746 /* clean up remaining pages off list */
1747 while (!list_empty(page_list) && (i < num_pages)) {
1748 page = list_entry(page_list->prev, struct page,
1749 lru);
1750 list_del(&page->lru);
1751 page_cache_release(page);
1752 }
1753 break;
1754 } else if (bytes_read > 0) {
1755 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1756 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1757 smb_read_data + 4 /* RFC1001 hdr */ +
1758 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1759
1760 i += bytes_read >> PAGE_CACHE_SHIFT;
1761 cifs_stats_bytes_read(pTcon, bytes_read);
1762 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1763 i++; /* account for partial page */
1764
1765 /* server copy of file can have smaller size
1766 than client */
1767 /* BB do we need to verify this common case ?
1768 this case is ok - if we are at server EOF
1769 we will hit it on next read */
1770
1771 /* while (!list_empty(page_list) && (i < num_pages)) {
1772 page = list_entry(page_list->prev,
1773 struct page, list);
1774 list_del(&page->list);
1775 page_cache_release(page);
1776 }
1777 break; */
1778 }
1779 } else {
1780 cFYI(1, ("No bytes read (%d) at offset %lld . "
1781 "Cleaning remaining pages from readahead list",
1782 bytes_read, offset));
1783 /* BB turn off caching and do new lookup on
1784 file size at server? */
1785 while (!list_empty(page_list) && (i < num_pages)) {
1786 page = list_entry(page_list->prev, struct page,
1787 lru);
1788 list_del(&page->lru);
1789
1790 /* BB removeme - replace with zero of page? */
1791 page_cache_release(page);
1792 }
1793 break;
1794 }
1795 if (smb_read_data) {
1796 if(buf_type == CIFS_SMALL_BUFFER)
1797 cifs_small_buf_release(smb_read_data);
1798 else if(buf_type == CIFS_LARGE_BUFFER)
1799 cifs_buf_release(smb_read_data);
1800 smb_read_data = NULL;
1801 }
1802 bytes_read = 0;
1803 }
1804
1805 pagevec_lru_add(&lru_pvec);
1806
1807/* need to free smb_read_data buf before exit */
1808 if (smb_read_data) {
1809 if(buf_type == CIFS_SMALL_BUFFER)
1810 cifs_small_buf_release(smb_read_data);
1811 else if(buf_type == CIFS_LARGE_BUFFER)
1812 cifs_buf_release(smb_read_data);
1813 smb_read_data = NULL;
1814 }
1815
1816 FreeXid(xid);
1817 return rc;
1818}
1819
1820static int cifs_readpage_worker(struct file *file, struct page *page,
1821 loff_t *poffset)
1822{
1823 char *read_data;
1824 int rc;
1825
1826 page_cache_get(page);
1827 read_data = kmap(page);
1828 /* for reads over a certain size could initiate async read ahead */
1829
1830 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1831
1832 if (rc < 0)
1833 goto io_error;
1834 else
1835 cFYI(1, ("Bytes read %d",rc));
1836
1837 file->f_dentry->d_inode->i_atime =
1838 current_fs_time(file->f_dentry->d_inode->i_sb);
1839
1840 if (PAGE_CACHE_SIZE > rc)
1841 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1842
1843 flush_dcache_page(page);
1844 SetPageUptodate(page);
1845 rc = 0;
1846
1847io_error:
1848 kunmap(page);
1849 page_cache_release(page);
1850 return rc;
1851}
1852
1853static int cifs_readpage(struct file *file, struct page *page)
1854{
1855 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1856 int rc = -EACCES;
1857 int xid;
1858
1859 xid = GetXid();
1860
1861 if (file->private_data == NULL) {
1862 FreeXid(xid);
1863 return -EBADF;
1864 }
1865
1866 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1867 page, (int)offset, (int)offset));
1868
1869 rc = cifs_readpage_worker(file, page, &offset);
1870
1871 unlock_page(page);
1872
1873 FreeXid(xid);
1874 return rc;
1875}
1876
1877/* We do not want to update the file size from server for inodes
1878 open for write - to avoid races with writepage extending
1879 the file - in the future we could consider allowing
1880 refreshing the inode only on increases in the file size
1881 but this is tricky to do without racing with writebehind
1882 page caching in the current Linux kernel design */
1883int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1884{
1885 struct cifsFileInfo *open_file = NULL;
1886
1887 if (cifsInode)
1888 open_file = find_writable_file(cifsInode);
1889
1890 if(open_file) {
1891 struct cifs_sb_info *cifs_sb;
1892
1893 /* there is not actually a write pending so let
1894 this handle go free and allow it to
1895 be closable if needed */
1896 atomic_dec(&open_file->wrtPending);
1897
1898 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1899 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1900 /* since no page cache to corrupt on directio
1901 we can change size safely */
1902 return 1;
1903 }
1904
1905 return 0;
1906 } else
1907 return 1;
1908}
1909
1910static int cifs_prepare_write(struct file *file, struct page *page,
1911 unsigned from, unsigned to)
1912{
1913 int rc = 0;
1914 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1915 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1916 if (!PageUptodate(page)) {
1917 /* if (to - from != PAGE_CACHE_SIZE) {
1918 void *kaddr = kmap_atomic(page, KM_USER0);
1919 memset(kaddr, 0, from);
1920 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1921 flush_dcache_page(page);
1922 kunmap_atomic(kaddr, KM_USER0);
1923 } */
1924 /* If we are writing a full page it will be up to date,
1925 no need to read from the server */
1926 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1927 SetPageUptodate(page);
1928
1929 /* might as well read a page, it is fast enough */
1930 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1931 rc = cifs_readpage_worker(file, page, &offset);
1932 } else {
1933 /* should we try using another file handle if there is one -
1934 how would we lock it to prevent close of that handle
1935 racing with this read?
1936 In any case this will be written out by commit_write */
1937 }
1938 }
1939
1940 /* BB should we pass any errors back?
1941 e.g. if we do not have read access to the file */
1942 return 0;
1943}
1944
1945const struct address_space_operations cifs_addr_ops = {
1946 .readpage = cifs_readpage,
1947 .readpages = cifs_readpages,
1948 .writepage = cifs_writepage,
1949 .writepages = cifs_writepages,
1950 .prepare_write = cifs_prepare_write,
1951 .commit_write = cifs_commit_write,
1952 .set_page_dirty = __set_page_dirty_nobuffers,
1953 /* .sync_page = cifs_sync_page, */
1954 /* .direct_IO = */
1955};
1956
1957/*
1958 * cifs_readpages requires the server to support a buffer large enough to
1959 * contain the header plus one complete page of data. Otherwise, we need
1960 * to leave cifs_readpages out of the address space operations.
1961 */
1962const struct address_space_operations cifs_addr_ops_smallbuf = {
1963 .readpage = cifs_readpage,
1964 .writepage = cifs_writepage,
1965 .writepages = cifs_writepages,
1966 .prepare_write = cifs_prepare_write,
1967 .commit_write = cifs_commit_write,
1968 .set_page_dirty = __set_page_dirty_nobuffers,
1969 /* .sync_page = cifs_sync_page, */
1970 /* .direct_IO = */
1971};