1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/fs.h>
24#include <linux/backing-dev.h>
25#include <linux/stat.h>
26#include <linux/fcntl.h>
27#include <linux/mpage.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
30#include <linux/smp_lock.h>
31#include <linux/writeback.h>
32#include <linux/delay.h>
33#include <asm/div64.h>
34#include "cifsfs.h"
35#include "cifspdu.h"
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_unicode.h"
39#include "cifs_debug.h"
40#include "cifs_fs_sb.h"
41
42static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
45{
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem);
50 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE;
53 private_data->closePend = FALSE;
54 /* we have to track num writers to the inode, since writepages
55 does not tell us which handle the write is for so there can
56 be a close (overlapping with write) of the filehandle that
57 cifs_writepages chose to use */
58 atomic_set(&private_data->wrtPending,0);
59
60 return private_data;
61}
62
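/* Map the POSIX O_ACCMODE open flags to the SMB desired access bits
   requested from the server; any other combination falls back to a
   fixed default access mask. */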
63static inline int cifs_convert_flags(unsigned int flags)
64{
65 if ((flags & O_ACCMODE) == O_RDONLY)
66 return GENERIC_READ;
67 else if ((flags & O_ACCMODE) == O_WRONLY)
68 return GENERIC_WRITE;
69 else if ((flags & O_ACCMODE) == O_RDWR) {
70 /* GENERIC_ALL is too much permission to request
71 can cause unnecessary access denied on create */
72 /* return GENERIC_ALL; */
73 return (GENERIC_READ | GENERIC_WRITE);
74 }
75
76 return 0x20197;
77}
78
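/* Map the POSIX create flags to the SMB create disposition:
   O_CREAT|O_EXCL -> FILE_CREATE, O_CREAT|O_TRUNC -> FILE_OVERWRITE_IF,
   O_CREAT alone -> FILE_OPEN_IF, otherwise FILE_OPEN. */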
79static inline int cifs_get_disposition(unsigned int flags)
80{
81 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
82 return FILE_CREATE;
83 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
84 return FILE_OVERWRITE_IF;
85 else if ((flags & O_CREAT) == O_CREAT)
86 return FILE_OPEN_IF;
87 else
88 return FILE_OPEN;
89}
90
91/* all arguments to this function must be checked for validity in caller */
92static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
93 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
94 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
95 char *full_path, int xid)
96{
97 struct timespec temp;
98 int rc;
99
100 /* want handles we can use to read with first
101 in the list so we do not have to walk the
102 list to search for one in prepare_write */
103 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
104 list_add_tail(&pCifsFile->flist,
105 &pCifsInode->openFileList);
106 } else {
107 list_add(&pCifsFile->flist,
108 &pCifsInode->openFileList);
109 }
110 write_unlock(&GlobalSMBSeslock);
111 write_unlock(&file->f_owner.lock);
112 if (pCifsInode->clientCanCacheRead) {
113 /* we have the inode open somewhere else
114 no need to discard cache data */
115 goto client_can_cache;
116 }
117
118 /* BB need same check in cifs_create too? */
119 /* if not oplocked, invalidate inode pages if mtime or file
120 size changed */
121 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
122 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
123 (file->f_dentry->d_inode->i_size ==
124 (loff_t)le64_to_cpu(buf->EndOfFile))) {
125 cFYI(1, ("inode unchanged on server"));
126 } else {
127 if (file->f_dentry->d_inode->i_mapping) {
128 /* BB no need to lock inode until after invalidate
129 since namei code should already have it locked? */
130 filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
131 }
132 cFYI(1, ("invalidating remote inode since open detected it "
133 "changed"));
134 invalidate_remote_inode(file->f_dentry->d_inode);
135 }
136
137client_can_cache:
138 if (pTcon->ses->capabilities & CAP_UNIX)
139 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
140 full_path, inode->i_sb, xid);
141 else
142 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
143 full_path, buf, inode->i_sb, xid);
144
145 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
146 pCifsInode->clientCanCacheAll = TRUE;
147 pCifsInode->clientCanCacheRead = TRUE;
148 cFYI(1, ("Exclusive Oplock granted on inode %p",
149 file->f_dentry->d_inode));
150 } else if ((*oplock & 0xF) == OPLOCK_READ)
151 pCifsInode->clientCanCacheRead = TRUE;
152
153 return rc;
154}
155
156int cifs_open(struct inode *inode, struct file *file)
157{
158 int rc = -EACCES;
159 int xid, oplock;
160 struct cifs_sb_info *cifs_sb;
161 struct cifsTconInfo *pTcon;
162 struct cifsFileInfo *pCifsFile;
163 struct cifsInodeInfo *pCifsInode;
164 struct list_head *tmp;
165 char *full_path = NULL;
166 int desiredAccess;
167 int disposition;
168 __u16 netfid;
169 FILE_ALL_INFO *buf = NULL;
170
171 xid = GetXid();
172
173 cifs_sb = CIFS_SB(inode->i_sb);
174 pTcon = cifs_sb->tcon;
175
176 if (file->f_flags & O_CREAT) {
177 /* search inode for this file and fill in file->private_data */
178 pCifsInode = CIFS_I(file->f_dentry->d_inode);
179 read_lock(&GlobalSMBSeslock);
180 list_for_each(tmp, &pCifsInode->openFileList) {
181 pCifsFile = list_entry(tmp, struct cifsFileInfo,
182 flist);
183 if ((pCifsFile->pfile == NULL) &&
184 (pCifsFile->pid == current->tgid)) {
185 /* mode set in cifs_create */
186
187 /* needed for writepage */
188 pCifsFile->pfile = file;
189
190 file->private_data = pCifsFile;
191 break;
192 }
193 }
194 read_unlock(&GlobalSMBSeslock);
195 if (file->private_data != NULL) {
196 rc = 0;
197 FreeXid(xid);
198 return rc;
199 } else {
200 if (file->f_flags & O_EXCL)
201 cERROR(1, ("could not find file instance for "
202 "new file %p ", file));
203 }
204 }
205
206 full_path = build_path_from_dentry(file->f_dentry);
207 if (full_path == NULL) {
208 FreeXid(xid);
209 return -ENOMEM;
210 }
211
212 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
213 inode, file->f_flags, full_path));
214 desiredAccess = cifs_convert_flags(file->f_flags);
215
216/*********************************************************************
217 * open flag mapping table:
218 *
219 * POSIX Flag CIFS Disposition
220 * ---------- ----------------
221 * O_CREAT FILE_OPEN_IF
222 * O_CREAT | O_EXCL FILE_CREATE
223 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
224 * O_TRUNC FILE_OVERWRITE
225 * none of the above FILE_OPEN
226 *
227 * Note that there is not a direct match for the disposition
228 * FILE_SUPERSEDE (ie create whether or not the file exists);
229 * O_CREAT | O_TRUNC is similar but truncates the existing
230 * file rather than creating a new file as FILE_SUPERSEDE does
231 * (which uses the attributes / metadata passed in on open call)
232 *?
233 *? O_SYNC is a reasonable match to CIFS writethrough flag
234 *? and the read write flags match reasonably. O_LARGEFILE
235 *? is irrelevant because largefile support is always used
236 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
237 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
238 *********************************************************************/
239
240 disposition = cifs_get_disposition(file->f_flags);
241
242 if (oplockEnabled)
243 oplock = REQ_OPLOCK;
244 else
245 oplock = FALSE;
246
247 /* BB pass O_SYNC flag through on file attributes .. BB */
248
249 /* Also refresh inode by passing in file_info buf returned by SMBOpen
250 and calling get_inode_info with returned buf (at least helps
251 non-Unix server case) */
252
253 /* BB we can not do this if this is the second open of a file
254 and the first handle has writebehind data, we might be
255 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
256 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
257 if (!buf) {
258 rc = -ENOMEM;
259 goto out;
260 }
261 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
262 CREATE_NOT_DIR, &netfid, &oplock, buf,
263 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
264 & CIFS_MOUNT_MAP_SPECIAL_CHR);
265 if (rc == -EIO) {
266 /* Old server, try legacy style OpenX */
267 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
268 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
269 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
270 & CIFS_MOUNT_MAP_SPECIAL_CHR);
271 }
272 if (rc) {
273 cFYI(1, ("cifs_open returned 0x%x ", rc));
274 goto out;
275 }
276 file->private_data =
277 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
278 if (file->private_data == NULL) {
279 rc = -ENOMEM;
280 goto out;
281 }
282 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
283 write_lock(&file->f_owner.lock);
284 write_lock(&GlobalSMBSeslock);
285 list_add(&pCifsFile->tlist, &pTcon->openFileList);
286
287 pCifsInode = CIFS_I(file->f_dentry->d_inode);
288 if (pCifsInode) {
289 rc = cifs_open_inode_helper(inode, file, pCifsInode,
290 pCifsFile, pTcon,
291 &oplock, buf, full_path, xid);
292 } else {
293 write_unlock(&GlobalSMBSeslock);
294 write_unlock(&file->f_owner.lock);
295 }
296
297 if (oplock & CIFS_CREATE_ACTION) {
298 /* time to set mode which we can not set earlier due to
299 problems creating new read-only files */
300 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
301 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
302 inode->i_mode,
303 (__u64)-1, (__u64)-1, 0 /* dev */,
304 cifs_sb->local_nls,
305 cifs_sb->mnt_cifs_flags &
306 CIFS_MOUNT_MAP_SPECIAL_CHR);
307 } else {
308 /* BB implement via Windows security descriptors eg
309 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
310 -1, -1, local_nls);
311 in the meantime could set r/o dos attribute when
312 perms are eg: mode & 0222 == 0 */
313 }
314 }
315
316out:
317 kfree(buf);
318 kfree(full_path);
319 FreeXid(xid);
320 return rc;
321}
322
323/* Try to reacquire byte range locks that were released when session */
324/* to server was lost */
325static int cifs_relock_file(struct cifsFileInfo *cifsFile)
326{
327 int rc = 0;
328
329/* BB list all locks open on this file and relock */
330
331 return rc;
332}
333
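/* Reopen a file whose handle was invalidated (e.g. after the session
   to the server was lost and reconnected).  The file is reopened by
   path with its original flags; if can_flush is set, cached data is
   written back and the inode info is refreshed from the server.
   Client-side caching is then re-enabled according to the oplock
   granted on the reopen. */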
334static int cifs_reopen_file(struct inode *inode, struct file *file,
335 int can_flush)
336{
337 int rc = -EACCES;
338 int xid, oplock;
339 struct cifs_sb_info *cifs_sb;
340 struct cifsTconInfo *pTcon;
341 struct cifsFileInfo *pCifsFile;
342 struct cifsInodeInfo *pCifsInode;
343 char *full_path = NULL;
344 int desiredAccess;
345 int disposition = FILE_OPEN;
346 __u16 netfid;
347
348 if (inode == NULL)
349 return -EBADF;
350 if (file->private_data) {
351 pCifsFile = (struct cifsFileInfo *)file->private_data;
352 } else
353 return -EBADF;
354
355 xid = GetXid();
356 down(&pCifsFile->fh_sem);
357 if (pCifsFile->invalidHandle == FALSE) {
358 up(&pCifsFile->fh_sem);
359 FreeXid(xid);
360 return 0;
361 }
362
363 if (file->f_dentry == NULL) {
364 up(&pCifsFile->fh_sem);
365 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
366 FreeXid(xid);
367 return -EBADF;
368 }
369 cifs_sb = CIFS_SB(inode->i_sb);
370 pTcon = cifs_sb->tcon;
371/* can not grab rename sem here because various ops, including
372 those that already have the rename sem can end up causing writepage
373 to get called and if the server was down that means we end up here,
374 and we can never tell if the caller already has the rename_sem */
375 full_path = build_path_from_dentry(file->f_dentry);
376 if (full_path == NULL) {
377 up(&pCifsFile->fh_sem);
378 FreeXid(xid);
379 return -ENOMEM;
380 }
381
382 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
383 inode, file->f_flags,full_path));
384 desiredAccess = cifs_convert_flags(file->f_flags);
385
386 if (oplockEnabled)
387 oplock = REQ_OPLOCK;
388 else
389 oplock = FALSE;
390
391 /* Can not refresh inode by passing in file_info buf to be returned
392 by SMBOpen and then calling get_inode_info with returned buf
393 since file might have write behind data that needs to be flushed
394 and server version of file size can be stale. If we knew for sure
395 that inode was not dirty locally we could do this */
396
397/* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
398 if (buf == 0) {
399 up(&pCifsFile->fh_sem);
400 kfree(full_path);
401 FreeXid(xid);
402 return -ENOMEM;
403 } */
404 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
405 CREATE_NOT_DIR, &netfid, &oplock, NULL,
406 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
407 CIFS_MOUNT_MAP_SPECIAL_CHR);
408 if (rc) {
409 up(&pCifsFile->fh_sem);
410 cFYI(1, ("cifs_open returned 0x%x ", rc));
411 cFYI(1, ("oplock: %d ", oplock));
412 } else {
413 pCifsFile->netfid = netfid;
414 pCifsFile->invalidHandle = FALSE;
415 up(&pCifsFile->fh_sem);
416 pCifsInode = CIFS_I(inode);
417 if (pCifsInode) {
418 if (can_flush) {
419 filemap_write_and_wait(inode->i_mapping);
420 /* temporarily disable caching while we
421 go to server to get inode info */
422 pCifsInode->clientCanCacheAll = FALSE;
423 pCifsInode->clientCanCacheRead = FALSE;
424 if (pTcon->ses->capabilities & CAP_UNIX)
425 rc = cifs_get_inode_info_unix(&inode,
426 full_path, inode->i_sb, xid);
427 else
428 rc = cifs_get_inode_info(&inode,
429 full_path, NULL, inode->i_sb,
430 xid);
431 } /* else we are writing out data to server already
432 and could deadlock if we tried to flush data, and
433 since we do not know if we have data that would
434 invalidate the current end of file on the server
435 we can not go to the server to get the new inode
436 info */
437 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
438 pCifsInode->clientCanCacheAll = TRUE;
439 pCifsInode->clientCanCacheRead = TRUE;
440 cFYI(1, ("Exclusive Oplock granted on inode %p",
441 file->f_dentry->d_inode));
442 } else if ((oplock & 0xF) == OPLOCK_READ) {
443 pCifsInode->clientCanCacheRead = TRUE;
444 pCifsInode->clientCanCacheAll = FALSE;
445 } else {
446 pCifsInode->clientCanCacheRead = FALSE;
447 pCifsInode->clientCanCacheAll = FALSE;
448 }
449 cifs_relock_file(pCifsFile);
450 }
451 }
452
453 kfree(full_path);
454 FreeXid(xid);
455 return rc;
456}
457
458int cifs_close(struct inode *inode, struct file *file)
459{
460 int rc = 0;
461 int xid;
462 struct cifs_sb_info *cifs_sb;
463 struct cifsTconInfo *pTcon;
464 struct cifsFileInfo *pSMBFile =
465 (struct cifsFileInfo *)file->private_data;
466
467 xid = GetXid();
468
469 cifs_sb = CIFS_SB(inode->i_sb);
470 pTcon = cifs_sb->tcon;
471 if (pSMBFile) {
472 pSMBFile->closePend = TRUE;
473 write_lock(&file->f_owner.lock);
474 if (pTcon) {
475 /* no sense reconnecting to close a file that is
476 already closed */
477 if (pTcon->tidStatus != CifsNeedReconnect) {
478 int timeout = 2;
479 while((atomic_read(&pSMBFile->wrtPending) != 0)
480 && (timeout < 1000) ) {
481 /* Give write a better chance to get to
482 server ahead of the close. We do not
483 want to add a wait_q here as it would
484 increase the memory utilization as
485 the struct would be in each open file,
486 but this should give enough time to
487 clear the socket */
488 write_unlock(&file->f_owner.lock);
489 cERROR(1,("close with pending writes"));
490 msleep(timeout);
491 write_lock(&file->f_owner.lock);
492 timeout *= 4;
493 }
494 write_unlock(&file->f_owner.lock);
495 rc = CIFSSMBClose(xid, pTcon,
496 pSMBFile->netfid);
497 write_lock(&file->f_owner.lock);
498 }
499 }
500 write_lock(&GlobalSMBSeslock);
501 list_del(&pSMBFile->flist);
502 list_del(&pSMBFile->tlist);
503 write_unlock(&GlobalSMBSeslock);
504 write_unlock(&file->f_owner.lock);
505 kfree(pSMBFile->search_resume_name);
506 kfree(file->private_data);
507 file->private_data = NULL;
508 } else
509 rc = -EBADF;
510
511 if (list_empty(&(CIFS_I(inode)->openFileList))) {
512 cFYI(1, ("closing last open instance for inode %p", inode));
513 /* if the file is not open we do not know if we can cache info
514 on this inode, much less write behind and read ahead */
515 CIFS_I(inode)->clientCanCacheRead = FALSE;
516 CIFS_I(inode)->clientCanCacheAll = FALSE;
517 }
518 if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
519 rc = CIFS_I(inode)->write_behind_rc;
520 FreeXid(xid);
521 return rc;
522}
523
524int cifs_closedir(struct inode *inode, struct file *file)
525{
526 int rc = 0;
527 int xid;
528 struct cifsFileInfo *pCFileStruct =
529 (struct cifsFileInfo *)file->private_data;
530 char *ptmp;
531
532 cFYI(1, ("Closedir inode = 0x%p with ", inode));
533
534 xid = GetXid();
535
536 if (pCFileStruct) {
537 struct cifsTconInfo *pTcon;
538 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
539
540 pTcon = cifs_sb->tcon;
541
542 cFYI(1, ("Freeing private data in close dir"));
543 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
544 (pCFileStruct->invalidHandle == FALSE)) {
545 pCFileStruct->invalidHandle = TRUE;
546 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
547 cFYI(1, ("Closing uncompleted readdir with rc %d",
548 rc));
549 /* not much we can do if it fails anyway, ignore rc */
550 rc = 0;
551 }
552 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
553 if (ptmp) {
554 cFYI(1, ("closedir free smb buf in srch struct"));
555 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
556 if(pCFileStruct->srch_inf.smallBuf)
557 cifs_small_buf_release(ptmp);
558 else
559 cifs_buf_release(ptmp);
560 }
561 ptmp = pCFileStruct->search_resume_name;
562 if (ptmp) {
563 cFYI(1, ("closedir free resume name"));
564 pCFileStruct->search_resume_name = NULL;
565 kfree(ptmp);
566 }
567 kfree(file->private_data);
568 file->private_data = NULL;
569 }
570 /* BB can we lock the filestruct while this is going on? */
571 FreeXid(xid);
572 return rc;
573}
574
575int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
576{
577 int rc, xid;
578 __u32 numLock = 0;
579 __u32 numUnlock = 0;
580 __u64 length;
581 int wait_flag = FALSE;
582 struct cifs_sb_info *cifs_sb;
583 struct cifsTconInfo *pTcon;
584 __u16 netfid;
585 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
586
587 length = 1 + pfLock->fl_end - pfLock->fl_start;
588 rc = -EACCES;
589 xid = GetXid();
590
591 cFYI(1, ("Lock parm: 0x%x flockflags: "
592 "0x%x flocktype: 0x%x start: %lld end: %lld",
593 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
594 pfLock->fl_end));
595
596 if (pfLock->fl_flags & FL_POSIX)
597 cFYI(1, ("Posix"));
598 if (pfLock->fl_flags & FL_FLOCK)
599 cFYI(1, ("Flock"));
600 if (pfLock->fl_flags & FL_SLEEP) {
601 cFYI(1, ("Blocking lock"));
602 wait_flag = TRUE;
603 }
604 if (pfLock->fl_flags & FL_ACCESS)
605 cFYI(1, ("Process suspended by mandatory locking - "
606 "not implemented yet "));
607 if (pfLock->fl_flags & FL_LEASE)
608 cFYI(1, ("Lease on file - not implemented yet"));
609 if (pfLock->fl_flags &
610 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
611 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
612
613 if (pfLock->fl_type == F_WRLCK) {
614 cFYI(1, ("F_WRLCK "));
615 numLock = 1;
616 } else if (pfLock->fl_type == F_UNLCK) {
617 cFYI(1, ("F_UNLCK"));
618 numUnlock = 1;
619 /* Check if unlock includes more than
620 one lock range */
621 } else if (pfLock->fl_type == F_RDLCK) {
622 cFYI(1, ("F_RDLCK"));
623 lockType |= LOCKING_ANDX_SHARED_LOCK;
624 numLock = 1;
625 } else if (pfLock->fl_type == F_EXLCK) {
626 cFYI(1, ("F_EXLCK"));
627 numLock = 1;
628 } else if (pfLock->fl_type == F_SHLCK) {
629 cFYI(1, ("F_SHLCK"));
630 lockType |= LOCKING_ANDX_SHARED_LOCK;
631 numLock = 1;
632 } else
633 cFYI(1, ("Unknown type of lock"));
634
635 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
636 pTcon = cifs_sb->tcon;
637
638 if (file->private_data == NULL) {
639 FreeXid(xid);
640 return -EBADF;
641 }
642 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
643
644
645 /* BB add code here to normalize offset and length to
646 account for negative length which we can not accept over the
647 wire */
648 if (IS_GETLK(cmd)) {
649 if(experimEnabled &&
650 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
651 (CIFS_UNIX_FCNTL_CAP &
652 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
653 int posix_lock_type;
654 if(lockType & LOCKING_ANDX_SHARED_LOCK)
655 posix_lock_type = CIFS_RDLCK;
656 else
657 posix_lock_type = CIFS_WRLCK;
658 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
659 length, pfLock->fl_start,
660 posix_lock_type, wait_flag);
661 FreeXid(xid);
662 return rc;
663 }
664
665 /* BB we could chain these into one lock request BB */
666 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
667 0, 1, lockType, 0 /* wait flag */ );
668 if (rc == 0) {
669 rc = CIFSSMBLock(xid, pTcon, netfid, length,
670 pfLock->fl_start, 1 /* numUnlock */ ,
671 0 /* numLock */ , lockType,
672 0 /* wait flag */ );
673 pfLock->fl_type = F_UNLCK;
674 if (rc != 0)
675 cERROR(1, ("Error unlocking previously locked "
676 "range %d during test of lock", rc));
677 rc = 0;
678
679 } else {
680 /* if rc == ERR_SHARING_VIOLATION ? */
681 rc = 0; /* do not change lock type to unlock
682 since range in use */
683 }
684
685 FreeXid(xid);
686 return rc;
687 }
688 if (experimEnabled &&
689 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
690 (CIFS_UNIX_FCNTL_CAP &
691 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
692 int posix_lock_type;
693 if(lockType & LOCKING_ANDX_SHARED_LOCK)
694 posix_lock_type = CIFS_RDLCK;
695 else
696 posix_lock_type = CIFS_WRLCK;
697
698 if(numUnlock == 1)
699 posix_lock_type = CIFS_UNLCK;
700 else if(numLock == 0) {
701 /* if no lock or unlock then nothing
702 to do since we do not know what it is */
703 FreeXid(xid);
704 return -EOPNOTSUPP;
705 }
706 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
707 length, pfLock->fl_start,
708 posix_lock_type, wait_flag);
709 } else
710 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
711 numUnlock, numLock, lockType, wait_flag);
712 if (pfLock->fl_flags & FL_POSIX)
713 posix_lock_file_wait(file, pfLock);
714 FreeXid(xid);
715 return rc;
716}
717
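/* Write from a user buffer to the server in chunks of at most wsize
   bytes, retrying on -EAGAIN and reopening an invalidated handle as
   needed.  Updates *poffset and the cached inode size/times and
   returns the number of bytes written (or an error if nothing was
   written). */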
718ssize_t cifs_user_write(struct file *file, const char __user *write_data,
719 size_t write_size, loff_t *poffset)
720{
721 int rc = 0;
722 unsigned int bytes_written = 0;
723 unsigned int total_written;
724 struct cifs_sb_info *cifs_sb;
725 struct cifsTconInfo *pTcon;
726 int xid, long_op;
727 struct cifsFileInfo *open_file;
728
729 if (file->f_dentry == NULL)
730 return -EBADF;
731
732 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
733 if (cifs_sb == NULL)
734 return -EBADF;
735
736 pTcon = cifs_sb->tcon;
737
738 /* cFYI(1,
739 (" write %d bytes to offset %lld of %s", write_size,
740 *poffset, file->f_dentry->d_name.name)); */
741
742 if (file->private_data == NULL)
743 return -EBADF;
744 else
745 open_file = (struct cifsFileInfo *) file->private_data;
746
747 xid = GetXid();
748 if (file->f_dentry->d_inode == NULL) {
749 FreeXid(xid);
750 return -EBADF;
751 }
752
753 if (*poffset > file->f_dentry->d_inode->i_size)
754 long_op = 2; /* writes past end of file can take a long time */
755 else
756 long_op = 1;
757
758 for (total_written = 0; write_size > total_written;
759 total_written += bytes_written) {
760 rc = -EAGAIN;
761 while (rc == -EAGAIN) {
762 if (file->private_data == NULL) {
763 /* file has been closed on us */
764 FreeXid(xid);
765 /* if we have gotten here we have written some data
766 and blocked, and the file has been freed on us while
767 we blocked so return what we managed to write */
768 return total_written;
769 }
770 if (open_file->closePend) {
771 FreeXid(xid);
772 if (total_written)
773 return total_written;
774 else
775 return -EBADF;
776 }
777 if (open_file->invalidHandle) {
778 if ((file->f_dentry == NULL) ||
779 (file->f_dentry->d_inode == NULL)) {
780 FreeXid(xid);
781 return total_written;
782 }
783 /* we could deadlock if we called
784 filemap_fdatawait from here so tell
785 reopen_file not to flush data to server
786 now */
787 rc = cifs_reopen_file(file->f_dentry->d_inode,
788 file, FALSE);
789 if (rc != 0)
790 break;
791 }
792
793 rc = CIFSSMBWrite(xid, pTcon,
794 open_file->netfid,
795 min_t(const int, cifs_sb->wsize,
796 write_size - total_written),
797 *poffset, &bytes_written,
798 NULL, write_data + total_written, long_op);
799 }
800 if (rc || (bytes_written == 0)) {
801 if (total_written)
802 break;
803 else {
804 FreeXid(xid);
805 return rc;
806 }
807 } else
808 *poffset += bytes_written;
809 long_op = FALSE; /* subsequent writes fast -
810 15 seconds is plenty */
811 }
812
813 cifs_stats_bytes_written(pTcon, total_written);
814
815 /* since the write may have blocked check these pointers again */
816 if (file->f_dentry) {
817 if (file->f_dentry->d_inode) {
818 struct inode *inode = file->f_dentry->d_inode;
819 inode->i_ctime = inode->i_mtime =
820 current_fs_time(inode->i_sb);
821 if (total_written > 0) {
822 if (*poffset > file->f_dentry->d_inode->i_size)
823 i_size_write(file->f_dentry->d_inode,
824 *poffset);
825 }
826 mark_inode_dirty_sync(file->f_dentry->d_inode);
827 }
828 }
829 FreeXid(xid);
830 return total_written;
831}
832
833static ssize_t cifs_write(struct file *file, const char *write_data,
834 size_t write_size, loff_t *poffset)
835{
836 int rc = 0;
837 unsigned int bytes_written = 0;
838 unsigned int total_written;
839 struct cifs_sb_info *cifs_sb;
840 struct cifsTconInfo *pTcon;
841 int xid, long_op;
842 struct cifsFileInfo *open_file;
843
844 if (file->f_dentry == NULL)
845 return -EBADF;
846
847 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
848 if (cifs_sb == NULL)
849 return -EBADF;
850
851 pTcon = cifs_sb->tcon;
852
853 cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
854 *poffset, file->f_dentry->d_name.name));
855
856 if (file->private_data == NULL)
857 return -EBADF;
858 else
859 open_file = (struct cifsFileInfo *)file->private_data;
860
861 xid = GetXid();
862 if (file->f_dentry->d_inode == NULL) {
863 FreeXid(xid);
864 return -EBADF;
865 }
866
867 if (*poffset > file->f_dentry->d_inode->i_size)
868 long_op = 2; /* writes past end of file can take a long time */
869 else
870 long_op = 1;
871
872 for (total_written = 0; write_size > total_written;
873 total_written += bytes_written) {
874 rc = -EAGAIN;
875 while (rc == -EAGAIN) {
876 if (file->private_data == NULL) {
877 /* file has been closed on us */
878 FreeXid(xid);
879 /* if we have gotten here we have written some data
880 and blocked, and the file has been freed on us
881 while we blocked so return what we managed to
882 write */
883 return total_written;
884 }
885 if (open_file->closePend) {
886 FreeXid(xid);
887 if (total_written)
888 return total_written;
889 else
890 return -EBADF;
891 }
892 if (open_file->invalidHandle) {
893 if ((file->f_dentry == NULL) ||
894 (file->f_dentry->d_inode == NULL)) {
895 FreeXid(xid);
896 return total_written;
897 }
898 /* we could deadlock if we called
899 filemap_fdatawait from here so tell
900 reopen_file not to flush data to
901 server now */
902 rc = cifs_reopen_file(file->f_dentry->d_inode,
903 file, FALSE);
904 if (rc != 0)
905 break;
906 }
907 /* BB FIXME We can not sign across two buffers yet */
908 if((pTcon->ses->server->secMode &
909 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0) {
910 struct kvec iov[2];
911 unsigned int len;
912
913 len = min((size_t)cifs_sb->wsize,
914 write_size - total_written);
915 /* iov[0] is reserved for smb header */
916 iov[1].iov_base = (char *)write_data +
917 total_written;
918 iov[1].iov_len = len;
919 rc = CIFSSMBWrite2(xid, pTcon,
920 open_file->netfid, len,
921 *poffset, &bytes_written,
922 iov, 1, long_op);
923 } else
924 /* BB FIXME fixup indentation of line below */
925 rc = CIFSSMBWrite(xid, pTcon,
926 open_file->netfid,
927 min_t(const int, cifs_sb->wsize,
928 write_size - total_written),
929 *poffset, &bytes_written,
930 write_data + total_written, NULL, long_op);
931 }
932 if (rc || (bytes_written == 0)) {
933 if (total_written)
934 break;
935 else {
936 FreeXid(xid);
937 return rc;
938 }
939 } else
940 *poffset += bytes_written;
941 long_op = FALSE; /* subsequent writes fast -
942 15 seconds is plenty */
943 }
944
945 cifs_stats_bytes_written(pTcon, total_written);
946
947 /* since the write may have blocked check these pointers again */
948 if (file->f_dentry) {
949 if (file->f_dentry->d_inode) {
950 file->f_dentry->d_inode->i_ctime =
951 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
952 if (total_written > 0) {
953 if (*poffset > file->f_dentry->d_inode->i_size)
954 i_size_write(file->f_dentry->d_inode,
955 *poffset);
956 }
957 mark_inode_dirty_sync(file->f_dentry->d_inode);
958 }
959 }
960 FreeXid(xid);
961 return total_written;
962}
963
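/* Walk the inode's open file list looking for a handle opened for
   writing (O_RDWR or O_WRONLY) that is not being closed.  The handle's
   wrtPending count is incremented before it is returned so a racing
   close can tell a write is in flight; invalidated handles are
   reopened (without flushing) before being handed back. */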
964struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
965{
966 struct cifsFileInfo *open_file;
967 int rc;
968
969 read_lock(&GlobalSMBSeslock);
970 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
971 if (open_file->closePend)
972 continue;
973 if (open_file->pfile &&
974 ((open_file->pfile->f_flags & O_RDWR) ||
975 (open_file->pfile->f_flags & O_WRONLY))) {
976 atomic_inc(&open_file->wrtPending);
977 read_unlock(&GlobalSMBSeslock);
978 if((open_file->invalidHandle) &&
979 (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
980 rc = cifs_reopen_file(&cifs_inode->vfs_inode,
981 open_file->pfile, FALSE);
982 /* if it fails, try another handle - might be */
983 /* dangerous to hold up writepages with retry */
984 if(rc) {
985 cFYI(1,("failed on reopen file in wp"));
986 read_lock(&GlobalSMBSeslock);
987 /* can not use this handle, no write
988 pending on this one after all */
989 atomic_dec
990 (&open_file->wrtPending);
991 continue;
992 }
993 }
994 return open_file;
995 }
996 }
997 read_unlock(&GlobalSMBSeslock);
998 return NULL;
999}
1000
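/* Write the byte range [from, to) of a page cache page back to the
   server via any writable handle for the inode, clamping the range so
   that the file is never extended past the current inode size. */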
1001static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1002{
1003 struct address_space *mapping = page->mapping;
1004 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1005 char *write_data;
1006 int rc = -EFAULT;
1007 int bytes_written = 0;
1008 struct cifs_sb_info *cifs_sb;
1009 struct cifsTconInfo *pTcon;
1010 struct inode *inode;
1011 struct cifsFileInfo *open_file;
1012
1013 if (!mapping || !mapping->host)
1014 return -EFAULT;
1015
1016 inode = page->mapping->host;
1017 cifs_sb = CIFS_SB(inode->i_sb);
1018 pTcon = cifs_sb->tcon;
1019
1020 offset += (loff_t)from;
1021 write_data = kmap(page);
1022 write_data += from;
1023
1024 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1025 kunmap(page);
1026 return -EIO;
1027 }
1028
1029 /* racing with truncate? */
1030 if (offset > mapping->host->i_size) {
1031 kunmap(page);
1032 return 0; /* don't care */
1033 }
1034
1035 /* check to make sure that we are not extending the file */
1036 if (mapping->host->i_size - offset < (loff_t)to)
1037 to = (unsigned)(mapping->host->i_size - offset);
1038
1039 open_file = find_writable_file(CIFS_I(mapping->host));
1040 if (open_file) {
1041 bytes_written = cifs_write(open_file->pfile, write_data,
1042 to-from, &offset);
1043 atomic_dec(&open_file->wrtPending);
1044 /* Does mm or vfs already set times? */
1045 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1046 if ((bytes_written > 0) && (offset)) {
1047 rc = 0;
1048 } else if (bytes_written < 0) {
1049 if (rc != -EBADF)
1050 rc = bytes_written;
1051 }
1052 } else {
1053 cFYI(1, ("No writeable filehandles for inode"));
1054 rc = -EIO;
1055 }
1056
1057 kunmap(page);
1058 return rc;
1059}
1060
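/* Writeback: gather runs of contiguous dirty pages into a kvec (one
   page per iovec entry, iov[0] reserved for the SMB header) and send
   each run with a single CIFSSMBWrite2 call of up to wsize bytes.
   Falls back to generic_writepages when wsize is smaller than the
   page size or when SMB signing is enabled, since signing across
   multiple buffers is not supported here. */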
1061static int cifs_writepages(struct address_space *mapping,
1062 struct writeback_control *wbc)
1063{
1064 struct backing_dev_info *bdi = mapping->backing_dev_info;
1065 unsigned int bytes_to_write;
1066 unsigned int bytes_written;
1067 struct cifs_sb_info *cifs_sb;
1068 int done = 0;
1069 pgoff_t end = -1;
1070 pgoff_t index;
1071 int is_range = 0;
1072 struct kvec iov[32];
1073 int len;
1074 int n_iov = 0;
1075 pgoff_t next;
1076 int nr_pages;
1077 __u64 offset = 0;
1078 struct cifsFileInfo *open_file;
1079 struct page *page;
1080 struct pagevec pvec;
1081 int rc = 0;
1082 int scanned = 0;
1083 int xid;
1084
1085 cifs_sb = CIFS_SB(mapping->host->i_sb);
1086
1087 /*
1088 * If wsize is smaller that the page cache size, default to writing
1089 * one page at a time via cifs_writepage
1090 */
1091 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1092 return generic_writepages(mapping, wbc);
1093
1094 /* BB FIXME we do not have code to sign across multiple buffers yet,
1095 so go to older writepage style write which we can sign if needed */
1096 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1097 if(cifs_sb->tcon->ses->server->secMode &
1098 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1099 return generic_writepages(mapping, wbc);
1100
1101 /*
1102 * BB: Is this meaningful for a non-block-device file system?
1103 * If it is, we should test it again after we do I/O
1104 */
1105 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1106 wbc->encountered_congestion = 1;
1107 return 0;
1108 }
1109
1110 xid = GetXid();
1111
1112 pagevec_init(&pvec, 0);
1113 if (wbc->sync_mode == WB_SYNC_NONE)
1114 index = mapping->writeback_index; /* Start from prev offset */
1115 else {
1116 index = 0;
1117 scanned = 1;
1118 }
1119 if (wbc->start || wbc->end) {
1120 index = wbc->start >> PAGE_CACHE_SHIFT;
1121 end = wbc->end >> PAGE_CACHE_SHIFT;
1122 is_range = 1;
1123 scanned = 1;
1124 }
1125retry:
1126 while (!done && (index <= end) &&
1127 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1128 PAGECACHE_TAG_DIRTY,
1129 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1130 int first;
1131 unsigned int i;
1132
1133 first = -1;
1134 next = 0;
1135 n_iov = 0;
1136 bytes_to_write = 0;
1137
1138 for (i = 0; i < nr_pages; i++) {
1139 page = pvec.pages[i];
1140 /*
1141 * At this point we hold neither mapping->tree_lock nor
1142 * lock on the page itself: the page may be truncated or
1143 * invalidated (changing page->mapping to NULL), or even
1144 * swizzled back from swapper_space to tmpfs file
1145 * mapping
1146 */
1147
1148 if (first < 0)
1149 lock_page(page);
1150 else if (TestSetPageLocked(page))
1151 break;
1152
1153 if (unlikely(page->mapping != mapping)) {
1154 unlock_page(page);
1155 break;
1156 }
1157
1158 if (unlikely(is_range) && (page->index > end)) {
1159 done = 1;
1160 unlock_page(page);
1161 break;
1162 }
1163
1164 if (next && (page->index != next)) {
1165 /* Not next consecutive page */
1166 unlock_page(page);
1167 break;
1168 }
1169
1170 if (wbc->sync_mode != WB_SYNC_NONE)
1171 wait_on_page_writeback(page);
1172
1173 if (PageWriteback(page) ||
1174 !test_clear_page_dirty(page)) {
1175 unlock_page(page);
1176 break;
1177 }
1178
1179 if (page_offset(page) >= mapping->host->i_size) {
1180 done = 1;
1181 unlock_page(page);
1182 break;
1183 }
1184
1185 /*
1186 * BB can we get rid of this? pages are held by pvec
1187 */
1188 page_cache_get(page);
1189
1190 len = min(mapping->host->i_size - page_offset(page),
1191 (loff_t)PAGE_CACHE_SIZE);
1192
1193 /* reserve iov[0] for the smb header */
1194 n_iov++;
1195 iov[n_iov].iov_base = kmap(page);
1196 iov[n_iov].iov_len = len;
1197 bytes_to_write += len;
1198
1199 if (first < 0) {
1200 first = i;
1201 offset = page_offset(page);
1202 }
1203 next = page->index + 1;
1204 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1205 break;
1206 }
1207 if (n_iov) {
1208 /* Search for a writable handle every time we call
1209 * CIFSSMBWrite2. We can't rely on the last handle
1210 * we used to still be valid
1211 */
1212 open_file = find_writable_file(CIFS_I(mapping->host));
1213 if (!open_file) {
1214 cERROR(1, ("No writable handles for inode"));
1215 rc = -EBADF;
1216 } else {
1217 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1218 open_file->netfid,
1219 bytes_to_write, offset,
1220 &bytes_written, iov, n_iov,
1221 1);
1222 atomic_dec(&open_file->wrtPending);
1223 if (rc || bytes_written < bytes_to_write) {
1224 cERROR(1,("Write2 ret %d, written = %d",
1225 rc, bytes_written));
1226 /* BB what if continued retry is
1227 requested via mount flags? */
1228 set_bit(AS_EIO, &mapping->flags);
1229 } else {
1230 cifs_stats_bytes_written(cifs_sb->tcon,
1231 bytes_written);
1232 }
1233 }
1234 for (i = 0; i < n_iov; i++) {
1235 page = pvec.pages[first + i];
1236 /* Should we also set page error on
1237 success rc but too little data written? */
1238 /* BB investigate retry logic on temporary
1239 server crash cases and how recovery works
1240 when page marked as error */
1241 if(rc)
1242 SetPageError(page);
1243 kunmap(page);
1244 unlock_page(page);
1245 page_cache_release(page);
1246 }
1247 if ((wbc->nr_to_write -= n_iov) <= 0)
1248 done = 1;
1249 index = next;
1250 }
1251 pagevec_release(&pvec);
1252 }
1253 if (!scanned && !done) {
1254 /*
1255 * We hit the last page and there is more work to be done: wrap
1256 * back to the start of the file
1257 */
1258 scanned = 1;
1259 index = 0;
1260 goto retry;
1261 }
1262 if (!is_range)
1263 mapping->writeback_index = index;
1264
1265 FreeXid(xid);
1266
1267 return rc;
1268}
1269
1270static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1271{
1272 int rc = -EFAULT;
1273 int xid;
1274
1275 xid = GetXid();
1276/* BB add check for wbc flags */
1277 page_cache_get(page);
1278 if (!PageUptodate(page)) {
1279 cFYI(1, ("ppw - page not up to date"));
1280 }
1281
1282 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1283 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1284 unlock_page(page);
1285 page_cache_release(page);
1286 FreeXid(xid);
1287 return rc;
1288}
1289
1290static int cifs_commit_write(struct file *file, struct page *page,
1291 unsigned offset, unsigned to)
1292{
1293 int xid;
1294 int rc = 0;
1295 struct inode *inode = page->mapping->host;
1296 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1297 char *page_data;
1298
1299 xid = GetXid();
1300 cFYI(1, ("commit write for page %p up to position %lld for %d",
1301 page, position, to));
1302 if (position > inode->i_size) {
1303 i_size_write(inode, position);
1304 /* if (file->private_data == NULL) {
1305 rc = -EBADF;
1306 } else {
1307 open_file = (struct cifsFileInfo *)file->private_data;
1308 cifs_sb = CIFS_SB(inode->i_sb);
1309 rc = -EAGAIN;
1310 while (rc == -EAGAIN) {
1311 if ((open_file->invalidHandle) &&
1312 (!open_file->closePend)) {
1313 rc = cifs_reopen_file(
1314 file->f_dentry->d_inode, file);
1315 if (rc != 0)
1316 break;
1317 }
1318 if (!open_file->closePend) {
1319 rc = CIFSSMBSetFileSize(xid,
1320 cifs_sb->tcon, position,
1321 open_file->netfid,
1322 open_file->pid, FALSE);
1323 } else {
1324 rc = -EBADF;
1325 break;
1326 }
1327 }
1328 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1329 } */
1330 }
1331 if (!PageUptodate(page)) {
1332 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1333 /* can not rely on (or let) writepage write this data */
1334 if (to < offset) {
1335 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1336 offset, to));
1337 FreeXid(xid);
1338 return rc;
1339 }
1340 /* this is probably better than directly calling
1341 partialpage_write since in this function the file handle is
1342 known which we might as well leverage */
1343 /* BB check if anything else missing out of ppw
1344 such as updating last write time */
1345 page_data = kmap(page);
1346 rc = cifs_write(file, page_data + offset, to-offset,
1347 &position);
1348 if (rc > 0)
1349 rc = 0;
1350 /* else if (rc < 0) should we set writebehind rc? */
1351 kunmap(page);
1352 } else {
1353 set_page_dirty(page);
1354 }
1355
1356 FreeXid(xid);
1357 return rc;
1358}
1359
1360int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1361{
1362 int xid;
1363 int rc = 0;
1364 struct inode *inode = file->f_dentry->d_inode;
1365
1366 xid = GetXid();
1367
1368 cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
1369 dentry->d_name.name, datasync));
1370
1371 rc = filemap_fdatawrite(inode->i_mapping);
1372 if (rc == 0)
1373 CIFS_I(inode)->write_behind_rc = 0;
1374 FreeXid(xid);
1375 return rc;
1376}
1377
1378/* static void cifs_sync_page(struct page *page)
1379{
1380 struct address_space *mapping;
1381 struct inode *inode;
1382 unsigned long index = page->index;
1383 unsigned int rpages = 0;
1384 int rc = 0;
1385
1386 cFYI(1, ("sync page %p",page));
1387 mapping = page->mapping;
1388 if (!mapping)
1389 return 0;
1390 inode = mapping->host;
1391 if (!inode)
1392 return; */
1393
1394/* fill in rpages then
1395 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1396
1397/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
1398
1399#if 0
1400 if (rc < 0)
1401 return rc;
1402 return 0;
1403#endif
1404} */
1405
1406/*
1407 * As file closes, flush all cached write data for this inode checking
1408 * for write behind errors.
1409 */
1410int cifs_flush(struct file *file)
1411{
1412 struct inode * inode = file->f_dentry->d_inode;
1413 int rc = 0;
1414
1415 /* Rather than do the steps manually:
1416 lock the inode for writing
1417 loop through pages looking for write behind data (dirty pages)
1418 coalesce into contiguous 16K (or smaller) chunks to write to server
1419 send to server (prefer in parallel)
1420 deal with writebehind errors
1421 unlock inode for writing
1422 filemapfdatawrite appears easier for the time being */
1423
1424 rc = filemap_fdatawrite(inode->i_mapping);
1425 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1426 CIFS_I(inode)->write_behind_rc = 0;
1427
1428 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1429
1430 return rc;
1431}
1432
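/* Read into a user buffer in chunks of at most rsize bytes, copying
   the data out of each SMB read response (skipping the RFC1001 length
   field and the data offset), retrying on -EAGAIN and reopening an
   invalidated handle as needed. */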
1433ssize_t cifs_user_read(struct file *file, char __user *read_data,
1434 size_t read_size, loff_t *poffset)
1435{
1436 int rc = -EACCES;
1437 unsigned int bytes_read = 0;
1438 unsigned int total_read = 0;
1439 unsigned int current_read_size;
1440 struct cifs_sb_info *cifs_sb;
1441 struct cifsTconInfo *pTcon;
1442 int xid;
1443 struct cifsFileInfo *open_file;
1444 char *smb_read_data;
1445 char __user *current_offset;
1446 struct smb_com_read_rsp *pSMBr;
1447
1448 xid = GetXid();
1449 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1450 pTcon = cifs_sb->tcon;
1451
1452 if (file->private_data == NULL) {
1453 FreeXid(xid);
1454 return -EBADF;
1455 }
1456 open_file = (struct cifsFileInfo *)file->private_data;
1457
1458 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1459 cFYI(1, ("attempting read on write only file instance"));
1460 }
1461 for (total_read = 0, current_offset = read_data;
1462 read_size > total_read;
1463 total_read += bytes_read, current_offset += bytes_read) {
1464 current_read_size = min_t(const int, read_size - total_read,
1465 cifs_sb->rsize);
1466 rc = -EAGAIN;
1467 smb_read_data = NULL;
1468 while (rc == -EAGAIN) {
1469 int buf_type = CIFS_NO_BUFFER;
1470 if ((open_file->invalidHandle) &&
1471 (!open_file->closePend)) {
1472 rc = cifs_reopen_file(file->f_dentry->d_inode,
1473 file, TRUE);
1474 if (rc != 0)
1475 break;
1476 }
1477 rc = CIFSSMBRead(xid, pTcon,
1478 open_file->netfid,
1479 current_read_size, *poffset,
1480 &bytes_read, &smb_read_data,
1481 &buf_type);
1482 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1483 if (smb_read_data) {
1484 if (copy_to_user(current_offset,
1485 smb_read_data +
1486 4 /* RFC1001 length field */ +
1487 le16_to_cpu(pSMBr->DataOffset),
1488 bytes_read)) {
1489 rc = -EFAULT;
1490 }
1491
1492 if(buf_type == CIFS_SMALL_BUFFER)
1493 cifs_small_buf_release(smb_read_data);
1494 else if(buf_type == CIFS_LARGE_BUFFER)
1495 cifs_buf_release(smb_read_data);
1496 smb_read_data = NULL;
1497 }
1498 }
1499 if (rc || (bytes_read == 0)) {
1500 if (total_read) {
1501 break;
1502 } else {
1503 FreeXid(xid);
1504 return rc;
1505 }
1506 } else {
1507 cifs_stats_bytes_read(pTcon, bytes_read);
1508 *poffset += bytes_read;
1509 }
1510 }
1511 FreeXid(xid);
1512 return total_read;
1513}
1514
1515
1516static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1517 loff_t *poffset)
1518{
1519 int rc = -EACCES;
1520 unsigned int bytes_read = 0;
1521 unsigned int total_read;
1522 unsigned int current_read_size;
1523 struct cifs_sb_info *cifs_sb;
1524 struct cifsTconInfo *pTcon;
1525 int xid;
1526 char *current_offset;
1527 struct cifsFileInfo *open_file;
1528 int buf_type = CIFS_NO_BUFFER;
1529
1530 xid = GetXid();
1531 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1532 pTcon = cifs_sb->tcon;
1533
1534 if (file->private_data == NULL) {
1535 FreeXid(xid);
1536 return -EBADF;
1537 }
1538 open_file = (struct cifsFileInfo *)file->private_data;
1539
1540 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1541 cFYI(1, ("attempting read on write only file instance"));
1542
1543 for (total_read = 0, current_offset = read_data;
1544 read_size > total_read;
1545 total_read += bytes_read, current_offset += bytes_read) {
1546 current_read_size = min_t(const int, read_size - total_read,
1547 cifs_sb->rsize);
1548 /* For windows me and 9x we do not want to request more
1549 than it negotiated since it will refuse the read then */
1550 if((pTcon->ses) &&
1551 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1552 current_read_size = min_t(const int, current_read_size,
1553 pTcon->ses->server->maxBuf - 128);
1554 }
1555 rc = -EAGAIN;
1556 while (rc == -EAGAIN) {
1557 if ((open_file->invalidHandle) &&
1558 (!open_file->closePend)) {
1559 rc = cifs_reopen_file(file->f_dentry->d_inode,
1560 file, TRUE);
1561 if (rc != 0)
1562 break;
1563 }
1564 rc = CIFSSMBRead(xid, pTcon,
1565 open_file->netfid,
1566 current_read_size, *poffset,
1567 &bytes_read, &current_offset,
1568 &buf_type);
1569 }
1570 if (rc || (bytes_read == 0)) {
1571 if (total_read) {
1572 break;
1573 } else {
1574 FreeXid(xid);
1575 return rc;
1576 }
1577 } else {
1578 cifs_stats_bytes_read(pTcon, total_read);
1579 *poffset += bytes_read;
1580 }
1581 }
1582 FreeXid(xid);
1583 return total_read;
1584}
1585
1586int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1587{
1588 struct dentry *dentry = file->f_dentry;
1589 int rc, xid;
1590
1591 xid = GetXid();
1592 rc = cifs_revalidate(dentry);
1593 if (rc) {
1594 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1595 FreeXid(xid);
1596 return rc;
1597 }
1598 rc = generic_file_mmap(file, vma);
1599 FreeXid(xid);
1600 return rc;
1601}
1602
1603
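/* Copy data returned by a read into the pages queued on the readahead
   list: each page is added to the page cache, filled (the tail of a
   final partial page is zeroed), marked up to date, and queued for
   addition to the LRU via the supplied pagevec. */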
1604static void cifs_copy_cache_pages(struct address_space *mapping,
1605 struct list_head *pages, int bytes_read, char *data,
1606 struct pagevec *plru_pvec)
1607{
1608 struct page *page;
1609 char *target;
1610
1611 while (bytes_read > 0) {
1612 if (list_empty(pages))
1613 break;
1614
1615 page = list_entry(pages->prev, struct page, lru);
1616 list_del(&page->lru);
1617
1618 if (add_to_page_cache(page, mapping, page->index,
1619 GFP_KERNEL)) {
1620 page_cache_release(page);
1621 cFYI(1, ("Add page cache failed"));
1622 data += PAGE_CACHE_SIZE;
1623 bytes_read -= PAGE_CACHE_SIZE;
1624 continue;
1625 }
1626
1627 target = kmap_atomic(page,KM_USER0);
1628
1629 if (PAGE_CACHE_SIZE > bytes_read) {
1630 memcpy(target, data, bytes_read);
1631 /* zero the tail end of this partial page */
1632 memset(target + bytes_read, 0,
1633 PAGE_CACHE_SIZE - bytes_read);
1634 bytes_read = 0;
1635 } else {
1636 memcpy(target, data, PAGE_CACHE_SIZE);
1637 bytes_read -= PAGE_CACHE_SIZE;
1638 }
1639 kunmap_atomic(target, KM_USER0);
1640
1641 flush_dcache_page(page);
1642 SetPageUptodate(page);
1643 unlock_page(page);
1644 if (!pagevec_add(plru_pvec, page))
1645 __pagevec_lru_add(plru_pvec);
1646 data += PAGE_CACHE_SIZE;
1647 }
1648 return;
1649}
1650
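/* Readahead: take pages off the supplied list, count how many are
   contiguous, and issue a single read of up to rsize bytes covering
   them, then copy the response into the page cache with
   cifs_copy_cache_pages.  Remaining pages are released on error or
   short read. */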
1651static int cifs_readpages(struct file *file, struct address_space *mapping,
1652 struct list_head *page_list, unsigned num_pages)
1653{
1654 int rc = -EACCES;
1655 int xid;
1656 loff_t offset;
1657 struct page *page;
1658 struct cifs_sb_info *cifs_sb;
1659 struct cifsTconInfo *pTcon;
1660 int bytes_read = 0;
1661 unsigned int read_size,i;
1662 char *smb_read_data = NULL;
1663 struct smb_com_read_rsp *pSMBr;
1664 struct pagevec lru_pvec;
1665 struct cifsFileInfo *open_file;
1666 int buf_type = CIFS_NO_BUFFER;
1667
1668 xid = GetXid();
1669 if (file->private_data == NULL) {
1670 FreeXid(xid);
1671 return -EBADF;
1672 }
1673 open_file = (struct cifsFileInfo *)file->private_data;
1674 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1675 pTcon = cifs_sb->tcon;
1676
1677 pagevec_init(&lru_pvec, 0);
1678
1679 for (i = 0; i < num_pages; ) {
1680 unsigned contig_pages;
1681 struct page *tmp_page;
1682 unsigned long expected_index;
1683
1684 if (list_empty(page_list))
1685 break;
1686
1687 page = list_entry(page_list->prev, struct page, lru);
1688 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1689
1690 /* count adjacent pages that we will read into */
1691 contig_pages = 0;
1692 expected_index =
1693 list_entry(page_list->prev, struct page, lru)->index;
1694 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1695 if (tmp_page->index == expected_index) {
1696 contig_pages++;
1697 expected_index++;
1698 } else
1699 break;
1700 }
1701 if (contig_pages + i > num_pages)
1702 contig_pages = num_pages - i;
1703
1704 /* for reads over a certain size could initiate async
1705 read ahead */
1706
1707 read_size = contig_pages * PAGE_CACHE_SIZE;
1708 /* Read size needs to be in multiples of one page */
1709 read_size = min_t(const unsigned int, read_size,
1710 cifs_sb->rsize & PAGE_CACHE_MASK);
1711
1712 rc = -EAGAIN;
1713 while (rc == -EAGAIN) {
1714 if ((open_file->invalidHandle) &&
1715 (!open_file->closePend)) {
1716 rc = cifs_reopen_file(file->f_dentry->d_inode,
1717 file, TRUE);
1718 if (rc != 0)
1719 break;
1720 }
1721
1722 rc = CIFSSMBRead(xid, pTcon,
1723 open_file->netfid,
1724 read_size, offset,
1725 &bytes_read, &smb_read_data,
1726 &buf_type);
1727 /* BB more RC checks ? */
1728 if (rc== -EAGAIN) {
1729 if (smb_read_data) {
1730 if(buf_type == CIFS_SMALL_BUFFER)
1731 cifs_small_buf_release(smb_read_data);
1732 else if(buf_type == CIFS_LARGE_BUFFER)
1733 cifs_buf_release(smb_read_data);
1734 smb_read_data = NULL;
1735 }
1736 }
1737 }
1738 if ((rc < 0) || (smb_read_data == NULL)) {
1739 cFYI(1, ("Read error in readpages: %d", rc));
1740 /* clean up remaining pages off list */
1741 while (!list_empty(page_list) && (i < num_pages)) {
1742 page = list_entry(page_list->prev, struct page,
1743 lru);
1744 list_del(&page->lru);
1745 page_cache_release(page);
1746 }
1747 break;
1748 } else if (bytes_read > 0) {
1749 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1750 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1751 smb_read_data + 4 /* RFC1001 hdr */ +
1752 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1753
1754 i += bytes_read >> PAGE_CACHE_SHIFT;
1755 cifs_stats_bytes_read(pTcon, bytes_read);
1756 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1757 i++; /* account for partial page */
1758
1759 /* server copy of file can have smaller size
1760 than client */
1761 /* BB do we need to verify this common case ?
1762 this case is ok - if we are at server EOF
1763 we will hit it on next read */
1764
1765 /* while (!list_empty(page_list) && (i < num_pages)) {
1766 page = list_entry(page_list->prev,
1767 struct page, list);
1768 list_del(&page->list);
1769 page_cache_release(page);
1770 }
1771 break; */
1772 }
1773 } else {
1774 cFYI(1, ("No bytes read (%d) at offset %lld . "
1775 "Cleaning remaining pages from readahead list",
1776 bytes_read, offset));
1777 /* BB turn off caching and do new lookup on
1778 file size at server? */
1779 while (!list_empty(page_list) && (i < num_pages)) {
1780 page = list_entry(page_list->prev, struct page,
1781 lru);
1782 list_del(&page->lru);
1783
1784 /* BB removeme - replace with zero of page? */
1785 page_cache_release(page);
1786 }
1787 break;
1788 }
1789 if (smb_read_data) {
1790 if(buf_type == CIFS_SMALL_BUFFER)
1791 cifs_small_buf_release(smb_read_data);
1792 else if(buf_type == CIFS_LARGE_BUFFER)
1793 cifs_buf_release(smb_read_data);
1794 smb_read_data = NULL;
1795 }
1796 bytes_read = 0;
1797 }
1798
1799 pagevec_lru_add(&lru_pvec);
1800
1801/* need to free smb_read_data buf before exit */
1802 if (smb_read_data) {
1803 if(buf_type == CIFS_SMALL_BUFFER)
1804 cifs_small_buf_release(smb_read_data);
1805 else if(buf_type == CIFS_LARGE_BUFFER)
1806 cifs_buf_release(smb_read_data);
1807 smb_read_data = NULL;
1808 }
1809
1810 FreeXid(xid);
1811 return rc;
1812}
1813
1814static int cifs_readpage_worker(struct file *file, struct page *page,
1815 loff_t *poffset)
1816{
1817 char *read_data;
1818 int rc;
1819
1820 page_cache_get(page);
1821 read_data = kmap(page);
1822 /* for reads over a certain size could initiate async read ahead */
1823
1824 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1825
1826 if (rc < 0)
1827 goto io_error;
1828 else
1829 cFYI(1, ("Bytes read %d ",rc));
1830
1831 file->f_dentry->d_inode->i_atime =
1832 current_fs_time(file->f_dentry->d_inode->i_sb);
1833
1834 if (PAGE_CACHE_SIZE > rc)
1835 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1836
1837 flush_dcache_page(page);
1838 SetPageUptodate(page);
1839 rc = 0;
1840
1841io_error:
1842 kunmap(page);
1843 page_cache_release(page);
1844 return rc;
1845}
1846
1847static int cifs_readpage(struct file *file, struct page *page)
1848{
1849 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1850 int rc = -EACCES;
1851 int xid;
1852
1853 xid = GetXid();
1854
1855 if (file->private_data == NULL) {
1856 FreeXid(xid);
1857 return -EBADF;
1858 }
1859
1860 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1861 page, (int)offset, (int)offset));
1862
1863 rc = cifs_readpage_worker(file, page, &offset);
1864
1865 unlock_page(page);
1866
1867 FreeXid(xid);
1868 return rc;
1869}
1870
1871/* We do not want to update the file size from server for inodes
1872 open for write - to avoid races with writepage extending
1873 the file - in the future we could consider allowing
1874 refreshing the inode only on increases in the file size
1875 but this is tricky to do without racing with writebehind
1876 page caching in the current Linux kernel design */
1877int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1878{
1879 struct cifsFileInfo *open_file = NULL;
1880
1881 if (cifsInode)
1882 open_file = find_writable_file(cifsInode);
1883
1884 if(open_file) {
1885 struct cifs_sb_info *cifs_sb;
1886
1887 /* there is not actually a write pending so let
1888 this handle go free and allow it to
1889 be closable if needed */
1890 atomic_dec(&open_file->wrtPending);
1891
1892 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1893 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1894 /* since no page cache to corrupt on directio
1895 we can change size safely */
1896 return 1;
1897 }
1898
1899 return 0;
1900 } else
1901 return 1;
1902}
1903
1904static int cifs_prepare_write(struct file *file, struct page *page,
1905 unsigned from, unsigned to)
1906{
1907 int rc = 0;
1908 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1909 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1910 if (!PageUptodate(page)) {
1911 /* if (to - from != PAGE_CACHE_SIZE) {
1912 void *kaddr = kmap_atomic(page, KM_USER0);
1913 memset(kaddr, 0, from);
1914 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1915 flush_dcache_page(page);
1916 kunmap_atomic(kaddr, KM_USER0);
1917 } */
1918 /* If we are writing a full page it will be up to date,
1919 no need to read from the server */
1920 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1921 SetPageUptodate(page);
1922
1923 /* might as well read a page, it is fast enough */
1924 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1925 rc = cifs_readpage_worker(file, page, &offset);
1926 } else {
1927 /* should we try using another file handle if there is one -
1928 how would we lock it to prevent close of that handle
1929 racing with this read?
1930 In any case this will be written out by commit_write */
1931 }
1932 }
1933
1934 /* BB should we pass any errors back?
1935 e.g. if we do not have read access to the file */
1936 return 0;
1937}
1938
1939struct address_space_operations cifs_addr_ops = {
1940 .readpage = cifs_readpage,
1941 .readpages = cifs_readpages,
1942 .writepage = cifs_writepage,
1943 .writepages = cifs_writepages,
1944 .prepare_write = cifs_prepare_write,
1945 .commit_write = cifs_commit_write,
1946 .set_page_dirty = __set_page_dirty_nobuffers,
1947 /* .sync_page = cifs_sync_page, */
1948 /* .direct_IO = */
1949};