1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/fs.h>
24#include <linux/backing-dev.h>
25#include <linux/stat.h>
26#include <linux/fcntl.h>
27#include <linux/mpage.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
30#include <linux/smp_lock.h>
31#include <linux/writeback.h>
32#include <linux/delay.h>
33#include <asm/div64.h>
34#include "cifsfs.h"
35#include "cifspdu.h"
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_unicode.h"
39#include "cifs_debug.h"
40#include "cifs_fs_sb.h"
41
42static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
45{
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem);
50 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE;
53 private_data->closePend = FALSE;
54 /* we have to track num writers to the inode, since writepages
55 does not tell us which handle the write is for so there can
56 be a close (overlapping with write) of the filehandle that
57 cifs_writepages chose to use */
58 atomic_set(&private_data->wrtPending,0);
59
60 return private_data;
61}
62
63static inline int cifs_convert_flags(unsigned int flags)
64{
65 if ((flags & O_ACCMODE) == O_RDONLY)
66 return GENERIC_READ;
67 else if ((flags & O_ACCMODE) == O_WRONLY)
68 return GENERIC_WRITE;
69 else if ((flags & O_ACCMODE) == O_RDWR) {
70 /* GENERIC_ALL is too much permission to request
71 can cause unnecessary access denied on create */
72 /* return GENERIC_ALL; */
73 return (GENERIC_READ | GENERIC_WRITE);
74 }
75
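	/* fallback access mask, requested if O_ACCMODE holds an unexpected value:
	   0x20197 == READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
	   FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | FILE_READ_DATA */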
76 return 0x20197;
77}
78
79static inline int cifs_get_disposition(unsigned int flags)
80{
81 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
82 return FILE_CREATE;
83 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
84 return FILE_OVERWRITE_IF;
85 else if ((flags & O_CREAT) == O_CREAT)
86 return FILE_OPEN_IF;
87 else
88 return FILE_OPEN;
89}
90
91/* all arguments to this function must be checked for validity in caller */
92static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
93 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
94 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
95 char *full_path, int xid)
96{
97 struct timespec temp;
98 int rc;
99
100 /* want handles we can use to read with first
101 in the list so we do not have to walk the
102 list to search for one in prepare_write */
103 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
104 list_add_tail(&pCifsFile->flist,
105 &pCifsInode->openFileList);
106 } else {
107 list_add(&pCifsFile->flist,
108 &pCifsInode->openFileList);
109 }
110 write_unlock(&GlobalSMBSeslock);
111 write_unlock(&file->f_owner.lock);
112 if (pCifsInode->clientCanCacheRead) {
113 /* we have the inode open somewhere else
114 no need to discard cache data */
115 goto client_can_cache;
116 }
117
118 /* BB need same check in cifs_create too? */
119 /* if not oplocked, invalidate inode pages if mtime or file
120 size changed */
121 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
122 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
123 (file->f_dentry->d_inode->i_size ==
124 (loff_t)le64_to_cpu(buf->EndOfFile))) {
125 cFYI(1, ("inode unchanged on server"));
126 } else {
127 if (file->f_dentry->d_inode->i_mapping) {
128 /* BB no need to lock inode until after invalidate
129 since namei code should already have it locked? */
130 filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
131 filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
132 }
133 cFYI(1, ("invalidating remote inode since open detected it "
134 "changed"));
135 invalidate_remote_inode(file->f_dentry->d_inode);
136 }
137
138client_can_cache:
139 if (pTcon->ses->capabilities & CAP_UNIX)
140 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
141 full_path, inode->i_sb, xid);
142 else
143 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
144 full_path, buf, inode->i_sb, xid);
145
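	/* the low nibble of the oplock word holds the oplock level the server
	   granted; higher bits carry flags such as CIFS_CREATE_ACTION, tested
	   later in cifs_open */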
146 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
147 pCifsInode->clientCanCacheAll = TRUE;
148 pCifsInode->clientCanCacheRead = TRUE;
149 cFYI(1, ("Exclusive Oplock granted on inode %p",
150 file->f_dentry->d_inode));
151 } else if ((*oplock & 0xF) == OPLOCK_READ)
152 pCifsInode->clientCanCacheRead = TRUE;
153
154 return rc;
155}
156
157int cifs_open(struct inode *inode, struct file *file)
158{
159 int rc = -EACCES;
160 int xid, oplock;
161 struct cifs_sb_info *cifs_sb;
162 struct cifsTconInfo *pTcon;
163 struct cifsFileInfo *pCifsFile;
164 struct cifsInodeInfo *pCifsInode;
165 struct list_head *tmp;
166 char *full_path = NULL;
167 int desiredAccess;
168 int disposition;
169 __u16 netfid;
170 FILE_ALL_INFO *buf = NULL;
171
172 xid = GetXid();
173
174 cifs_sb = CIFS_SB(inode->i_sb);
175 pTcon = cifs_sb->tcon;
176
177 if (file->f_flags & O_CREAT) {
178 /* search inode for this file and fill in file->private_data */
179 pCifsInode = CIFS_I(file->f_dentry->d_inode);
180 read_lock(&GlobalSMBSeslock);
181 list_for_each(tmp, &pCifsInode->openFileList) {
182 pCifsFile = list_entry(tmp, struct cifsFileInfo,
183 flist);
184 if ((pCifsFile->pfile == NULL) &&
185 (pCifsFile->pid == current->tgid)) {
186 /* mode set in cifs_create */
187
188 /* needed for writepage */
189 pCifsFile->pfile = file;
190
191 file->private_data = pCifsFile;
192 break;
193 }
194 }
195 read_unlock(&GlobalSMBSeslock);
196 if (file->private_data != NULL) {
197 rc = 0;
198 FreeXid(xid);
199 return rc;
200 } else {
201 if (file->f_flags & O_EXCL)
202 cERROR(1, ("could not find file instance for "
203 "new file %p ", file));
204 }
205 }
206
207 down(&inode->i_sb->s_vfs_rename_sem);
208	full_path = build_path_from_dentry(file->f_dentry);
209 up(&inode->i_sb->s_vfs_rename_sem);
210 if (full_path == NULL) {
211 FreeXid(xid);
212 return -ENOMEM;
213 }
214
215 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
216 inode, file->f_flags, full_path));
217 desiredAccess = cifs_convert_flags(file->f_flags);
218
219/*********************************************************************
220 * open flag mapping table:
221 *
222 * POSIX Flag CIFS Disposition
223 * ---------- ----------------
224 * O_CREAT FILE_OPEN_IF
225 * O_CREAT | O_EXCL FILE_CREATE
226 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
227 * O_TRUNC FILE_OVERWRITE
228 * none of the above FILE_OPEN
229 *
 230 * Note that there is not a direct POSIX match for the disposition
 231 * FILE_SUPERSEDE (ie create whether or not the file exists);
 232 * O_CREAT | O_TRUNC is similar but truncates the existing
 233 * file rather than creating a new file as FILE_SUPERSEDE does
 234 * (which uses the attributes / metadata passed in on the open call).
 235 *?
236 *? O_SYNC is a reasonable match to CIFS writethrough flag
237 *? and the read write flags match reasonably. O_LARGEFILE
238 *? is irrelevant because largefile support is always used
239 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
240 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
241 *********************************************************************/
242
243 disposition = cifs_get_disposition(file->f_flags);
244
245 if (oplockEnabled)
246 oplock = REQ_OPLOCK;
247 else
248 oplock = FALSE;
249
250 /* BB pass O_SYNC flag through on file attributes .. BB */
251
252 /* Also refresh inode by passing in file_info buf returned by SMBOpen
253 and calling get_inode_info with returned buf (at least helps
254 non-Unix server case) */
255
256 /* BB we can not do this if this is the second open of a file
257 and the first handle has writebehind data, we might be
258 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
259 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
260 if (!buf) {
261 rc = -ENOMEM;
262 goto out;
263 }
264 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
265 CREATE_NOT_DIR, &netfid, &oplock, buf,
266 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
267 & CIFS_MOUNT_MAP_SPECIAL_CHR);
268 if (rc == -EIO) {
269 /* Old server, try legacy style OpenX */
270 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
271 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
272 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
273 & CIFS_MOUNT_MAP_SPECIAL_CHR);
274 }
275 if (rc) {
276 cFYI(1, ("cifs_open returned 0x%x ", rc));
277 goto out;
278 }
279 file->private_data =
280 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
281 if (file->private_data == NULL) {
282 rc = -ENOMEM;
283 goto out;
284 }
285 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
286 write_lock(&file->f_owner.lock);
287 write_lock(&GlobalSMBSeslock);
288 list_add(&pCifsFile->tlist, &pTcon->openFileList);
289
290 pCifsInode = CIFS_I(file->f_dentry->d_inode);
291 if (pCifsInode) {
292 rc = cifs_open_inode_helper(inode, file, pCifsInode,
293 pCifsFile, pTcon,
294 &oplock, buf, full_path, xid);
295 } else {
296 write_unlock(&GlobalSMBSeslock);
297 write_unlock(&file->f_owner.lock);
298 }
299
300 if (oplock & CIFS_CREATE_ACTION) {
301 /* time to set mode which we can not set earlier due to
302 problems creating new read-only files */
303 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
304 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
305 inode->i_mode,
306 (__u64)-1, (__u64)-1, 0 /* dev */,
307 cifs_sb->local_nls,
308 cifs_sb->mnt_cifs_flags &
309 CIFS_MOUNT_MAP_SPECIAL_CHR);
310 } else {
311 /* BB implement via Windows security descriptors eg
312 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
313 -1, -1, local_nls);
314 in the meantime could set r/o dos attribute when
315 perms are eg: mode & 0222 == 0 */
316 }
317 }
318
319out:
320 kfree(buf);
321 kfree(full_path);
322 FreeXid(xid);
323 return rc;
324}
325
326/* Try to reacquire byte range locks that were released when session */
327/* to server was lost */
328static int cifs_relock_file(struct cifsFileInfo *cifsFile)
329{
330 int rc = 0;
331
332/* BB list all locks open on this file and relock */
333
334 return rc;
335}
336
337static int cifs_reopen_file(struct inode *inode, struct file *file,
338 int can_flush)
339{
340 int rc = -EACCES;
341 int xid, oplock;
342 struct cifs_sb_info *cifs_sb;
343 struct cifsTconInfo *pTcon;
344 struct cifsFileInfo *pCifsFile;
345 struct cifsInodeInfo *pCifsInode;
346 char *full_path = NULL;
347 int desiredAccess;
348 int disposition = FILE_OPEN;
349 __u16 netfid;
350
351 if (inode == NULL)
352 return -EBADF;
353 if (file->private_data) {
354 pCifsFile = (struct cifsFileInfo *)file->private_data;
355 } else
356 return -EBADF;
357
358 xid = GetXid();
359 down(&pCifsFile->fh_sem);
360 if (pCifsFile->invalidHandle == FALSE) {
361 up(&pCifsFile->fh_sem);
362 FreeXid(xid);
363 return 0;
364 }
365
366 if (file->f_dentry == NULL) {
367 up(&pCifsFile->fh_sem);
368 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
369 FreeXid(xid);
370 return -EBADF;
371 }
372 cifs_sb = CIFS_SB(inode->i_sb);
373 pTcon = cifs_sb->tcon;
374/* can not grab rename sem here because various ops, including
375 those that already have the rename sem can end up causing writepage
376 to get called and if the server was down that means we end up here,
377 and we can never tell if the caller already has the rename_sem */
378	full_path = build_path_from_dentry(file->f_dentry);
379 if (full_path == NULL) {
380 up(&pCifsFile->fh_sem);
381 FreeXid(xid);
382 return -ENOMEM;
383 }
384
385 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
386 inode, file->f_flags,full_path));
387 desiredAccess = cifs_convert_flags(file->f_flags);
388
389 if (oplockEnabled)
390 oplock = REQ_OPLOCK;
391 else
392 oplock = FALSE;
393
394 /* Can not refresh inode by passing in file_info buf to be returned
395 by SMBOpen and then calling get_inode_info with returned buf
396 since file might have write behind data that needs to be flushed
397 and server version of file size can be stale. If we knew for sure
398 that inode was not dirty locally we could do this */
399
400/* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
401 if (buf == 0) {
402 up(&pCifsFile->fh_sem);
403 kfree(full_path);
404 FreeXid(xid);
405 return -ENOMEM;
406 } */
407 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
408 CREATE_NOT_DIR, &netfid, &oplock, NULL,
409 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
410 CIFS_MOUNT_MAP_SPECIAL_CHR);
411 if (rc) {
412 up(&pCifsFile->fh_sem);
413 cFYI(1, ("cifs_open returned 0x%x ", rc));
414 cFYI(1, ("oplock: %d ", oplock));
415 } else {
416 pCifsFile->netfid = netfid;
417 pCifsFile->invalidHandle = FALSE;
418 up(&pCifsFile->fh_sem);
419 pCifsInode = CIFS_I(inode);
420 if (pCifsInode) {
421 if (can_flush) {
422 filemap_fdatawrite(inode->i_mapping);
423 filemap_fdatawait(inode->i_mapping);
424 /* temporarily disable caching while we
425 go to server to get inode info */
426 pCifsInode->clientCanCacheAll = FALSE;
427 pCifsInode->clientCanCacheRead = FALSE;
428 if (pTcon->ses->capabilities & CAP_UNIX)
429 rc = cifs_get_inode_info_unix(&inode,
430 full_path, inode->i_sb, xid);
431 else
432 rc = cifs_get_inode_info(&inode,
433 full_path, NULL, inode->i_sb,
434 xid);
435 } /* else we are writing out data to server already
436 and could deadlock if we tried to flush data, and
437 since we do not know if we have data that would
438 invalidate the current end of file on the server
 439			   we can not go to the server to get the new inode
440 info */
441 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
442 pCifsInode->clientCanCacheAll = TRUE;
443 pCifsInode->clientCanCacheRead = TRUE;
444 cFYI(1, ("Exclusive Oplock granted on inode %p",
445 file->f_dentry->d_inode));
446 } else if ((oplock & 0xF) == OPLOCK_READ) {
447 pCifsInode->clientCanCacheRead = TRUE;
448 pCifsInode->clientCanCacheAll = FALSE;
449 } else {
450 pCifsInode->clientCanCacheRead = FALSE;
451 pCifsInode->clientCanCacheAll = FALSE;
452 }
453 cifs_relock_file(pCifsFile);
454 }
455 }
456
457 kfree(full_path);
458 FreeXid(xid);
459 return rc;
460}
461
462int cifs_close(struct inode *inode, struct file *file)
463{
464 int rc = 0;
465 int xid;
466 struct cifs_sb_info *cifs_sb;
467 struct cifsTconInfo *pTcon;
468 struct cifsFileInfo *pSMBFile =
469 (struct cifsFileInfo *)file->private_data;
470
471 xid = GetXid();
472
473 cifs_sb = CIFS_SB(inode->i_sb);
474 pTcon = cifs_sb->tcon;
475 if (pSMBFile) {
476 pSMBFile->closePend = TRUE;
477 write_lock(&file->f_owner.lock);
478 if (pTcon) {
479 /* no sense reconnecting to close a file that is
480 already closed */
481 if (pTcon->tidStatus != CifsNeedReconnect) {
482 int timeout = 2;
483 while((atomic_read(&pSMBFile->wrtPending) != 0)
484 && (timeout < 1000) ) {
485 /* Give write a better chance to get to
486 server ahead of the close. We do not
487 want to add a wait_q here as it would
488 increase the memory utilization as
489 the struct would be in each open file,
490 but this should give enough time to
491 clear the socket */
 492				write_unlock(&file->f_owner.lock);
493 cERROR(1,("close with pending writes"));
494 msleep(timeout);
 495				write_lock(&file->f_owner.lock);
496 timeout *= 4;
497 }
498 write_unlock(&file->f_owner.lock);
499 rc = CIFSSMBClose(xid, pTcon,
500 pSMBFile->netfid);
501 write_lock(&file->f_owner.lock);
502 }
503 }
 504		write_lock(&GlobalSMBSeslock);
505 list_del(&pSMBFile->flist);
506 list_del(&pSMBFile->tlist);
 507		write_unlock(&GlobalSMBSeslock);
508 write_unlock(&file->f_owner.lock);
509 kfree(pSMBFile->search_resume_name);
510 kfree(file->private_data);
511 file->private_data = NULL;
512 } else
513 rc = -EBADF;
514
515 if (list_empty(&(CIFS_I(inode)->openFileList))) {
516 cFYI(1, ("closing last open instance for inode %p", inode));
517 /* if the file is not open we do not know if we can cache info
518 on this inode, much less write behind and read ahead */
519 CIFS_I(inode)->clientCanCacheRead = FALSE;
520 CIFS_I(inode)->clientCanCacheAll = FALSE;
521 }
522 if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
523 rc = CIFS_I(inode)->write_behind_rc;
524 FreeXid(xid);
525 return rc;
526}
527
528int cifs_closedir(struct inode *inode, struct file *file)
529{
530 int rc = 0;
531 int xid;
532 struct cifsFileInfo *pCFileStruct =
533 (struct cifsFileInfo *)file->private_data;
534 char *ptmp;
535
536 cFYI(1, ("Closedir inode = 0x%p with ", inode));
537
538 xid = GetXid();
539
540 if (pCFileStruct) {
541 struct cifsTconInfo *pTcon;
542 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
543
544 pTcon = cifs_sb->tcon;
545
546 cFYI(1, ("Freeing private data in close dir"));
547 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
548 (pCFileStruct->invalidHandle == FALSE)) {
549 pCFileStruct->invalidHandle = TRUE;
550 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
551 cFYI(1, ("Closing uncompleted readdir with rc %d",
552 rc));
553 /* not much we can do if it fails anyway, ignore rc */
554 rc = 0;
555 }
556 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
557 if (ptmp) {
558 /* BB removeme BB */ cFYI(1, ("freeing smb buf in srch struct in closedir"));
559 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
560 cifs_buf_release(ptmp);
561 }
562 ptmp = pCFileStruct->search_resume_name;
563 if (ptmp) {
564 /* BB removeme BB */ cFYI(1, ("freeing resume name in closedir"));
565 pCFileStruct->search_resume_name = NULL;
566 kfree(ptmp);
567 }
568 kfree(file->private_data);
569 file->private_data = NULL;
570 }
571 /* BB can we lock the filestruct while this is going on? */
572 FreeXid(xid);
573 return rc;
574}
575
576int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
577{
578 int rc, xid;
579 __u32 lockType = LOCKING_ANDX_LARGE_FILES;
580 __u32 numLock = 0;
581 __u32 numUnlock = 0;
582 __u64 length;
583 int wait_flag = FALSE;
584 struct cifs_sb_info *cifs_sb;
585 struct cifsTconInfo *pTcon;
586
587 length = 1 + pfLock->fl_end - pfLock->fl_start;
588 rc = -EACCES;
589 xid = GetXid();
590
591 cFYI(1, ("Lock parm: 0x%x flockflags: "
592 "0x%x flocktype: 0x%x start: %lld end: %lld",
593 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
594 pfLock->fl_end));
595
596 if (pfLock->fl_flags & FL_POSIX)
597 cFYI(1, ("Posix "));
598 if (pfLock->fl_flags & FL_FLOCK)
599 cFYI(1, ("Flock "));
600 if (pfLock->fl_flags & FL_SLEEP) {
601 cFYI(1, ("Blocking lock "));
602 wait_flag = TRUE;
603 }
604 if (pfLock->fl_flags & FL_ACCESS)
605 cFYI(1, ("Process suspended by mandatory locking - "
606 "not implemented yet "));
607 if (pfLock->fl_flags & FL_LEASE)
608 cFYI(1, ("Lease on file - not implemented yet"));
609 if (pfLock->fl_flags &
610 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
611 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
612
613 if (pfLock->fl_type == F_WRLCK) {
614 cFYI(1, ("F_WRLCK "));
615 numLock = 1;
616 } else if (pfLock->fl_type == F_UNLCK) {
617 cFYI(1, ("F_UNLCK "));
618 numUnlock = 1;
619 } else if (pfLock->fl_type == F_RDLCK) {
620 cFYI(1, ("F_RDLCK "));
621 lockType |= LOCKING_ANDX_SHARED_LOCK;
622 numLock = 1;
623 } else if (pfLock->fl_type == F_EXLCK) {
624 cFYI(1, ("F_EXLCK "));
625 numLock = 1;
626 } else if (pfLock->fl_type == F_SHLCK) {
627 cFYI(1, ("F_SHLCK "));
628 lockType |= LOCKING_ANDX_SHARED_LOCK;
629 numLock = 1;
630 } else
631 cFYI(1, ("Unknown type of lock "));
632
633 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
634 pTcon = cifs_sb->tcon;
635
636 if (file->private_data == NULL) {
637 FreeXid(xid);
638 return -EBADF;
639 }
640
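	/* For F_GETLK we probe the range by requesting the lock: if the server
	   grants it the range is free, so unlock it again and report F_UNLCK;
	   if the request fails the range is held and fl_type is left unchanged */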
641 if (IS_GETLK(cmd)) {
642 rc = CIFSSMBLock(xid, pTcon,
643 ((struct cifsFileInfo *)file->
644 private_data)->netfid,
645 length,
646 pfLock->fl_start, 0, 1, lockType,
647 0 /* wait flag */ );
648 if (rc == 0) {
649 rc = CIFSSMBLock(xid, pTcon,
650 ((struct cifsFileInfo *) file->
651 private_data)->netfid,
652 length,
653 pfLock->fl_start, 1 /* numUnlock */ ,
654 0 /* numLock */ , lockType,
655 0 /* wait flag */ );
656 pfLock->fl_type = F_UNLCK;
657 if (rc != 0)
658 cERROR(1, ("Error unlocking previously locked "
659 "range %d during test of lock ",
660 rc));
661 rc = 0;
662
663 } else {
664 /* if rc == ERR_SHARING_VIOLATION ? */
665 rc = 0; /* do not change lock type to unlock
666 since range in use */
667 }
668
669 FreeXid(xid);
670 return rc;
671 }
672
673 rc = CIFSSMBLock(xid, pTcon,
674 ((struct cifsFileInfo *) file->private_data)->
675 netfid, length,
676 pfLock->fl_start, numUnlock, numLock, lockType,
677 wait_flag);
 678	if (pfLock->fl_flags & FL_POSIX)
679 posix_lock_file_wait(file, pfLock);
680 FreeXid(xid);
681 return rc;
682}
683
684ssize_t cifs_user_write(struct file *file, const char __user *write_data,
685 size_t write_size, loff_t *poffset)
686{
687 int rc = 0;
688 unsigned int bytes_written = 0;
689 unsigned int total_written;
690 struct cifs_sb_info *cifs_sb;
691 struct cifsTconInfo *pTcon;
692 int xid, long_op;
693 struct cifsFileInfo *open_file;
694
695 if (file->f_dentry == NULL)
696 return -EBADF;
697
698 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
699 if (cifs_sb == NULL)
700 return -EBADF;
701
702 pTcon = cifs_sb->tcon;
703
704 /* cFYI(1,
705 (" write %d bytes to offset %lld of %s", write_size,
706 *poffset, file->f_dentry->d_name.name)); */
707
708 if (file->private_data == NULL)
709 return -EBADF;
710 else
711 open_file = (struct cifsFileInfo *) file->private_data;
712
713 xid = GetXid();
714 if (file->f_dentry->d_inode == NULL) {
715 FreeXid(xid);
716 return -EBADF;
717 }
718
719 if (*poffset > file->f_dentry->d_inode->i_size)
720 long_op = 2; /* writes past end of file can take a long time */
721 else
722 long_op = 1;
723
724 for (total_written = 0; write_size > total_written;
725 total_written += bytes_written) {
726 rc = -EAGAIN;
727 while (rc == -EAGAIN) {
728 if (file->private_data == NULL) {
729 /* file has been closed on us */
730 FreeXid(xid);
731 /* if we have gotten here we have written some data
732 and blocked, and the file has been freed on us while
733 we blocked so return what we managed to write */
734 return total_written;
735 }
736 if (open_file->closePend) {
737 FreeXid(xid);
738 if (total_written)
739 return total_written;
740 else
741 return -EBADF;
742 }
743 if (open_file->invalidHandle) {
744 if ((file->f_dentry == NULL) ||
745 (file->f_dentry->d_inode == NULL)) {
746 FreeXid(xid);
747 return total_written;
748 }
749 /* we could deadlock if we called
750 filemap_fdatawait from here so tell
751 reopen_file not to flush data to server
752 now */
753 rc = cifs_reopen_file(file->f_dentry->d_inode,
754 file, FALSE);
755 if (rc != 0)
756 break;
757 }
758
759 rc = CIFSSMBWrite(xid, pTcon,
760 open_file->netfid,
761 min_t(const int, cifs_sb->wsize,
762 write_size - total_written),
763 *poffset, &bytes_written,
764 NULL, write_data + total_written, long_op);
765 }
766 if (rc || (bytes_written == 0)) {
767 if (total_written)
768 break;
769 else {
770 FreeXid(xid);
771 return rc;
772 }
773 } else
774 *poffset += bytes_written;
775 long_op = FALSE; /* subsequent writes fast -
776 15 seconds is plenty */
777 }
778
 779	cifs_stats_bytes_written(pTcon, total_written);
780
781 /* since the write may have blocked check these pointers again */
782 if (file->f_dentry) {
783 if (file->f_dentry->d_inode) {
784 struct inode *inode = file->f_dentry->d_inode;
785 inode->i_ctime = inode->i_mtime =
786 current_fs_time(inode->i_sb);
787 if (total_written > 0) {
788 if (*poffset > file->f_dentry->d_inode->i_size)
789 i_size_write(file->f_dentry->d_inode,
790 *poffset);
791 }
792 mark_inode_dirty_sync(file->f_dentry->d_inode);
793 }
794 }
795 FreeXid(xid);
796 return total_written;
797}
798
799static ssize_t cifs_write(struct file *file, const char *write_data,
800 size_t write_size, loff_t *poffset)
801{
802 int rc = 0;
803 unsigned int bytes_written = 0;
804 unsigned int total_written;
805 struct cifs_sb_info *cifs_sb;
806 struct cifsTconInfo *pTcon;
807 int xid, long_op;
808 struct cifsFileInfo *open_file;
809
810 if (file->f_dentry == NULL)
811 return -EBADF;
812
813 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
814 if (cifs_sb == NULL)
815 return -EBADF;
816
817 pTcon = cifs_sb->tcon;
818
819 cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
820 *poffset, file->f_dentry->d_name.name));
821
822 if (file->private_data == NULL)
823 return -EBADF;
824 else
825 open_file = (struct cifsFileInfo *)file->private_data;
826
827 xid = GetXid();
828 if (file->f_dentry->d_inode == NULL) {
829 FreeXid(xid);
830 return -EBADF;
831 }
832
833 if (*poffset > file->f_dentry->d_inode->i_size)
834 long_op = 2; /* writes past end of file can take a long time */
835 else
836 long_op = 1;
837
838 for (total_written = 0; write_size > total_written;
839 total_written += bytes_written) {
840 rc = -EAGAIN;
841 while (rc == -EAGAIN) {
842 if (file->private_data == NULL) {
843 /* file has been closed on us */
844 FreeXid(xid);
845 /* if we have gotten here we have written some data
846 and blocked, and the file has been freed on us
847 while we blocked so return what we managed to
848 write */
849 return total_written;
850 }
851 if (open_file->closePend) {
852 FreeXid(xid);
853 if (total_written)
854 return total_written;
855 else
856 return -EBADF;
857 }
858 if (open_file->invalidHandle) {
859 if ((file->f_dentry == NULL) ||
860 (file->f_dentry->d_inode == NULL)) {
861 FreeXid(xid);
862 return total_written;
863 }
864 /* we could deadlock if we called
865 filemap_fdatawait from here so tell
866 reopen_file not to flush data to
867 server now */
868 rc = cifs_reopen_file(file->f_dentry->d_inode,
869 file, FALSE);
870 if (rc != 0)
871 break;
872 }
 873			/* BB FIXME We can not sign across two buffers yet */
 874			if((experimEnabled) && ((pTcon->ses->server->secMode &
 875			 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0)) {
876 struct kvec iov[2];
877 unsigned int len;
878
 879				len = min((size_t)cifs_sb->wsize,
880 write_size - total_written);
881 /* iov[0] is reserved for smb header */
882 iov[1].iov_base = (char *)write_data +
883 total_written;
884 iov[1].iov_len = len;
 885				rc = CIFSSMBWrite2(xid, pTcon,
 886						open_file->netfid, len,
 887						*poffset, &bytes_written,
 888						iov, 1, long_op);
889 } else
890 /* BB FIXME fixup indentation of line below */
891 rc = CIFSSMBWrite(xid, pTcon,
892 open_file->netfid,
893 min_t(const int, cifs_sb->wsize,
894 write_size - total_written),
895 *poffset, &bytes_written,
896 write_data + total_written, NULL, long_op);
897 }
898 if (rc || (bytes_written == 0)) {
899 if (total_written)
900 break;
901 else {
902 FreeXid(xid);
903 return rc;
904 }
905 } else
906 *poffset += bytes_written;
907 long_op = FALSE; /* subsequent writes fast -
908 15 seconds is plenty */
909 }
910
 911	cifs_stats_bytes_written(pTcon, total_written);
912
913 /* since the write may have blocked check these pointers again */
914 if (file->f_dentry) {
915 if (file->f_dentry->d_inode) {
916 file->f_dentry->d_inode->i_ctime =
917 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
918 if (total_written > 0) {
919 if (*poffset > file->f_dentry->d_inode->i_size)
920 i_size_write(file->f_dentry->d_inode,
921 *poffset);
922 }
923 mark_inode_dirty_sync(file->f_dentry->d_inode);
924 }
925 }
926 FreeXid(xid);
927 return total_written;
928}
929
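/* Find an open file handle on this inode that was opened for writing. On
   success the handle's wrtPending count has been incremented; the caller must
   atomic_dec(&open_file->wrtPending) once it is finished with the handle. */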
930struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
931{
932 struct cifsFileInfo *open_file;
 933	int rc;
934
935 read_lock(&GlobalSMBSeslock);
936 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
937 if (open_file->closePend)
938 continue;
939 if (open_file->pfile &&
940 ((open_file->pfile->f_flags & O_RDWR) ||
941 (open_file->pfile->f_flags & O_WRONLY))) {
 942			atomic_inc(&open_file->wrtPending);
 943			read_unlock(&GlobalSMBSeslock);
 944			if((open_file->invalidHandle) &&
 945			   (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
 946				rc = cifs_reopen_file(&cifs_inode->vfs_inode,
947 open_file->pfile, FALSE);
948 /* if it fails, try another handle - might be */
949 /* dangerous to hold up writepages with retry */
950 if(rc) {
 951					cFYI(1,("failed on reopen file in wp"));
 952					read_lock(&GlobalSMBSeslock);
953 /* can not use this handle, no write
954 pending on this one after all */
955 atomic_dec
956 (&open_file->wrtPending);
957 continue;
958 }
959 }
960 return open_file;
961 }
962 }
963 read_unlock(&GlobalSMBSeslock);
964 return NULL;
965}
966
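/* Write the byte range [from, to) of this page back to the server via any
   writable handle open against the inode; cifs_writepage below calls this
   with the full page range */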
967static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
968{
969 struct address_space *mapping = page->mapping;
970 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
971 char *write_data;
972 int rc = -EFAULT;
973 int bytes_written = 0;
974 struct cifs_sb_info *cifs_sb;
975 struct cifsTconInfo *pTcon;
976 struct inode *inode;
 977	struct cifsFileInfo *open_file;
978
979 if (!mapping || !mapping->host)
980 return -EFAULT;
981
982 inode = page->mapping->host;
983 cifs_sb = CIFS_SB(inode->i_sb);
984 pTcon = cifs_sb->tcon;
985
986 offset += (loff_t)from;
987 write_data = kmap(page);
988 write_data += from;
989
990 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
991 kunmap(page);
992 return -EIO;
993 }
994
995 /* racing with truncate? */
996 if (offset > mapping->host->i_size) {
997 kunmap(page);
998 return 0; /* don't care */
999 }
1000
1001 /* check to make sure that we are not extending the file */
1002 if (mapping->host->i_size - offset < (loff_t)to)
1003 to = (unsigned)(mapping->host->i_size - offset);
1004
1005 open_file = find_writable_file(CIFS_I(mapping->host));
1006 if (open_file) {
1007 bytes_written = cifs_write(open_file->pfile, write_data,
1008 to-from, &offset);
 1009		atomic_dec(&open_file->wrtPending);
 1010		/* Does mm or vfs already set times? */
1011 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1012 if ((bytes_written > 0) && (offset)) {
1013 rc = 0;
1014 } else if (bytes_written < 0) {
1015 if (rc != -EBADF)
1016 rc = bytes_written;
 1017		}
 1018	} else {
1019 cFYI(1, ("No writeable filehandles for inode"));
1020 rc = -EIO;
1021 }
1022
1023 kunmap(page);
1024 return rc;
1025}
1026
1027static int cifs_writepages(struct address_space *mapping,
1028			    struct writeback_control *wbc)
1029{
1030 struct backing_dev_info *bdi = mapping->backing_dev_info;
1031 unsigned int bytes_to_write;
1032 unsigned int bytes_written;
1033 struct cifs_sb_info *cifs_sb;
1034 int done = 0;
1035 pgoff_t end = -1;
1036 pgoff_t index;
1037 int is_range = 0;
1038 struct kvec iov[32];
 1039	int len;
1040 int n_iov = 0;
1041 pgoff_t next;
1042 int nr_pages;
1043 __u64 offset = 0;
 1044	struct cifsFileInfo *open_file;
1045 struct page *page;
1046 struct pagevec pvec;
1047 int rc = 0;
1048 int scanned = 0;
1049 int xid;
1050
1051 cifs_sb = CIFS_SB(mapping->host->i_sb);
1052
1053 /*
1054 * If wsize is smaller that the page cache size, default to writing
1055 * one page at a time via cifs_writepage
1056 */
1057 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1058 return generic_writepages(mapping, wbc);
1059
1060 /* BB FIXME we do not have code to sign across multiple buffers yet,
1061 so go to older writepage style write which we can sign if needed */
1062 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1063 if(cifs_sb->tcon->ses->server->secMode &
1064 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1065 return generic_writepages(mapping, wbc);
1066
1067 /*
1068 * BB: Is this meaningful for a non-block-device file system?
1069 * If it is, we should test it again after we do I/O
1070 */
1071 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1072 wbc->encountered_congestion = 1;
1073 return 0;
1074 }
1075
1076 xid = GetXid();
1077
1078 pagevec_init(&pvec, 0);
1079 if (wbc->sync_mode == WB_SYNC_NONE)
1080 index = mapping->writeback_index; /* Start from prev offset */
1081 else {
1082 index = 0;
1083 scanned = 1;
1084 }
1085 if (wbc->start || wbc->end) {
1086 index = wbc->start >> PAGE_CACHE_SHIFT;
1087 end = wbc->end >> PAGE_CACHE_SHIFT;
1088 is_range = 1;
1089 scanned = 1;
1090 }
1091retry:
1092 while (!done && (index <= end) &&
1093 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1094 PAGECACHE_TAG_DIRTY,
1095 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1096 int first;
1097 unsigned int i;
1098
1099 first = -1;
1100 next = 0;
1101 n_iov = 0;
1102 bytes_to_write = 0;
1103
1104 for (i = 0; i < nr_pages; i++) {
1105 page = pvec.pages[i];
1106 /*
1107 * At this point we hold neither mapping->tree_lock nor
1108 * lock on the page itself: the page may be truncated or
1109 * invalidated (changing page->mapping to NULL), or even
1110 * swizzled back from swapper_space to tmpfs file
1111 * mapping
1112 */
1113
1114 if (first < 0)
1115 lock_page(page);
1116 else if (TestSetPageLocked(page))
1117 break;
1118
1119 if (unlikely(page->mapping != mapping)) {
1120 unlock_page(page);
1121 break;
1122 }
1123
1124 if (unlikely(is_range) && (page->index > end)) {
1125 done = 1;
1126 unlock_page(page);
1127 break;
1128 }
1129
1130 if (next && (page->index != next)) {
1131 /* Not next consecutive page */
1132 unlock_page(page);
1133 break;
1134 }
1135
1136 if (wbc->sync_mode != WB_SYNC_NONE)
1137 wait_on_page_writeback(page);
1138
1139 if (PageWriteback(page) ||
1140 !test_clear_page_dirty(page)) {
1141 unlock_page(page);
1142 break;
1143 }
1144
1145 if (page_offset(page) >= mapping->host->i_size) {
1146 done = 1;
1147 unlock_page(page);
1148 break;
1149 }
1150
1151 /*
1152 * BB can we get rid of this? pages are held by pvec
1153 */
1154 page_cache_get(page);
1155
1156 len = min(mapping->host->i_size - page_offset(page),
1157 (loff_t)PAGE_CACHE_SIZE);
1158
1159 /* reserve iov[0] for the smb header */
1160 n_iov++;
1161 iov[n_iov].iov_base = kmap(page);
1162 iov[n_iov].iov_len = len;
1163 bytes_to_write += len;
1164
1165 if (first < 0) {
1166 first = i;
1167 offset = page_offset(page);
1168 }
1169 next = page->index + 1;
1170 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1171 break;
1172 }
1173 if (n_iov) {
1174 /* Search for a writable handle every time we call
1175 * CIFSSMBWrite2. We can't rely on the last handle
1176 * we used to still be valid
1177 */
1178 open_file = find_writable_file(CIFS_I(mapping->host));
1179 if (!open_file) {
1180 cERROR(1, ("No writable handles for inode"));
1181 rc = -EBADF;
 1182			} else {
1183 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1184 open_file->netfid,
1185 bytes_to_write, offset,
1186 &bytes_written, iov, n_iov,
1187 1);
1188 atomic_dec(&open_file->wrtPending);
1189 if (rc || bytes_written < bytes_to_write) {
1190 cERROR(1,("Write2 ret %d, written = %d",
1191 rc, bytes_written));
1192 /* BB what if continued retry is
1193 requested via mount flags? */
1194 set_bit(AS_EIO, &mapping->flags);
1195 SetPageError(page);
1196 } else {
1197 cifs_stats_bytes_written(cifs_sb->tcon,
1198 bytes_written);
1199 }
1200 }
1201 for (i = 0; i < n_iov; i++) {
1202 page = pvec.pages[first + i];
1203 kunmap(page);
1204 unlock_page(page);
1205 page_cache_release(page);
1206 }
1207 if ((wbc->nr_to_write -= n_iov) <= 0)
1208 done = 1;
1209 index = next;
1210 }
1211 pagevec_release(&pvec);
1212 }
1213 if (!scanned && !done) {
1214 /*
1215 * We hit the last page and there is more work to be done: wrap
1216 * back to the start of the file
1217 */
1218 scanned = 1;
1219 index = 0;
1220 goto retry;
1221 }
1222 if (!is_range)
1223 mapping->writeback_index = index;
1224
 1225	FreeXid(xid);
 1226
1227 return rc;
1228}
1229
1230static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1231{
1232 int rc = -EFAULT;
1233 int xid;
1234
1235 xid = GetXid();
1236/* BB add check for wbc flags */
1237 page_cache_get(page);
1238 if (!PageUptodate(page)) {
1239 cFYI(1, ("ppw - page not up to date"));
1240 }
1241
1242 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1243 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1244 unlock_page(page);
1245 page_cache_release(page);
1246 FreeXid(xid);
1247 return rc;
1248}
1249
1250static int cifs_commit_write(struct file *file, struct page *page,
1251 unsigned offset, unsigned to)
1252{
1253 int xid;
1254 int rc = 0;
1255 struct inode *inode = page->mapping->host;
1256 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1257 char *page_data;
1258
1259 xid = GetXid();
1260 cFYI(1, ("commit write for page %p up to position %lld for %d",
1261 page, position, to));
1262 if (position > inode->i_size) {
1263 i_size_write(inode, position);
1264 /* if (file->private_data == NULL) {
1265 rc = -EBADF;
1266 } else {
1267 open_file = (struct cifsFileInfo *)file->private_data;
1268 cifs_sb = CIFS_SB(inode->i_sb);
1269 rc = -EAGAIN;
1270 while (rc == -EAGAIN) {
1271 if ((open_file->invalidHandle) &&
1272 (!open_file->closePend)) {
1273 rc = cifs_reopen_file(
1274 file->f_dentry->d_inode, file);
1275 if (rc != 0)
1276 break;
1277 }
1278 if (!open_file->closePend) {
1279 rc = CIFSSMBSetFileSize(xid,
1280 cifs_sb->tcon, position,
1281 open_file->netfid,
1282 open_file->pid, FALSE);
1283 } else {
1284 rc = -EBADF;
1285 break;
1286 }
1287 }
1288 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1289 } */
1290 }
1291 if (!PageUptodate(page)) {
1292 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1293 /* can not rely on (or let) writepage write this data */
1294 if (to < offset) {
1295 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1296 offset, to));
1297 FreeXid(xid);
1298 return rc;
1299 }
1300 /* this is probably better than directly calling
1301 partialpage_write since in this function the file handle is
1302 known which we might as well leverage */
1303 /* BB check if anything else missing out of ppw
1304 such as updating last write time */
1305 page_data = kmap(page);
1306 rc = cifs_write(file, page_data + offset, to-offset,
1307 &position);
1308 if (rc > 0)
1309 rc = 0;
1310 /* else if (rc < 0) should we set writebehind rc? */
1311 kunmap(page);
1312 } else {
1313 set_page_dirty(page);
1314 }
1315
1316 FreeXid(xid);
1317 return rc;
1318}
1319
1320int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1321{
1322 int xid;
1323 int rc = 0;
1324 struct inode *inode = file->f_dentry->d_inode;
1325
1326 xid = GetXid();
1327
1328 cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
1329 dentry->d_name.name, datasync));
1330
1331 rc = filemap_fdatawrite(inode->i_mapping);
1332 if (rc == 0)
1333 CIFS_I(inode)->write_behind_rc = 0;
1334 FreeXid(xid);
1335 return rc;
1336}
1337
1338/* static int cifs_sync_page(struct page *page)
1339{
1340 struct address_space *mapping;
1341 struct inode *inode;
1342 unsigned long index = page->index;
1343 unsigned int rpages = 0;
1344 int rc = 0;
1345
1346 cFYI(1, ("sync page %p",page));
1347 mapping = page->mapping;
1348 if (!mapping)
1349 return 0;
1350 inode = mapping->host;
1351 if (!inode)
1352 return 0; */
1353
1354/* fill in rpages then
1355 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1356
1357/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
1358
1359 if (rc < 0)
1360 return rc;
1361 return 0;
1362} */
1363
1364/*
1365 * As file closes, flush all cached write data for this inode checking
1366 * for write behind errors.
1367 */
1368int cifs_flush(struct file *file)
1369{
1370 struct inode * inode = file->f_dentry->d_inode;
1371 int rc = 0;
1372
1373 /* Rather than do the steps manually:
1374 lock the inode for writing
1375 loop through pages looking for write behind data (dirty pages)
1376 coalesce into contiguous 16K (or smaller) chunks to write to server
1377 send to server (prefer in parallel)
1378 deal with writebehind errors
1379 unlock inode for writing
1380 filemapfdatawrite appears easier for the time being */
1381
1382 rc = filemap_fdatawrite(inode->i_mapping);
1383 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1384 CIFS_I(inode)->write_behind_rc = 0;
1385
1386 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1387
1388 return rc;
1389}
1390
1391ssize_t cifs_user_read(struct file *file, char __user *read_data,
1392 size_t read_size, loff_t *poffset)
1393{
1394 int rc = -EACCES;
1395 unsigned int bytes_read = 0;
1396 unsigned int total_read = 0;
1397 unsigned int current_read_size;
1398 struct cifs_sb_info *cifs_sb;
1399 struct cifsTconInfo *pTcon;
1400 int xid;
1401 struct cifsFileInfo *open_file;
1402 char *smb_read_data;
1403 char __user *current_offset;
1404 struct smb_com_read_rsp *pSMBr;
1405
1406 xid = GetXid();
1407 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1408 pTcon = cifs_sb->tcon;
1409
1410 if (file->private_data == NULL) {
1411 FreeXid(xid);
1412 return -EBADF;
1413 }
1414 open_file = (struct cifsFileInfo *)file->private_data;
1415
1416 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1417 cFYI(1, ("attempting read on write only file instance"));
1418 }
1419 for (total_read = 0, current_offset = read_data;
1420 read_size > total_read;
1421 total_read += bytes_read, current_offset += bytes_read) {
1422 current_read_size = min_t(const int, read_size - total_read,
1423 cifs_sb->rsize);
1424 rc = -EAGAIN;
1425 smb_read_data = NULL;
1426 while (rc == -EAGAIN) {
1427 if ((open_file->invalidHandle) &&
1428 (!open_file->closePend)) {
1429 rc = cifs_reopen_file(file->f_dentry->d_inode,
1430 file, TRUE);
1431 if (rc != 0)
1432 break;
1433 }
 1434			rc = CIFSSMBRead(xid, pTcon,
1435 open_file->netfid,
1436 current_read_size, *poffset,
1437 &bytes_read, &smb_read_data);
1438 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1439 if (copy_to_user(current_offset,
1440 smb_read_data + 4 /* RFC1001 hdr */
1441 + le16_to_cpu(pSMBr->DataOffset),
1442 bytes_read)) {
1443 rc = -EFAULT;
1444 FreeXid(xid);
1445 return rc;
1446 }
1447 if (smb_read_data) {
1448 cifs_buf_release(smb_read_data);
1449 smb_read_data = NULL;
1450 }
1451 }
1452 if (rc || (bytes_read == 0)) {
1453 if (total_read) {
1454 break;
1455 } else {
1456 FreeXid(xid);
1457 return rc;
1458 }
1459 } else {
 1460			cifs_stats_bytes_read(pTcon, bytes_read);
1461 *poffset += bytes_read;
1462 }
1463 }
1464 FreeXid(xid);
1465 return total_read;
1466}
1467
1468
1469static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1470 loff_t *poffset)
1471{
1472 int rc = -EACCES;
1473 unsigned int bytes_read = 0;
1474 unsigned int total_read;
1475 unsigned int current_read_size;
1476 struct cifs_sb_info *cifs_sb;
1477 struct cifsTconInfo *pTcon;
1478 int xid;
1479 char *current_offset;
1480 struct cifsFileInfo *open_file;
1481
1482 xid = GetXid();
1483 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1484 pTcon = cifs_sb->tcon;
1485
1486 if (file->private_data == NULL) {
1487 FreeXid(xid);
1488 return -EBADF;
1489 }
1490 open_file = (struct cifsFileInfo *)file->private_data;
1491
1492 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1493 cFYI(1, ("attempting read on write only file instance"));
1494
1495 for (total_read = 0, current_offset = read_data;
1496 read_size > total_read;
1497 total_read += bytes_read, current_offset += bytes_read) {
1498 current_read_size = min_t(const int, read_size - total_read,
1499 cifs_sb->rsize);
 1500		/* For Windows ME and 9x we do not want to request more
1501 than it negotiated since it will refuse the read then */
1502 if((pTcon->ses) &&
1503 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1504 current_read_size = min_t(const int, current_read_size,
1505 pTcon->ses->server->maxBuf - 128);
1506 }
1507 rc = -EAGAIN;
1508 while (rc == -EAGAIN) {
1509 if ((open_file->invalidHandle) &&
1510 (!open_file->closePend)) {
1511 rc = cifs_reopen_file(file->f_dentry->d_inode,
1512 file, TRUE);
1513 if (rc != 0)
1514 break;
1515 }
 1516			rc = CIFSSMBRead(xid, pTcon,
1517 open_file->netfid,
1518 current_read_size, *poffset,
1519 &bytes_read, &current_offset);
1520 }
1521 if (rc || (bytes_read == 0)) {
1522 if (total_read) {
1523 break;
1524 } else {
1525 FreeXid(xid);
1526 return rc;
1527 }
1528 } else {
 1529			cifs_stats_bytes_read(pTcon, total_read);
1530 *poffset += bytes_read;
1531 }
1532 }
1533 FreeXid(xid);
1534 return total_read;
1535}
1536
1537int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1538{
1539 struct dentry *dentry = file->f_dentry;
1540 int rc, xid;
1541
1542 xid = GetXid();
1543 rc = cifs_revalidate(dentry);
1544 if (rc) {
1545 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1546 FreeXid(xid);
1547 return rc;
1548 }
1549 rc = generic_file_mmap(file, vma);
1550 FreeXid(xid);
1551 return rc;
1552}
1553
1554
1555static void cifs_copy_cache_pages(struct address_space *mapping,
1556 struct list_head *pages, int bytes_read, char *data,
1557 struct pagevec *plru_pvec)
1558{
1559 struct page *page;
1560 char *target;
1561
1562 while (bytes_read > 0) {
1563 if (list_empty(pages))
1564 break;
1565
1566 page = list_entry(pages->prev, struct page, lru);
1567 list_del(&page->lru);
1568
1569 if (add_to_page_cache(page, mapping, page->index,
1570 GFP_KERNEL)) {
1571 page_cache_release(page);
1572 cFYI(1, ("Add page cache failed"));
1573 data += PAGE_CACHE_SIZE;
1574 bytes_read -= PAGE_CACHE_SIZE;
1575 continue;
1576 }
1577
1578 target = kmap_atomic(page,KM_USER0);
1579
1580 if (PAGE_CACHE_SIZE > bytes_read) {
1581 memcpy(target, data, bytes_read);
1582 /* zero the tail end of this partial page */
1583 memset(target + bytes_read, 0,
1584 PAGE_CACHE_SIZE - bytes_read);
1585 bytes_read = 0;
1586 } else {
1587 memcpy(target, data, PAGE_CACHE_SIZE);
1588 bytes_read -= PAGE_CACHE_SIZE;
1589 }
1590 kunmap_atomic(target, KM_USER0);
1591
1592 flush_dcache_page(page);
1593 SetPageUptodate(page);
1594 unlock_page(page);
1595 if (!pagevec_add(plru_pvec, page))
1596 __pagevec_lru_add(plru_pvec);
1597 data += PAGE_CACHE_SIZE;
1598 }
1599 return;
1600}
1601
1602static int cifs_readpages(struct file *file, struct address_space *mapping,
1603 struct list_head *page_list, unsigned num_pages)
1604{
1605 int rc = -EACCES;
1606 int xid;
1607 loff_t offset;
1608 struct page *page;
1609 struct cifs_sb_info *cifs_sb;
1610 struct cifsTconInfo *pTcon;
1611 int bytes_read = 0;
1612 unsigned int read_size,i;
1613 char *smb_read_data = NULL;
1614 struct smb_com_read_rsp *pSMBr;
1615 struct pagevec lru_pvec;
1616 struct cifsFileInfo *open_file;
1617
1618 xid = GetXid();
1619 if (file->private_data == NULL) {
1620 FreeXid(xid);
1621 return -EBADF;
1622 }
1623 open_file = (struct cifsFileInfo *)file->private_data;
1624 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1625 pTcon = cifs_sb->tcon;
 1626
1627 pagevec_init(&lru_pvec, 0);
1628
1629 for (i = 0; i < num_pages; ) {
1630 unsigned contig_pages;
1631 struct page *tmp_page;
1632 unsigned long expected_index;
1633
1634 if (list_empty(page_list))
1635 break;
1636
1637 page = list_entry(page_list->prev, struct page, lru);
1638 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1639
1640 /* count adjacent pages that we will read into */
1641 contig_pages = 0;
1642 expected_index =
1643 list_entry(page_list->prev, struct page, lru)->index;
1644 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1645 if (tmp_page->index == expected_index) {
1646 contig_pages++;
1647 expected_index++;
1648 } else
1649 break;
1650 }
1651 if (contig_pages + i > num_pages)
1652 contig_pages = num_pages - i;
1653
1654 /* for reads over a certain size could initiate async
1655 read ahead */
1656
1657 read_size = contig_pages * PAGE_CACHE_SIZE;
1658 /* Read size needs to be in multiples of one page */
1659 read_size = min_t(const unsigned int, read_size,
1660 cifs_sb->rsize & PAGE_CACHE_MASK);
1661
1662 rc = -EAGAIN;
1663 while (rc == -EAGAIN) {
1664 if ((open_file->invalidHandle) &&
1665 (!open_file->closePend)) {
1666 rc = cifs_reopen_file(file->f_dentry->d_inode,
1667 file, TRUE);
1668 if (rc != 0)
1669 break;
1670 }
1671
 1672			rc = CIFSSMBRead(xid, pTcon,
1673 open_file->netfid,
1674 read_size, offset,
1675 &bytes_read, &smb_read_data);
1676
1677 /* BB more RC checks ? */
1678 if (rc== -EAGAIN) {
1679 if (smb_read_data) {
1680 cifs_buf_release(smb_read_data);
1681 smb_read_data = NULL;
1682 }
1683 }
1684 }
1685 if ((rc < 0) || (smb_read_data == NULL)) {
1686 cFYI(1, ("Read error in readpages: %d", rc));
 1687			/* clean up remaining pages off list */
1688 while (!list_empty(page_list) && (i < num_pages)) {
1689 page = list_entry(page_list->prev, struct page,
1690 lru);
1691 list_del(&page->lru);
1692 page_cache_release(page);
1693 }
1694 break;
1695 } else if (bytes_read > 0) {
1696 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1697 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1698 smb_read_data + 4 /* RFC1001 hdr */ +
1699 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1700
1701 i += bytes_read >> PAGE_CACHE_SHIFT;
 1702			cifs_stats_bytes_read(pTcon, bytes_read);
1703 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1704 i++; /* account for partial page */
1705
1706 /* server copy of file can have smaller size
1707 than client */
1708 /* BB do we need to verify this common case ?
1709 this case is ok - if we are at server EOF
1710 we will hit it on next read */
1711
1712 /* while (!list_empty(page_list) && (i < num_pages)) {
1713 page = list_entry(page_list->prev,
1714 struct page, list);
1715 list_del(&page->list);
1716 page_cache_release(page);
1717 }
1718 break; */
1719 }
1720 } else {
1721 cFYI(1, ("No bytes read (%d) at offset %lld . "
1722 "Cleaning remaining pages from readahead list",
1723 bytes_read, offset));
1724 /* BB turn off caching and do new lookup on
1725 file size at server? */
1726 while (!list_empty(page_list) && (i < num_pages)) {
1727 page = list_entry(page_list->prev, struct page,
1728 lru);
1729 list_del(&page->lru);
1730
1731 /* BB removeme - replace with zero of page? */
1732 page_cache_release(page);
1733 }
1734 break;
1735 }
1736 if (smb_read_data) {
1737 cifs_buf_release(smb_read_data);
1738 smb_read_data = NULL;
1739 }
1740 bytes_read = 0;
1741 }
1742
1743 pagevec_lru_add(&lru_pvec);
1744
1745/* need to free smb_read_data buf before exit */
1746 if (smb_read_data) {
1747 cifs_buf_release(smb_read_data);
1748 smb_read_data = NULL;
1749 }
1750
1751 FreeXid(xid);
1752 return rc;
1753}
1754
1755static int cifs_readpage_worker(struct file *file, struct page *page,
1756 loff_t *poffset)
1757{
1758 char *read_data;
1759 int rc;
1760
1761 page_cache_get(page);
1762 read_data = kmap(page);
1763 /* for reads over a certain size could initiate async read ahead */
1764
1765 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1766
1767 if (rc < 0)
1768 goto io_error;
1769 else
1770 cFYI(1, ("Bytes read %d ",rc));
1771
1772 file->f_dentry->d_inode->i_atime =
1773 current_fs_time(file->f_dentry->d_inode->i_sb);
1774
1775 if (PAGE_CACHE_SIZE > rc)
1776 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1777
1778 flush_dcache_page(page);
1779 SetPageUptodate(page);
1780 rc = 0;
1781
1782io_error:
1783 kunmap(page);
1784 page_cache_release(page);
1785 return rc;
1786}
1787
1788static int cifs_readpage(struct file *file, struct page *page)
1789{
1790 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1791 int rc = -EACCES;
1792 int xid;
1793
1794 xid = GetXid();
1795
1796 if (file->private_data == NULL) {
1797 FreeXid(xid);
1798 return -EBADF;
1799 }
1800
1801 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1802 page, (int)offset, (int)offset));
1803
1804 rc = cifs_readpage_worker(file, page, &offset);
1805
1806 unlock_page(page);
1807
1808 FreeXid(xid);
1809 return rc;
1810}
1811
1812/* We do not want to update the file size from server for inodes
1813 open for write - to avoid races with writepage extending
1814 the file - in the future we could consider allowing
1815 refreshing the inode only on increases in the file size
1816 but this is tricky to do without racing with writebehind
1817 page caching in the current Linux kernel design */
1818int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1819{
1820 struct cifsFileInfo *open_file = NULL;
1821
1822 if (cifsInode)
1823 open_file = find_writable_file(cifsInode);
1824
1825 if(open_file) {
1826 /* there is not actually a write pending so let
1827 this handle go free and allow it to
1828 be closable if needed */
1829 atomic_dec(&open_file->wrtPending);
 1830		return 0;
 1831	} else
 1832		return 1;
1833}
1834
1835static int cifs_prepare_write(struct file *file, struct page *page,
1836 unsigned from, unsigned to)
1837{
1838 int rc = 0;
1839 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1840 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1841 if (!PageUptodate(page)) {
1842 /* if (to - from != PAGE_CACHE_SIZE) {
1843 void *kaddr = kmap_atomic(page, KM_USER0);
1844 memset(kaddr, 0, from);
1845 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1846 flush_dcache_page(page);
1847 kunmap_atomic(kaddr, KM_USER0);
1848 } */
1849 /* If we are writing a full page it will be up to date,
1850 no need to read from the server */
1851 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1852 SetPageUptodate(page);
1853
1854 /* might as well read a page, it is fast enough */
1855 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1856 rc = cifs_readpage_worker(file, page, &offset);
1857 } else {
1858 /* should we try using another file handle if there is one -
1859 how would we lock it to prevent close of that handle
1860 racing with this read?
1861 In any case this will be written out by commit_write */
1862 }
1863 }
1864
1865 /* BB should we pass any errors back?
1866 e.g. if we do not have read access to the file */
1867 return 0;
1868}
1869
1870struct address_space_operations cifs_addr_ops = {
1871 .readpage = cifs_readpage,
1872 .readpages = cifs_readpages,
1873 .writepage = cifs_writepage,
 1874	.writepages = cifs_writepages,
1875 .prepare_write = cifs_prepare_write,
1876 .commit_write = cifs_commit_write,
1877 .set_page_dirty = __set_page_dirty_nobuffers,
1878 /* .sync_page = cifs_sync_page, */
1879 /* .direct_IO = */
1880};