/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY NFSDBG_PAGECACHE

#define MIN_POOL_WRITE (32)
#define MIN_POOL_COMMIT (4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
static void nfs_mark_request_dirty(struct nfs_page *req);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);

struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}

struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}

static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			atomic_inc(&req->wb_count);
	}
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct nfs_page *req = NULL;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;

	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(req_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}

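/*
 * Worked example (illustrative, assuming PAGE_CACHE_SIZE == 4096): a
 * write of count = 100 bytes at offset = 200 into the page at index 2
 * yields end = (2 << 12) + 200 + 100 = 8492.  If i_size_read() returns
 * less than 8492, the file is deemed to have grown and i_size is
 * updated; otherwise the write lies entirely within the current file
 * size and nothing is done.
 */
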
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	if (count != PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
	SetPageUptodate(page);
}

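/*
 * Example of the above (a sketch, not from the original source): with
 * PAGE_CACHE_SIZE == 4096, a request for base = 0, count = 4096 marks
 * the page uptodate directly.  A request for base = 0, count = 1000 on
 * the last page of a 1000-byte file also qualifies, since
 * nfs_page_length() returns 1000 there; the remaining 3096 bytes are
 * zeroed via memclear_highpage_flush() before SetPageUptodate().
 */
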
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}

static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * Find an associated nfs write request, and prepare to flush it out.
 * Returns 1 if there was no write request, or if the request was
 * already tagged by nfs_set_page_dirty. Returns 0 if the request
 * was not tagged.
 * May also return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_mark_flush(struct page *page)
{
	struct nfs_page *req;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
	int ret;

	spin_lock(req_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(req_lock);
			return 1;
		}
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 * then the call to nfs_lock_request_dontget() will always
		 * succeed provided that someone hasn't already marked the
		 * request as dirty (in which case we don't care).
		 */
		spin_unlock(req_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(req_lock);
	}
	spin_unlock(req_lock);
	if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
		nfs_mark_request_dirty(req);
		set_page_writeback(page);
	}
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfs_unlock_request(req);
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned offset;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	err = nfs_page_mark_flush(page);
	if (err <= 0)
		goto out;
	err = 0;
	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
	if (err != 0)
		goto out;
	err = nfs_page_mark_flush(page);
	if (err > 0)
		err = 0;
out:
	if (!wbc->for_writepages)
		nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc));
	return err;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return err;
}

/*
 * Note: causes nfs_update_request() to block on the assumption
 * that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	err = 0;
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	congestion_end(WRITE);
	return err;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
}

static void
nfs_redirty_request(struct nfs_page *req)
{
	clear_bit(PG_FLUSHING, &req->wb_flags);
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	return test_bit(PG_FLUSHING, &req->wb_flags) == 0;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}

static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif

static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}


/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}

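/*
 * Region-update example (illustrative): if the locked request currently
 * covers bytes [1024, 2048) of the page (wb_offset = 1024, wb_bytes =
 * 1024) and the new write is offset = 512, bytes = 1024, the ranges
 * overlap, so the request is extended in place to cover [512, 2048):
 * wb_offset and wb_pgbase become 512 and wb_bytes becomes 1536.  A
 * write of [2560, 3072) would not touch [1024, 2048); it fails the
 * "offset > rqend" test and the caller gets -EBUSY instead.
 */
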
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}

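/*
 * Usage sketch (an assumption about the caller, not from this file):
 * this is intended to run in the ->prepare_write path, before any user
 * data is copied into the page, along the lines of
 *
 *	status = nfs_flush_incompatible(file, page);
 *	if (status == 0)
 *		... copy user data, then nfs_updatepage() ...
 *
 * so that a request belonging to a different open context is safely on
 * the wire before the page contents change underneath it.
 */
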
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 * Note: O_SYNC lives in f_flags, not f_mode.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_flags & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_redirty_request(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);

out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}

static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}

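/*
 * Example (illustrative): with PAGE_CACHE_SIZE == 4096 and wsize == 1024,
 * a request covering a full dirty page is sent as four WRITEs of 1024
 * bytes at page offsets 0, 1024, 2048 and 3072, each carrying its own
 * nfs_write_data and completing through nfs_write_partial_ops; the last
 * of the four to finish (wb_complete reaching zero) releases the page.
 */
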
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page *req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}

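/*
 * Note on the FLUSH_STABLE heuristic above (an interpretation, not from
 * the original comments): when the whole dirty set is one small write
 * (npages <= wpages, every outstanding page of the inode is in this
 * batch, and the first request fits in a single wsize WRITE), asking
 * the server for a stable write up front avoids having to issue a
 * separate COMMIT for unstable data later on.
 */
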
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 * writebacks since the page->count is kept > 1 for as long
 * as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};


/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 * the server reports NFS_DATA_SYNC, but performs
		 * NFS_FILE_SYNC. We therefore implement this checking
		 * as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
			       argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}


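/*
 * Short-write example (illustrative): suppose argp->count was 8192 but
 * the server only wrote resp->count == 4096.  For an NFSv2 or stable
 * NFSv3 write (committed != NFS_UNSTABLE), the arguments are advanced
 * by 4096 (offset, pgbase, count) and the call is restarted for the
 * remaining half.  For an unstable write, the whole range is resent
 * with argp->stable = NFS_FILE_SYNC instead, which, as the comment
 * above notes, avoids headaches if the server crashes.
 */
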
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode = inode;
	data->cred = first->wb_context->cred;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
	next:
		nfs_clear_page_writeback(req);
	}
}
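
/*
 * Verifier example (illustrative): the write verifier is an opaque
 * cookie that changes when the server reboots.  If a request was
 * written with verifier A but the COMMIT reply carries verifier B, the
 * data may have been lost from the server's unstable cache, so the
 * page is simply redirtied (nfs_redirty_request above) and written
 * again; on a match the request is finally removed from the inode.
 */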

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif

static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct nfs_inode *nfsi = NFS_I(mapping->host);
	LIST_HEAD(head);
	long res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(mapping, wbc, &head);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(mapping->host, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif

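/*
 * Overview (added summary, not from the original source):
 * nfs_sync_mapping_wait() below loops over the requested page range,
 * alternating between three steps under nfsi->req_lock until nothing
 * is left: (1) wait for in-flight writebacks to finish, (2) scan for
 * and flush (or, with FLUSH_INVALIDATE, cancel) dirty requests, and
 * (3) unless FLUSH_NOCOMMIT is set, scan for and commit (or cancel)
 * requests on the commit list.
 */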
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	unsigned long idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			unsigned long l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(unsigned long)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		wbc->pages_skipped = 0;
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(mapping, wbc, &head);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE) {
				nfs_cancel_dirty_list(&head);
				ret = pages;
			} else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (wbc->pages_skipped != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0) {
			if (wbc->pages_skipped != 0)
				continue;
			break;
		}
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = generic_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}

int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
		.for_writepages = 1,
	};
	int ret;

	if (!(how & FLUSH_NOWRITEPAGE)) {
		ret = generic_writepages(mapping, &wbc);
		if (ret < 0)
			goto out;
	}
	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}

int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

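/*
 * Usage note (hedged): read-side paths are expected to call
 *
 *	err = nfs_wb_page(inode, page);
 *
 * before filling a page from the server, so that any write request
 * still attached to the page is flushed synchronously (FLUSH_STABLE)
 * and the subsequent READ cannot observe stale data.
 * nfs_writepage_setup() above uses it the same way when a conflicting
 * request returns -EBUSY.
 */
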
int nfs_set_page_dirty(struct page *page)
{
	struct nfs_page *req;

	req = nfs_page_find_request(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
		nfs_release_request(req);
	}
	return __set_page_dirty_nobuffers(page);
}


int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}
