bbs.cooldavid.org Git - net-next-2.6.git / fs/nfs/write.c (blame view at commit "NFSv4: Ensure the callback daemon flushes signals")
1/*
2 * linux/fs/nfs/write.c
3 *
4 * Writing file data over NFS.
5 *
6 * We do it like this: When a (user) process wishes to write data to an
7 * NFS file, a write request is allocated that contains the RPC task data
8 * plus some info on the page to be written, and added to the inode's
9 * write chain. If the process writes past the end of the page, an async
10 * RPC call to write the page is scheduled immediately; otherwise, the call
11 * is delayed for a few seconds.
12 *
13 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
14 *
15 * Write requests are kept on the inode's writeback list. Each entry in
16 * that list references the page (portion) to be written. When the
17 * cache timeout has expired, the RPC task is woken up, and tries to
18 * lock the page. As soon as it manages to do so, the request is moved
19 * from the writeback list to the writelock list.
20 *
21 * Note: we must make sure never to confuse the inode passed in the
22 * write_page request with the one in page->inode. As far as I understand
23 * it, these are different when doing a swap-out.
24 *
25 * To understand everything that goes on here and in the NFS read code,
26 * one should be aware that a page is locked in exactly one of the following
27 * cases:
28 *
29 * - A write request is in progress.
30 * - A user process is in generic_file_write/nfs_update_page
31 * - A user process is in generic_file_read
32 *
33 * Also note that because of the way pages are invalidated in
34 * nfs_revalidate_inode, the following assertions hold:
35 *
36 * - If a page is dirty, there will be no read requests (a page will
37 * not be re-read unless invalidated by nfs_revalidate_inode).
38 * - If the page is not uptodate, there will be no pending write
39 * requests, and no process will be in nfs_update_page.
40 *
41 * FIXME: Interaction with the vmscan routines is not optimal yet.
42 * Either vmscan must be made nfs-savvy, or we need a different page
43 * reclaim concept that supports something like FS-independent
44 * buffer_heads with a b_ops-> field.
45 *
46 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
47 */
48
49#include <linux/config.h>
50#include <linux/types.h>
51#include <linux/slab.h>
52#include <linux/mm.h>
53#include <linux/pagemap.h>
54#include <linux/file.h>
55#include <linux/mpage.h>
56#include <linux/writeback.h>
57
58#include <linux/sunrpc/clnt.h>
59#include <linux/nfs_fs.h>
60#include <linux/nfs_mount.h>
61#include <linux/nfs_page.h>
62#include <asm/uaccess.h>
63#include <linux/smp_lock.h>
64
65#include "delegation.h"
66#include "iostat.h"
67
68#define NFSDBG_FACILITY NFSDBG_PAGECACHE
69
70#define MIN_POOL_WRITE (32)
71#define MIN_POOL_COMMIT (4)
72
73/*
74 * Local function declarations
75 */
76static struct nfs_page * nfs_update_request(struct nfs_open_context*,
77 struct inode *,
78 struct page *,
79 unsigned int, unsigned int);
80static int nfs_wait_on_write_congestion(struct address_space *, int);
81static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
82static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
83 unsigned int npages, int how);
84static const struct rpc_call_ops nfs_write_partial_ops;
85static const struct rpc_call_ops nfs_write_full_ops;
86static const struct rpc_call_ops nfs_commit_ops;
87
88static kmem_cache_t *nfs_wdata_cachep;
 89static mempool_t *nfs_wdata_mempool;
90static mempool_t *nfs_commit_mempool;
91
92static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
93
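/*
 * Allocation helpers for write/commit RPC data. nfs_commit_alloc() and
 * nfs_writedata_alloc() below draw from the commit and write mempools
 * respectively; requests covering fewer than NFS_PAGEVEC_SIZE pages use
 * the embedded page_array, while larger requests get a separately
 * allocated page vector (released again by the matching *_free() helpers).
 */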
 94struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
95{
96 struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
 97
98 if (p) {
99 memset(p, 0, sizeof(*p));
100 INIT_LIST_HEAD(&p->pages);
101 if (pagecount < NFS_PAGEVEC_SIZE)
102 p->pagevec = &p->page_array[0];
103 else {
104 size_t size = ++pagecount * sizeof(struct page *);
105 p->pagevec = kzalloc(size, GFP_NOFS);
106 if (!p->pagevec) {
107 mempool_free(p, nfs_commit_mempool);
108 p = NULL;
109 }
110 }
111 }
112 return p;
113}
114
115void nfs_commit_free(struct nfs_write_data *p)
116{
117 if (p && (p->pagevec != &p->page_array[0]))
118 kfree(p->pagevec);
119 mempool_free(p, nfs_commit_mempool);
120}
121
122struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
123{
124 struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
125
126 if (p) {
127 memset(p, 0, sizeof(*p));
128 INIT_LIST_HEAD(&p->pages);
129 if (pagecount < NFS_PAGEVEC_SIZE)
130 p->pagevec = &p->page_array[0];
131 else {
132 size_t size = ++pagecount * sizeof(struct page *);
133 p->pagevec = kmalloc(size, GFP_NOFS);
134 if (p->pagevec) {
135 memset(p->pagevec, 0, size);
136 } else {
137 mempool_free(p, nfs_wdata_mempool);
138 p = NULL;
139 }
140 }
141 }
142 return p;
143}
144
145void nfs_writedata_free(struct nfs_write_data *p)
146{
147 if (p && (p->pagevec != &p->page_array[0]))
148 kfree(p->pagevec);
149 mempool_free(p, nfs_wdata_mempool);
150}
151
152void nfs_writedata_release(void *wdata)
153{
154 nfs_writedata_free(wdata);
155}
156
157/* Adjust the file length if we're writing beyond the end */
158static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
159{
160 struct inode *inode = page->mapping->host;
161 loff_t end, i_size = i_size_read(inode);
162 unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
163
164 if (i_size > 0 && page->index < end_index)
165 return;
166 end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
167 if (i_size >= end)
168 return;
169	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
170 i_size_write(inode, end);
171}
172
173/* We can set the PG_uptodate flag if we see that a write request
174 * covers the full page.
175 */
176static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
177{
178 loff_t end_offs;
179
180 if (PageUptodate(page))
181 return;
182 if (base != 0)
183 return;
184 if (count == PAGE_CACHE_SIZE) {
185 SetPageUptodate(page);
186 return;
187 }
188
189 end_offs = i_size_read(page->mapping->host) - 1;
190 if (end_offs < 0)
191 return;
192 /* Is this the last page? */
193 if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
194 return;
195 /* This is the last page: set PG_uptodate if we cover the entire
196 * extent of the data, then zero the rest of the page.
197 */
198 if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
199 memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
200 SetPageUptodate(page);
201 }
202}
203
204/*
205 * Write a page synchronously.
206 * Offset is the data offset within the page.
207 */
208static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
209 struct page *page, unsigned int offset, unsigned int count,
210 int how)
211{
212 unsigned int wsize = NFS_SERVER(inode)->wsize;
213 int result, written = 0;
214 struct nfs_write_data *wdata;
215
216	wdata = nfs_writedata_alloc(1);
217 if (!wdata)
218 return -ENOMEM;
219
220 wdata->flags = how;
221 wdata->cred = ctx->cred;
222 wdata->inode = inode;
223 wdata->args.fh = NFS_FH(inode);
224 wdata->args.context = ctx;
225 wdata->args.pages = &page;
226 wdata->args.stable = NFS_FILE_SYNC;
227 wdata->args.pgbase = offset;
228 wdata->args.count = wsize;
229 wdata->res.fattr = &wdata->fattr;
230 wdata->res.verf = &wdata->verf;
231
232 dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
233 inode->i_sb->s_id,
234 (long long)NFS_FILEID(inode),
235 count, (long long)(page_offset(page) + offset));
236
237	set_page_writeback(page);
238 nfs_begin_data_update(inode);
239 do {
240 if (count < wsize)
241 wdata->args.count = count;
242 wdata->args.offset = page_offset(page) + wdata->args.pgbase;
243
244 result = NFS_PROTO(inode)->write(wdata);
245
246 if (result < 0) {
247 /* Must mark the page invalid after I/O error */
248 ClearPageUptodate(page);
249 goto io_error;
250 }
251 if (result < wdata->args.count)
252 printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
253 wdata->args.count, result);
254
255 wdata->args.offset += result;
256 wdata->args.pgbase += result;
257 written += result;
258 count -= result;
259		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
260 } while (count);
261 /* Update file length */
262 nfs_grow_file(page, offset, written);
263 /* Set the PG_uptodate flag? */
264 nfs_mark_uptodate(page, offset, written);
265
266 if (PageError(page))
267 ClearPageError(page);
268
269io_error:
270	nfs_end_data_update(inode);
271	end_page_writeback(page);
272 nfs_writedata_free(wdata);
273 return written ? written : result;
274}
275
276static int nfs_writepage_async(struct nfs_open_context *ctx,
277 struct inode *inode, struct page *page,
278 unsigned int offset, unsigned int count)
279{
280 struct nfs_page *req;
281
282 req = nfs_update_request(ctx, inode, page, offset, count);
283 if (IS_ERR(req))
284 return PTR_ERR(req);
285 /* Update file length */
286 nfs_grow_file(page, offset, count);
287 /* Set the PG_uptodate flag? */
288 nfs_mark_uptodate(page, offset, count);
289 nfs_unlock_request(req);
290	return 0;
291}
292
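/*
 * Map the writeback_control flags onto an RPC flush priority: direct
 * reclaim gets FLUSH_HIGHPRI, periodic kupdate writeback gets
 * FLUSH_LOWPRI, and everything else runs at the default priority.
 */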
293static int wb_priority(struct writeback_control *wbc)
294{
295 if (wbc->for_reclaim)
296 return FLUSH_HIGHPRI;
297 if (wbc->for_kupdate)
298 return FLUSH_LOWPRI;
299 return 0;
300}
301
302/*
303 * Write an mmapped page to the server.
304 */
305int nfs_writepage(struct page *page, struct writeback_control *wbc)
306{
307 struct nfs_open_context *ctx;
308 struct inode *inode = page->mapping->host;
309 unsigned long end_index;
310 unsigned offset = PAGE_CACHE_SIZE;
311 loff_t i_size = i_size_read(inode);
312 int inode_referenced = 0;
313 int priority = wb_priority(wbc);
314 int err;
315
316 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
317 nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
318
319 /*
320 * Note: We need to ensure that we have a reference to the inode
321 * if we are to do asynchronous writes. If not, waiting
322 * in nfs_wait_on_request() may deadlock with clear_inode().
323 *
324 * If igrab() fails here, then it is in any case safe to
325 * call nfs_wb_page(), since there will be no pending writes.
326 */
327 if (igrab(inode) != 0)
328 inode_referenced = 1;
329 end_index = i_size >> PAGE_CACHE_SHIFT;
330
331 /* Ensure we've flushed out any previous writes */
332 nfs_wb_page_priority(inode, page, priority);
333
334 /* easy case */
335 if (page->index < end_index)
336 goto do_it;
337 /* things got complicated... */
338 offset = i_size & (PAGE_CACHE_SIZE-1);
339
340 /* OK, are we completely out? */
341 err = 0; /* potential race with truncate - ignore */
342 if (page->index >= end_index+1 || !offset)
343 goto out;
344do_it:
345	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
346 if (ctx == NULL) {
347 err = -EBADF;
348 goto out;
349 }
350 lock_kernel();
351 if (!IS_SYNC(inode) && inode_referenced) {
352 err = nfs_writepage_async(ctx, inode, page, 0, offset);
353 if (!wbc->for_writepages)
354 nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
355 } else {
356 err = nfs_writepage_sync(ctx, inode, page, 0,
357 offset, priority);
358 if (err >= 0) {
359 if (err != offset)
360 redirty_page_for_writepage(wbc, page);
361 err = 0;
362 }
363 }
364 unlock_kernel();
365 put_nfs_open_context(ctx);
366out:
367 unlock_page(page);
368 if (inode_referenced)
369 iput(inode);
370 return err;
371}
372
373/*
374 * Note: causes nfs_update_request() to block on the assumption
375 * that the writeback is generated due to memory pressure.
376 */
377int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
378{
379 struct backing_dev_info *bdi = mapping->backing_dev_info;
380 struct inode *inode = mapping->host;
381 int err;
382
383 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
384
385 err = generic_writepages(mapping, wbc);
386 if (err)
387 return err;
388 while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
389 if (wbc->nonblocking)
390 return 0;
391 nfs_wait_on_write_congestion(mapping, 0);
392 }
393 err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
394 if (err < 0)
395 goto out;
396	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
397 wbc->nr_to_write -= err;
398 if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
399 err = nfs_wait_on_requests(inode, 0, 0);
400 if (err < 0)
401 goto out;
402 }
403	err = nfs_commit_inode(inode, wb_priority(wbc));
404 if (err > 0) {
405 wbc->nr_to_write -= err;
406 err = 0;
407 }
408out:
409 clear_bit(BDI_write_congested, &bdi->state);
410 wake_up_all(&nfs_write_congestion);
411 return err;
412}
413
414/*
415 * Insert a write request into an inode
416 */
417static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
418{
419 struct nfs_inode *nfsi = NFS_I(inode);
420 int error;
421
422 error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
423 BUG_ON(error == -EEXIST);
424 if (error)
425 return error;
426 if (!nfsi->npages) {
427 igrab(inode);
428 nfs_begin_data_update(inode);
429 if (nfs_have_delegation(inode, FMODE_WRITE))
430 nfsi->change_attr++;
431 }
432 nfsi->npages++;
433 atomic_inc(&req->wb_count);
434 return 0;
435}
436
437/*
438 * Remove a write request from an inode
439 */
440static void nfs_inode_remove_request(struct nfs_page *req)
441{
442 struct inode *inode = req->wb_context->dentry->d_inode;
443 struct nfs_inode *nfsi = NFS_I(inode);
444
445 BUG_ON (!NFS_WBACK_BUSY(req));
446
447 spin_lock(&nfsi->req_lock);
448 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
449 nfsi->npages--;
450 if (!nfsi->npages) {
451 spin_unlock(&nfsi->req_lock);
452		nfs_end_data_update(inode);
453 iput(inode);
454 } else
455 spin_unlock(&nfsi->req_lock);
456 nfs_clear_request(req);
457 nfs_release_request(req);
458}
459
460/*
461 * Find a request
462 */
463static inline struct nfs_page *
464_nfs_find_request(struct inode *inode, unsigned long index)
465{
466 struct nfs_inode *nfsi = NFS_I(inode);
467 struct nfs_page *req;
468
469 req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
470 if (req)
471 atomic_inc(&req->wb_count);
472 return req;
473}
474
475static struct nfs_page *
476nfs_find_request(struct inode *inode, unsigned long index)
477{
478 struct nfs_page *req;
479 struct nfs_inode *nfsi = NFS_I(inode);
480
481 spin_lock(&nfsi->req_lock);
482 req = _nfs_find_request(inode, index);
483 spin_unlock(&nfsi->req_lock);
484 return req;
485}
486
487/*
488 * Add a request to the inode's dirty list.
489 */
490static void
491nfs_mark_request_dirty(struct nfs_page *req)
492{
493 struct inode *inode = req->wb_context->dentry->d_inode;
494 struct nfs_inode *nfsi = NFS_I(inode);
495
496 spin_lock(&nfsi->req_lock);
497 radix_tree_tag_set(&nfsi->nfs_page_tree,
498 req->wb_index, NFS_PAGE_TAG_DIRTY);
499 nfs_list_add_request(req, &nfsi->dirty);
500 nfsi->ndirty++;
501 spin_unlock(&nfsi->req_lock);
502 inc_page_state(nr_dirty);
503 mark_inode_dirty(inode);
504}
505
506/*
507 * Check if a request is dirty
508 */
509static inline int
510nfs_dirty_request(struct nfs_page *req)
511{
512 struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
513 return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
514}
515
516#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
517/*
518 * Add a request to the inode's commit list.
519 */
520static void
521nfs_mark_request_commit(struct nfs_page *req)
522{
523 struct inode *inode = req->wb_context->dentry->d_inode;
524 struct nfs_inode *nfsi = NFS_I(inode);
525
526 spin_lock(&nfsi->req_lock);
527 nfs_list_add_request(req, &nfsi->commit);
528 nfsi->ncommit++;
529 spin_unlock(&nfsi->req_lock);
530 inc_page_state(nr_unstable);
531 mark_inode_dirty(inode);
532}
533#endif
534
535/*
536 * Wait for a request to complete.
537 *
538 * Interruptible by signals only if mounted with intr flag.
539 */
540static int
541nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
542{
543 struct nfs_inode *nfsi = NFS_I(inode);
544 struct nfs_page *req;
545 unsigned long idx_end, next;
546 unsigned int res = 0;
547 int error;
548
549 if (npages == 0)
550 idx_end = ~0;
551 else
552 idx_end = idx_start + npages - 1;
553
554 spin_lock(&nfsi->req_lock);
555 next = idx_start;
556	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
557 if (req->wb_index > idx_end)
558 break;
559
560 next = req->wb_index + 1;
561		BUG_ON(!NFS_WBACK_BUSY(req));
562
563 atomic_inc(&req->wb_count);
564 spin_unlock(&nfsi->req_lock);
565 error = nfs_wait_on_request(req);
566 nfs_release_request(req);
567 if (error < 0)
568 return error;
569 spin_lock(&nfsi->req_lock);
570 res++;
571 }
572 spin_unlock(&nfsi->req_lock);
573 return res;
574}
575
576/*
577 * nfs_scan_dirty - Scan an inode for dirty requests
578 * @inode: NFS inode to scan
579 * @dst: destination list
580 * @idx_start: lower bound of page->index to scan.
581 * @npages: idx_start + npages sets the upper bound to scan.
582 *
583 * Moves requests from the inode's dirty page list.
584 * The requests are *not* checked to ensure that they form a contiguous set.
585 */
586static int
587nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
588{
589 struct nfs_inode *nfsi = NFS_I(inode);
590 int res = 0;
591
592 if (nfsi->ndirty != 0) {
593 res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
594 nfsi->ndirty -= res;
595 sub_page_state(nr_dirty,res);
596 if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
597 printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
598 }
599 return res;
600}
601
602#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
603/*
604 * nfs_scan_commit - Scan an inode for commit requests
605 * @inode: NFS inode to scan
606 * @dst: destination list
607 * @idx_start: lower bound of page->index to scan.
608 * @npages: idx_start + npages sets the upper bound to scan.
609 *
610 * Moves requests from the inode's 'commit' request list.
611 * The requests are *not* checked to ensure that they form a contiguous set.
612 */
613static int
614nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
615{
616 struct nfs_inode *nfsi = NFS_I(inode);
617 int res = 0;
618
619 if (nfsi->ncommit != 0) {
620 res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
621 nfsi->ncommit -= res;
622 if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
623 printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
624 }
625 return res;
626}
627#endif
628
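/*
 * Throttle writers while the backing device is congested. The caller
 * sleeps on nfs_write_congestion (woken from nfs_writepages once
 * BDI_write_congested is cleared); if @intr is set the sleep is
 * interruptible, with the RPC client's sigmask applied so that only
 * the signals the mount allows can break the wait.
 */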
629static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
630{
631 struct backing_dev_info *bdi = mapping->backing_dev_info;
632 DEFINE_WAIT(wait);
633 int ret = 0;
634
635 might_sleep();
636
637 if (!bdi_write_congested(bdi))
638 return 0;
639
640 nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);
641
642 if (intr) {
643 struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
644 sigset_t oldset;
645
646 rpc_clnt_sigmask(clnt, &oldset);
647 prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
648 if (bdi_write_congested(bdi)) {
649 if (signalled())
650 ret = -ERESTARTSYS;
651 else
652 schedule();
653 }
654 rpc_clnt_sigunmask(clnt, &oldset);
655 } else {
656 prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
657 if (bdi_write_congested(bdi))
658 schedule();
659 }
660 finish_wait(&nfs_write_congestion, &wait);
661 return ret;
662}
663
664
665/*
666 * Try to update any existing write request, or create one if there is none.
667 * In order to match, the request's credentials must match those of
668 * the calling process.
669 *
670 * Note: Should always be called with the Page Lock held!
671 */
672static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
673 struct inode *inode, struct page *page,
674 unsigned int offset, unsigned int bytes)
675{
676 struct nfs_server *server = NFS_SERVER(inode);
677 struct nfs_inode *nfsi = NFS_I(inode);
678 struct nfs_page *req, *new = NULL;
679 unsigned long rqend, end;
680
681 end = offset + bytes;
682
683 if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
684 return ERR_PTR(-ERESTARTSYS);
685 for (;;) {
686 /* Loop over all inode entries and see if we find
687		 * a request for the page we wish to update
688 */
689 spin_lock(&nfsi->req_lock);
690 req = _nfs_find_request(inode, page->index);
691 if (req) {
692 if (!nfs_lock_request_dontget(req)) {
693 int error;
694 spin_unlock(&nfsi->req_lock);
695 error = nfs_wait_on_request(req);
696 nfs_release_request(req);
697 if (error < 0) {
698 if (new)
699 nfs_release_request(new);
700					return ERR_PTR(error);
701				}
702 continue;
703 }
704 spin_unlock(&nfsi->req_lock);
705 if (new)
706 nfs_release_request(new);
707 break;
708 }
709
710 if (new) {
711 int error;
712 nfs_lock_request_dontget(new);
713 error = nfs_inode_add_request(inode, new);
714 if (error) {
715 spin_unlock(&nfsi->req_lock);
716 nfs_unlock_request(new);
717 return ERR_PTR(error);
718 }
719 spin_unlock(&nfsi->req_lock);
720 nfs_mark_request_dirty(new);
721 return new;
722 }
723 spin_unlock(&nfsi->req_lock);
724
725 new = nfs_create_request(ctx, inode, page, offset, bytes);
726 if (IS_ERR(new))
727 return new;
728 }
729
730 /* We have a request for our page.
731 * If the creds don't match, or the
732 * page addresses don't match,
733 * tell the caller to wait on the conflicting
734 * request.
735 */
736 rqend = req->wb_offset + req->wb_bytes;
737 if (req->wb_context != ctx
738 || req->wb_page != page
739 || !nfs_dirty_request(req)
740 || offset > rqend || end < req->wb_offset) {
741 nfs_unlock_request(req);
742 return ERR_PTR(-EBUSY);
743 }
744
745 /* Okay, the request matches. Update the region */
746 if (offset < req->wb_offset) {
747 req->wb_offset = offset;
748 req->wb_pgbase = offset;
749 req->wb_bytes = rqend - req->wb_offset;
750 }
751
752 if (end > rqend)
753 req->wb_bytes = end - req->wb_offset;
754
755 return req;
756}
757
758int nfs_flush_incompatible(struct file *file, struct page *page)
759{
760 struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
761 struct inode *inode = page->mapping->host;
762 struct nfs_page *req;
763 int status = 0;
764 /*
765 * Look for a request corresponding to this page. If there
766 * is one, and it belongs to another file, we flush it out
767 * before we try to copy anything into the page. Do this
768 * due to the lack of an ACCESS-type call in NFSv2.
769 * Also do the same if we find a request from an existing
770 * dropped page.
771 */
772 req = nfs_find_request(inode, page->index);
773 if (req) {
774 if (req->wb_page != page || ctx != req->wb_context)
775 status = nfs_wb_page(inode, page);
776 nfs_release_request(req);
777 }
778 return (status < 0) ? status : 0;
779}
780
781/*
782 * Update and possibly write a cached page of an NFS file.
783 *
784 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
785 * things with a page scheduled for an RPC call (e.g. invalidate it).
786 */
787int nfs_updatepage(struct file *file, struct page *page,
788 unsigned int offset, unsigned int count)
789{
790 struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
791 struct inode *inode = page->mapping->host;
792 struct nfs_page *req;
793 int status = 0;
794
795 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
796
797	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
798 file->f_dentry->d_parent->d_name.name,
799 file->f_dentry->d_name.name, count,
800 (long long)(page_offset(page) +offset));
801
802 if (IS_SYNC(inode)) {
803 status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
804 if (status > 0) {
805 if (offset == 0 && status == PAGE_CACHE_SIZE)
806 SetPageUptodate(page);
807 return 0;
808 }
809 return status;
810 }
811
812 /* If we're not using byte range locks, and we know the page
813 * is entirely in cache, it may be more efficient to avoid
814 * fragmenting write requests.
815 */
816	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
817 loff_t end_offs = i_size_read(inode) - 1;
818 unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
819
820 count += offset;
821 offset = 0;
822 if (unlikely(end_offs < 0)) {
823 /* Do nothing */
824 } else if (page->index == end_index) {
825 unsigned int pglen;
826 pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
827 if (count < pglen)
828 count = pglen;
829 } else if (page->index < end_index)
830 count = PAGE_CACHE_SIZE;
831 }
832
833 /*
834 * Try to find an NFS request corresponding to this page
835 * and update it.
836 * If the existing request cannot be updated, we must flush
837 * it out now.
838 */
839 do {
840 req = nfs_update_request(ctx, inode, page, offset, count);
841 status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
842 if (status != -EBUSY)
843 break;
844 /* Request could not be updated. Flush it out and try again */
845 status = nfs_wb_page(inode, page);
846 } while (status >= 0);
847 if (status < 0)
848 goto done;
849
850 status = 0;
851
852 /* Update file length */
853 nfs_grow_file(page, offset, count);
854 /* Set the PG_uptodate flag? */
855 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
856 nfs_unlock_request(req);
857done:
858 dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
859 status, (long long)i_size_read(inode));
860 if (status < 0)
861 ClearPageUptodate(page);
862 return status;
863}
864
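/*
 * Called once every sub-request of a page has completed: end writeback
 * on the page, then either redirty the request (a reschedule was
 * flagged), move it to the commit list (an unstable reply still needs a
 * COMMIT), or remove it from the inode altogether.
 */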
865static void nfs_writepage_release(struct nfs_page *req)
866{
867 end_page_writeback(req->wb_page);
868
869#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
870 if (!PageError(req->wb_page)) {
871 if (NFS_NEED_RESCHED(req)) {
872 nfs_mark_request_dirty(req);
873 goto out;
874 } else if (NFS_NEED_COMMIT(req)) {
875 nfs_mark_request_commit(req);
876 goto out;
877 }
878 }
879 nfs_inode_remove_request(req);
880
881out:
882 nfs_clear_commit(req);
883 nfs_clear_reschedule(req);
884#else
885 nfs_inode_remove_request(req);
886#endif
887	nfs_clear_page_writeback(req);
888}
889
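/* Translate the FLUSH_HIGHPRI/FLUSH_LOWPRI flags into the corresponding
 * RPC task priority; plain flushes run at RPC_PRIORITY_NORMAL.
 */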
890static inline int flush_task_priority(int how)
891{
892 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
893 case FLUSH_HIGHPRI:
894 return RPC_PRIORITY_HIGH;
895 case FLUSH_LOWPRI:
896 return RPC_PRIORITY_LOW;
897 }
898 return RPC_PRIORITY_NORMAL;
899}
900
901/*
902 * Set up the argument/result storage required for the RPC call.
903 */
904static void nfs_write_rpcsetup(struct nfs_page *req,
905 struct nfs_write_data *data,
906				const struct rpc_call_ops *call_ops,
907 unsigned int count, unsigned int offset,
908 int how)
909{
910	struct inode *inode;
911	int flags;
912
913 /* Set up the RPC argument and reply structs
914 * NB: take care not to mess about with data->commit et al. */
915
916 data->req = req;
917 data->inode = inode = req->wb_context->dentry->d_inode;
918 data->cred = req->wb_context->cred;
919
920 data->args.fh = NFS_FH(inode);
921 data->args.offset = req_offset(req) + offset;
922 data->args.pgbase = req->wb_pgbase + offset;
923 data->args.pages = data->pagevec;
924 data->args.count = count;
925 data->args.context = req->wb_context;
926
927 data->res.fattr = &data->fattr;
928 data->res.count = count;
929 data->res.verf = &data->verf;
930	nfs_fattr_init(&data->fattr);
931
932 /* Set up the initial task struct. */
933 flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
934 rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
935 NFS_PROTO(inode)->write_setup(data, how);
936
937 data->task.tk_priority = flush_task_priority(how);
938 data->task.tk_cookie = (unsigned long)inode;
939
940 dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
941		data->task.tk_pid,
942 inode->i_sb->s_id,
943 (long long)NFS_FILEID(inode),
944 count,
945 (unsigned long long)data->args.offset);
946}
947
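/*
 * Kick off a prepared write/commit RPC task. The task is run under the
 * big kernel lock with the RPC client's signal mask applied
 * (rpc_clnt_sigmask/rpc_clnt_sigunmask) for the duration of rpc_execute().
 */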
948static void nfs_execute_write(struct nfs_write_data *data)
949{
950 struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
951 sigset_t oldset;
952
953 rpc_clnt_sigmask(clnt, &oldset);
954 lock_kernel();
955 rpc_execute(&data->task);
956 unlock_kernel();
957 rpc_clnt_sigunmask(clnt, &oldset);
958}
959
960/*
961 * Generate multiple small requests to write out a single
962 * contiguous dirty area on one page.
963 */
964static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
965{
966 struct nfs_page *req = nfs_list_entry(head->next);
967 struct page *page = req->wb_page;
968 struct nfs_write_data *data;
969 unsigned int wsize = NFS_SERVER(inode)->wsize;
970 unsigned int nbytes, offset;
971 int requests = 0;
972 LIST_HEAD(list);
973
974 nfs_list_remove_request(req);
975
976 nbytes = req->wb_bytes;
977 for (;;) {
978		data = nfs_writedata_alloc(1);
979 if (!data)
980 goto out_bad;
981 list_add(&data->pages, &list);
982 requests++;
983 if (nbytes <= wsize)
984 break;
985 nbytes -= wsize;
986 }
987 atomic_set(&req->wb_complete, requests);
988
989 ClearPageError(page);
990	set_page_writeback(page);
991 offset = 0;
992 nbytes = req->wb_bytes;
993 do {
994 data = list_entry(list.next, struct nfs_write_data, pages);
995 list_del_init(&data->pages);
996
997 data->pagevec[0] = page;
998
999 if (nbytes > wsize) {
1000 nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
1001 wsize, offset, how);
1002 offset += wsize;
1003 nbytes -= wsize;
1004 } else {
1005 nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
1006 nbytes, offset, how);
1007 nbytes = 0;
1008 }
1009 nfs_execute_write(data);
1010 } while (nbytes != 0);
1011
1012 return 0;
1013
1014out_bad:
1015 while (!list_empty(&list)) {
1016 data = list_entry(list.next, struct nfs_write_data, pages);
1017 list_del(&data->pages);
1018 nfs_writedata_free(data);
1019 }
1020 nfs_mark_request_dirty(req);
1021	nfs_clear_page_writeback(req);
1022 return -ENOMEM;
1023}
1024
1025/*
1026 * Create an RPC task for the given write request and kick it.
1027 * The page must have been locked by the caller.
1028 *
1029 * It may happen that the page we're passed is not marked dirty.
1030 * This is the case if nfs_updatepage detects a conflicting request
1031 * that has been written but not committed.
1032 */
1033static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
1034{
1035 struct nfs_page *req;
1036 struct page **pages;
1037 struct nfs_write_data *data;
1038 unsigned int count;
1039
1040 if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
1041 return nfs_flush_multi(head, inode, how);
1042
1043	data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
1044 if (!data)
1045 goto out_bad;
1046
1047 pages = data->pagevec;
1048 count = 0;
1049 while (!list_empty(head)) {
1050 req = nfs_list_entry(head->next);
1051 nfs_list_remove_request(req);
1052 nfs_list_add_request(req, &data->pages);
1053 ClearPageError(req->wb_page);
1054		set_page_writeback(req->wb_page);
1055 *pages++ = req->wb_page;
1056 count += req->wb_bytes;
1057 }
1058 req = nfs_list_entry(data->pages.next);
1059
1060	/* Set up the argument struct */
1061	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
1062
1063 nfs_execute_write(data);
1064 return 0;
1065 out_bad:
1066 while (!list_empty(head)) {
1067 struct nfs_page *req = nfs_list_entry(head->next);
1068 nfs_list_remove_request(req);
1069 nfs_mark_request_dirty(req);
1070		nfs_clear_page_writeback(req);
1071 }
1072 return -ENOMEM;
1073}
1074
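/*
 * Flush a list of dirty requests: coalesce contiguous requests into
 * batches of at most @wpages pages and send each batch with
 * nfs_flush_one(). On failure, any requests still on the list are
 * redirtied so that they will be retried later.
 */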
1075static int
1076nfs_flush_list(struct list_head *head, int wpages, int how)
1077{
1078 LIST_HEAD(one_request);
1079 struct nfs_page *req;
1080 int error = 0;
1081 unsigned int pages = 0;
1082
1083 while (!list_empty(head)) {
1084 pages += nfs_coalesce_requests(head, &one_request, wpages);
1085 req = nfs_list_entry(one_request.next);
1086 error = nfs_flush_one(&one_request, req->wb_context->dentry->d_inode, how);
1087 if (error < 0)
1088 break;
1089 }
1090 if (error >= 0)
1091 return pages;
1092
1093 while (!list_empty(head)) {
1094 req = nfs_list_entry(head->next);
1095 nfs_list_remove_request(req);
1096 nfs_mark_request_dirty(req);
1097		nfs_clear_page_writeback(req);
1098 }
1099 return error;
1100}
1101
1102/*
1103 * Handle a write reply that flushed part of a page.
1104 */
1105static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
1106{
1107	struct nfs_write_data *data = calldata;
1108 struct nfs_page *req = data->req;
1109 struct page *page = req->wb_page;
1110
1111 dprintk("NFS: write (%s/%Ld %d@%Ld)",
1112 req->wb_context->dentry->d_inode->i_sb->s_id,
1113 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1114 req->wb_bytes,
1115 (long long)req_offset(req));
1116
1117 if (nfs_writeback_done(task, data) != 0)
1118 return;
1119
1120 if (task->tk_status < 0) {
1121 ClearPageUptodate(page);
1122 SetPageError(page);
1123 req->wb_context->error = task->tk_status;
1124 dprintk(", error = %d\n", task->tk_status);
1125 } else {
1126#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1127 if (data->verf.committed < NFS_FILE_SYNC) {
1128 if (!NFS_NEED_COMMIT(req)) {
1129 nfs_defer_commit(req);
1130 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1131 dprintk(" defer commit\n");
1132 } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
1133 nfs_defer_reschedule(req);
1134 dprintk(" server reboot detected\n");
1135 }
1136 } else
1137#endif
1138 dprintk(" OK\n");
1139 }
1140
1141 if (atomic_dec_and_test(&req->wb_complete))
1142 nfs_writepage_release(req);
1143}
1144
1145static const struct rpc_call_ops nfs_write_partial_ops = {
1146 .rpc_call_done = nfs_writeback_done_partial,
1147 .rpc_release = nfs_writedata_release,
1148};
1149
1150/*
1151 * Handle a write reply that flushes a whole page.
1152 *
1153 * FIXME: There is an inherent race with invalidate_inode_pages and
1154 * writebacks since the page->count is kept > 1 for as long
1155 * as the page has a write request pending.
1156 */
1157static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1158{
1159	struct nfs_write_data *data = calldata;
1160 struct nfs_page *req;
1161 struct page *page;
1162
1163 if (nfs_writeback_done(task, data) != 0)
1164 return;
1165
1166 /* Update attributes as result of writeback. */
1167 while (!list_empty(&data->pages)) {
1168 req = nfs_list_entry(data->pages.next);
1169 nfs_list_remove_request(req);
1170 page = req->wb_page;
1171
1172 dprintk("NFS: write (%s/%Ld %d@%Ld)",
1173 req->wb_context->dentry->d_inode->i_sb->s_id,
1174 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1175 req->wb_bytes,
1176 (long long)req_offset(req));
1177
1178		if (task->tk_status < 0) {
1179 ClearPageUptodate(page);
1180 SetPageError(page);
1181			req->wb_context->error = task->tk_status;
1182 end_page_writeback(page);
1183 nfs_inode_remove_request(req);
1184			dprintk(", error = %d\n", task->tk_status);
1185 goto next;
1186 }
1187 end_page_writeback(page);
1188
1189#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1190 if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
1191 nfs_inode_remove_request(req);
1192 dprintk(" OK\n");
1193 goto next;
1194 }
1195 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1196 nfs_mark_request_commit(req);
1197 dprintk(" marked for commit\n");
1198#else
1199 nfs_inode_remove_request(req);
1200#endif
1201 next:
1202		nfs_clear_page_writeback(req);
1203 }
1204}
1205
1206static const struct rpc_call_ops nfs_write_full_ops = {
1207 .rpc_call_done = nfs_writeback_done_full,
1208 .rpc_release = nfs_writedata_release,
1209};
1210
1211
1212/*
1213 * This function is called when the WRITE call is complete.
1214 */
1215int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1216{
1217 struct nfs_writeargs *argp = &data->args;
1218 struct nfs_writeres *resp = &data->res;
1219	int status;
1220
1221 dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
1222 task->tk_pid, task->tk_status);
1223
1224 /* Call the NFS version-specific code */
1225 status = NFS_PROTO(data->inode)->write_done(task, data);
1226 if (status != 0)
1227 return status;
1228 nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1229
1230#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1231 if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1232 /* We tried a write call, but the server did not
1233 * commit data to stable storage even though we
1234 * requested it.
1235 * Note: There is a known bug in Tru64 < 5.0 in which
1236 * the server reports NFS_DATA_SYNC, but performs
1237 * NFS_FILE_SYNC. We therefore implement this checking
1238 * as a dprintk() in order to avoid filling syslog.
1239 */
1240 static unsigned long complain;
1241
1242 if (time_before(complain, jiffies)) {
1243 dprintk("NFS: faulty NFS server %s:"
1244 " (committed = %d) != (stable = %d)\n",
1245 NFS_SERVER(data->inode)->hostname,
1246 resp->verf->committed, argp->stable);
1247 complain = jiffies + 300 * HZ;
1248 }
1249 }
1250#endif
1251 /* Is this a short write? */
1252 if (task->tk_status >= 0 && resp->count < argp->count) {
1253 static unsigned long complain;
1254
1255 nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1256
1257 /* Has the server at least made some progress? */
1258 if (resp->count != 0) {
1259 /* Was this an NFSv2 write or an NFSv3 stable write? */
1260 if (resp->verf->committed != NFS_UNSTABLE) {
1261 /* Resend from where the server left off */
1262 argp->offset += resp->count;
1263 argp->pgbase += resp->count;
1264 argp->count -= resp->count;
1265 } else {
1266 /* Resend as a stable write in order to avoid
1267 * headaches in the case of a server crash.
1268 */
1269 argp->stable = NFS_FILE_SYNC;
1270 }
1271 rpc_restart_call(task);
1272			return -EAGAIN;
1273 }
1274 if (time_before(complain, jiffies)) {
1275 printk(KERN_WARNING
1276 "NFS: Server wrote zero bytes, expected %u.\n",
1277 argp->count);
1278 complain = jiffies + 300 * HZ;
1279 }
1280 /* Can't do anything about it except throw an error. */
1281 task->tk_status = -EIO;
1282 }
1283	return 0;
1284}
1285
1286
1287#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1288void nfs_commit_release(void *wdata)
1289{
1290 nfs_commit_free(wdata);
1291}
1292
1293/*
1294 * Set up the argument/result storage required for the RPC call.
1295 */
1296static void nfs_commit_rpcsetup(struct list_head *head,
1297 struct nfs_write_data *data,
1298 int how)
1299{
1300	struct nfs_page *first;
1301	struct inode *inode;
1302	int flags;
1303
1304 /* Set up the RPC argument and reply structs
1305 * NB: take care not to mess about with data->commit et al. */
1306
1307 list_splice_init(head, &data->pages);
1308 first = nfs_list_entry(data->pages.next);
1309 inode = first->wb_context->dentry->d_inode;
1310
1311 data->inode = inode;
1312 data->cred = first->wb_context->cred;
1313
1314 data->args.fh = NFS_FH(data->inode);
1315 /* Note: we always request a commit of the entire inode */
1316 data->args.offset = 0;
1317 data->args.count = 0;
1318 data->res.count = 0;
1319 data->res.fattr = &data->fattr;
1320 data->res.verf = &data->verf;
1321	nfs_fattr_init(&data->fattr);
1322
1323 /* Set up the initial task struct. */
1324 flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1325 rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
1326 NFS_PROTO(inode)->commit_setup(data, how);
1327
1328 data->task.tk_priority = flush_task_priority(how);
1329 data->task.tk_cookie = (unsigned long)inode;
1330
1331	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
1332}
1333
1334/*
1335 * Commit dirty pages
1336 */
1337static int
1338nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1339{
1340 struct nfs_write_data *data;
1341 struct nfs_page *req;
1342
1343	data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);
1344
1345 if (!data)
1346 goto out_bad;
1347
1348 /* Set up the argument struct */
1349 nfs_commit_rpcsetup(head, data, how);
1350
1351 nfs_execute_write(data);
1352 return 0;
1353 out_bad:
1354 while (!list_empty(head)) {
1355 req = nfs_list_entry(head->next);
1356 nfs_list_remove_request(req);
1357 nfs_mark_request_commit(req);
1358		nfs_clear_page_writeback(req);
1359 }
1360 return -ENOMEM;
1361}
1362
1363/*
1364 * COMMIT call returned
1365 */
1366static void nfs_commit_done(struct rpc_task *task, void *calldata)
1367{
1368	struct nfs_write_data *data = calldata;
1369 struct nfs_page *req;
1370 int res = 0;
1371
1372 dprintk("NFS: %4d nfs_commit_done (status %d)\n",
1373 task->tk_pid, task->tk_status);
1374
1375 /* Call the NFS version-specific code */
1376 if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
1377 return;
1378
1379 while (!list_empty(&data->pages)) {
1380 req = nfs_list_entry(data->pages.next);
1381 nfs_list_remove_request(req);
1382
1383 dprintk("NFS: commit (%s/%Ld %d@%Ld)",
1384 req->wb_context->dentry->d_inode->i_sb->s_id,
1385 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1386 req->wb_bytes,
1387 (long long)req_offset(req));
1388 if (task->tk_status < 0) {
1389 req->wb_context->error = task->tk_status;
1390 nfs_inode_remove_request(req);
1391 dprintk(", error = %d\n", task->tk_status);
1392 goto next;
1393 }
1394
1395 /* Okay, COMMIT succeeded, apparently. Check the verifier
1396 * returned by the server against all stored verfs. */
1397 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1398 /* We have a match */
1399 nfs_inode_remove_request(req);
1400 dprintk(" OK\n");
1401 goto next;
1402 }
1403 /* We have a mismatch. Write the page again */
1404 dprintk(" mismatch\n");
1405 nfs_mark_request_dirty(req);
1406 next:
1407		nfs_clear_page_writeback(req);
1408 res++;
1409 }
1410 sub_page_state(nr_unstable,res);
1411}
1412
1413static const struct rpc_call_ops nfs_commit_ops = {
1414 .rpc_call_done = nfs_commit_done,
1415 .rpc_release = nfs_commit_release,
1416};
1417#endif
1418
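/*
 * Scan the inode for dirty requests and flush them to the server.
 * When the scan picks up every dirty page of the inode and the data can
 * go out in a single stable write (see the FLUSH_STABLE test below),
 * the write is sent stable so that no separate COMMIT is needed.
 */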
1419static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
1420 unsigned int npages, int how)
1421{
1422 struct nfs_inode *nfsi = NFS_I(inode);
1423 LIST_HEAD(head);
1424 int res,
1425 error = 0;
1426
1427 spin_lock(&nfsi->req_lock);
1428 res = nfs_scan_dirty(inode, &head, idx_start, npages);
1429 spin_unlock(&nfsi->req_lock);
1430 if (res) {
1431 struct nfs_server *server = NFS_SERVER(inode);
1432
1433 /* For single writes, FLUSH_STABLE is more efficient */
1434 if (res == nfsi->npages && nfsi->npages <= server->wpages) {
1435 if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
1436 how |= FLUSH_STABLE;
1437 }
1438 error = nfs_flush_list(&head, server->wpages, how);
1439 }
1440 if (error < 0)
1441 return error;
1442 return res;
1443}
1444
1445#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1446int nfs_commit_inode(struct inode *inode, int how)
1447{
1448 struct nfs_inode *nfsi = NFS_I(inode);
1449 LIST_HEAD(head);
1450 int res,
1451 error = 0;
1452
1453 spin_lock(&nfsi->req_lock);
1454 res = nfs_scan_commit(inode, &head, 0, 0);
1455 spin_unlock(&nfsi->req_lock);
1456	if (res) {
1457		error = nfs_commit_list(inode, &head, how);
1458 if (error < 0)
1459 return error;
1460 }
1461 return res;
1462}
1463#endif
1464
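/*
 * Write back (and optionally commit) a range of an inode. FLUSH_WAIT
 * makes the caller wait for outstanding requests between passes, and
 * FLUSH_NOCOMMIT skips the COMMIT step; the loop repeats until neither
 * the flush nor the commit finds further work to do.
 */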
1465int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
1466 unsigned int npages, int how)
1467{
1468 int nocommit = how & FLUSH_NOCOMMIT;
1469 int wait = how & FLUSH_WAIT;
1470 int error;
1471
1472	how &= ~(FLUSH_WAIT|FLUSH_NOCOMMIT);
1473
1474 do {
1475		if (wait) {
1476			error = nfs_wait_on_requests(inode, idx_start, npages);
1477 if (error != 0)
1478 continue;
1479 }
1480 error = nfs_flush_inode(inode, idx_start, npages, how);
1481 if (error != 0)
1482 continue;
1483 if (!nocommit)
3da28eb1 1484 error = nfs_commit_inode(inode, how);
1da177e4
LT
1485 } while (error > 0);
1486 return error;
1487}
1488
1489int nfs_init_writepagecache(void)
1490{
1491 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1492 sizeof(struct nfs_write_data),
1493 0, SLAB_HWCACHE_ALIGN,
1494 NULL, NULL);
1495 if (nfs_wdata_cachep == NULL)
1496 return -ENOMEM;
1497
1498 nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE,
1499 mempool_alloc_slab,
1500 mempool_free_slab,
1501 nfs_wdata_cachep);
1502 if (nfs_wdata_mempool == NULL)
1503 return -ENOMEM;
1504
1505 nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT,
1506 mempool_alloc_slab,
1507 mempool_free_slab,
1508 nfs_wdata_cachep);
1509 if (nfs_commit_mempool == NULL)
1510 return -ENOMEM;
1511
1512 return 0;
1513}
1514
1515void nfs_destroy_writepagecache(void)
1516{
1517 mempool_destroy(nfs_commit_mempool);
1518 mempool_destroy(nfs_wdata_mempool);
1519 if (kmem_cache_destroy(nfs_wdata_cachep))
1520 printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
1521}
1522