/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);

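/*
 * Allocate an nfs_write_data descriptor for a COMMIT call from the
 * dedicated commit mempool and initialise it.
 */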
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

void nfs_commit_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

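/*
 * Allocate an nfs_write_data descriptor big enough to describe a write of
 * 'len' bytes.  If the request covers more pages than fit in the inline
 * page_array, a separate page vector is allocated with kcalloc().
 */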
struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static void nfs_writedata_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	loff_t end_offs;

	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count == PAGE_CACHE_SIZE) {
		SetPageUptodate(page);
		return;
	}

	end_offs = i_size_read(page->mapping->host) - 1;
	if (end_offs < 0)
		return;
	/* Is this the last page? */
	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
		return;
	/* This is the last page: set PG_uptodate if we cover the entire
	 * extent of the data, then zero the rest of the page.
	 */
	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
		SetPageUptodate(page);
	}
}

/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page, unsigned int offset, unsigned int count,
		int how)
{
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	int result, written = 0;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(wsize);
	if (!wdata)
		return -ENOMEM;

	wdata->flags = how;
	wdata->cred = ctx->cred;
	wdata->inode = inode;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.pages = &page;
	wdata->args.stable = NFS_FILE_SYNC;
	wdata->args.pgbase = offset;
	wdata->args.count = wsize;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	set_page_writeback(page);
	nfs_begin_data_update(inode);
	do {
		if (count < wsize)
			wdata->args.count = count;
		wdata->args.offset = page_offset(page) + wdata->args.pgbase;

		result = NFS_PROTO(inode)->write(wdata);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result < wdata->args.count)
			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
					wdata->args.count, result);

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		written += result;
		count -= result;
		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, written);

	if (PageError(page))
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	end_page_writeback(page);
	nfs_writedata_free(wdata);
	return written ? written : result;
}

static int nfs_writepage_async(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_update_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}

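/*
 * Map the writeback_control flags onto an NFS flush priority: writeback
 * for memory reclaim is high priority, periodic kupdate writeback is low
 * priority, everything else is normal.
 */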
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = i_size & (PAGE_CACHE_SIZE-1);

	/* OK, are we completely out? */
	err = 0; /* potential race with truncate - ignore */
	if (page->index >= end_index+1 || !offset)
		goto out;
do_it:
	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	lock_kernel();
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(ctx, inode, page, 0, offset);
		if (!wbc->for_writepages)
			nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	} else {
		err = nfs_writepage_sync(ctx, inode, page, 0,
						offset, priority);
		if (err >= 0) {
			if (err != offset)
				redirty_page_for_writepage(wbc, page);
			err = 0;
		}
	}
	unlock_kernel();
	put_nfs_open_context(ctx);
out:
	unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}

/*
 * Note: causes nfs_update_request() to block on the assumption
 *	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0) {
		wbc->nr_to_write -= err;
		err = 0;
	}
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	writeback_congestion_end();
	return err;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;

	req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
	if (req)
		atomic_inc(&req->wb_count);
	return req;
}

static struct nfs_page *
nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_page *req;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	req = _nfs_find_request(inode, index);
	spin_unlock(&nfsi->req_lock);
	return req;
}

/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
	mark_inode_dirty(inode);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	mark_inode_dirty(inode);
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}

static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	spin_lock(&nfsi->req_lock);
	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return ret;
}

static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;
	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
}

/*
 * nfs_scan_dirty - Scan an inode for dirty requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's dirty page list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ndirty != 0) {
		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
		nfsi->ndirty -= res;
		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif

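/*
 * Wait for the backing device to come out of write congestion.  If @intr
 * is set (the "intr" mount option), the wait may be broken by a signal,
 * in which case -ERESTARTSYS is returned.
 */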
static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}


/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = _nfs_find_request(inode, page->index);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;
				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page->index);
	if (req) {
		if (req->wb_page != page || ctx != req->wb_context)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) +offset));

	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
			return 0;
		}
		return status;
	}

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		loff_t end_offs = i_size_read(inode) - 1;
		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

		count += offset;
		offset = 0;
		if (unlikely(end_offs < 0)) {
			/* Do nothing */
		} else if (page->index == end_index) {
			unsigned int pglen;
			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
			if (count < pglen)
				count = pglen;
		} else if (page->index < end_index)
			count = PAGE_CACHE_SIZE;
	}

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(ctx, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	status = 0;

	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_unlock_request(req);
done:
	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);

out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}

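/* Translate the FLUSH_* flags into an RPC task scheduling priority. */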
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

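/*
 * Hand the prepared write/commit RPC task to the RPC layer, blocking
 * signals with the client's sigmask around rpc_execute().
 */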
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	set_page_writeback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

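/*
 * Flush a list of dirty requests: coalesce them into groups of at most
 * wpages pages and send each group with nfs_flush_one(), or with
 * nfs_flush_multi() when the server's wsize is smaller than a page.
 */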
static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page *req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};


/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}


#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode = inode;
	data->cred = first->wb_context->cred;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif

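/*
 * Scan the inode for dirty requests in the given range and write them
 * out.  Returns the number of requests found, or a negative error.
 */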
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(inode, &head, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(inode, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
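/*
 * Send a COMMIT for every request currently on the inode's 'commit' list.
 * Returns the number of requests committed, or a negative error.
 */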
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif

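/*
 * Flush out and (unless FLUSH_NOCOMMIT is set) commit every outstanding
 * request in the given range, waiting until all of them have completed.
 * FLUSH_INVALIDATE cancels the requests instead of writing them back.
 */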
int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
		unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	int pages, ret;

	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE)
				nfs_cancel_dirty_list(&head);
			else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}

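/*
 * Create the slab cache and mempools used for write and commit
 * descriptors; called once when the NFS client module initialises.
 */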
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}