/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
static struct shrinker xfs_buf_shake = {
	.shrink = xfsbufd_wakeup,
	.seeks = DEFAULT_SEEKS,
};

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));
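
/*
 * Illustrative note (not part of the original source): these macros map
 * buffer flags onto allocation contexts.  Read-ahead is opportunistic, so
 * it fails fast rather than retrying; XBF_DONT_BLOCK avoids recursing
 * into filesystem reclaim; everything else may block normally:
 *
 *	xb_to_gfp(XBF_READ_AHEAD)	-> __GFP_NORETRY | __GFP_NOWARN
 *	xb_to_gfp(XBF_DONT_BLOCK)	-> GFP_NOFS | __GFP_NOWARN
 *	xb_to_gfp(0)			-> GFP_KERNEL | __GFP_NOWARN
 *	xb_to_km(XBF_DONT_BLOCK)	-> KM_NOFS, otherwise KM_SLEEP
 */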

/*
 * Page Region interfaces.
 *
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 *
 * Each such region is "bytes per page / bits per long" bytes long.
 *
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))

STATIC unsigned long
page_region_mask(
	size_t		offset,
	size_t		length)
{
	unsigned long	mask;
	int		first, final;

	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first + 1);
	mask >>= BITS_PER_LONG - (final + 1);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	return mask;
}
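
/*
 * Worked example (illustrative, not part of the original source): with 4k
 * pages and 64-bit longs, NBPPR = 4096/64 = 64 bytes per region and
 * PRSHIFT = 6.  A 512-byte block at byte offset 512 gives
 *
 *	first = BTOPR(512)        = (512 + 63) >> 6 = 8
 *	final = BTOPRT(512 + 511) = 1023 >> 6       = 15
 *
 * so the returned mask has bits 8..15 set, one bit per 64-byte region
 * covered by that block.
 */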

STATIC void
set_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);
}

STATIC int
test_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	unsigned long	mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);
}

/*
 * Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
	void		*vm_addr;
	struct a_list	*next;
} a_list_t;

static a_list_t		*as_free_head;
static int		as_list_len;
static DEFINE_SPINLOCK(as_lock);

/*
 * Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

#ifdef CONFIG_XEN
	/*
	 * Xen needs to be able to make sure it can get an exclusive
	 * RO mapping of pages it wants to turn into a pagetable.  If
	 * a newly allocated page is also still being vmap()ed by xfs,
	 * it will cause pagetable construction to fail.  This is a
	 * quick workaround to always eagerly unmap pages so that Xen
	 * is happy.
	 */
	vunmap(addr);
	return;
#endif

	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}

STATIC void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
		return;

	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}

/*
 * Internal xfs_buf_t object manipulation
 */

STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);

	trace_xfs_buf_init(bp, _RET_IP_);
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
		uint		i;

		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			if (bp->b_flags & _XBF_PAGE_CACHE)
				ASSERT(!PagePrivate(page));
			page_cache_release(page);
		}
	}
	_xfs_buf_free_pages(bp);
	xfs_buf_deallocate(bp);
}

/*
 * Finds all pages for the buffer in question and builds its page list.
 */
STATIC int
_xfs_buf_lookup_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	struct address_space	*mapping = bp->b_target->bt_mapping;
	size_t			blocksize = bp->b_target->bt_bsize;
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	pgoff_t			first;
	xfs_off_t		end;
	int			error;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;

	retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		ASSERT(!PagePrivate(page));
		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_flags |= _XBF_PAGE_LOCKED;
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

		bp->b_pages[i] = page;
		offset = 0;
	}

	if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		if (as_list_len > 64)
			purge_addresses();
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode.  The buffer is returned
 * locked.  If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	xfs_bufhash_t		*hash;
	xfs_buf_t		*bp, *n;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/*
			 * If we look at something, bring it to the
			 * front of the list for next time.
			 */
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
	} else {
		XFS_STATS_INC(xb_miss_locked);
	}

	spin_unlock(&hash->bh_lock);
	return new_bp;

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			 */
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	} else {
		/* trylock worked */
		XB_SET_OWNER(bp);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp, *new_bp;
	int			error = 0, i;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = _xfs_buf_lookup_pages(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
					__func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	status = xfs_buf_iorequest(bp);
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);
	return status;
}

xfs_buf_t *
xfs_buf_read(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get(target, ioff, isize, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
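
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * synchronous metadata read gets, checks and releases a buffer.  "blkno"
 * and "numblks" are hypothetical values in 512-byte basic blocks:
 *
 *	xfs_buf_t *bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks,
 *				     XBF_LOCK | XBF_MAPPED);
 *	if (bp) {
 *		if (!XFS_BUF_ISERROR(bp))
 *			memcpy(data, bp->b_addr, len);
 *		xfs_buf_relse(bp);	// unlock and drop the reference
 *	}
 */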

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	struct backing_dev_info *bdi;

	bdi = target->bt_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))
		return;

	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
	xfs_buf_read(target, ioff, isize, flags);
}

xfs_buf_t *
xfs_buf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if (!is_vmalloc_addr(addr)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_CACHE_ALIGN(len + offset);
	page_count = buflen >> PAGE_CACHE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_CACHE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;
	bp->b_flags &= ~_XBF_PAGE_LOCKED;

	return 0;
}
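
/*
 * Example (illustrative, not part of the original file): wrapping an
 * existing kernel allocation in a one-shot buffer.  The page math above
 * means a 3k region starting 512 bytes into a 4k page needs
 * PAGE_CACHE_ALIGN(3072 + 512) >> PAGE_CACHE_SHIFT = 1 page:
 *
 *	xfs_buf_t *bp = xfs_buf_get_empty(len, target);
 *	if (bp && !xfs_buf_associate_memory(bp, mem, len)) {
 *		// ... do I/O through bp, then release it:
 *		xfs_buf_free(bp);
 *	}
 */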

xfs_buf_t *
xfs_buf_get_noaddr(
	size_t			len,
	xfs_buftarg_t		*target)
{
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(GFP_KERNEL);
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		printk(KERN_WARNING "%s: failed to map pages\n",
				__func__);
		goto fail_free_mem;
	}

	xfs_buf_unlock(bp);

	trace_xfs_buf_get_noaddr(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_deallocate(bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Releases a hold on the specified buffer.  If the hold count is 1,
 * calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	xfs_bufhash_t		*hash = bp->b_hash;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (unlikely(!hash)) {
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);
			xfs_buf_free(bp);
		}
	}
}


/*
 * Mutual exclusion on buffers.  Locking model:
 *
 * Buffers associated with inodes for which buffer locking
 * is not enabled are not protected by semaphores, and are
 * assumed to be exclusively owned by the caller.  There is a
 * spinlock in the buffer, used by the caller when concurrent
 * access is possible.
 */

/*
 * Locks a buffer object, if it is not already locked.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);

	trace_xfs_buf_cond_lock(bp, _RET_IP_);
	return locked ? 0 : -EBUSY;
}

int
xfs_buf_lock_value(
	xfs_buf_t		*bp)
{
	return bp->b_sema.count;
}

/*
 * Locks a buffer object.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly.  We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	xfs_buf_t		*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}


/*
 * Pinning Buffer Storage in Memory
 * Ensure that no attempt to force a buffer to disk will succeed.
 */
void
xfs_buf_pin(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_pin(bp, _RET_IP_);
	atomic_inc(&bp->b_pin_count);
}

void
xfs_buf_unpin(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_unpin(bp, _RET_IP_);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
}

int
xfs_buf_ispin(
	xfs_buf_t		*bp)
{
	return atomic_read(&bp->b_pin_count);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
		schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	/*
	 * We can get an EOPNOTSUPP in response to ordered writes.  Here we
	 * clear the ordered flag and reissue them.  Because we can't tell the
	 * higher layers directly that they should not issue ordered I/O
	 * anymore, they need to check if the _XFS_BARRIER_FAILED flag was set
	 * during I/O completion.
	 */
	if ((bp->b_error == EOPNOTSUPP) &&
	    (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
		trace_xfs_buf_ordered_retry(bp, _RET_IP_);
		bp->b_flags &= ~XBF_ORDERED;
		bp->b_flags |= _XFS_BARRIER_FAILED;
		xfs_buf_iorequest(bp);
	} else if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

int
xfs_bwrite(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	int			iowait = (bp->b_flags & XBF_ASYNC) == 0;
	int			error = 0;

	bp->b_strat = xfs_bdstrat_cb;
	bp->b_mount = mp;
	bp->b_flags |= XBF_WRITE;
	if (!iowait)
		bp->b_flags |= _XBF_RUN_QUEUES;

	xfs_buf_delwri_dequeue(bp);
	xfs_buf_iostrategy(bp);

	if (iowait) {
		error = xfs_buf_iowait(bp);
		if (error)
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);
	}

	return error;
}

int
xfs_bawrite(
	void			*mp,
	struct xfs_buf		*bp)
{
	trace_xfs_buf_bawrite(bp, _RET_IP_);

	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	xfs_buf_delwri_dequeue(bp);

	bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);

	bp->b_mount = mp;
	bp->b_strat = xfs_bdstrat_cb;
	return xfs_bdstrat_cb(bp);
}

void
xfs_bdwrite(
	void			*mp,
	struct xfs_buf		*bp)
{
	trace_xfs_buf_bdwrite(bp, _RET_IP_);

	bp->b_strat = xfs_bdstrat_cb;
	bp->b_mount = mp;

	bp->b_flags &= ~XBF_READ;
	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);

	xfs_buf_delwri_queue(bp, 1);
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call biodone
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	XFS_BUF_ERROR(bp, EIO);

	/*
	 * We're calling biodone, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_UNDONE(bp);
	XFS_BUF_STALE(bp);

	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
	xfs_biodone(bp);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the biodone call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = XFS_BUF_BFLAGS(bp);
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_STALE(bp);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		XFS_BUF_ERROR(bp, EIO);
		XFS_BUF_FINISH_IOWAIT(bp);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}


/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_flags &= ~_XBF_PAGE_LOCKED;
		xfs_buf_ioend(bp, schedule);
	}
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	xfs_buf_ioerror(bp, -error);

	do {
		struct page	*page = bvec->bv_page;

		ASSERT(!PagePrivate(page));
		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
				ClearPageUptodate(page);
		} else if (blocksize >= PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(bp->b_flags & _XBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (bp->b_flags & _XBF_PAGE_LOCKED)
			unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
	unsigned int		blocksize = bp->b_target->bt_bsize;

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_BARRIER;
	} else if (bp->b_flags & XBF_LOG_BUFFER) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
	} else {
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
	}

	/* Special code path for reading a sub-page-sized buffer -- we
	 * populate the whole page, and hence the other metadata in the
	 * same page.  This optimization is only valid when the filesystem
	 * block size is not smaller than the page size.
	 */
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
	    ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
	      (XBF_READ|_XBF_PAGE_LOCKED)) &&
	    (blocksize >= PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bp->b_target->bt_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = xfs_buf_bio_end_io;
		bio->bi_private = bp;

		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
		size = 0;

		atomic_inc(&bp->b_io_remaining);

		goto submit_io;
	}

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

submit_io:
	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		bio_put(bio);
		xfs_buf_ioerror(bp, EIO);
	}
}

int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 * Waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.
 * It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
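
/*
 * Usage sketch (illustrative, not part of the original file): zeroing a
 * sub-range of a possibly unmapped buffer page by page, e.g. stale space
 * beyond "validlen" bytes (a hypothetical length):
 *
 *	xfs_buf_iomove(bp, validlen, bp->b_count_desired - validlen,
 *		       NULL, XBRW_ZERO);
 *
 * XBRW_ZERO only advances the data pointer and never dereferences it, so
 * NULL is safe here; XBRW_READ and XBRW_WRITE copy out of and into "data"
 * respectively.
 */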

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but
 * have not yet returned... walk the hash list for the target.
 */
void
xfs_wait_buftarg(
	xfs_buftarg_t	*btp)
{
	xfs_buf_t	*bp, *n;
	xfs_bufhash_t	*hash;
	uint		i;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];
again:
		spin_lock(&hash->bh_lock);
		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
			ASSERT(btp == bp->b_target);
			if (!(bp->b_flags & XBF_FS_MANAGED)) {
				spin_unlock(&hash->bh_lock);
				/*
				 * Catch superblock reference count leaks
				 * immediately
				 */
				BUG_ON(bp->b_bn == 0);
				delay(100);
				goto again;
			}
		}
		spin_unlock(&hash->bh_lock);
	}
}

/*
 * Allocate buffer hash table for a given target.
 * For devices containing metadata (i.e. not the log/realtime devices)
 * we need to allocate a much larger hash table.
 */
STATIC void
xfs_alloc_bufhash(
	xfs_buftarg_t		*btp,
	int			external)
{
	unsigned int		i;

	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
					 sizeof(xfs_bufhash_t));
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
	}
}

STATIC void
xfs_free_bufhash(
	xfs_buftarg_t		*btp)
{
	kmem_free_large(btp->bt_hash);
	btp->bt_hash = NULL;
}

/*
 * buftarg list for delwrite queue processing
 */
static LIST_HEAD(xfs_buftarg_list);
static DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
	xfs_buftarg_t		*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_add(&btp->bt_list, &xfs_buftarg_list);
	spin_unlock(&xfs_buftarg_lock);
}

STATIC void
xfs_unregister_buftarg(
	xfs_buftarg_t		*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_del(&btp->bt_list);
	spin_unlock(&xfs_buftarg_lock);
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	xfs_flush_buftarg(btp, 1);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);
	xfs_free_bufhash(btp);
	iput(btp->bt_mapping->host);

	/* Unregister the buftarg first so that we don't get a
	 * wakeup finding a non-existent task
	 */
	xfs_unregister_buftarg(btp);
	kthread_stop(btp->bt_task);

	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		printk(KERN_WARNING
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	if (verbose &&
	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
		printk(KERN_WARNING
			"XFS: %u byte sectors in use on device %s.  "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
	}

	return 0;
}
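
/*
 * Worked example (illustrative, not part of the original file): for a
 * common 512-byte sector device, ffs(512) - 1 = 9, so bt_sshift = 9 and
 * bt_smask = 0x1ff.  _xfs_buf_find() then rejects any request shorter
 * than 1 << 9 bytes or whose byte offset has any of the low 9 bits set.
 */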

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_mapping_buftarg(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	struct backing_dev_info	*bdi;
	struct inode		*inode;
	struct address_space	*mapping;
	static const struct address_space_operations mapping_aops = {
		.sync_page = block_sync_page,
		.migratepage = fail_migrate_page,
	};

	inode = new_inode(bdev->bd_inode->i_sb);
	if (!inode) {
		printk(KERN_WARNING
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
		return ENOMEM;
	}
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
	if (!bdi)
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	btp->bt_mapping = mapping;
	return 0;
}

STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t		*btp)
{
	int			error = 0;

	INIT_LIST_HEAD(&btp->bt_list);
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spin_lock_init(&btp->bt_delwrite_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
	if (IS_ERR(btp->bt_task)) {
		error = PTR_ERR(btp->bt_task);
		goto out_error;
	}
	xfs_register_buftarg(btp);
out_error:
	return error;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct block_device	*bdev,
	int			external)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_mapping_buftarg(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp))
		goto error;
	xfs_alloc_bufhash(btp, external);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}


/*
 * Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp,
	int			unlock)
{
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	if (list_empty(dwq)) {
		/* start xfsbufd as it is about to have something to do */
		wake_up_process(bp->b_target->bt_task);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
	int			dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;
	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
	spin_lock(&btp->bt_delwrite_lock);
	list_move(&bp->b_list, &btp->bt_delwrite_queue);
	spin_unlock(&btp->bt_delwrite_lock);
}
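
/*
 * Worked example (illustrative, not part of the original file): assuming
 * the default xfs_buf_age_centisecs = 1500 and HZ = 1000, the flush age is
 * 1500 * msecs_to_jiffies(10) = 15000 jiffies (15 seconds).  Promotion
 * rewrites b_queuetime to "jiffies - 15001", so the next xfsbufd pass sees
 * time_before(jiffies, b_queuetime + age) as false and writes the buffer
 * out immediately instead of waiting out the remaining age.
 */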

STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

STATIC int
xfsbufd_wakeup(
	int			priority,
	gfp_t			mask)
{
	xfs_buftarg_t		*btp;

	spin_lock(&xfs_buftarg_lock);
	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
			continue;
		if (list_empty(&btp->bt_delwrite_queue))
			continue;
		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
		wake_up_process(btp->bt_task);
	}
	spin_unlock(&xfs_buftarg_lock);
	return 0;
}

/*
 * Move as many buffers as specified to the supplied list,
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int		skipped = 0;
	int		force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
					 _XBF_RUN_QUEUES);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;
}

STATIC int
xfsbufd(
	void		*data)
{
	struct list_head tmp;
	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;
	int		count;
	xfs_buf_t	*bp;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);

		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwrite_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);

		xfs_buf_delwri_split(target, &tmp, age);
		count = 0;
		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);
			count++;
		}

		if (as_list_len > 0)
			purge_addresses();
		if (count)
			blk_run_address_space(target->bt_mapping);

	} while (!kthread_should_stop());

	return 0;
}

/*
 * Go through all incore buffers, and release buffers if they belong to
 * the given device. This is used in filesystem error handling to
 * preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	struct list_head tmp;
	xfs_buf_t	*bp, *n;
	int		pincount = 0;

	xfs_buf_runall_queues(xfsconvertd_workqueue);
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
		ASSERT(target == bp->b_target);
		if (wait)
			bp->b_flags &= ~XBF_ASYNC;
		else
			list_del_init(&bp->b_list);

		xfs_buf_iostrategy(bp);
	}

	if (wait)
		blk_run_address_space(target->bt_mapping);

	/*
	 * Remaining list items must be flushed before returning
	 */
	while (!list_empty(&tmp)) {
		bp = list_entry(tmp.next, xfs_buf_t, b_list);

		list_del_init(&bp->b_list);
		xfs_iowait(bp);
		xfs_buf_relse(bp);
	}

	return pincount;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = create_workqueue("xfslogd");
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfsconvertd_workqueue = create_workqueue("xfsconvertd");
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

	register_shrinker(&xfs_buf_shake);
	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	unregister_shrinker(&xfs_buf_shake);
	destroy_workqueue(xfsconvertd_workqueue);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif