fs/xfs/linux-2.6/xfs_buf.c
1da177e4 1/*
7b718769
NS
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
1da177e4 4 *
7b718769
NS
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
1da177e4
LT
7 * published by the Free Software Foundation.
8 *
7b718769
NS
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
1da177e4 13 *
7b718769
NS
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1da177e4 17 */
1da177e4
LT
18#include <linux/stddef.h>
19#include <linux/errno.h>
20#include <linux/slab.h>
21#include <linux/pagemap.h>
22#include <linux/init.h>
23#include <linux/vmalloc.h>
24#include <linux/bio.h>
25#include <linux/sysctl.h>
26#include <linux/proc_fs.h>
27#include <linux/workqueue.h>
28#include <linux/percpu.h>
29#include <linux/blkdev.h>
30#include <linux/hash.h>
4df08c52 31#include <linux/kthread.h>
b20a3503 32#include <linux/migrate.h>
1da177e4
LT
33#include "xfs_linux.h"
34
ce8e922c
NS
35STATIC kmem_zone_t *xfs_buf_zone;
36STATIC kmem_shaker_t xfs_buf_shake;
a6867a68 37STATIC int xfsbufd(void *);
27496a8c 38STATIC int xfsbufd_wakeup(int, gfp_t);
ce8e922c 39STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
23ea4032
CH
40
41STATIC struct workqueue_struct *xfslogd_workqueue;
0829c360 42struct workqueue_struct *xfsdatad_workqueue;
1da177e4 43
ce8e922c 44#ifdef XFS_BUF_TRACE
1da177e4 45void
ce8e922c
NS
46xfs_buf_trace(
47 xfs_buf_t *bp,
1da177e4
LT
48 char *id,
49 void *data,
50 void *ra)
51{
ce8e922c
NS
52 ktrace_enter(xfs_buf_trace_buf,
53 bp, id,
54 (void *)(unsigned long)bp->b_flags,
55 (void *)(unsigned long)bp->b_hold.counter,
56 (void *)(unsigned long)bp->b_sema.count.counter,
1da177e4
LT
57 (void *)current,
58 data, ra,
ce8e922c
NS
59 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
60 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
61 (void *)(unsigned long)bp->b_buffer_length,
1da177e4
LT
62 NULL, NULL, NULL, NULL, NULL);
63}
ce8e922c
NS
64ktrace_t *xfs_buf_trace_buf;
65#define XFS_BUF_TRACE_SIZE 4096
66#define XB_TRACE(bp, id, data) \
67 xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
1da177e4 68#else
ce8e922c 69#define XB_TRACE(bp, id, data) do { } while (0)
1da177e4
LT
70#endif
71
ce8e922c
NS
72#ifdef XFS_BUF_LOCK_TRACKING
73# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
74# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
75# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
1da177e4 76#else
ce8e922c
NS
77# define XB_SET_OWNER(bp) do { } while (0)
78# define XB_CLEAR_OWNER(bp) do { } while (0)
79# define XB_GET_OWNER(bp) do { } while (0)
1da177e4
LT
80#endif
81
ce8e922c
NS
82#define xb_to_gfp(flags) \
83 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
84 ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
1da177e4 85
ce8e922c
NS
86#define xb_to_km(flags) \
87 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
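/*
 * For illustration, the flag translations above work out as follows
 * (read directly from the two macros):
 *
 *	xb_to_gfp(XBF_READ_AHEAD)	__GFP_NORETRY | __GFP_NOWARN
 *	xb_to_gfp(XBF_DONT_BLOCK)	GFP_NOFS | __GFP_NOWARN
 *	xb_to_gfp(0)			GFP_KERNEL | __GFP_NOWARN
 *	xb_to_km(XBF_DONT_BLOCK)	KM_NOFS
 *	xb_to_km(0)			KM_SLEEP
 */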
1da177e4 88
ce8e922c
NS
89#define xfs_buf_allocate(flags) \
90 kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
91#define xfs_buf_deallocate(bp) \
92 kmem_zone_free(xfs_buf_zone, (bp));
1da177e4
LT
93
94/*
ce8e922c 95 * Page Region interfaces.
1da177e4 96 *
ce8e922c
NS
97 * For pages in filesystems where the blocksize is smaller than the
98 * pagesize, we use the page->private field (long) to hold a bitmap
99 * of uptodate regions within the page.
1da177e4 100 *
ce8e922c 101 * Each such region is "bytes per page / bits per long" bytes long.
1da177e4 102 *
ce8e922c
NS
103 * NBPPR == number-of-bytes-per-page-region
104 * BTOPR == bytes-to-page-region (rounded up)
105 * BTOPRT == bytes-to-page-region-truncated (rounded down)
1da177e4
LT
106 */
107#if (BITS_PER_LONG == 32)
108#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
109#elif (BITS_PER_LONG == 64)
110#define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
111#else
112#error BITS_PER_LONG must be 32 or 64
113#endif
114#define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
115#define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
116#define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
117
118STATIC unsigned long
119page_region_mask(
120 size_t offset,
121 size_t length)
122{
123 unsigned long mask;
124 int first, final;
125
126 first = BTOPR(offset);
127 final = BTOPRT(offset + length - 1);
128 first = min(first, final);
129
130 mask = ~0UL;
131	mask <<= BITS_PER_LONG - (final - first + 1);
132	mask >>= BITS_PER_LONG - (final + 1);
133
134 ASSERT(offset + length <= PAGE_CACHE_SIZE);
135 ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
136
137 return mask;
138}
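/*
 * Worked example for the mask logic above, assuming a 4K page and
 * 64-bit longs (NBPPR == 64 bytes per region, PRSHIFT == 6):
 *
 *	page_region_mask(0, 512)	-> regions 0..7,  mask 0x00000000000000ff
 *	page_region_mask(512, 512)	-> regions 8..15, mask 0x000000000000ff00
 *
 * A page is only marked uptodate once every region bit has been set,
 * i.e. page_private(page) == ~0UL in set_page_region() below.
 */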
139
140STATIC inline void
141set_page_region(
142 struct page *page,
143 size_t offset,
144 size_t length)
145{
4c21e2f2
HD
146 set_page_private(page,
147 page_private(page) | page_region_mask(offset, length));
148 if (page_private(page) == ~0UL)
1da177e4
LT
149 SetPageUptodate(page);
150}
151
152STATIC inline int
153test_page_region(
154 struct page *page,
155 size_t offset,
156 size_t length)
157{
158 unsigned long mask = page_region_mask(offset, length);
159
4c21e2f2 160 return (mask && (page_private(page) & mask) == mask);
1da177e4
LT
161}
162
163/*
ce8e922c 164 * Mapping of multi-page buffers into contiguous virtual space
1da177e4
LT
165 */
166
167typedef struct a_list {
168 void *vm_addr;
169 struct a_list *next;
170} a_list_t;
171
172STATIC a_list_t *as_free_head;
173STATIC int as_list_len;
174STATIC DEFINE_SPINLOCK(as_lock);
175
176/*
ce8e922c 177 * Try to batch vunmaps because they are costly.
1da177e4
LT
178 */
179STATIC void
180free_address(
181 void *addr)
182{
183 a_list_t *aentry;
184
7b04d717 185 aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
1da177e4
LT
186 if (likely(aentry)) {
187 spin_lock(&as_lock);
188 aentry->next = as_free_head;
189 aentry->vm_addr = addr;
190 as_free_head = aentry;
191 as_list_len++;
192 spin_unlock(&as_lock);
193 } else {
194 vunmap(addr);
195 }
196}
197
198STATIC void
199purge_addresses(void)
200{
201 a_list_t *aentry, *old;
202
203 if (as_free_head == NULL)
204 return;
205
206 spin_lock(&as_lock);
207 aentry = as_free_head;
208 as_free_head = NULL;
209 as_list_len = 0;
210 spin_unlock(&as_lock);
211
212 while ((old = aentry) != NULL) {
213 vunmap(aentry->vm_addr);
214 aentry = aentry->next;
215 kfree(old);
216 }
217}
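/*
 * free_address() is called from xfs_buf_free() when tearing down a
 * mapped multi-page buffer; the batched purge_addresses() is driven
 * from _xfs_buf_map_pages() (once more than 64 entries have queued up)
 * and periodically from the xfsbufd thread below.
 */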
218
219/*
ce8e922c 220 * Internal xfs_buf_t object manipulation
1da177e4
LT
221 */
222
223STATIC void
ce8e922c
NS
224_xfs_buf_initialize(
225 xfs_buf_t *bp,
1da177e4 226 xfs_buftarg_t *target,
204ab25f 227 xfs_off_t range_base,
1da177e4 228 size_t range_length,
ce8e922c 229 xfs_buf_flags_t flags)
1da177e4
LT
230{
231 /*
ce8e922c 232 * We don't want certain flags to appear in b_flags.
1da177e4 233 */
ce8e922c
NS
234 flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
235
236 memset(bp, 0, sizeof(xfs_buf_t));
237 atomic_set(&bp->b_hold, 1);
238 init_MUTEX_LOCKED(&bp->b_iodonesema);
239 INIT_LIST_HEAD(&bp->b_list);
240 INIT_LIST_HEAD(&bp->b_hash_list);
241 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
242 XB_SET_OWNER(bp);
243 bp->b_target = target;
244 bp->b_file_offset = range_base;
1da177e4
LT
245 /*
246 * Set buffer_length and count_desired to the same value initially.
247 * I/O routines should use count_desired, which will be the same in
248 * most cases but may be reset (e.g. XFS recovery).
249 */
ce8e922c
NS
250 bp->b_buffer_length = bp->b_count_desired = range_length;
251 bp->b_flags = flags;
252 bp->b_bn = XFS_BUF_DADDR_NULL;
253 atomic_set(&bp->b_pin_count, 0);
254 init_waitqueue_head(&bp->b_waiters);
255
256 XFS_STATS_INC(xb_create);
257 XB_TRACE(bp, "initialize", target);
1da177e4
LT
258}
259
260/*
ce8e922c
NS
261 * Allocate a page array capable of holding a specified number
262 * of pages, and point the page buf at it.
1da177e4
LT
263 */
264STATIC int
ce8e922c
NS
265_xfs_buf_get_pages(
266 xfs_buf_t *bp,
1da177e4 267 int page_count,
ce8e922c 268 xfs_buf_flags_t flags)
1da177e4
LT
269{
270 /* Make sure that we have a page list */
ce8e922c
NS
271 if (bp->b_pages == NULL) {
272 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
273 bp->b_page_count = page_count;
274 if (page_count <= XB_PAGES) {
275 bp->b_pages = bp->b_page_array;
1da177e4 276 } else {
ce8e922c
NS
277 bp->b_pages = kmem_alloc(sizeof(struct page *) *
278 page_count, xb_to_km(flags));
279 if (bp->b_pages == NULL)
1da177e4
LT
280 return -ENOMEM;
281 }
ce8e922c 282 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
1da177e4
LT
283 }
284 return 0;
285}
286
287/*
ce8e922c 288 * Frees b_pages if it was allocated.
1da177e4
LT
289 */
290STATIC void
ce8e922c 291_xfs_buf_free_pages(
1da177e4
LT
292 xfs_buf_t *bp)
293{
ce8e922c
NS
294 if (bp->b_pages != bp->b_page_array) {
295 kmem_free(bp->b_pages,
296 bp->b_page_count * sizeof(struct page *));
1da177e4
LT
297 }
298}
299
300/*
301 * Releases the specified buffer.
302 *
303 * The modification state of any associated pages is left unchanged.
ce8e922c 304 * The buffer must not be on any hash - use xfs_buf_rele instead for
1da177e4
LT
305 * hashed and refcounted buffers
306 */
307void
ce8e922c 308xfs_buf_free(
1da177e4
LT
309 xfs_buf_t *bp)
310{
ce8e922c 311 XB_TRACE(bp, "free", 0);
1da177e4 312
ce8e922c 313 ASSERT(list_empty(&bp->b_hash_list));
1da177e4 314
ce8e922c 315 if (bp->b_flags & _XBF_PAGE_CACHE) {
1da177e4
LT
316 uint i;
317
ce8e922c
NS
318 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
319 free_address(bp->b_addr - bp->b_offset);
1da177e4 320
ce8e922c
NS
321 for (i = 0; i < bp->b_page_count; i++)
322 page_cache_release(bp->b_pages[i]);
323 _xfs_buf_free_pages(bp);
324 } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
1da177e4 325 /*
ce8e922c
NS
326 * XXX(hch): bp->b_count_desired might be incorrect (see
327 * xfs_buf_associate_memory for details), but fortunately
1da177e4
LT
328 * the Linux version of kmem_free ignores the len argument..
329 */
ce8e922c
NS
330 kmem_free(bp->b_addr, bp->b_count_desired);
331 _xfs_buf_free_pages(bp);
1da177e4
LT
332 }
333
ce8e922c 334 xfs_buf_deallocate(bp);
1da177e4
LT
335}
336
337/*
338 * Finds all pages for buffer in question and builds it's page list.
339 */
340STATIC int
ce8e922c 341_xfs_buf_lookup_pages(
1da177e4
LT
342 xfs_buf_t *bp,
343 uint flags)
344{
ce8e922c
NS
345 struct address_space *mapping = bp->b_target->bt_mapping;
346 size_t blocksize = bp->b_target->bt_bsize;
347 size_t size = bp->b_count_desired;
1da177e4 348 size_t nbytes, offset;
ce8e922c 349 gfp_t gfp_mask = xb_to_gfp(flags);
1da177e4
LT
350 unsigned short page_count, i;
351 pgoff_t first;
204ab25f 352 xfs_off_t end;
1da177e4
LT
353 int error;
354
ce8e922c
NS
355 end = bp->b_file_offset + bp->b_buffer_length;
356 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
1da177e4 357
ce8e922c 358 error = _xfs_buf_get_pages(bp, page_count, flags);
1da177e4
LT
359 if (unlikely(error))
360 return error;
ce8e922c 361 bp->b_flags |= _XBF_PAGE_CACHE;
1da177e4 362
ce8e922c
NS
363 offset = bp->b_offset;
364 first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
1da177e4 365
ce8e922c 366 for (i = 0; i < bp->b_page_count; i++) {
1da177e4
LT
367 struct page *page;
368 uint retries = 0;
369
370 retry:
371 page = find_or_create_page(mapping, first + i, gfp_mask);
372 if (unlikely(page == NULL)) {
ce8e922c
NS
373 if (flags & XBF_READ_AHEAD) {
374 bp->b_page_count = i;
375 for (i = 0; i < bp->b_page_count; i++)
376 unlock_page(bp->b_pages[i]);
1da177e4
LT
377 return -ENOMEM;
378 }
379
380 /*
381 * This could deadlock.
382 *
383 * But until all the XFS lowlevel code is revamped to
384 * handle buffer allocation failures we can't do much.
385 */
386 if (!(++retries % 100))
387 printk(KERN_ERR
388 "XFS: possible memory allocation "
389 "deadlock in %s (mode:0x%x)\n",
390 __FUNCTION__, gfp_mask);
391
ce8e922c 392 XFS_STATS_INC(xb_page_retries);
23ea4032 393 xfsbufd_wakeup(0, gfp_mask);
1da177e4
LT
394 blk_congestion_wait(WRITE, HZ/50);
395 goto retry;
396 }
397
ce8e922c 398 XFS_STATS_INC(xb_page_found);
1da177e4
LT
399
400 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
401 size -= nbytes;
402
403 if (!PageUptodate(page)) {
404 page_count--;
405 if (blocksize >= PAGE_CACHE_SIZE) {
ce8e922c
NS
406 if (flags & XBF_READ)
407 bp->b_locked = 1;
1da177e4
LT
408 } else if (!PagePrivate(page)) {
409 if (test_page_region(page, offset, nbytes))
410 page_count++;
411 }
412 }
413
ce8e922c 414 bp->b_pages[i] = page;
1da177e4
LT
415 offset = 0;
416 }
417
ce8e922c
NS
418 if (!bp->b_locked) {
419 for (i = 0; i < bp->b_page_count; i++)
420 unlock_page(bp->b_pages[i]);
1da177e4
LT
421 }
422
ce8e922c
NS
423 if (page_count == bp->b_page_count)
424 bp->b_flags |= XBF_DONE;
1da177e4 425
ce8e922c 426 XB_TRACE(bp, "lookup_pages", (long)page_count);
1da177e4
LT
427 return error;
428}
429
430/*
431 * Map buffer into kernel address-space if necessary.
432 */
433STATIC int
ce8e922c 434_xfs_buf_map_pages(
1da177e4
LT
435 xfs_buf_t *bp,
436 uint flags)
437{
438 /* A single page buffer is always mappable */
ce8e922c
NS
439 if (bp->b_page_count == 1) {
440 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
441 bp->b_flags |= XBF_MAPPED;
442 } else if (flags & XBF_MAPPED) {
1da177e4
LT
443 if (as_list_len > 64)
444 purge_addresses();
ce8e922c
NS
445 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
446 VM_MAP, PAGE_KERNEL);
447 if (unlikely(bp->b_addr == NULL))
1da177e4 448 return -ENOMEM;
ce8e922c
NS
449 bp->b_addr += bp->b_offset;
450 bp->b_flags |= XBF_MAPPED;
1da177e4
LT
451 }
452
453 return 0;
454}
455
456/*
457 * Finding and Reading Buffers
458 */
459
460/*
ce8e922c 461 * Looks up, and creates if absent, a lockable buffer for
1da177e4
LT
462 * a given range of an inode. The buffer is returned
463 * locked. If other overlapping buffers exist, they are
464 * released before the new buffer is created and locked,
465 * which may imply that this call will block until those buffers
466 * are unlocked. No I/O is implied by this call.
467 */
468xfs_buf_t *
ce8e922c 469_xfs_buf_find(
1da177e4 470 xfs_buftarg_t *btp, /* block device target */
204ab25f 471 xfs_off_t ioff, /* starting offset of range */
1da177e4 472 size_t isize, /* length of range */
ce8e922c
NS
473 xfs_buf_flags_t flags,
474 xfs_buf_t *new_bp)
1da177e4 475{
204ab25f 476 xfs_off_t range_base;
1da177e4
LT
477 size_t range_length;
478 xfs_bufhash_t *hash;
ce8e922c 479 xfs_buf_t *bp, *n;
1da177e4
LT
480
481 range_base = (ioff << BBSHIFT);
482 range_length = (isize << BBSHIFT);
483
484 /* Check for IOs smaller than the sector size / not sector aligned */
ce8e922c 485 ASSERT(!(range_length < (1 << btp->bt_sshift)));
204ab25f 486 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
1da177e4
LT
487
488 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
489
490 spin_lock(&hash->bh_lock);
491
ce8e922c
NS
492 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
493 ASSERT(btp == bp->b_target);
494 if (bp->b_file_offset == range_base &&
495 bp->b_buffer_length == range_length) {
1da177e4 496 /*
ce8e922c 497 * If we look at something, bring it to the
1da177e4
LT
498 * front of the list for next time.
499 */
ce8e922c
NS
500 atomic_inc(&bp->b_hold);
501 list_move(&bp->b_hash_list, &hash->bh_list);
1da177e4
LT
502 goto found;
503 }
504 }
505
506 /* No match found */
ce8e922c
NS
507 if (new_bp) {
508 _xfs_buf_initialize(new_bp, btp, range_base,
1da177e4 509 range_length, flags);
ce8e922c
NS
510 new_bp->b_hash = hash;
511 list_add(&new_bp->b_hash_list, &hash->bh_list);
1da177e4 512 } else {
ce8e922c 513 XFS_STATS_INC(xb_miss_locked);
1da177e4
LT
514 }
515
516 spin_unlock(&hash->bh_lock);
ce8e922c 517 return new_bp;
1da177e4
LT
518
519found:
520 spin_unlock(&hash->bh_lock);
521
522 /* Attempt to get the semaphore without sleeping,
523 * if this does not work then we need to drop the
524 * spinlock and do a hard attempt on the semaphore.
525 */
ce8e922c
NS
526 if (down_trylock(&bp->b_sema)) {
527 if (!(flags & XBF_TRYLOCK)) {
1da177e4 528 /* wait for buffer ownership */
ce8e922c
NS
529 XB_TRACE(bp, "get_lock", 0);
530 xfs_buf_lock(bp);
531 XFS_STATS_INC(xb_get_locked_waited);
1da177e4
LT
532 } else {
533 /* We asked for a trylock and failed, no need
534 * to look at file offset and length here, we
ce8e922c
NS
535 * know that this buffer at least overlaps our
536 * buffer and is locked; therefore our buffer
537 * either does not exist, or is this buffer.
1da177e4 538 */
ce8e922c
NS
539 xfs_buf_rele(bp);
540 XFS_STATS_INC(xb_busy_locked);
541 return NULL;
1da177e4
LT
542 }
543 } else {
544 /* trylock worked */
ce8e922c 545 XB_SET_OWNER(bp);
1da177e4
LT
546 }
547
ce8e922c
NS
548 if (bp->b_flags & XBF_STALE) {
549 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
550 bp->b_flags &= XBF_MAPPED;
2f926587 551 }
ce8e922c
NS
552 XB_TRACE(bp, "got_lock", 0);
553 XFS_STATS_INC(xb_get_locked);
554 return bp;
1da177e4
LT
555}
556
557/*
ce8e922c 558 * Assembles a buffer covering the specified range.
1da177e4
LT
559 * Storage in memory for all portions of the buffer will be allocated,
560 * although backing storage may not be.
561 */
562xfs_buf_t *
ce8e922c 563xfs_buf_get_flags(
1da177e4 564 xfs_buftarg_t *target,/* target for buffer */
204ab25f 565 xfs_off_t ioff, /* starting offset of range */
1da177e4 566 size_t isize, /* length of range */
ce8e922c 567 xfs_buf_flags_t flags)
1da177e4 568{
ce8e922c 569 xfs_buf_t *bp, *new_bp;
1da177e4
LT
570 int error = 0, i;
571
ce8e922c
NS
572 new_bp = xfs_buf_allocate(flags);
573 if (unlikely(!new_bp))
1da177e4
LT
574 return NULL;
575
ce8e922c
NS
576 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
577 if (bp == new_bp) {
578 error = _xfs_buf_lookup_pages(bp, flags);
1da177e4
LT
579 if (error)
580 goto no_buffer;
581 } else {
ce8e922c
NS
582 xfs_buf_deallocate(new_bp);
583 if (unlikely(bp == NULL))
1da177e4
LT
584 return NULL;
585 }
586
ce8e922c
NS
587 for (i = 0; i < bp->b_page_count; i++)
588 mark_page_accessed(bp->b_pages[i]);
1da177e4 589
ce8e922c
NS
590 if (!(bp->b_flags & XBF_MAPPED)) {
591 error = _xfs_buf_map_pages(bp, flags);
1da177e4
LT
592 if (unlikely(error)) {
593 printk(KERN_WARNING "%s: failed to map pages\n",
594 __FUNCTION__);
595 goto no_buffer;
596 }
597 }
598
ce8e922c 599 XFS_STATS_INC(xb_get);
1da177e4
LT
600
601 /*
602 * Always fill in the block number now, the mapped cases can do
603 * their own overlay of this later.
604 */
ce8e922c
NS
605 bp->b_bn = ioff;
606 bp->b_count_desired = bp->b_buffer_length;
1da177e4 607
ce8e922c
NS
608 XB_TRACE(bp, "get", (unsigned long)flags);
609 return bp;
1da177e4
LT
610
611 no_buffer:
ce8e922c
NS
612 if (flags & (XBF_LOCK | XBF_TRYLOCK))
613 xfs_buf_unlock(bp);
614 xfs_buf_rele(bp);
1da177e4
LT
615 return NULL;
616}
617
618xfs_buf_t *
619xfs_buf_read_flags(
620 xfs_buftarg_t *target,
204ab25f 621 xfs_off_t ioff,
1da177e4 622 size_t isize,
ce8e922c 623 xfs_buf_flags_t flags)
1da177e4 624{
ce8e922c
NS
625 xfs_buf_t *bp;
626
627 flags |= XBF_READ;
628
629 bp = xfs_buf_get_flags(target, ioff, isize, flags);
630 if (bp) {
631 if (!XFS_BUF_ISDONE(bp)) {
632 XB_TRACE(bp, "read", (unsigned long)flags);
633 XFS_STATS_INC(xb_get_read);
634 xfs_buf_iostart(bp, flags);
635 } else if (flags & XBF_ASYNC) {
636 XB_TRACE(bp, "read_async", (unsigned long)flags);
1da177e4
LT
637 /*
638 * Read ahead call which is already satisfied,
639 * drop the buffer
640 */
641 goto no_buffer;
642 } else {
ce8e922c 643 XB_TRACE(bp, "read_done", (unsigned long)flags);
1da177e4 644 /* We do not want read in the flags */
ce8e922c 645 bp->b_flags &= ~XBF_READ;
1da177e4
LT
646 }
647 }
648
ce8e922c 649 return bp;
1da177e4
LT
650
651 no_buffer:
ce8e922c
NS
652 if (flags & (XBF_LOCK | XBF_TRYLOCK))
653 xfs_buf_unlock(bp);
654 xfs_buf_rele(bp);
1da177e4
LT
655 return NULL;
656}
657
1da177e4 658/*
ce8e922c
NS
659 * If we are not low on memory then do the readahead in a deadlock
660 * safe manner.
1da177e4
LT
661 */
662void
ce8e922c 663xfs_buf_readahead(
1da177e4 664 xfs_buftarg_t *target,
204ab25f 665 xfs_off_t ioff,
1da177e4 666 size_t isize,
ce8e922c 667 xfs_buf_flags_t flags)
1da177e4
LT
668{
669 struct backing_dev_info *bdi;
670
ce8e922c 671 bdi = target->bt_mapping->backing_dev_info;
1da177e4
LT
672 if (bdi_read_congested(bdi))
673 return;
674
ce8e922c 675 flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
1da177e4
LT
676 xfs_buf_read_flags(target, ioff, isize, flags);
677}
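/*
 * Usage sketch (illustrative): read-ahead is fire-and-forget.  A caller
 * prefetching the blocks following a just-read buffer might do:
 *
 *	xfs_buf_readahead(target, blkno + numblks, numblks, 0);
 *
 * Congested targets are skipped above, and XBF_TRYLOCK failures inside
 * xfs_buf_read_flags() are silently dropped.
 */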
678
679xfs_buf_t *
ce8e922c 680xfs_buf_get_empty(
1da177e4
LT
681 size_t len,
682 xfs_buftarg_t *target)
683{
ce8e922c 684 xfs_buf_t *bp;
1da177e4 685
ce8e922c
NS
686 bp = xfs_buf_allocate(0);
687 if (bp)
688 _xfs_buf_initialize(bp, target, 0, len, 0);
689 return bp;
1da177e4
LT
690}
691
692static inline struct page *
693mem_to_page(
694 void *addr)
695{
696 if (((unsigned long)addr < VMALLOC_START) ||
697 ((unsigned long)addr >= VMALLOC_END)) {
698 return virt_to_page(addr);
699 } else {
700 return vmalloc_to_page(addr);
701 }
702}
703
704int
ce8e922c
NS
705xfs_buf_associate_memory(
706 xfs_buf_t *bp,
1da177e4
LT
707 void *mem,
708 size_t len)
709{
710 int rval;
711 int i = 0;
712 size_t ptr;
713 size_t end, end_cur;
714 off_t offset;
715 int page_count;
716
717 page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
718 offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
719 if (offset && (len > PAGE_CACHE_SIZE))
720 page_count++;
721
722 /* Free any previous set of page pointers */
ce8e922c
NS
723 if (bp->b_pages)
724 _xfs_buf_free_pages(bp);
1da177e4 725
ce8e922c
NS
726 bp->b_pages = NULL;
727 bp->b_addr = mem;
1da177e4 728
ce8e922c 729 rval = _xfs_buf_get_pages(bp, page_count, 0);
1da177e4
LT
730 if (rval)
731 return rval;
732
ce8e922c 733 bp->b_offset = offset;
1da177e4
LT
734 ptr = (size_t) mem & PAGE_CACHE_MASK;
735 end = PAGE_CACHE_ALIGN((size_t) mem + len);
736 end_cur = end;
737 /* set up first page */
ce8e922c 738 bp->b_pages[0] = mem_to_page(mem);
1da177e4
LT
739
740 ptr += PAGE_CACHE_SIZE;
ce8e922c 741 bp->b_page_count = ++i;
1da177e4 742 while (ptr < end) {
ce8e922c
NS
743 bp->b_pages[i] = mem_to_page((void *)ptr);
744 bp->b_page_count = ++i;
1da177e4
LT
745 ptr += PAGE_CACHE_SIZE;
746 }
ce8e922c 747 bp->b_locked = 0;
1da177e4 748
ce8e922c
NS
749 bp->b_count_desired = bp->b_buffer_length = len;
750 bp->b_flags |= XBF_MAPPED;
1da177e4
LT
751
752 return 0;
753}
754
755xfs_buf_t *
ce8e922c 756xfs_buf_get_noaddr(
1da177e4
LT
757 size_t len,
758 xfs_buftarg_t *target)
759{
760 size_t malloc_len = len;
761 xfs_buf_t *bp;
762 void *data;
763 int error;
764
ce8e922c 765 bp = xfs_buf_allocate(0);
1da177e4
LT
766 if (unlikely(bp == NULL))
767 goto fail;
ce8e922c 768 _xfs_buf_initialize(bp, target, 0, len, 0);
1da177e4
LT
769
770 try_again:
771 data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
772 if (unlikely(data == NULL))
773 goto fail_free_buf;
774
775 /* check whether alignment matches.. */
776 if ((__psunsigned_t)data !=
ce8e922c 777 ((__psunsigned_t)data & ~target->bt_smask)) {
1da177e4
LT
778 /* .. else double the size and try again */
779 kmem_free(data, malloc_len);
780 malloc_len <<= 1;
781 goto try_again;
782 }
783
ce8e922c 784 error = xfs_buf_associate_memory(bp, data, len);
1da177e4
LT
785 if (error)
786 goto fail_free_mem;
ce8e922c 787 bp->b_flags |= _XBF_KMEM_ALLOC;
1da177e4 788
ce8e922c 789 xfs_buf_unlock(bp);
1da177e4 790
ce8e922c 791 XB_TRACE(bp, "no_daddr", data);
1da177e4
LT
792 return bp;
793 fail_free_mem:
794 kmem_free(data, malloc_len);
795 fail_free_buf:
ce8e922c 796 xfs_buf_free(bp);
1da177e4
LT
797 fail:
798 return NULL;
799}
800
801/*
1da177e4
LT
802 * Increment reference count on buffer, to hold the buffer concurrently
803 * with another thread which may release (free) the buffer asynchronously.
1da177e4
LT
804 * Must hold the buffer already to call this function.
805 */
806void
ce8e922c
NS
807xfs_buf_hold(
808 xfs_buf_t *bp)
1da177e4 809{
ce8e922c
NS
810 atomic_inc(&bp->b_hold);
811 XB_TRACE(bp, "hold", 0);
1da177e4
LT
812}
813
814/*
ce8e922c
NS
815 * Releases a hold on the specified buffer. If the
816 * hold count is 1, calls xfs_buf_free.
1da177e4
LT
817 */
818void
ce8e922c
NS
819xfs_buf_rele(
820 xfs_buf_t *bp)
1da177e4 821{
ce8e922c 822 xfs_bufhash_t *hash = bp->b_hash;
1da177e4 823
ce8e922c 824 XB_TRACE(bp, "rele", bp->b_relse);
1da177e4 825
fad3aa1e
NS
826 if (unlikely(!hash)) {
827 ASSERT(!bp->b_relse);
828 if (atomic_dec_and_test(&bp->b_hold))
829 xfs_buf_free(bp);
830 return;
831 }
832
ce8e922c
NS
833 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
834 if (bp->b_relse) {
835 atomic_inc(&bp->b_hold);
1da177e4 836 spin_unlock(&hash->bh_lock);
ce8e922c
NS
837 (*(bp->b_relse)) (bp);
838 } else if (bp->b_flags & XBF_FS_MANAGED) {
1da177e4 839 spin_unlock(&hash->bh_lock);
1da177e4 840 } else {
ce8e922c
NS
841 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
842 list_del_init(&bp->b_hash_list);
1da177e4 843 spin_unlock(&hash->bh_lock);
ce8e922c 844 xfs_buf_free(bp);
1da177e4 845 }
2f926587
DC
846 } else {
847 /*
848 * Catch reference count leaks
849 */
ce8e922c 850 ASSERT(atomic_read(&bp->b_hold) >= 0);
1da177e4
LT
851 }
852}
853
854
855/*
856 * Mutual exclusion on buffers. Locking model:
857 *
858 * Buffers associated with inodes for which buffer locking
859 * is not enabled are not protected by semaphores, and are
860 * assumed to be exclusively owned by the caller. There is a
861 * spinlock in the buffer, used by the caller when concurrent
862 * access is possible.
863 */
864
865/*
ce8e922c
NS
866 * Locks a buffer object, if it is not already locked.
867 * Note that this in no way locks the underlying pages, so it is only
868 * useful for synchronizing concurrent use of buffer objects, not for
869 * synchronizing independent access to the underlying pages.
1da177e4
LT
870 */
871int
ce8e922c
NS
872xfs_buf_cond_lock(
873 xfs_buf_t *bp)
1da177e4
LT
874{
875 int locked;
876
ce8e922c 877 locked = down_trylock(&bp->b_sema) == 0;
1da177e4 878 if (locked) {
ce8e922c 879 XB_SET_OWNER(bp);
1da177e4 880 }
ce8e922c
NS
881 XB_TRACE(bp, "cond_lock", (long)locked);
882 return locked ? 0 : -EBUSY;
1da177e4
LT
883}
884
885#if defined(DEBUG) || defined(XFS_BLI_TRACE)
1da177e4 886int
ce8e922c
NS
887xfs_buf_lock_value(
888 xfs_buf_t *bp)
1da177e4 889{
ce8e922c 890 return atomic_read(&bp->b_sema.count);
1da177e4
LT
891}
892#endif
893
894/*
ce8e922c
NS
895 * Locks a buffer object.
896 * Note that this in no way locks the underlying pages, so it is only
897 * useful for synchronizing concurrent use of buffer objects, not for
898 * synchronizing independent access to the underlying pages.
1da177e4 899 */
ce8e922c
NS
900void
901xfs_buf_lock(
902 xfs_buf_t *bp)
1da177e4 903{
ce8e922c
NS
904 XB_TRACE(bp, "lock", 0);
905 if (atomic_read(&bp->b_io_remaining))
906 blk_run_address_space(bp->b_target->bt_mapping);
907 down(&bp->b_sema);
908 XB_SET_OWNER(bp);
909 XB_TRACE(bp, "locked", 0);
1da177e4
LT
910}
911
912/*
ce8e922c 913 * Releases the lock on the buffer object.
2f926587 914 * If the buffer is marked delwri but is not queued, do so before we
ce8e922c 915 * unlock the buffer as we need to set flags correctly. We also need to
2f926587
DC
916 * take a reference for the delwri queue because the unlocker is going to
917 * drop theirs and they don't know we just queued it.
1da177e4
LT
918 */
919void
ce8e922c
NS
920xfs_buf_unlock(
921 xfs_buf_t *bp)
1da177e4 922{
ce8e922c
NS
923 if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
924 atomic_inc(&bp->b_hold);
925 bp->b_flags |= XBF_ASYNC;
926 xfs_buf_delwri_queue(bp, 0);
2f926587
DC
927 }
928
ce8e922c
NS
929 XB_CLEAR_OWNER(bp);
930 up(&bp->b_sema);
931 XB_TRACE(bp, "unlock", 0);
1da177e4
LT
932}
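/*
 * Typical locking pattern (illustrative sketch): try for the semaphore
 * first and only block when the caller did not ask for XBF_TRYLOCK,
 * mirroring what _xfs_buf_find() does above:
 *
 *	if (xfs_buf_cond_lock(bp)) {
 *		if (flags & XBF_TRYLOCK)
 *			return NULL;
 *		xfs_buf_lock(bp);
 *	}
 *	... use the buffer ...
 *	xfs_buf_unlock(bp);
 */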
933
934
935/*
936 * Pinning Buffer Storage in Memory
ce8e922c 937 * Ensure that no attempt to force a buffer to disk will succeed.
1da177e4
LT
938 */
939void
ce8e922c
NS
940xfs_buf_pin(
941 xfs_buf_t *bp)
1da177e4 942{
ce8e922c
NS
943 atomic_inc(&bp->b_pin_count);
944 XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
1da177e4
LT
945}
946
1da177e4 947void
ce8e922c
NS
948xfs_buf_unpin(
949 xfs_buf_t *bp)
1da177e4 950{
ce8e922c
NS
951 if (atomic_dec_and_test(&bp->b_pin_count))
952 wake_up_all(&bp->b_waiters);
953 XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
1da177e4
LT
954}
955
956int
ce8e922c
NS
957xfs_buf_ispin(
958 xfs_buf_t *bp)
1da177e4 959{
ce8e922c 960 return atomic_read(&bp->b_pin_count);
1da177e4
LT
961}
962
ce8e922c
NS
963STATIC void
964xfs_buf_wait_unpin(
965 xfs_buf_t *bp)
1da177e4
LT
966{
967 DECLARE_WAITQUEUE (wait, current);
968
ce8e922c 969 if (atomic_read(&bp->b_pin_count) == 0)
1da177e4
LT
970 return;
971
ce8e922c 972 add_wait_queue(&bp->b_waiters, &wait);
1da177e4
LT
973 for (;;) {
974 set_current_state(TASK_UNINTERRUPTIBLE);
ce8e922c 975 if (atomic_read(&bp->b_pin_count) == 0)
1da177e4 976 break;
ce8e922c
NS
977 if (atomic_read(&bp->b_io_remaining))
978 blk_run_address_space(bp->b_target->bt_mapping);
1da177e4
LT
979 schedule();
980 }
ce8e922c 981 remove_wait_queue(&bp->b_waiters, &wait);
1da177e4
LT
982 set_current_state(TASK_RUNNING);
983}
984
985/*
986 * Buffer Utility Routines
987 */
988
1da177e4 989STATIC void
ce8e922c 990xfs_buf_iodone_work(
1da177e4
LT
991 void *v)
992{
993 xfs_buf_t *bp = (xfs_buf_t *)v;
994
ce8e922c
NS
995 if (bp->b_iodone)
996 (*(bp->b_iodone))(bp);
997 else if (bp->b_flags & XBF_ASYNC)
1da177e4
LT
998 xfs_buf_relse(bp);
999}
1000
1001void
ce8e922c
NS
1002xfs_buf_ioend(
1003 xfs_buf_t *bp,
1da177e4
LT
1004 int schedule)
1005{
ce8e922c
NS
1006 bp->b_flags &= ~(XBF_READ | XBF_WRITE);
1007 if (bp->b_error == 0)
1008 bp->b_flags |= XBF_DONE;
1da177e4 1009
ce8e922c 1010 XB_TRACE(bp, "iodone", bp->b_iodone);
1da177e4 1011
ce8e922c 1012 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1da177e4 1013 if (schedule) {
ce8e922c
NS
1014 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
1015 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1da177e4 1016 } else {
ce8e922c 1017 xfs_buf_iodone_work(bp);
1da177e4
LT
1018 }
1019 } else {
ce8e922c 1020 up(&bp->b_iodonesema);
1da177e4
LT
1021 }
1022}
1023
1da177e4 1024void
ce8e922c
NS
1025xfs_buf_ioerror(
1026 xfs_buf_t *bp,
1027 int error)
1da177e4
LT
1028{
1029 ASSERT(error >= 0 && error <= 0xffff);
ce8e922c
NS
1030 bp->b_error = (unsigned short)error;
1031 XB_TRACE(bp, "ioerror", (unsigned long)error);
1da177e4
LT
1032}
1033
1034/*
ce8e922c
NS
1035 * Initiate I/O on a buffer, based on the flags supplied.
1036 * The b_iodone routine in the buffer supplied will only be called
1da177e4 1037 * when all of the subsidiary I/O requests, if any, have been completed.
1da177e4
LT
1038 */
1039int
ce8e922c
NS
1040xfs_buf_iostart(
1041 xfs_buf_t *bp,
1042 xfs_buf_flags_t flags)
1da177e4
LT
1043{
1044 int status = 0;
1045
ce8e922c 1046 XB_TRACE(bp, "iostart", (unsigned long)flags);
1da177e4 1047
ce8e922c
NS
1048 if (flags & XBF_DELWRI) {
1049 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
1050 bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
1051 xfs_buf_delwri_queue(bp, 1);
1da177e4
LT
1052 return status;
1053 }
1054
ce8e922c
NS
1055 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
1056 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1057 bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
1058 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1da177e4 1059
ce8e922c 1060 BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
1da177e4
LT
1061
1062 /* For writes allow an alternate strategy routine to precede
1063 * the actual I/O request (which may not be issued at all in
1064 * a shutdown situation, for example).
1065 */
ce8e922c
NS
1066 status = (flags & XBF_WRITE) ?
1067 xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
1da177e4
LT
1068
1069 /* Wait for I/O if we are not an async request.
1070 * Note: async I/O request completion will release the buffer,
1071 * and that can already be done by this point. So using the
1072 * buffer pointer from here on, after async I/O, is invalid.
1073 */
ce8e922c
NS
1074 if (!status && !(flags & XBF_ASYNC))
1075 status = xfs_buf_iowait(bp);
1da177e4
LT
1076
1077 return status;
1078}
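/*
 * Usage sketch (illustrative): a synchronous metadata read through the
 * routine above.  Without XBF_ASYNC, xfs_buf_iostart() waits for the
 * I/O via xfs_buf_iowait() and returns the error code, if any:
 *
 *	bp = xfs_buf_get_flags(target, blkno, numblks, XBF_LOCK | XBF_MAPPED);
 *	if (bp) {
 *		error = xfs_buf_iostart(bp, XBF_READ);
 *		if (!error)
 *			... use bp->b_addr ...
 *		xfs_buf_relse(bp);
 *	}
 */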
1079
1da177e4 1080STATIC __inline__ int
ce8e922c
NS
1081_xfs_buf_iolocked(
1082 xfs_buf_t *bp)
1da177e4 1083{
ce8e922c
NS
1084 ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
1085 if (bp->b_flags & XBF_READ)
1086 return bp->b_locked;
1da177e4
LT
1087 return 0;
1088}
1089
1090STATIC __inline__ void
ce8e922c
NS
1091_xfs_buf_ioend(
1092 xfs_buf_t *bp,
1da177e4
LT
1093 int schedule)
1094{
ce8e922c
NS
1095 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1096 bp->b_locked = 0;
1097 xfs_buf_ioend(bp, schedule);
1da177e4
LT
1098 }
1099}
1100
1101STATIC int
ce8e922c 1102xfs_buf_bio_end_io(
1da177e4
LT
1103 struct bio *bio,
1104 unsigned int bytes_done,
1105 int error)
1106{
ce8e922c
NS
1107 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1108 unsigned int blocksize = bp->b_target->bt_bsize;
eedb5530 1109 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1da177e4
LT
1110
1111 if (bio->bi_size)
1112 return 1;
1113
1114 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
ce8e922c 1115 bp->b_error = EIO;
1da177e4 1116
eedb5530 1117 do {
1da177e4
LT
1118 struct page *page = bvec->bv_page;
1119
ce8e922c
NS
1120 if (unlikely(bp->b_error)) {
1121 if (bp->b_flags & XBF_READ)
eedb5530 1122 ClearPageUptodate(page);
1da177e4 1123 SetPageError(page);
ce8e922c 1124 } else if (blocksize >= PAGE_CACHE_SIZE) {
1da177e4
LT
1125 SetPageUptodate(page);
1126 } else if (!PagePrivate(page) &&
ce8e922c 1127 (bp->b_flags & _XBF_PAGE_CACHE)) {
1da177e4
LT
1128 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1129 }
1130
eedb5530
NS
1131 if (--bvec >= bio->bi_io_vec)
1132 prefetchw(&bvec->bv_page->flags);
1133
ce8e922c 1134 if (_xfs_buf_iolocked(bp)) {
1da177e4
LT
1135 unlock_page(page);
1136 }
eedb5530 1137 } while (bvec >= bio->bi_io_vec);
1da177e4 1138
ce8e922c 1139 _xfs_buf_ioend(bp, 1);
1da177e4
LT
1140 bio_put(bio);
1141 return 0;
1142}
1143
1144STATIC void
ce8e922c
NS
1145_xfs_buf_ioapply(
1146 xfs_buf_t *bp)
1da177e4
LT
1147{
1148 int i, rw, map_i, total_nr_pages, nr_pages;
1149 struct bio *bio;
ce8e922c
NS
1150 int offset = bp->b_offset;
1151 int size = bp->b_count_desired;
1152 sector_t sector = bp->b_bn;
1153 unsigned int blocksize = bp->b_target->bt_bsize;
1154 int locking = _xfs_buf_iolocked(bp);
1da177e4 1155
ce8e922c 1156 total_nr_pages = bp->b_page_count;
1da177e4
LT
1157 map_i = 0;
1158
ce8e922c
NS
1159 if (bp->b_flags & _XBF_RUN_QUEUES) {
1160 bp->b_flags &= ~_XBF_RUN_QUEUES;
1161 rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
1da177e4 1162 } else {
ce8e922c 1163 rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
1da177e4
LT
1164 }
1165
ce8e922c
NS
1166 if (bp->b_flags & XBF_ORDERED) {
1167 ASSERT(!(bp->b_flags & XBF_READ));
f538d4da
CH
1168 rw = WRITE_BARRIER;
1169 }
1170
ce8e922c 1171 /* Special code path for reading a sub-page-size buffer --
1da177e4
LT
1172 * we populate the whole page, and hence the other metadata
1173 * in the same page. This optimization is only valid when the
ce8e922c 1174 * filesystem block size is not smaller than the page size.
1da177e4 1175 */
ce8e922c
NS
1176 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1177 (bp->b_flags & XBF_READ) && locking &&
1178 (blocksize >= PAGE_CACHE_SIZE)) {
1da177e4
LT
1179 bio = bio_alloc(GFP_NOIO, 1);
1180
ce8e922c 1181 bio->bi_bdev = bp->b_target->bt_bdev;
1da177e4 1182 bio->bi_sector = sector - (offset >> BBSHIFT);
ce8e922c
NS
1183 bio->bi_end_io = xfs_buf_bio_end_io;
1184 bio->bi_private = bp;
1da177e4 1185
ce8e922c 1186 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1da177e4
LT
1187 size = 0;
1188
ce8e922c 1189 atomic_inc(&bp->b_io_remaining);
1da177e4
LT
1190
1191 goto submit_io;
1192 }
1193
1194 /* Lock down the pages which we need to for the request */
ce8e922c 1195 if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
1da177e4
LT
1196 for (i = 0; size; i++) {
1197 int nbytes = PAGE_CACHE_SIZE - offset;
ce8e922c 1198 struct page *page = bp->b_pages[i];
1da177e4
LT
1199
1200 if (nbytes > size)
1201 nbytes = size;
1202
1203 lock_page(page);
1204
1205 size -= nbytes;
1206 offset = 0;
1207 }
ce8e922c
NS
1208 offset = bp->b_offset;
1209 size = bp->b_count_desired;
1da177e4
LT
1210 }
1211
1212next_chunk:
ce8e922c 1213 atomic_inc(&bp->b_io_remaining);
1da177e4
LT
1214 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1215 if (nr_pages > total_nr_pages)
1216 nr_pages = total_nr_pages;
1217
1218 bio = bio_alloc(GFP_NOIO, nr_pages);
ce8e922c 1219 bio->bi_bdev = bp->b_target->bt_bdev;
1da177e4 1220 bio->bi_sector = sector;
ce8e922c
NS
1221 bio->bi_end_io = xfs_buf_bio_end_io;
1222 bio->bi_private = bp;
1da177e4
LT
1223
1224 for (; size && nr_pages; nr_pages--, map_i++) {
ce8e922c 1225 int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1da177e4
LT
1226
1227 if (nbytes > size)
1228 nbytes = size;
1229
ce8e922c
NS
1230 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1231 if (rbytes < nbytes)
1da177e4
LT
1232 break;
1233
1234 offset = 0;
1235 sector += nbytes >> BBSHIFT;
1236 size -= nbytes;
1237 total_nr_pages--;
1238 }
1239
1240submit_io:
1241 if (likely(bio->bi_size)) {
1242 submit_bio(rw, bio);
1243 if (size)
1244 goto next_chunk;
1245 } else {
1246 bio_put(bio);
ce8e922c 1247 xfs_buf_ioerror(bp, EIO);
1da177e4
LT
1248 }
1249}
1250
1da177e4 1251int
ce8e922c
NS
1252xfs_buf_iorequest(
1253 xfs_buf_t *bp)
1da177e4 1254{
ce8e922c 1255 XB_TRACE(bp, "iorequest", 0);
1da177e4 1256
ce8e922c
NS
1257 if (bp->b_flags & XBF_DELWRI) {
1258 xfs_buf_delwri_queue(bp, 1);
1da177e4
LT
1259 return 0;
1260 }
1261
ce8e922c
NS
1262 if (bp->b_flags & XBF_WRITE) {
1263 xfs_buf_wait_unpin(bp);
1da177e4
LT
1264 }
1265
ce8e922c 1266 xfs_buf_hold(bp);
1da177e4
LT
1267
1268 /* Set the count to 1 initially; this will stop an I/O
1269 * completion callout which happens before we have started
ce8e922c 1270 * all the I/O from calling xfs_buf_ioend too early.
1da177e4 1271 */
ce8e922c
NS
1272 atomic_set(&bp->b_io_remaining, 1);
1273 _xfs_buf_ioapply(bp);
1274 _xfs_buf_ioend(bp, 0);
1da177e4 1275
ce8e922c 1276 xfs_buf_rele(bp);
1da177e4
LT
1277 return 0;
1278}
1279
1280/*
ce8e922c
NS
1281 * Waits for I/O to complete on the buffer supplied.
1282 * It returns immediately if no I/O is pending.
1283 * It returns the I/O error code, if any, or 0 if there was no error.
1da177e4
LT
1284 */
1285int
ce8e922c
NS
1286xfs_buf_iowait(
1287 xfs_buf_t *bp)
1da177e4 1288{
ce8e922c
NS
1289 XB_TRACE(bp, "iowait", 0);
1290 if (atomic_read(&bp->b_io_remaining))
1291 blk_run_address_space(bp->b_target->bt_mapping);
1292 down(&bp->b_iodonesema);
1293 XB_TRACE(bp, "iowaited", (long)bp->b_error);
1294 return bp->b_error;
1da177e4
LT
1295}
1296
ce8e922c
NS
1297xfs_caddr_t
1298xfs_buf_offset(
1299 xfs_buf_t *bp,
1da177e4
LT
1300 size_t offset)
1301{
1302 struct page *page;
1303
ce8e922c
NS
1304 if (bp->b_flags & XBF_MAPPED)
1305 return XFS_BUF_PTR(bp) + offset;
1da177e4 1306
ce8e922c
NS
1307 offset += bp->b_offset;
1308 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1309 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1da177e4
LT
1310}
1311
1312/*
1da177e4
LT
1313 * Move data into or out of a buffer.
1314 */
1315void
ce8e922c
NS
1316xfs_buf_iomove(
1317 xfs_buf_t *bp, /* buffer to process */
1da177e4
LT
1318 size_t boff, /* starting buffer offset */
1319 size_t bsize, /* length to copy */
1320 caddr_t data, /* data address */
ce8e922c 1321 xfs_buf_rw_t mode) /* read/write/zero flag */
1da177e4
LT
1322{
1323 size_t bend, cpoff, csize;
1324 struct page *page;
1325
1326 bend = boff + bsize;
1327 while (boff < bend) {
ce8e922c
NS
1328 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1329 cpoff = xfs_buf_poff(boff + bp->b_offset);
1da177e4 1330 csize = min_t(size_t,
ce8e922c 1331 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1da177e4
LT
1332
1333 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1334
1335 switch (mode) {
ce8e922c 1336 case XBRW_ZERO:
1da177e4
LT
1337 memset(page_address(page) + cpoff, 0, csize);
1338 break;
ce8e922c 1339 case XBRW_READ:
1da177e4
LT
1340 memcpy(data, page_address(page) + cpoff, csize);
1341 break;
ce8e922c 1342 case XBRW_WRITE:
1da177e4
LT
1343 memcpy(page_address(page) + cpoff, data, csize);
1344 }
1345
1346 boff += csize;
1347 data += csize;
1348 }
1349}
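/*
 * Example (illustrative): copy the first 128 bytes out of a buffer,
 * page by page, whether or not the buffer is mapped:
 *
 *	char	hdr[128];
 *
 *	xfs_buf_iomove(bp, 0, sizeof(hdr), hdr, XBRW_READ);
 */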
1350
1351/*
ce8e922c 1352 * Handling of buffer targets (buftargs).
1da177e4
LT
1353 */
1354
1355/*
ce8e922c
NS
1356 * Wait for any bufs with callbacks that have been submitted but
1357 * have not yet returned... walk the hash list for the target.
1da177e4
LT
1358 */
1359void
1360xfs_wait_buftarg(
1361 xfs_buftarg_t *btp)
1362{
1363 xfs_buf_t *bp, *n;
1364 xfs_bufhash_t *hash;
1365 uint i;
1366
1367 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1368 hash = &btp->bt_hash[i];
1369again:
1370 spin_lock(&hash->bh_lock);
ce8e922c
NS
1371 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1372 ASSERT(btp == bp->b_target);
1373 if (!(bp->b_flags & XBF_FS_MANAGED)) {
1da177e4 1374 spin_unlock(&hash->bh_lock);
2f926587
DC
1375 /*
1376 * Catch superblock reference count leaks
1377 * immediately
1378 */
ce8e922c 1379 BUG_ON(bp->b_bn == 0);
1da177e4
LT
1380 delay(100);
1381 goto again;
1382 }
1383 }
1384 spin_unlock(&hash->bh_lock);
1385 }
1386}
1387
1388/*
ce8e922c
NS
1389 * Allocate buffer hash table for a given target.
1390 * For devices containing metadata (i.e. not the log/realtime devices)
1391 * we need to allocate a much larger hash table.
1da177e4
LT
1392 */
1393STATIC void
1394xfs_alloc_bufhash(
1395 xfs_buftarg_t *btp,
1396 int external)
1397{
1398 unsigned int i;
1399
1400 btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
1401 btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1402 btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1403 sizeof(xfs_bufhash_t), KM_SLEEP);
1404 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1405 spin_lock_init(&btp->bt_hash[i].bh_lock);
1406 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1407 }
1408}
1409
1410STATIC void
1411xfs_free_bufhash(
1412 xfs_buftarg_t *btp)
1413{
ce8e922c 1414 kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1da177e4
LT
1415 btp->bt_hash = NULL;
1416}
1417
a6867a68 1418/*
ce8e922c 1419 * buftarg list for delwrite queue processing
a6867a68
DC
1420 */
1421STATIC LIST_HEAD(xfs_buftarg_list);
1422STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
1423
1424STATIC void
1425xfs_register_buftarg(
1426 xfs_buftarg_t *btp)
1427{
1428 spin_lock(&xfs_buftarg_lock);
1429 list_add(&btp->bt_list, &xfs_buftarg_list);
1430 spin_unlock(&xfs_buftarg_lock);
1431}
1432
1433STATIC void
1434xfs_unregister_buftarg(
1435 xfs_buftarg_t *btp)
1436{
1437 spin_lock(&xfs_buftarg_lock);
1438 list_del(&btp->bt_list);
1439 spin_unlock(&xfs_buftarg_lock);
1440}
1441
1da177e4
LT
1442void
1443xfs_free_buftarg(
1444 xfs_buftarg_t *btp,
1445 int external)
1446{
1447 xfs_flush_buftarg(btp, 1);
1448 if (external)
ce8e922c 1449 xfs_blkdev_put(btp->bt_bdev);
1da177e4 1450 xfs_free_bufhash(btp);
ce8e922c 1451 iput(btp->bt_mapping->host);
a6867a68 1452
ce8e922c
NS
1453 /* Unregister the buftarg first so that we don't get a
1454 * wakeup finding a non-existent task
1455 */
a6867a68
DC
1456 xfs_unregister_buftarg(btp);
1457 kthread_stop(btp->bt_task);
1458
1da177e4
LT
1459 kmem_free(btp, sizeof(*btp));
1460}
1461
1da177e4
LT
1462STATIC int
1463xfs_setsize_buftarg_flags(
1464 xfs_buftarg_t *btp,
1465 unsigned int blocksize,
1466 unsigned int sectorsize,
1467 int verbose)
1468{
ce8e922c
NS
1469 btp->bt_bsize = blocksize;
1470 btp->bt_sshift = ffs(sectorsize) - 1;
1471 btp->bt_smask = sectorsize - 1;
1da177e4 1472
ce8e922c 1473 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1da177e4
LT
1474 printk(KERN_WARNING
1475 "XFS: Cannot set_blocksize to %u on device %s\n",
1476 sectorsize, XFS_BUFTARG_NAME(btp));
1477 return EINVAL;
1478 }
1479
1480 if (verbose &&
1481 (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1482 printk(KERN_WARNING
1483 "XFS: %u byte sectors in use on device %s. "
1484 "This is suboptimal; %u or greater is ideal.\n",
1485 sectorsize, XFS_BUFTARG_NAME(btp),
1486 (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1487 }
1488
1489 return 0;
1490}
1491
1492/*
ce8e922c
NS
1493 * When allocating the initial buffer target we have not yet
1494 * read in the superblock, so we don't know what size sectors
1495 * are being used at this early stage. Play safe.
1496 */
1da177e4
LT
1497STATIC int
1498xfs_setsize_buftarg_early(
1499 xfs_buftarg_t *btp,
1500 struct block_device *bdev)
1501{
1502 return xfs_setsize_buftarg_flags(btp,
1503 PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1504}
1505
1506int
1507xfs_setsize_buftarg(
1508 xfs_buftarg_t *btp,
1509 unsigned int blocksize,
1510 unsigned int sectorsize)
1511{
1512 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1513}
1514
1515STATIC int
1516xfs_mapping_buftarg(
1517 xfs_buftarg_t *btp,
1518 struct block_device *bdev)
1519{
1520 struct backing_dev_info *bdi;
1521 struct inode *inode;
1522 struct address_space *mapping;
1523 static struct address_space_operations mapping_aops = {
1524 .sync_page = block_sync_page,
e965f963 1525 .migratepage = fail_migrate_page,
1da177e4
LT
1526 };
1527
1528 inode = new_inode(bdev->bd_inode->i_sb);
1529 if (!inode) {
1530 printk(KERN_WARNING
1531 "XFS: Cannot allocate mapping inode for device %s\n",
1532 XFS_BUFTARG_NAME(btp));
1533 return ENOMEM;
1534 }
1535 inode->i_mode = S_IFBLK;
1536 inode->i_bdev = bdev;
1537 inode->i_rdev = bdev->bd_dev;
1538 bdi = blk_get_backing_dev_info(bdev);
1539 if (!bdi)
1540 bdi = &default_backing_dev_info;
1541 mapping = &inode->i_data;
1542 mapping->a_ops = &mapping_aops;
1543 mapping->backing_dev_info = bdi;
1544 mapping_set_gfp_mask(mapping, GFP_NOFS);
ce8e922c 1545 btp->bt_mapping = mapping;
1da177e4
LT
1546 return 0;
1547}
1548
a6867a68
DC
1549STATIC int
1550xfs_alloc_delwrite_queue(
1551 xfs_buftarg_t *btp)
1552{
1553 int error = 0;
1554
1555 INIT_LIST_HEAD(&btp->bt_list);
1556 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1557 spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1558 btp->bt_flags = 0;
1559 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1560 if (IS_ERR(btp->bt_task)) {
1561 error = PTR_ERR(btp->bt_task);
1562 goto out_error;
1563 }
1564 xfs_register_buftarg(btp);
1565out_error:
1566 return error;
1567}
1568
1da177e4
LT
1569xfs_buftarg_t *
1570xfs_alloc_buftarg(
1571 struct block_device *bdev,
1572 int external)
1573{
1574 xfs_buftarg_t *btp;
1575
1576 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1577
ce8e922c
NS
1578 btp->bt_dev = bdev->bd_dev;
1579 btp->bt_bdev = bdev;
1da177e4
LT
1580 if (xfs_setsize_buftarg_early(btp, bdev))
1581 goto error;
1582 if (xfs_mapping_buftarg(btp, bdev))
1583 goto error;
a6867a68
DC
1584 if (xfs_alloc_delwrite_queue(btp))
1585 goto error;
1da177e4
LT
1586 xfs_alloc_bufhash(btp, external);
1587 return btp;
1588
1589error:
1590 kmem_free(btp, sizeof(*btp));
1591 return NULL;
1592}
1593
1594
1595/*
ce8e922c 1596 * Delayed write buffer handling
1da177e4 1597 */
1da177e4 1598STATIC void
ce8e922c
NS
1599xfs_buf_delwri_queue(
1600 xfs_buf_t *bp,
1da177e4
LT
1601 int unlock)
1602{
ce8e922c
NS
1603 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1604 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
a6867a68 1605
ce8e922c
NS
1606 XB_TRACE(bp, "delwri_q", (long)unlock);
1607 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1da177e4 1608
a6867a68 1609 spin_lock(dwlk);
1da177e4 1610 /* If already in the queue, dequeue and place at tail */
ce8e922c
NS
1611 if (!list_empty(&bp->b_list)) {
1612 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1613 if (unlock)
1614 atomic_dec(&bp->b_hold);
1615 list_del(&bp->b_list);
1da177e4
LT
1616 }
1617
ce8e922c
NS
1618 bp->b_flags |= _XBF_DELWRI_Q;
1619 list_add_tail(&bp->b_list, dwq);
1620 bp->b_queuetime = jiffies;
a6867a68 1621 spin_unlock(dwlk);
1da177e4
LT
1622
1623 if (unlock)
ce8e922c 1624 xfs_buf_unlock(bp);
1da177e4
LT
1625}
1626
1627void
ce8e922c
NS
1628xfs_buf_delwri_dequeue(
1629 xfs_buf_t *bp)
1da177e4 1630{
ce8e922c 1631 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1da177e4
LT
1632 int dequeued = 0;
1633
a6867a68 1634 spin_lock(dwlk);
ce8e922c
NS
1635 if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1636 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1637 list_del_init(&bp->b_list);
1da177e4
LT
1638 dequeued = 1;
1639 }
ce8e922c 1640 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
a6867a68 1641 spin_unlock(dwlk);
1da177e4
LT
1642
1643 if (dequeued)
ce8e922c 1644 xfs_buf_rele(bp);
1da177e4 1645
ce8e922c 1646 XB_TRACE(bp, "delwri_dq", (long)dequeued);
1da177e4
LT
1647}
1648
1649STATIC void
ce8e922c 1650xfs_buf_runall_queues(
1da177e4
LT
1651 struct workqueue_struct *queue)
1652{
1653 flush_workqueue(queue);
1654}
1655
1da177e4 1656STATIC int
23ea4032 1657xfsbufd_wakeup(
15c84a47
NS
1658 int priority,
1659 gfp_t mask)
1da177e4 1660{
da7f93e9 1661 xfs_buftarg_t *btp;
a6867a68
DC
1662
1663 spin_lock(&xfs_buftarg_lock);
da7f93e9 1664 list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
ce8e922c 1665 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
a6867a68 1666 continue;
ce8e922c 1667 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
a6867a68
DC
1668 wake_up_process(btp->bt_task);
1669 }
1670 spin_unlock(&xfs_buftarg_lock);
1da177e4
LT
1671 return 0;
1672}
1673
1674STATIC int
23ea4032 1675xfsbufd(
1da177e4
LT
1676 void *data)
1677{
1678 struct list_head tmp;
1679 unsigned long age;
a6867a68 1680 xfs_buftarg_t *target = (xfs_buftarg_t *)data;
ce8e922c 1681 xfs_buf_t *bp, *n;
a6867a68
DC
1682 struct list_head *dwq = &target->bt_delwrite_queue;
1683 spinlock_t *dwlk = &target->bt_delwrite_lock;
1da177e4 1684
1da177e4
LT
1685 current->flags |= PF_MEMALLOC;
1686
1da177e4
LT
1687 INIT_LIST_HEAD(&tmp);
1688 do {
3e1d1d28 1689 if (unlikely(freezing(current))) {
ce8e922c 1690 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
3e1d1d28 1691 refrigerator();
abd0cf7a 1692 } else {
ce8e922c 1693 clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
abd0cf7a 1694 }
1da177e4 1695
15c84a47
NS
1696 schedule_timeout_interruptible(
1697 xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1da177e4 1698
041e0e3b 1699 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
a6867a68 1700 spin_lock(dwlk);
ce8e922c
NS
1701 list_for_each_entry_safe(bp, n, dwq, b_list) {
1702 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1703 ASSERT(bp->b_flags & XBF_DELWRI);
1da177e4 1704
ce8e922c
NS
1705 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1706 if (!test_bit(XBT_FORCE_FLUSH,
a6867a68 1707 &target->bt_flags) &&
1da177e4 1708 time_before(jiffies,
ce8e922c
NS
1709 bp->b_queuetime + age)) {
1710 xfs_buf_unlock(bp);
1da177e4
LT
1711 break;
1712 }
1713
ce8e922c
NS
1714 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1715 bp->b_flags |= XBF_WRITE;
1716 list_move(&bp->b_list, &tmp);
1da177e4
LT
1717 }
1718 }
a6867a68 1719 spin_unlock(dwlk);
1da177e4
LT
1720
1721 while (!list_empty(&tmp)) {
ce8e922c
NS
1722 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1723 ASSERT(target == bp->b_target);
1da177e4 1724
ce8e922c
NS
1725 list_del_init(&bp->b_list);
1726 xfs_buf_iostrategy(bp);
1da177e4 1727
ce8e922c 1728 blk_run_address_space(target->bt_mapping);
1da177e4
LT
1729 }
1730
1731 if (as_list_len > 0)
1732 purge_addresses();
1733
ce8e922c 1734 clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
4df08c52 1735 } while (!kthread_should_stop());
1da177e4 1736
4df08c52 1737 return 0;
1da177e4
LT
1738}
1739
1740/*
ce8e922c
NS
1741 * Go through all incore buffers, and release buffers if they belong to
1742 * the given device. This is used in filesystem error handling to
1743 * preserve the consistency of its metadata.
1da177e4
LT
1744 */
1745int
1746xfs_flush_buftarg(
1747 xfs_buftarg_t *target,
1748 int wait)
1749{
1750 struct list_head tmp;
ce8e922c 1751 xfs_buf_t *bp, *n;
1da177e4 1752 int pincount = 0;
a6867a68
DC
1753 struct list_head *dwq = &target->bt_delwrite_queue;
1754 spinlock_t *dwlk = &target->bt_delwrite_lock;
1da177e4 1755
ce8e922c
NS
1756 xfs_buf_runall_queues(xfsdatad_workqueue);
1757 xfs_buf_runall_queues(xfslogd_workqueue);
1da177e4
LT
1758
1759 INIT_LIST_HEAD(&tmp);
a6867a68 1760 spin_lock(dwlk);
ce8e922c
NS
1761 list_for_each_entry_safe(bp, n, dwq, b_list) {
1762 ASSERT(bp->b_target == target);
1763 ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
1764 XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
1765 if (xfs_buf_ispin(bp)) {
1da177e4
LT
1766 pincount++;
1767 continue;
1768 }
1769
ce8e922c 1770 list_move(&bp->b_list, &tmp);
1da177e4 1771 }
a6867a68 1772 spin_unlock(dwlk);
1da177e4
LT
1773
1774 /*
1775 * Dropped the delayed write list lock, now walk the temporary list
1776 */
ce8e922c
NS
1777 list_for_each_entry_safe(bp, n, &tmp, b_list) {
1778 xfs_buf_lock(bp);
1779 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1780 bp->b_flags |= XBF_WRITE;
1da177e4 1781 if (wait)
ce8e922c 1782 bp->b_flags &= ~XBF_ASYNC;
1da177e4 1783 else
ce8e922c 1784 list_del_init(&bp->b_list);
1da177e4 1785
ce8e922c 1786 xfs_buf_iostrategy(bp);
1da177e4
LT
1787 }
1788
1789 /*
1790 * Remaining list items must be flushed before returning
1791 */
1792 while (!list_empty(&tmp)) {
ce8e922c 1793 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1da177e4 1794
ce8e922c
NS
1795 list_del_init(&bp->b_list);
1796 xfs_iowait(bp);
1797 xfs_buf_relse(bp);
1da177e4
LT
1798 }
1799
1800 if (wait)
ce8e922c 1801 blk_run_address_space(target->bt_mapping);
1da177e4
LT
1802
1803 return pincount;
1804}
1805
04d8b284 1806int __init
ce8e922c 1807xfs_buf_init(void)
1da177e4 1808{
ce8e922c
NS
1809#ifdef XFS_BUF_TRACE
1810 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
04d8b284
CH
1811#endif
1812
8758280f
NS
1813 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1814 KM_ZONE_HWALIGN, NULL);
ce8e922c 1815 if (!xfs_buf_zone)
04d8b284
CH
1816 goto out_free_trace_buf;
1817
23ea4032
CH
1818 xfslogd_workqueue = create_workqueue("xfslogd");
1819 if (!xfslogd_workqueue)
04d8b284 1820 goto out_free_buf_zone;
1da177e4 1821
23ea4032
CH
1822 xfsdatad_workqueue = create_workqueue("xfsdatad");
1823 if (!xfsdatad_workqueue)
1824 goto out_destroy_xfslogd_workqueue;
1da177e4 1825
ce8e922c
NS
1826 xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
1827 if (!xfs_buf_shake)
a6867a68 1828 goto out_destroy_xfsdatad_workqueue;
04d8b284 1829
23ea4032 1830 return 0;
1da177e4 1831
23ea4032
CH
1832 out_destroy_xfsdatad_workqueue:
1833 destroy_workqueue(xfsdatad_workqueue);
1834 out_destroy_xfslogd_workqueue:
1835 destroy_workqueue(xfslogd_workqueue);
23ea4032 1836 out_free_buf_zone:
ce8e922c 1837 kmem_zone_destroy(xfs_buf_zone);
04d8b284 1838 out_free_trace_buf:
ce8e922c
NS
1839#ifdef XFS_BUF_TRACE
1840 ktrace_free(xfs_buf_trace_buf);
23ea4032 1841#endif
8758280f 1842 return -ENOMEM;
1da177e4
LT
1843}
1844
1da177e4 1845void
ce8e922c 1846xfs_buf_terminate(void)
1da177e4 1847{
ce8e922c 1848 kmem_shake_deregister(xfs_buf_shake);
04d8b284
CH
1849 destroy_workqueue(xfsdatad_workqueue);
1850 destroy_workqueue(xfslogd_workqueue);
ce8e922c
NS
1851 kmem_zone_destroy(xfs_buf_zone);
1852#ifdef XFS_BUF_TRACE
1853 ktrace_free(xfs_buf_trace_buf);
1da177e4 1854#endif
1da177e4 1855}