/*
 * fs/btrfs/extent_io.c
 *
 * Snapshot from net-next-2.6.git, commit "Btrfs: Retry metadata reads
 * in the face of checksum failures"
 */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);
static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_io_init(void)
{
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk("state leak: start %Lu end %Lu state %lu in tree %p "
		       "refs %d\n", state->start, state->end, state->state,
		       state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu refs %d\n", eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
	tree->last = NULL;
}
EXPORT_SYMBOL(extent_io_tree_init);

void extent_io_tree_empty_lru(struct extent_io_tree *tree)
{
	struct extent_buffer *eb;
	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del_init(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_io_tree_empty_lru);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		unsigned long flags;
		WARN_ON(state->tree);
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
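
/*
 * Added note: the tree is keyed by the byte offset passed in as
 * 'offset', which for extent states is the *end* of the range.  Any
 * lookup offset inside [entry->start, entry->end] lands on the existing
 * entry, which is why tree_insert() returns the colliding node instead
 * of linking a duplicate.
 */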

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	if (tree->last) {
		struct extent_state *state;
		state = tree->last;
		if (state->start <= offset && offset <= state->end)
			return &tree->last->rb_node;
	}
	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else {
			tree->last = rb_entry(n, struct extent_state, rb_node);
			return n;
		}
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret) {
		if (prev) {
			tree->last = rb_entry(prev, struct extent_state,
					      rb_node);
		}
		return prev;
	}
	return ret;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->tree = NULL;
			if (tree->last == other)
				tree->last = state;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->tree = NULL;
			if (tree->last == state)
				tree->last = other;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}
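
/*
 * Illustration (added note, not from the original source): if the tree
 * holds [0, 4095] and [4096, 8191], both with only EXTENT_DIRTY set,
 * merge_state() on either one collapses them into a single
 * [0, 8191] EXTENT_DIRTY state and frees the absorbed struct.
 */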

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, state->state, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	/* test the clear hook (not the set hook) before calling it */
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	set_state_cb(tree, state, bits);
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	tree->last = state;
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->tree) {
			clear_state_cb(tree, state, state->state);
			if (tree->last == state) {
				tree->last = extent_state_next(state);
			}
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 * | ---- desired range ---- |
	 * | state | or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
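
/*
 * Typical call shape (illustrative, not from the original file):
 *
 *	clear_extent_bit(tree, start, end, EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 0, GFP_NOFS);
 *
 * clears both bits across the inclusive [start, end] range, wakes any
 * waiters, and leaves states that still carry other bits in the tree;
 * states whose bit mask drops to zero are removed and freed.
 */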

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&tree->lock);
	schedule();
	spin_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock_irq(&tree->lock);
			cond_resched();
			spin_lock_irq(&tree->lock);
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	set_state_cb(tree, state, bits);
	state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
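
/*
 * Illustrative use of the 'exclusive' mode (see lock_extent() below for
 * the real caller): try to set EXTENT_LOCKED, and on conflict
 * failed_start reports where the already-locked range begins:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		... wait on [failed_start, end] and retry ...
 */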

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
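
/*
 * Minimal locking sketch (illustrative): ranges are locked and unlocked
 * as inclusive [start, end] pairs, and unlock_extent() wakes anyone
 * sleeping in lock_extent() on an overlapping range:
 *
 *	lock_extent(tree, start, start + len - 1, GFP_NOFS);
 *	... read or modify the range ...
 *	unlock_extent(tree, start, start + len - 1, GFP_NOFS);
 */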

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
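
/*
 * Illustrative scan loop (added note): returns 0 and fills
 * *start_ret/*end_ret on a hit, 1 once nothing at or after 'start'
 * carries the requested bits, so callers can walk every matching
 * extent:
 *
 *	u64 found_start, found_end, cur = 0;
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */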

struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			return state;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}
EXPORT_SYMBOL(find_first_extent_bit_state);

u64 find_lock_delalloc_range(struct extent_io_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(tree, cur_start);
	if (!node) {
		*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;
			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&tree->lock);
			schedule();
			spin_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		set_state_cb(tree, state, EXTENT_LOCKED);
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return found;
}
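
/*
 * Summary of the contract above (added note): on return, *start/*end
 * bound a run of contiguous EXTENT_DELALLOC states that have just been
 * marked EXTENT_LOCKED, and the return value counts how many states
 * were locked.  A return of 0 means no delalloc run begins at *start;
 * *end then points at the end of the non-delalloc state so the caller
 * can skip ahead (see the loop in __extent_writepage).
 */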

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	spin_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return total_bytes;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;
	unsigned long flags;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
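
/*
 * Example of the 'filled' distinction (added note): with states
 * [0, 4095] EXTENT_UPTODATE and [4096, 8191] clear,
 *
 *	test_range_bit(tree, 0, 8191, EXTENT_UPTODATE, 1) == 0
 *	test_range_bit(tree, 0, 8191, EXTENT_UPTODATE, 0) == 1
 *
 * i.e. filled == 1 demands full coverage of the range, while
 * filled == 0 is satisfied by any overlap.
 */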

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_state *state = bio->bi_private;
	struct extent_io_tree *tree = state->tree;
	struct rb_node *node;
	u64 start;
	u64 end;
	u64 cur;
	int whole_page;
	unsigned long flags;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		if (tree->ops && tree->ops->writepage_end_io_hook) {
			tree->ops->writepage_end_io_hook(page, start, end,
							 state);
		}

		/*
		 * bios can get merged in funny ways, and so we need to
		 * be careful with the state variable.  We know the
		 * state won't be merged with others because it has
		 * WRITEBACK set, but we can't be sure each biovec is
		 * sequential in the file.  So, if our cached state
		 * doesn't match the expected end, search the tree
		 * for the correct one.
		 */

		spin_lock_irqsave(&tree->lock, flags);
		if (!state || state->end != end) {
			state = NULL;
			node = __etree_search(tree, start, NULL, NULL);
			if (node) {
				state = rb_entry(node, struct extent_state,
						 rb_node);
				if (state->end != end ||
				    !(state->state & EXTENT_WRITEBACK))
					state = NULL;
			}
			if (!state) {
				spin_unlock_irqrestore(&tree->lock, flags);
				clear_extent_writeback(tree, start,
						       end, GFP_ATOMIC);
				goto next_io;
			}
		}
		cur = end;
		while (1) {
			struct extent_state *clear = state;
			cur = state->start;
			node = rb_prev(&state->rb_node);
			if (node) {
				state = rb_entry(node,
						 struct extent_state,
						 rb_node);
			} else {
				state = NULL;
			}

			clear_state_bit(tree, clear, EXTENT_WRITEBACK,
					1, 0);
			if (cur == start)
				break;
			if (cur < start) {
				WARN_ON(1);
				break;
			}
			if (!node)
				break;
		}
		/* before releasing the lock, make sure the next state
		 * variable has the expected bits set and corresponds
		 * to the correct offsets in the file
		 */
		if (state && (state->end + 1 != start ||
		    !(state->state & EXTENT_WRITEBACK))) {
			state = NULL;
		}
		spin_unlock_irqrestore(&tree->lock, flags);
next_io:

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
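
/*
 * Note on the loop shape above (added note): bvec starts at the *last*
 * biovec and the do/while walks backwards to bio->bi_io_vec, so the
 * completion handler processes pages in reverse order; prefetchw()
 * warms the next (lower) page's flags word before it is touched.  The
 * same idiom appears in the readpage and preparewrite handlers below.
 */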

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_state *state = bio->bi_private;
	struct extent_io_tree *tree = state->tree;
	struct rb_node *node;
	u64 start;
	u64 end;
	u64 cur;
	unsigned long flags;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      state);
			if (ret)
				uptodate = 0;
		}

		spin_lock_irqsave(&tree->lock, flags);
		if (!state || state->end != end) {
			state = NULL;
			node = __etree_search(tree, start, NULL, NULL);
			if (node) {
				state = rb_entry(node, struct extent_state,
						 rb_node);
				if (state->end != end ||
				    !(state->state & EXTENT_LOCKED))
					state = NULL;
			}
			if (!state && uptodate) {
				spin_unlock_irqrestore(&tree->lock, flags);
				set_extent_uptodate(tree, start, end,
						    GFP_ATOMIC);
				unlock_extent(tree, start, end, GFP_ATOMIC);
				goto next_io;
			}
		}

		cur = end;
		while (1) {
			struct extent_state *clear = state;
			cur = state->start;
			node = rb_prev(&state->rb_node);
			if (node) {
				state = rb_entry(node,
						 struct extent_state,
						 rb_node);
			} else {
				state = NULL;
			}
			if (uptodate) {
				set_state_cb(tree, clear, EXTENT_UPTODATE);
				clear->state |= EXTENT_UPTODATE;
			}
			clear_state_bit(tree, clear, EXTENT_LOCKED,
					1, 0);
			if (cur == start)
				break;
			if (cur < start) {
				WARN_ON(1);
				break;
			}
			if (!node)
				break;
		}
		/* before releasing the lock, make sure the next state
		 * variable has the expected bits set and corresponds
		 * to the correct offsets in the file
		 */
		if (state && (state->end + 1 != start ||
		    !(state->state & EXTENT_LOCKED))) {
			state = NULL;
		}
		spin_unlock_irqrestore(&tree->lock, flags);
next_io:
		if (whole_page) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else {
			if (uptodate) {
				check_page_uptodate(tree, page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			check_page_locked(tree, page);
		}
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
1619
1620/*
1621 * IO done from prepare_write is pretty simple, we just unlock
1622 * the structs in the extent tree when done, and set the uptodate bits
1623 * as appropriate.
1624 */
1625#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1626static void end_bio_extent_preparewrite(struct bio *bio, int err)
1627#else
1628static int end_bio_extent_preparewrite(struct bio *bio,
1629 unsigned int bytes_done, int err)
1630#endif
1631{
1632 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1633 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
70dec807
CM
1634 struct extent_state *state = bio->bi_private;
1635 struct extent_io_tree *tree = state->tree;
d1310b2e
CM
1636 u64 start;
1637 u64 end;
1638
1639#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1640 if (bio->bi_size)
1641 return 1;
1642#endif
1643
1644 do {
1645 struct page *page = bvec->bv_page;
1646 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1647 bvec->bv_offset;
1648 end = start + bvec->bv_len - 1;
1649
1650 if (--bvec >= bio->bi_io_vec)
1651 prefetchw(&bvec->bv_page->flags);
1652
1653 if (uptodate) {
1654 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1655 } else {
1656 ClearPageUptodate(page);
1657 SetPageError(page);
1658 }
1659
1660 unlock_extent(tree, start, end, GFP_ATOMIC);
1661
1662 } while (bvec >= bio->bi_io_vec);
1663
1664 bio_put(bio);
1665#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1666 return 0;
1667#endif
1668}
1669
1670static struct bio *
1671extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1672 gfp_t gfp_flags)
1673{
1674 struct bio *bio;
1675
1676 bio = bio_alloc(gfp_flags, nr_vecs);
1677
1678 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1679 while (!bio && (nr_vecs /= 2))
1680 bio = bio_alloc(gfp_flags, nr_vecs);
1681 }
1682
1683 if (bio) {
1684 bio->bi_bdev = bdev;
1685 bio->bi_sector = first_sector;
1686 }
1687 return bio;
1688}
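
/*
 * Added note: when the caller is itself reclaiming memory (PF_MEMALLOC),
 * bio_alloc() may fail, so the helper above retries with the vec count
 * halved each time (nr_vecs /= 2) in the hope that a smaller bio can
 * still be carved out, and gives up only when the count reaches zero.
 */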

static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
{
	u64 maxsector;
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	struct rb_node *node;
	struct extent_state *state;
	u64 start;
	u64 end;

	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
	end = start + bvec->bv_len - 1;

	spin_lock_irq(&tree->lock);
	node = __etree_search(tree, start, NULL, NULL);
	BUG_ON(!node);
	state = rb_entry(node, struct extent_state, rb_node);
	while (state->end < end) {
		node = rb_next(node);
		state = rb_entry(node, struct extent_state, rb_node);
	}
	BUG_ON(state->end != end);
	spin_unlock_irq(&tree->lock);

	bio->bi_private = state;

	bio_get(bio);

	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector < bio->bi_sector) {
		printk("sector too large max %Lu got %llu\n", maxsector,
		       (unsigned long long)bio->bi_sector);
		WARN_ON(1);
	}
	if (tree->ops && tree->ops->submit_bio_hook)
		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num);
	else
		submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    (tree->ops && tree->ops->merge_bio_hook &&
		     tree->ops->merge_bio_hook(page, offset, size, bio)) ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio, mirror_num);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = bio_get_nr_vecs(bdev);
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		/* bail out instead of dereferencing a NULL bio below */
		printk("failed to allocate bio nr %d\n", nr);
		return -ENOMEM;
	}

	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio, mirror_num);
	}

	return ret;
}
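
/*
 * Batching sketch (added note): callers that pass a bio_ret pointer get
 * pages packed into one bio across calls; the bio is only submitted
 * when the next page is not contiguous on disk, when merge_bio_hook
 * vetoes the merge, or when bio_add_page() runs out of room.  The
 * caller must submit the final bio itself, as extent_read_full_page()
 * does below:
 *
 *	struct bio *bio = NULL;
 *	// for each page:
 *	submit_extent_page(READ, tree, page, sector, size, 0,
 *			   bdev, &bio, nr, end_io, 0);
 *	if (bio)
 *		submit_one_bio(READ, bio, 0);
 */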

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

void set_page_extent_head(struct page *page, unsigned long len)
{
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_io_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio, int mirror_num)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			iosize = PAGE_CACHE_SIZE - page_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur,
				end - cur + 1, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* we have an inline extent but it didn't get marked up
		 * to date.  Error out
		 */
		if (block_start == EXTENT_MAP_INLINE) {
			SetPageError(page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			nr -= page->index;
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, bio, nr,
						 end_bio_extent_readpage,
						 mirror_num);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
	if (bio)
		submit_one_bio(READ, bio, 0);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);
1936
1937/*
1938 * the writepage semantics are similar to regular writepage. extent
1939 * records are inserted to lock ranges in the tree, and as dirty areas
1940 * are found, they are marked writeback. Then the lock bits are removed
1941 * and the end_io handler clears the writeback ranges
1942 */
1943static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1944 void *data)
1945{
1946 struct inode *inode = page->mapping->host;
1947 struct extent_page_data *epd = data;
1948 struct extent_io_tree *tree = epd->tree;
1949 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1950 u64 delalloc_start;
1951 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1952 u64 end;
1953 u64 cur = start;
1954 u64 extent_offset;
1955 u64 last_byte = i_size_read(inode);
1956 u64 block_start;
1957 u64 iosize;
1958 sector_t sector;
1959 struct extent_map *em;
1960 struct block_device *bdev;
1961 int ret;
1962 int nr = 0;
1963 size_t page_offset = 0;
1964 size_t blocksize;
1965 loff_t i_size = i_size_read(inode);
1966 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1967 u64 nr_delalloc;
1968 u64 delalloc_end;
1969
1970 WARN_ON(!PageLocked(page));
1971 if (page->index > end_index) {
1972 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1973 unlock_page(page);
1974 return 0;
1975 }
1976
1977 if (page->index == end_index) {
1978 char *userpage;
1979
1980 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1981
1982 userpage = kmap_atomic(page, KM_USER0);
1983 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1984 flush_dcache_page(page);
1985 kunmap_atomic(userpage, KM_USER0);
1986 }
1987
1988 set_page_extent_mapped(page);
1989
1990 delalloc_start = start;
1991 delalloc_end = 0;
1992 while(delalloc_end < page_end) {
1993 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1994 &delalloc_end,
1995 128 * 1024 * 1024);
1996 if (nr_delalloc == 0) {
1997 delalloc_start = delalloc_end + 1;
1998 continue;
1999 }
2000 tree->ops->fill_delalloc(inode, delalloc_start,
2001 delalloc_end);
2002 clear_extent_bit(tree, delalloc_start,
2003 delalloc_end,
2004 EXTENT_LOCKED | EXTENT_DELALLOC,
2005 1, 0, GFP_NOFS);
2006 delalloc_start = delalloc_end + 1;
2007 }
2008 lock_extent(tree, start, page_end, GFP_NOFS);
2009
2010 end = page_end;
2011 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
2012 printk("found delalloc bits after lock_extent\n");
2013 }
2014
2015 if (last_byte <= start) {
2016 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2017 goto done;
2018 }
2019
2020 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2021 blocksize = inode->i_sb->s_blocksize;
2022
2023 while (cur <= end) {
2024 if (cur >= last_byte) {
2025 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2026 break;
2027 }
2028 em = epd->get_extent(inode, page, page_offset, cur,
2029 end - cur + 1, 1);
2030 if (IS_ERR(em) || !em) {
2031 SetPageError(page);
2032 break;
2033 }
2034
2035 extent_offset = cur - em->start;
2036 BUG_ON(extent_map_end(em) <= cur);
2037 BUG_ON(end < cur);
2038 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2039 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
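		/*
		 * Worked example (illustrative): with blocksize 4096 and
		 * iosize 6000, (6000 + 4095) & ~4095ULL == 8192 -- the I/O
		 * size is rounded up to the next block boundary so partial
		 * blocks are never submitted.
		 */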
2040 sector = (em->block_start + extent_offset) >> 9;
2041 bdev = em->bdev;
2042 block_start = em->block_start;
2043 free_extent_map(em);
2044 em = NULL;
2045
2046 if (block_start == EXTENT_MAP_HOLE ||
2047 block_start == EXTENT_MAP_INLINE) {
2048 clear_extent_dirty(tree, cur,
2049 cur + iosize - 1, GFP_NOFS);
2050 cur = cur + iosize;
2051 page_offset += iosize;
2052 continue;
2053 }
2054
2055 /* leave this out until we have a page_mkwrite call */
2056 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2057 EXTENT_DIRTY, 0)) {
2058 cur = cur + iosize;
2059 page_offset += iosize;
2060 continue;
2061 }
2062 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2063 if (tree->ops && tree->ops->writepage_io_hook) {
2064 ret = tree->ops->writepage_io_hook(page, cur,
2065 cur + iosize - 1);
2066 } else {
2067 ret = 0;
2068 }
2069 if (ret)
2070 SetPageError(page);
2071 else {
2072 unsigned long max_nr = end_index + 1;
2073 set_range_writeback(tree, cur, cur + iosize - 1);
2074 if (!PageWriteback(page)) {
2075 printk("warning page %lu not writeback, "
2076 "cur %llu end %llu\n", page->index,
2077 (unsigned long long)cur,
2078 (unsigned long long)end);
2079 }
2080
2081 ret = submit_extent_page(WRITE, tree, page, sector,
2082 iosize, page_offset, bdev,
2083 &epd->bio, max_nr,
2084 end_bio_extent_writepage, 0);
2085 if (ret)
2086 SetPageError(page);
2087 }
2088 cur = cur + iosize;
2089 page_offset += iosize;
2090 nr++;
2091 }
2092done:
2093 if (nr == 0) {
2094 /* make sure the mapping tag for page dirty gets cleared */
2095 set_page_writeback(page);
2096 end_page_writeback(page);
2097 }
2098 unlock_extent(tree, start, page_end, GFP_NOFS);
2099 unlock_page(page);
2100 return 0;
2101}
2102
2103#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
2104
2105/* Taken directly from 2.6.23 for 2.6.18 back port */
2106typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2107 void *data);
2108
2109/**
2110 * write_cache_pages - walk the list of dirty pages of the given address space
2111 * and write all of them.
2112 * @mapping: address space structure to write
2113 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2114 * @writepage: function called for each page
2115 * @data: data passed to writepage function
2116 *
2117 * If a page is already under I/O, write_cache_pages() skips it, even
2118 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2119 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2120 * and msync() need to guarantee that all the data which was dirty at the time
2121 * the call was made get new I/O started against them. If wbc->sync_mode is
2122 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2123 * existing IO to complete.
2124 */
2125static int write_cache_pages(struct address_space *mapping,
2126 struct writeback_control *wbc, writepage_t writepage,
2127 void *data)
2128{
2129 struct backing_dev_info *bdi = mapping->backing_dev_info;
2130 int ret = 0;
2131 int done = 0;
2132 struct pagevec pvec;
2133 int nr_pages;
2134 pgoff_t index;
2135 pgoff_t end; /* Inclusive */
2136 int scanned = 0;
2137 int range_whole = 0;
2138
2139 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2140 wbc->encountered_congestion = 1;
2141 return 0;
2142 }
2143
2144 pagevec_init(&pvec, 0);
2145 if (wbc->range_cyclic) {
2146 index = mapping->writeback_index; /* Start from prev offset */
2147 end = -1;
2148 } else {
2149 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2150 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2151 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2152 range_whole = 1;
2153 scanned = 1;
2154 }
2155retry:
2156 while (!done && (index <= end) &&
2157 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2158 PAGECACHE_TAG_DIRTY,
2159 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2160 unsigned i;
2161
2162 scanned = 1;
2163 for (i = 0; i < nr_pages; i++) {
2164 struct page *page = pvec.pages[i];
2165
2166 /*
2167 * At this point we hold neither mapping->tree_lock nor
2168 * lock on the page itself: the page may be truncated or
2169 * invalidated (changing page->mapping to NULL), or even
2170 * swizzled back from swapper_space to tmpfs file
2171 * mapping
2172 */
2173 lock_page(page);
2174
2175 if (unlikely(page->mapping != mapping)) {
2176 unlock_page(page);
2177 continue;
2178 }
2179
2180 if (!wbc->range_cyclic && page->index > end) {
2181 done = 1;
2182 unlock_page(page);
2183 continue;
2184 }
2185
2186 if (wbc->sync_mode != WB_SYNC_NONE)
2187 wait_on_page_writeback(page);
2188
2189 if (PageWriteback(page) ||
2190 !clear_page_dirty_for_io(page)) {
2191 unlock_page(page);
2192 continue;
2193 }
2194
2195 ret = (*writepage)(page, wbc, data);
2196
2197 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2198 unlock_page(page);
2199 ret = 0;
2200 }
2201 if (ret || (--(wbc->nr_to_write) <= 0))
2202 done = 1;
2203 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2204 wbc->encountered_congestion = 1;
2205 done = 1;
2206 }
2207 }
2208 pagevec_release(&pvec);
2209 cond_resched();
2210 }
2211 if (!scanned && !done) {
2212 /*
2213 * We hit the last page and there is more work to be done: wrap
2214 * back to the start of the file
2215 */
2216 scanned = 1;
2217 index = 0;
2218 goto retry;
2219 }
2220 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2221 mapping->writeback_index = index;
2222 return ret;
2223}
2224#endif
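/*
 * Usage sketch: write_cache_pages() is driven with a writepage_t callback
 * plus an opaque cookie, which is exactly how the extent code pairs it
 * with __extent_writepage() and struct extent_page_data.  A minimal
 * caller (hypothetical name) might look like:
 */
static int example_flush(struct address_space *mapping,
			 struct writeback_control *wbc,
			 struct extent_page_data *epd)
{
	/* hand every dirty page in the wbc range to __extent_writepage() */
	return write_cache_pages(mapping, wbc, __extent_writepage, epd);
}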
2225
2226int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2227 get_extent_t *get_extent,
2228 struct writeback_control *wbc)
2229{
2230 int ret;
2231 struct address_space *mapping = page->mapping;
2232 struct extent_page_data epd = {
2233 .bio = NULL,
2234 .tree = tree,
2235 .get_extent = get_extent,
2236 };
2237 struct writeback_control wbc_writepages = {
2238 .bdi = wbc->bdi,
2239 .sync_mode = WB_SYNC_NONE,
2240 .older_than_this = NULL,
2241 .nr_to_write = 64,
2242 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2243 .range_end = (loff_t)-1,
2244 };
2245
2246
2247 ret = __extent_writepage(page, wbc, &epd);
2248
2249 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2250 if (epd.bio) {
2251 submit_one_bio(WRITE, epd.bio, 0);
2252 }
2253 return ret;
2254}
2255EXPORT_SYMBOL(extent_write_full_page);
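/*
 * Usage sketch: wiring extent_write_full_page() into ->writepage.  As
 * above, BTRFS_I() and btrfs_get_extent() are assumed from the wider
 * btrfs code, not defined in this file.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}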
2256
2257
2258int extent_writepages(struct extent_io_tree *tree,
2259 struct address_space *mapping,
2260 get_extent_t *get_extent,
2261 struct writeback_control *wbc)
2262{
2263 int ret = 0;
2264 struct extent_page_data epd = {
2265 .bio = NULL,
2266 .tree = tree,
2267 .get_extent = get_extent,
2268 };
2269
2270 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2271 if (epd.bio) {
2272 submit_one_bio(WRITE, epd.bio, 0);
2273 }
2274 return ret;
2275}
2276EXPORT_SYMBOL(extent_writepages);
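/*
 * Usage sketch: the matching ->writepages hook simply forwards to
 * extent_writepages() (helper names assumed as above).
 */
static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}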
2277
2278int extent_readpages(struct extent_io_tree *tree,
2279 struct address_space *mapping,
2280 struct list_head *pages, unsigned nr_pages,
2281 get_extent_t get_extent)
2282{
2283 struct bio *bio = NULL;
2284 unsigned page_idx;
2285 struct pagevec pvec;
2286
2287 pagevec_init(&pvec, 0);
2288 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2289 struct page *page = list_entry(pages->prev, struct page, lru);
2290
2291 prefetchw(&page->flags);
2292 list_del(&page->lru);
2293 /*
2294 * what we want to do here is call add_to_page_cache_lru,
2295 * but that isn't exported, so we reproduce it here
2296 */
2297 if (!add_to_page_cache(page, mapping,
2298 page->index, GFP_KERNEL)) {
2299
2300 /* open coding of lru_cache_add, also not exported */
2301 page_cache_get(page);
2302 if (!pagevec_add(&pvec, page))
2303 __pagevec_lru_add(&pvec);
2304 __extent_read_full_page(tree, page, get_extent,
2305 &bio, 0);
2306 }
2307 page_cache_release(page);
2308 }
2309 if (pagevec_count(&pvec))
2310 __pagevec_lru_add(&pvec);
2311 BUG_ON(!list_empty(pages));
2312 if (bio)
2313 submit_one_bio(READ, bio, 0);
2314 return 0;
2315}
2316EXPORT_SYMBOL(extent_readpages);
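/*
 * Usage sketch: a ->readpages hook forwarding to extent_readpages()
 * (helper names assumed as above).
 */
static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}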
2317
2318/*
2319 * basic invalidatepage code, this waits on any locked or writeback
2320 * ranges corresponding to the page, and then deletes any extent state
2321 * records from the tree
2322 */
2323int extent_invalidatepage(struct extent_io_tree *tree,
2324 struct page *page, unsigned long offset)
2325{
2326 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2327 u64 end = start + PAGE_CACHE_SIZE - 1;
2328 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2329
2330 start += (offset + blocksize - 1) & ~(blocksize - 1);
2331 if (start > end)
2332 return 0;
2333
2334 lock_extent(tree, start, end, GFP_NOFS);
2335 wait_on_extent_writeback(tree, start, end);
2336 clear_extent_bit(tree, start, end,
2337 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2338 1, 1, GFP_NOFS);
2339 return 0;
2340}
2341EXPORT_SYMBOL(extent_invalidatepage);
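/*
 * Usage sketch: a ->invalidatepage hook (void-returning in this kernel
 * era) forwarding to extent_invalidatepage().
 */
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
}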
2342
2343/*
2344 * simple commit_write call; the page is marked dirty and, when the
2345 * write extends the file, i_size is updated and the inode marked dirty
2346 */
2347int extent_commit_write(struct extent_io_tree *tree,
2348 struct inode *inode, struct page *page,
2349 unsigned from, unsigned to)
2350{
2351 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2352
2353 set_page_extent_mapped(page);
2354 set_page_dirty(page);
2355
2356 if (pos > inode->i_size) {
2357 i_size_write(inode, pos);
2358 mark_inode_dirty(inode);
2359 }
2360 return 0;
2361}
2362EXPORT_SYMBOL(extent_commit_write);
2363
2364int extent_prepare_write(struct extent_io_tree *tree,
2365 struct inode *inode, struct page *page,
2366 unsigned from, unsigned to, get_extent_t *get_extent)
2367{
2368 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2369 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2370 u64 block_start;
2371 u64 orig_block_start;
2372 u64 block_end;
2373 u64 cur_end;
2374 struct extent_map *em;
2375 unsigned blocksize = 1 << inode->i_blkbits;
2376 size_t page_offset = 0;
2377 size_t block_off_start;
2378 size_t block_off_end;
2379 int err = 0;
2380 int iocount = 0;
2381 int ret = 0;
2382 int isnew;
2383
2384 set_page_extent_mapped(page);
2385
2386 block_start = (page_start + from) & ~((u64)blocksize - 1);
2387 block_end = (page_start + to - 1) | (blocksize - 1);
2388 orig_block_start = block_start;
2389
2390 lock_extent(tree, page_start, page_end, GFP_NOFS);
2391 while(block_start <= block_end) {
2392 em = get_extent(inode, page, page_offset, block_start,
2393 block_end - block_start + 1, 1);
2394 if (IS_ERR(em) || !em) {
2395 goto err;
2396 }
2397 cur_end = min(block_end, extent_map_end(em) - 1);
2398 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2399 block_off_end = block_off_start + blocksize;
2400 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2401
2402 if (!PageUptodate(page) && isnew &&
2403 (block_off_end > to || block_off_start < from)) {
2404 void *kaddr;
2405
2406 kaddr = kmap_atomic(page, KM_USER0);
2407 if (block_off_end > to)
2408 memset(kaddr + to, 0, block_off_end - to);
2409 if (block_off_start < from)
2410 memset(kaddr + block_off_start, 0,
2411 from - block_off_start);
2412 flush_dcache_page(page);
2413 kunmap_atomic(kaddr, KM_USER0);
2414 }
2415 if ((em->block_start != EXTENT_MAP_HOLE &&
2416 em->block_start != EXTENT_MAP_INLINE) &&
2417 !isnew && !PageUptodate(page) &&
2418 (block_off_end > to || block_off_start < from) &&
2419 !test_range_bit(tree, block_start, cur_end,
2420 EXTENT_UPTODATE, 1)) {
2421 u64 sector;
2422 u64 extent_offset = block_start - em->start;
2423 size_t iosize;
2424 sector = (em->block_start + extent_offset) >> 9;
2425 iosize = (cur_end - block_start + blocksize) &
2426 ~((u64)blocksize - 1);
2427 /*
2428 * we've already got the extent locked, but we
2429 * need to split the state such that our end_bio
2430 * handler can clear the lock.
2431 */
2432 set_extent_bit(tree, block_start,
2433 block_start + iosize - 1,
2434 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2435 ret = submit_extent_page(READ, tree, page,
2436 sector, iosize, page_offset, em->bdev,
2437 NULL, 1,
2438 end_bio_extent_preparewrite, 0);
2439 iocount++;
2440 block_start = block_start + iosize;
2441 } else {
2442 set_extent_uptodate(tree, block_start, cur_end,
2443 GFP_NOFS);
2444 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2445 block_start = cur_end + 1;
2446 }
2447 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2448 free_extent_map(em);
2449 }
2450 if (iocount) {
2451 wait_extent_bit(tree, orig_block_start,
2452 block_end, EXTENT_LOCKED);
2453 }
2454 check_page_uptodate(tree, page);
2455err:
2456 /* FIXME, zero out newly allocated blocks on error */
2457 return err;
2458}
2459EXPORT_SYMBOL(extent_prepare_write);
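/*
 * Usage sketch: extent_prepare_write() and extent_commit_write() pair up
 * as the ->prepare_write/->commit_write aops of this era (helper names
 * assumed as above).
 */
static int example_prepare_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return extent_prepare_write(&BTRFS_I(inode)->io_tree, inode, page,
				    from, to, btrfs_get_extent);
}

static int example_commit_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return extent_commit_write(&BTRFS_I(inode)->io_tree, inode, page,
				   from, to);
}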
2460
2461/*
2462 * a helper for releasepage. As long as there are no locked extents
2463 * in the range corresponding to the page, both state records and extent
2464 * map records are removed
2465 */
2466int try_release_extent_mapping(struct extent_map_tree *map,
2467 struct extent_io_tree *tree, struct page *page,
2468 gfp_t mask)
2469{
2470 struct extent_map *em;
2471 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2472 u64 end = start + PAGE_CACHE_SIZE - 1;
2473 u64 orig_start = start;
2474 int ret = 1;
2475 if ((mask & __GFP_WAIT) &&
2476 page->mapping->host->i_size > 16 * 1024 * 1024) {
2477 u64 len;
2478 while (start <= end) {
2479 len = end - start + 1;
2480 spin_lock(&map->lock);
2481 em = lookup_extent_mapping(map, start, len);
2482 if (!em || IS_ERR(em)) {
2483 spin_unlock(&map->lock);
2484 break;
2485 }
2486 if (em->start != start) {
2487 spin_unlock(&map->lock);
2488 free_extent_map(em);
2489 break;
2490 }
2491 if (!test_range_bit(tree, em->start,
2492 extent_map_end(em) - 1,
2493 EXTENT_LOCKED, 0)) {
2494 remove_extent_mapping(map, em);
2495 /* once for the rb tree */
2496 free_extent_map(em);
2497 }
2498 start = extent_map_end(em);
2499 spin_unlock(&map->lock);
2500
2501 /* once for us */
2502 free_extent_map(em);
2503 }
2504 }
2505 if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0))
2506 ret = 0;
2507 else {
2508 if ((mask & GFP_NOFS) == GFP_NOFS)
2509 mask = GFP_NOFS;
2510 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2511 1, 1, mask);
2512 }
2513 return ret;
2514}
2515EXPORT_SYMBOL(try_release_extent_mapping);
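/*
 * Usage sketch: a ->releasepage hook.  The io_tree/extent_tree field
 * names inside the btrfs inode are assumptions based on the wider code,
 * not defined in this file.
 */
static int example_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct inode *inode = page->mapping->host;

	return try_release_extent_mapping(&BTRFS_I(inode)->extent_tree,
					  &BTRFS_I(inode)->io_tree,
					  page, gfp_flags);
}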
2516
2517sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2518 get_extent_t *get_extent)
2519{
2520 struct inode *inode = mapping->host;
2521 u64 start = iblock << inode->i_blkbits;
2522 sector_t sector = 0;
2523 struct extent_map *em;
2524
2525 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2526 if (!em || IS_ERR(em))
2527 return 0;
2528
2529 if (em->block_start == EXTENT_MAP_INLINE ||
2530 em->block_start == EXTENT_MAP_HOLE)
2531 goto out;
2532
2533 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2534out:
2535 free_extent_map(em);
2536 return sector;
2537}
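/*
 * Usage sketch: ->bmap forwards straight through (btrfs_get_extent
 * assumed as above).
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return extent_bmap(mapping, block, btrfs_get_extent);
}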
2538
2539static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
2540{
2541 if (list_empty(&eb->lru)) {
2542 extent_buffer_get(eb);
2543 list_add(&eb->lru, &tree->buffer_lru);
2544 tree->lru_size++;
2545 if (tree->lru_size >= BUFFER_LRU_MAX) {
2546 struct extent_buffer *rm;
2547 rm = list_entry(tree->buffer_lru.prev,
2548 struct extent_buffer, lru);
2549 tree->lru_size--;
2550 list_del_init(&rm->lru);
2551 free_extent_buffer(rm);
2552 }
2553 } else
2554 list_move(&eb->lru, &tree->buffer_lru);
2555 return 0;
2556}
2557static struct extent_buffer *find_lru(struct extent_io_tree *tree,
2558 u64 start, unsigned long len)
2559{
2560 struct list_head *lru = &tree->buffer_lru;
2561 struct list_head *cur = lru->next;
2562 struct extent_buffer *eb;
2563
2564 if (list_empty(lru))
2565 return NULL;
2566
2567 do {
2568 eb = list_entry(cur, struct extent_buffer, lru);
2569 if (eb->start == start && eb->len == len) {
2570 extent_buffer_get(eb);
2571 return eb;
2572 }
2573 cur = cur->next;
2574 } while (cur != lru);
2575 return NULL;
2576}
2577
2578static inline unsigned long num_extent_pages(u64 start, u64 len)
2579{
2580 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2581 (start >> PAGE_CACHE_SHIFT);
2582}
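/*
 * Worked example (illustrative): with 4K pages, start 12288 and len 8192
 * cover bytes 12288..20479, i.e. page indices 3 and 4:
 * ((12288 + 8192 + 4095) >> 12) - (12288 >> 12) == 5 - 3 == 2 pages.
 */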
2583
2584static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2585 unsigned long i)
2586{
2587 struct page *p;
2588 struct address_space *mapping;
2589
2590 if (i == 0)
2591 return eb->first_page;
2592 i += eb->start >> PAGE_CACHE_SHIFT;
2593 mapping = eb->first_page->mapping;
2594 read_lock_irq(&mapping->tree_lock);
2595 p = radix_tree_lookup(&mapping->page_tree, i);
2596 read_unlock_irq(&mapping->tree_lock);
2597 return p;
2598}
2599
2600int release_extent_buffer_tail_pages(struct extent_buffer *eb)
2601{
2602 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2603 struct page *page;
2604 unsigned long i;
2605
2606 if (num_pages == 1)
2607 return 0;
2608 for (i = 1; i < num_pages; i++) {
2609 page = extent_buffer_page(eb, i);
2610 page_cache_release(page);
2611 }
2612 return 0;
2613}
2614
2615
2616int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
2617 unsigned long len)
2618{
2619 struct list_head *lru = &tree->buffer_lru;
2620 struct list_head *cur = lru->next;
2621 struct extent_buffer *eb;
2622 int found = 0;
2623
2624 spin_lock(&tree->lru_lock);
2625 if (list_empty(lru))
2626 goto out;
2627
2628 do {
2629 eb = list_entry(cur, struct extent_buffer, lru);
2630 if (eb->start <= start && eb->start + eb->len > start) {
2631 eb->flags &= ~EXTENT_UPTODATE;
2632 }
2633 cur = cur->next;
2634 } while (cur != lru);
2635out:
2636 spin_unlock(&tree->lru_lock);
2637 return found;
2638}
2639
2640static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2641 u64 start,
2642 unsigned long len,
2643 gfp_t mask)
2644{
2645 struct extent_buffer *eb = NULL;
2646 unsigned long flags;
2647
2648 spin_lock(&tree->lru_lock);
2649 eb = find_lru(tree, start, len);
2650 spin_unlock(&tree->lru_lock);
2651 if (eb) {
2652 return eb;
2653 }
2654
2655 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;	/* zalloc can fail; callers already check for NULL */
2656 INIT_LIST_HEAD(&eb->lru);
2657 eb->start = start;
2658 eb->len = len;
2659 spin_lock_irqsave(&leak_lock, flags);
2660 list_add(&eb->leak_list, &buffers);
2661 spin_unlock_irqrestore(&leak_lock, flags);
2662 atomic_set(&eb->refs, 1);
2663
2664 return eb;
2665}
2666
2667static void __free_extent_buffer(struct extent_buffer *eb)
2668{
2669 unsigned long flags;
2670 spin_lock_irqsave(&leak_lock, flags);
2671 list_del(&eb->leak_list);
2672 spin_unlock_irqrestore(&leak_lock, flags);
2673 kmem_cache_free(extent_buffer_cache, eb);
2674}
2675
2676struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2677 u64 start, unsigned long len,
2678 struct page *page0,
2679 gfp_t mask)
2680{
2681 unsigned long num_pages = num_extent_pages(start, len);
2682 unsigned long i;
2683 unsigned long index = start >> PAGE_CACHE_SHIFT;
2684 struct extent_buffer *eb;
2685 struct page *p;
2686 struct address_space *mapping = tree->mapping;
2687 int uptodate = 1;
2688
2689 eb = __alloc_extent_buffer(tree, start, len, mask);
2690 if (!eb)
2691 return NULL;
2692
2693 if (eb->flags & EXTENT_BUFFER_FILLED)
2694 goto lru_add;
2695
2696 if (page0) {
2697 eb->first_page = page0;
2698 i = 1;
2699 index++;
2700 page_cache_get(page0);
2701 mark_page_accessed(page0);
2702 set_page_extent_mapped(page0);
2703 set_page_extent_head(page0, len);
2704 uptodate = PageUptodate(page0);
2705 } else {
2706 i = 0;
2707 }
2708 for (; i < num_pages; i++, index++) {
2709 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2710 if (!p) {
2711 WARN_ON(1);
2712 goto fail;
2713 }
2714 set_page_extent_mapped(p);
2715 mark_page_accessed(p);
2716 if (i == 0) {
2717 eb->first_page = p;
2718 set_page_extent_head(p, len);
2719 } else {
2720 set_page_private(p, EXTENT_PAGE_PRIVATE);
2721 }
2722 if (!PageUptodate(p))
2723 uptodate = 0;
2724 unlock_page(p);
2725 }
2726 if (uptodate)
2727 eb->flags |= EXTENT_UPTODATE;
2728 eb->flags |= EXTENT_BUFFER_FILLED;
2729
2730lru_add:
2731 spin_lock(&tree->lru_lock);
2732 add_lru(tree, eb);
2733 spin_unlock(&tree->lru_lock);
2734 return eb;
2735
2736fail:
2737 spin_lock(&tree->lru_lock);
2738 list_del_init(&eb->lru);
2739 spin_unlock(&tree->lru_lock);
2740 if (!atomic_dec_and_test(&eb->refs))
2741 return NULL;
2742 for (index = 1; index < i; index++) {
2743 page_cache_release(extent_buffer_page(eb, index));
2744 }
2745 if (i > 0)
2746 page_cache_release(extent_buffer_page(eb, 0));
2747 __free_extent_buffer(eb);
2748 return NULL;
2749}
2750EXPORT_SYMBOL(alloc_extent_buffer);
2751
2752struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2753 u64 start, unsigned long len,
2754 gfp_t mask)
2755{
2756 unsigned long num_pages = num_extent_pages(start, len);
2757 unsigned long i;
2758 unsigned long index = start >> PAGE_CACHE_SHIFT;
2759 struct extent_buffer *eb;
2760 struct page *p;
2761 struct address_space *mapping = tree->mapping;
2762 int uptodate = 1;
2763
2764 eb = __alloc_extent_buffer(tree, start, len, mask);
2765 if (!eb)
2766 return NULL;
2767
2768 if (eb->flags & EXTENT_BUFFER_FILLED)
2769 goto lru_add;
2770
2771 for (i = 0; i < num_pages; i++, index++) {
2772 p = find_lock_page(mapping, index);
2773 if (!p) {
2774 goto fail;
2775 }
2776 set_page_extent_mapped(p);
2777 mark_page_accessed(p);
2778
2779 if (i == 0) {
2780 eb->first_page = p;
2781 set_page_extent_head(p, len);
2782 } else {
2783 set_page_private(p, EXTENT_PAGE_PRIVATE);
2784 }
2785
2786 if (!PageUptodate(p))
2787 uptodate = 0;
2788 unlock_page(p);
2789 }
2790 if (uptodate)
2791 eb->flags |= EXTENT_UPTODATE;
2792 eb->flags |= EXTENT_BUFFER_FILLED;
2793
2794lru_add:
2795 spin_lock(&tree->lru_lock);
2796 add_lru(tree, eb);
2797 spin_unlock(&tree->lru_lock);
2798 return eb;
2799fail:
2800 spin_lock(&tree->lru_lock);
2801 list_del_init(&eb->lru);
2802 spin_unlock(&tree->lru_lock);
2803 if (!atomic_dec_and_test(&eb->refs))
2804 return NULL;
2805 for (index = 1; index < i; index++) {
2806 page_cache_release(extent_buffer_page(eb, index));
2807 }
2808 if (i > 0)
2809 page_cache_release(extent_buffer_page(eb, 0));
2810 __free_extent_buffer(eb);
2811 return NULL;
2812}
2813EXPORT_SYMBOL(find_extent_buffer);
2814
2815void free_extent_buffer(struct extent_buffer *eb)
2816{
2817 unsigned long i;
2818 unsigned long num_pages;
2819
2820 if (!eb)
2821 return;
2822
2823 if (!atomic_dec_and_test(&eb->refs))
2824 return;
2825
2826 WARN_ON(!list_empty(&eb->lru));
2827 num_pages = num_extent_pages(eb->start, eb->len);
2828
2829 for (i = 1; i < num_pages; i++) {
2830 page_cache_release(extent_buffer_page(eb, i));
2831 }
2832 page_cache_release(extent_buffer_page(eb, 0));
2833 __free_extent_buffer(eb);
2834}
2835EXPORT_SYMBOL(free_extent_buffer);
2836
2837int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2838 struct extent_buffer *eb)
2839{
2840 int set;
2841 unsigned long i;
2842 unsigned long num_pages;
2843 struct page *page;
2844
2845 u64 start = eb->start;
2846 u64 end = start + eb->len - 1;
2847
2848 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2849 num_pages = num_extent_pages(eb->start, eb->len);
2850
2851 for (i = 0; i < num_pages; i++) {
2852 page = extent_buffer_page(eb, i);
2853 lock_page(page);
2854 if (i == 0)
2855 set_page_extent_head(page, eb->len);
2856 else
2857 set_page_private(page, EXTENT_PAGE_PRIVATE);
2858
2859 /*
2860 * if we're on the last page or the first page and the
2861 * block isn't aligned on a page boundary, do extra checks
2862 * to make sure we don't clean a page that is partially dirty
2863 */
2864 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2865 ((i == num_pages - 1) &&
2866 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2867 start = (u64)page->index << PAGE_CACHE_SHIFT;
2868 end = start + PAGE_CACHE_SIZE - 1;
2869 if (test_range_bit(tree, start, end,
2870 EXTENT_DIRTY, 0)) {
2871 unlock_page(page);
2872 continue;
2873 }
2874 }
2875 clear_page_dirty_for_io(page);
2876 read_lock_irq(&page->mapping->tree_lock);
2877 if (!PageDirty(page)) {
2878 radix_tree_tag_clear(&page->mapping->page_tree,
2879 page_index(page),
2880 PAGECACHE_TAG_DIRTY);
2881 }
2882 read_unlock_irq(&page->mapping->tree_lock);
2883 unlock_page(page);
2884 }
2885 return 0;
2886}
2887EXPORT_SYMBOL(clear_extent_buffer_dirty);
2888
2889int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2890 struct extent_buffer *eb)
2891{
2892 return wait_on_extent_writeback(tree, eb->start,
2893 eb->start + eb->len - 1);
2894}
2895EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2896
2897int set_extent_buffer_dirty(struct extent_io_tree *tree,
2898 struct extent_buffer *eb)
2899{
2900 unsigned long i;
2901 unsigned long num_pages;
2902
2903 num_pages = num_extent_pages(eb->start, eb->len);
2904 for (i = 0; i < num_pages; i++) {
2905 struct page *page = extent_buffer_page(eb, i);
2906 /* writepage may need to do something special for the
2907 * first page, we have to make sure page->private is
2908 * properly set. releasepage may drop page->private
2909 * on us if the page isn't already dirty.
2910 */
2911 if (i == 0) {
2912 lock_page(page);
2913 set_page_extent_head(page, eb->len);
2914 } else if (PagePrivate(page) &&
2915 page->private != EXTENT_PAGE_PRIVATE) {
2916 lock_page(page);
2917 set_page_extent_mapped(page);
2918 unlock_page(page);
2919 }
2920 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2921 if (i == 0)
2922 unlock_page(page);
2923 }
2924 return set_extent_dirty(tree, eb->start,
2925 eb->start + eb->len - 1, GFP_NOFS);
2926}
2927EXPORT_SYMBOL(set_extent_buffer_dirty);
2928
2929int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2930 struct extent_buffer *eb)
2931{
2932 unsigned long i;
2933 struct page *page;
2934 unsigned long num_pages;
2935
2936 num_pages = num_extent_pages(eb->start, eb->len);
2937
2938 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2939 GFP_NOFS);
2940 for (i = 0; i < num_pages; i++) {
2941 page = extent_buffer_page(eb, i);
2942 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2943 ((i == num_pages - 1) &&
2944 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2945 check_page_uptodate(tree, page);
2946 continue;
2947 }
2948 SetPageUptodate(page);
2949 }
2950 return 0;
2951}
2952EXPORT_SYMBOL(set_extent_buffer_uptodate);
2953
2954int extent_range_uptodate(struct extent_io_tree *tree,
2955 u64 start, u64 end)
2956{
2957 struct page *page;
2958 int ret;
2959 int pg_uptodate = 1;
2960 int uptodate;
2961 unsigned long index;
2962
2963 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
2964 if (ret)
2965 return 1;
2966 while(start <= end) {
2967 index = start >> PAGE_CACHE_SHIFT;
2968 page = find_get_page(tree->mapping, index);
	if (!page)
		return 0;	/* a missing page cannot be uptodate */
2969 uptodate = PageUptodate(page);
2970 page_cache_release(page);
2971 if (!uptodate) {
2972 pg_uptodate = 0;
2973 break;
2974 }
2975 start += PAGE_CACHE_SIZE;
2976 }
2977 return pg_uptodate;
2978}
2979
2980 int extent_buffer_uptodate(struct extent_io_tree *tree,
2981 struct extent_buffer *eb)
2982 {
2983 int ret = 0;
2984 int ret2;
2985 unsigned long num_pages;
2986 unsigned long i;
2987 struct page *page;
2988 int pg_uptodate = 1;
2989
2990 if (eb->flags & EXTENT_UPTODATE)
2991 ret = 1;
2992
2993 ret2 = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2994 EXTENT_UPTODATE, 1);
2995
2996 num_pages = num_extent_pages(eb->start, eb->len);
2997 for (i = 0; i < num_pages; i++) {
2998 page = extent_buffer_page(eb, i);
2999 if (!PageUptodate(page)) {
3000 pg_uptodate = 0;
3001 break;
3002 }
3003 }
3004 if ((ret || ret2) && !pg_uptodate) {
3005printk("uptodate error2 eb %Lu ret %d ret2 %d pg_uptodate %d\n", eb->start, ret, ret2, pg_uptodate);
3006 WARN_ON(1);
3007 }
3008 return (ret || ret2);
3009}
3010EXPORT_SYMBOL(extent_buffer_uptodate);
3011
3012int read_extent_buffer_pages(struct extent_io_tree *tree,
3013 struct extent_buffer *eb,
3014 u64 start, int wait,
3015 get_extent_t *get_extent, int mirror_num)
3016{
3017 unsigned long i;
3018 unsigned long start_i;
3019 struct page *page;
3020 int err;
3021 int ret = 0;
3022 int locked_pages = 0;
3023 int all_uptodate = 1;
3024 int inc_all_pages = 0;
3025 unsigned long num_pages;
3026 struct bio *bio = NULL;
3027
3028 if (eb->flags & EXTENT_UPTODATE)
3029 return 0;
3030
3031 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3032 EXTENT_UPTODATE, 1)) {
3033 return 0;
3034 }
3035
3036 if (start) {
3037 WARN_ON(start < eb->start);
3038 start_i = (start >> PAGE_CACHE_SHIFT) -
3039 (eb->start >> PAGE_CACHE_SHIFT);
3040 } else {
3041 start_i = 0;
3042 }
3043
3044 num_pages = num_extent_pages(eb->start, eb->len);
3045 for (i = start_i; i < num_pages; i++) {
3046 page = extent_buffer_page(eb, i);
3047 if (!wait) {
3048 if (TestSetPageLocked(page))
3049 goto unlock_exit;
3050 } else {
3051 lock_page(page);
3052 }
3053 locked_pages++;
3054 if (!PageUptodate(page)) {
3055 all_uptodate = 0;
3056 }
3057 }
3058 if (all_uptodate) {
3059 if (start_i == 0)
3060 eb->flags |= EXTENT_UPTODATE;
3061 goto unlock_exit;
3062 }
3063
3064 for (i = start_i; i < num_pages; i++) {
3065 page = extent_buffer_page(eb, i);
3066 if (inc_all_pages)
3067 page_cache_get(page);
3068 if (!PageUptodate(page)) {
3069 if (start_i == 0)
3070 inc_all_pages = 1;
3071 ClearPageError(page);
3072 err = __extent_read_full_page(tree, page,
3073 get_extent, &bio,
3074 mirror_num);
3075 if (err) {
3076 ret = err;
3077 }
3078 } else {
3079 unlock_page(page);
3080 }
3081 }
3082
3083 if (bio)
3084 submit_one_bio(READ, bio, mirror_num);
3085
3086 if (ret || !wait) {
3087 return ret;
3088 }
3089 for (i = start_i; i < num_pages; i++) {
3090 page = extent_buffer_page(eb, i);
3091 wait_on_page_locked(page);
3092 if (!PageUptodate(page)) {
3093 ret = -EIO;
3094 }
3095 }
3096 if (!ret)
3097 eb->flags |= EXTENT_UPTODATE;
3098 return ret;
3099
3100unlock_exit:
3101 i = start_i;
3102 while(locked_pages > 0) {
3103 page = extent_buffer_page(eb, i);
3104 i++;
3105 unlock_page(page);
3106 locked_pages--;
3107 }
3108 return ret;
3109}
3110EXPORT_SYMBOL(read_extent_buffer_pages);
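/*
 * Usage sketch: the mirror_num argument lets a caller retry a metadata
 * read from another copy after a checksum failure.  A simplified retry
 * loop follows; num_copies is a hypothetical count of available mirrors,
 * and a real caller would also clear the buffer's uptodate state and
 * re-verify checksums between attempts.
 */
static int example_read_with_retry(struct extent_io_tree *tree,
				   struct extent_buffer *eb,
				   get_extent_t *get_extent, int num_copies)
{
	int mirror;
	int ret = -EIO;

	for (mirror = 0; mirror <= num_copies && ret; mirror++) {
		ret = read_extent_buffer_pages(tree, eb, 0, 1,
					       get_extent, mirror);
	}
	return ret;
}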
3111
3112void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3113 unsigned long start,
3114 unsigned long len)
3115{
3116 size_t cur;
3117 size_t offset;
3118 struct page *page;
3119 char *kaddr;
3120 char *dst = (char *)dstv;
3121 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3122 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3123
3124 WARN_ON(start > eb->len);
3125 WARN_ON(start + len > eb->start + eb->len);
3126
3127 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3128
3129 while(len > 0) {
3130 page = extent_buffer_page(eb, i);
3131
3132 cur = min(len, (PAGE_CACHE_SIZE - offset));
3133 kaddr = kmap_atomic(page, KM_USER1);
3134 memcpy(dst, kaddr + offset, cur);
3135 kunmap_atomic(kaddr, KM_USER1);
3136
3137 dst += cur;
3138 len -= cur;
3139 offset = 0;
3140 i++;
3141 }
3142}
3143EXPORT_SYMBOL(read_extent_buffer);
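/*
 * Usage sketch: read_extent_buffer() copies raw bytes out of a (possibly
 * multi-page) metadata buffer.  struct btrfs_header and its generation
 * field live elsewhere in btrfs and are assumed here for illustration.
 */
static u64 example_read_generation(struct extent_buffer *eb)
{
	struct btrfs_header header;

	read_extent_buffer(eb, &header, 0, sizeof(header));
	return le64_to_cpu(header.generation);
}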
3144
3145int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3146 unsigned long min_len, char **token, char **map,
3147 unsigned long *map_start,
3148 unsigned long *map_len, int km)
3149{
3150 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3151 char *kaddr;
3152 struct page *p;
3153 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3154 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3155 unsigned long end_i = (start_offset + start + min_len - 1) >>
3156 PAGE_CACHE_SHIFT;
3157
3158 if (i != end_i)
3159 return -EINVAL;
3160
3161 if (i == 0) {
3162 offset = start_offset;
3163 *map_start = 0;
3164 } else {
3165 offset = 0;
3166 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3167 }
3168 if (start + min_len > eb->len) {
3169printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3170 WARN_ON(1);
3171 }
3172
3173 p = extent_buffer_page(eb, i);
3174 kaddr = kmap_atomic(p, km);
3175 *token = kaddr;
3176 *map = kaddr + offset;
3177 *map_len = PAGE_CACHE_SIZE - offset;
3178 return 0;
3179}
3180EXPORT_SYMBOL(map_private_extent_buffer);
3181
3182int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3183 unsigned long min_len,
3184 char **token, char **map,
3185 unsigned long *map_start,
3186 unsigned long *map_len, int km)
3187{
3188 int err;
3189 int save = 0;
3190 if (eb->map_token) {
3191 unmap_extent_buffer(eb, eb->map_token, km);
3192 eb->map_token = NULL;
3193 save = 1;
3194 }
3195 err = map_private_extent_buffer(eb, start, min_len, token, map,
3196 map_start, map_len, km);
3197 if (!err && save) {
3198 eb->map_token = *token;
3199 eb->kaddr = *map;
3200 eb->map_start = *map_start;
3201 eb->map_len = *map_len;
3202 }
3203 return err;
3204}
3205EXPORT_SYMBOL(map_extent_buffer);
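/*
 * Usage sketch: map_extent_buffer() hands back a kmap'd window that never
 * crosses a page boundary, and the returned token must be passed back to
 * unmap_extent_buffer().  The offset here is purely illustrative.
 */
static void example_peek(struct extent_buffer *eb, unsigned long off)
{
	char *token;
	char *map;
	unsigned long map_start;
	unsigned long map_len;

	if (map_extent_buffer(eb, off, sizeof(u32), &token, &map,
			      &map_start, &map_len, KM_USER0))
		return;
	/* map points at byte 'off'; at most map_len bytes are addressable */
	unmap_extent_buffer(eb, token, KM_USER0);
}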
3206
3207void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3208{
3209 kunmap_atomic(token, km);
3210}
3211EXPORT_SYMBOL(unmap_extent_buffer);
3212
3213int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3214 unsigned long start,
3215 unsigned long len)
3216{
3217 size_t cur;
3218 size_t offset;
3219 struct page *page;
3220 char *kaddr;
3221 char *ptr = (char *)ptrv;
3222 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3223 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3224 int ret = 0;
3225
3226 WARN_ON(start > eb->len);
3227 WARN_ON(start + len > eb->start + eb->len);
3228
3229 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3230
3231 while(len > 0) {
3232 page = extent_buffer_page(eb, i);
3233
3234 cur = min(len, (PAGE_CACHE_SIZE - offset));
3235
3236 kaddr = kmap_atomic(page, KM_USER0);
3237 ret = memcmp(ptr, kaddr + offset, cur);
3238 kunmap_atomic(kaddr, KM_USER0);
3239 if (ret)
3240 break;
3241
3242 ptr += cur;
3243 len -= cur;
3244 offset = 0;
3245 i++;
3246 }
3247 return ret;
3248}
3249EXPORT_SYMBOL(memcmp_extent_buffer);
3250
3251void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3252 unsigned long start, unsigned long len)
3253{
3254 size_t cur;
3255 size_t offset;
3256 struct page *page;
3257 char *kaddr;
3258 char *src = (char *)srcv;
3259 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3260 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3261
3262 WARN_ON(start > eb->len);
3263 WARN_ON(start + len > eb->start + eb->len);
3264
3265 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3266
3267 while(len > 0) {
3268 page = extent_buffer_page(eb, i);
3269 WARN_ON(!PageUptodate(page));
3270
3271 cur = min(len, PAGE_CACHE_SIZE - offset);
3272 kaddr = kmap_atomic(page, KM_USER1);
3273 memcpy(kaddr + offset, src, cur);
3274 kunmap_atomic(kaddr, KM_USER1);
3275
3276 src += cur;
3277 len -= cur;
3278 offset = 0;
3279 i++;
3280 }
3281}
3282EXPORT_SYMBOL(write_extent_buffer);
3283
3284void memset_extent_buffer(struct extent_buffer *eb, char c,
3285 unsigned long start, unsigned long len)
3286{
3287 size_t cur;
3288 size_t offset;
3289 struct page *page;
3290 char *kaddr;
3291 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3292 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3293
3294 WARN_ON(start > eb->len);
3295 WARN_ON(start + len > eb->start + eb->len);
3296
3297 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3298
3299 while(len > 0) {
3300 page = extent_buffer_page(eb, i);
3301 WARN_ON(!PageUptodate(page));
3302
3303 cur = min(len, PAGE_CACHE_SIZE - offset);
3304 kaddr = kmap_atomic(page, KM_USER0);
3305 memset(kaddr + offset, c, cur);
3306 kunmap_atomic(kaddr, KM_USER0);
3307
3308 len -= cur;
3309 offset = 0;
3310 i++;
3311 }
3312}
3313EXPORT_SYMBOL(memset_extent_buffer);
3314
3315void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3316 unsigned long dst_offset, unsigned long src_offset,
3317 unsigned long len)
3318{
3319 u64 dst_len = dst->len;
3320 size_t cur;
3321 size_t offset;
3322 struct page *page;
3323 char *kaddr;
3324 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3325 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3326
3327 WARN_ON(src->len != dst_len);
3328
3329 offset = (start_offset + dst_offset) &
3330 ((unsigned long)PAGE_CACHE_SIZE - 1);
3331
3332 while(len > 0) {
3333 page = extent_buffer_page(dst, i);
3334 WARN_ON(!PageUptodate(page));
3335
3336 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3337
3338 kaddr = kmap_atomic(page, KM_USER0);
3339 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3340 kunmap_atomic(kaddr, KM_USER0);
3341
3342 src_offset += cur;
3343 len -= cur;
3344 offset = 0;
3345 i++;
3346 }
3347}
3348EXPORT_SYMBOL(copy_extent_buffer);
3349
3350static void move_pages(struct page *dst_page, struct page *src_page,
3351 unsigned long dst_off, unsigned long src_off,
3352 unsigned long len)
3353{
3354 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3355 if (dst_page == src_page) {
3356 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3357 } else {
3358 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3359 char *p = dst_kaddr + dst_off + len;
3360 char *s = src_kaddr + src_off + len;
3361
3362 while (len--)
3363 *--p = *--s;
3364
3365 kunmap_atomic(src_kaddr, KM_USER1);
3366 }
3367 kunmap_atomic(dst_kaddr, KM_USER0);
3368}
3369
3370static void copy_pages(struct page *dst_page, struct page *src_page,
3371 unsigned long dst_off, unsigned long src_off,
3372 unsigned long len)
3373{
3374 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3375 char *src_kaddr;
3376
3377 if (dst_page != src_page)
3378 src_kaddr = kmap_atomic(src_page, KM_USER1);
3379 else
3380 src_kaddr = dst_kaddr;
3381
3382 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3383 kunmap_atomic(dst_kaddr, KM_USER0);
3384 if (dst_page != src_page)
3385 kunmap_atomic(src_kaddr, KM_USER1);
3386}
3387
3388void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3389 unsigned long src_offset, unsigned long len)
3390{
3391 size_t cur;
3392 size_t dst_off_in_page;
3393 size_t src_off_in_page;
3394 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3395 unsigned long dst_i;
3396 unsigned long src_i;
3397
3398 if (src_offset + len > dst->len) {
3399 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3400 src_offset, len, dst->len);
3401 BUG_ON(1);
3402 }
3403 if (dst_offset + len > dst->len) {
3404 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3405 dst_offset, len, dst->len);
3406 BUG_ON(1);
3407 }
3408
3409 while(len > 0) {
3410 dst_off_in_page = (start_offset + dst_offset) &
3411 ((unsigned long)PAGE_CACHE_SIZE - 1);
3412 src_off_in_page = (start_offset + src_offset) &
3413 ((unsigned long)PAGE_CACHE_SIZE - 1);
3414
3415 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3416 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3417
3418 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3419 src_off_in_page));
3420 cur = min_t(unsigned long, cur,
3421 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3422
3423 copy_pages(extent_buffer_page(dst, dst_i),
3424 extent_buffer_page(dst, src_i),
3425 dst_off_in_page, src_off_in_page, cur);
3426
3427 src_offset += cur;
3428 dst_offset += cur;
3429 len -= cur;
3430 }
3431}
3432EXPORT_SYMBOL(memcpy_extent_buffer);
3433
3434void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3435 unsigned long src_offset, unsigned long len)
3436{
3437 size_t cur;
3438 size_t dst_off_in_page;
3439 size_t src_off_in_page;
3440 unsigned long dst_end = dst_offset + len - 1;
3441 unsigned long src_end = src_offset + len - 1;
3442 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3443 unsigned long dst_i;
3444 unsigned long src_i;
3445
3446 if (src_offset + len > dst->len) {
3447 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3448 src_offset, len, dst->len);
3449 BUG_ON(1);
3450 }
3451 if (dst_offset + len > dst->len) {
3452 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3453 dst_offset, len, dst->len);
3454 BUG_ON(1);
3455 }
3456 if (dst_offset < src_offset) {
3457 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3458 return;
3459 }
3460 while(len > 0) {
3461 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3462 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3463
3464 dst_off_in_page = (start_offset + dst_end) &
3465 ((unsigned long)PAGE_CACHE_SIZE - 1);
3466 src_off_in_page = (start_offset + src_end) &
3467 ((unsigned long)PAGE_CACHE_SIZE - 1);
3468
3469 cur = min_t(unsigned long, len, src_off_in_page + 1);
3470 cur = min(cur, dst_off_in_page + 1);
3471 move_pages(extent_buffer_page(dst, dst_i),
3472 extent_buffer_page(dst, src_i),
3473 dst_off_in_page - cur + 1,
3474 src_off_in_page - cur + 1, cur);
3475
3476 dst_end -= cur;
3477 src_end -= cur;
3478 len -= cur;
3479 }
3480}
3481EXPORT_SYMBOL(memmove_extent_buffer);
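/*
 * Usage note (illustrative): when the ranges overlap and dst > src,
 * memmove_extent_buffer() copies back to front via move_pages(), just as
 * memmove() must.  For example, shifting 100 bytes up by 8 within one
 * buffer:
 *
 *	memmove_extent_buffer(eb, 8, 0, 100);
 *
 * The non-overlapping direction (dst < src) simply falls through to
 * memcpy_extent_buffer() above.
 */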