/* fs/btrfs/extent_io.c, at "Btrfs: Fix variables set but not read (bugs found by gcc 4.6)" */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("extent_buffers",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
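
/*
 * Illustrative sketch, not part of the original file: tree_search()
 * returns the node containing 'offset' when one exists, and otherwise
 * the first node beginning after it (or NULL past the end of the
 * tree).  A hypothetical helper showing how a caller can tell a direct
 * hit from a gap; tree->lock must be held:
 */
static inline int example_offset_in_hole(struct extent_io_tree *tree,
					 u64 offset)
{
	struct rb_node *node = tree_search(tree, offset);
	struct tree_entry *entry;

	if (!node)
		return 1;	/* past the last entry: a hole */
	entry = rb_entry(node, struct tree_entry, rb_node);
	/* on a miss we got the next entry, which starts beyond offset */
	return offset < entry->start;
}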

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			other->start = state->start;
			state->tree = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
			state = NULL;
		}
	}

	return 0;
}

static int set_state_cb(struct extent_io_tree *tree,
			struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		return tree->ops->set_bit_hook(tree->mapping->host,
					       state, bits);
	}

	return 0;
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;
	int bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;
	ret = set_state_cb(tree, state, bits);
	if (ret)
		return ret;

	if (bits_to_set & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	state->state |= bits_to_set;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		    u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		return tree->ops->split_extent_hook(tree->mapping->host,
						    orig, split);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits, int wake)
{
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret = state->state & bits_to_clear;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *next_node;
	struct rb_node *node;
	u64 last_end;
	int err;
	int set = 0;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start == start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, &bits, wake);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);
		if (wake)
			wake_up(&state->wq);

		set |= clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	if (state->end < end && prealloc && !need_resched())
		next_node = rb_next(&state->rb_node);
	else
		next_node = NULL;

	set |= clear_state_bit(tree, state, &bits, wake);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && next_node) {
		state = rb_entry(next_node, struct extent_state,
				 rb_node);
		if (state->start == start)
			goto hit_next;
	}
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
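
/*
 * Illustrative sketch, not part of the original file: the positive
 * return value of clear_extent_bit() reports whether any of the
 * requested bits were actually set somewhere in the range.  A
 * hypothetical caller clearing dirty accounting over a range:
 */
static inline int example_clear_dirty_range(struct extent_io_tree *tree,
					    u64 start, u64 end)
{
	/* wake == 0, delete == 0, no cached state */
	int was_dirty = clear_extent_bit(tree, start, end, EXTENT_DIRTY,
					 0, 0, NULL, GFP_NOFS);

	return was_dirty > 0;
}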

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock(&tree->lock);
			cond_resched();
			spin_lock(&tree->lock);
		}
	}
out:
	spin_unlock(&tree->lock);
	return 0;
}
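
/*
 * Illustrative note, not part of the original file: a caller that must
 * block until writeback has cleared on a range simply does
 *
 *	wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
 *
 * which is what the wait_on_extent_writeback() wrapper further down
 * expands to.  The loop above re-searches from 'start' after every
 * wakeup, so states freed while we slept are never dereferenced.
 */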

static int set_state_bits(struct extent_io_tree *tree,
			  struct extent_state *state,
			  int *bits)
{
	int ret;
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	ret = set_state_cb(tree, state, bits);
	if (ret)
		return ret;
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;

	return 0;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start == start && state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		err = set_state_bits(tree, state, &bits);
		if (err)
			goto out;

		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		if (start < end && prealloc && !need_resched()) {
			next_node = rb_next(node);
			if (next_node) {
				state = rb_entry(next_node, struct extent_state,
						 rb_node);
				if (state->start == start)
					goto hit_next;
			}
		}
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			err = set_state_bits(tree, state, &bits);
			if (err)
				goto out;
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		BUG_ON(err == -EEXIST);
		if (err) {
			prealloc = NULL;
			goto out;
		}
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		err = set_state_bits(tree, prealloc, &bits);
		if (err) {
			prealloc = NULL;
			goto out;
		}
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
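
/*
 * Illustrative note, not part of the original file: passing a bit in
 * 'exclusive_bits' makes set_extent_bit() fail with -EEXIST when any
 * part of the range already carries that bit, reporting the conflict
 * in *failed_start:
 *
 *	u64 failed_start;
 *	int err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
 *				 EXTENT_LOCKED, &failed_start, NULL, mask);
 *	if (err == -EEXIST) {
 *		// [failed_start, end] is at least partly locked already
 *	}
 *
 * lock_extent_bits() and try_lock_extent() below build on this.
 */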

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
			      0, NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      NULL, mask);
}

static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
			    gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
				NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      NULL, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, struct extent_state **cached_state,
				 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
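
/*
 * Illustrative sketch, not part of the original file: the wrappers
 * above fix the bit masks, so callers only choose the range, the GFP
 * mask and (for delalloc) an optional cached state.  A hypothetical
 * caller marking a range delalloc and dropping the cached reference:
 */
static inline int example_mark_delalloc(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	int ret;

	ret = set_extent_delalloc(tree, start, end, &cached, GFP_NOFS);
	if (cached)
		free_extent_state(cached);	/* drop the extra ref */
	return ret;
}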

/*
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
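
/*
 * Illustrative note, not part of the original file: the usual pattern
 * pairs lock_extent_bits() with unlock_extent_cached() (defined below)
 * so the state cached at lock time clears EXTENT_LOCKED without a
 * second tree search, exactly as find_lock_delalloc_range() does
 * further down:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, 0, &cached, GFP_NOFS);
 *	// ... operate on [start, end] ...
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 */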

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return lock_extent_bits(tree, start, end, 0, NULL, mask);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, mask);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				mask);
}

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}
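
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * walk over every range with a given bit set, built on the 0/1 return
 * convention of find_first_extent_bit():
 */
static inline u64 example_count_ranges(struct extent_io_tree *tree, int bits)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;
	u64 nr = 0;

	while (find_first_extent_bit(tree, start, &found_start,
				     &found_end, bits) == 0) {
		nr++;
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
	return nr;
}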

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

static noinline int __unlock_for_delalloc(struct inode *inode,
					  struct page *locked_page,
					  u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long, nr_pages,
					    ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			       PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end,
			 0, &cached_state, GFP_NOFS);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}

int extent_clear_unlock_delalloc(struct inode *inode,
				 struct extent_io_tree *tree,
				 u64 start, u64 end, struct page *locked_page,
				 unsigned long op)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int clear_bits = 0;

	if (op & EXTENT_CLEAR_UNLOCK)
		clear_bits |= EXTENT_LOCKED;
	if (op & EXTENT_CLEAR_DIRTY)
		clear_bits |= EXTENT_DIRTY;

	if (op & EXTENT_CLEAR_DELALLOC)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
		    EXTENT_SET_PRIVATE2)))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (op & EXTENT_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (op & EXTENT_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (op & EXTENT_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (op & EXTENT_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}
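
/*
 * Illustrative note, not part of the original file: callers compose
 * the 'op' mask from the EXTENT_CLEAR_* and EXTENT_SET_* flags to
 * finish a delalloc range in one pass.  A hypothetical error-path
 * cleanup that unlocks everything and ends writeback might pass:
 *
 *	extent_clear_unlock_delalloc(inode, tree, start, end, locked_page,
 *				     EXTENT_CLEAR_UNLOCK_PAGE |
 *				     EXTENT_CLEAR_UNLOCK |
 *				     EXTENT_CLEAR_DELALLOC |
 *				     EXTENT_CLEAR_DIRTY |
 *				     EXTENT_SET_WRITEBACK |
 *				     EXTENT_END_WRITEBACK);
 */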

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
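
/*
 * Illustrative sketch, not part of the original file: with *start == 0
 * and bits == EXTENT_DIRTY the function above short-circuits to the
 * cached tree->dirty_bytes counter, so whole-tree dirty accounting is
 * cheap:
 */
static inline u64 example_total_dirty_bytes(struct extent_io_tree *tree)
{
	u64 start = 0;

	return count_range_bits(tree, &start, (u64)-1, (u64)-1, EXTENT_DIRTY);
}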

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock(&tree->lock);
	return ret;
}
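
/*
 * Illustrative sketch, not part of the original file: btrfs stashes a
 * checksum in the private field of the extent_state covering a block;
 * both set and get must use the exact 'start' of that state or they
 * fail with -ENOENT.  A hypothetical round trip:
 */
static inline int example_stash_csum(struct extent_io_tree *tree,
				     u64 start, u32 csum)
{
	u64 out = 0;
	int ret = set_state_private(tree, start, (u64)csum);

	if (!ret)
		ret = get_state_private(tree, start, &out);
	if (ret)
		return ret;
	return out == csum ? 0 : -EIO;
}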

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && cached->tree && cached->start == start)
		node = &cached->rb_node;
	else
		node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio, int err)
{
	int uptodate = err == 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (tree->ops && tree->ops->writepage_end_io_hook) {
			ret = tree->ops->writepage_end_io_hook(page, start,
						       end, NULL, uptodate);
			if (ret)
				uptodate = 0;
		}

		if (!uptodate && tree->ops &&
		    tree->ops->writepage_io_failed_hook) {
			ret = tree->ops->writepage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate = (err == 0);
				continue;
			}
		}

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	if (err)
		uptodate = 0;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (++bvec <= bvec_end)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      NULL);
			if (ret)
				uptodate = 0;
		}
		if (!uptodate && tree->ops &&
		    tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate =
					test_bit(BIO_UPTODATE, &bio->bi_flags);
				if (err)
					uptodate = 0;
				continue;
			}
		}

		if (uptodate) {
			set_extent_uptodate(tree, start, end,
					    GFP_ATOMIC);
		}
		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else {
			if (uptodate) {
				check_page_uptodate(tree, page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			check_page_locked(tree, page);
		}
	} while (bvec <= bvec_end);

	bio_put(bio);
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static void end_bio_extent_preparewrite(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
			  unsigned long bio_flags)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
	end = start + bvec->bv_len - 1;

	bio->bi_private = NULL;

	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num, bio_flags, start);
	else
		submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}
1879
static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num,
			      unsigned long prev_bio_flags,
			      unsigned long bio_flags)
{
	int ret = 0;
	struct bio *bio;
	int nr;
	int contig = 0;
	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (old_compressed)
			contig = bio->bi_sector == sector;
		else
			contig = bio->bi_sector + (bio->bi_size >> 9) ==
				sector;

		if (prev_bio_flags != bio_flags || !contig ||
		    (tree->ops && tree->ops->merge_bio_hook &&
		     tree->ops->merge_bio_hook(page, offset, page_size, bio,
					       bio_flags)) ||
		    bio_add_page(bio, page, page_size, offset) < page_size) {
			ret = submit_one_bio(rw, bio, mirror_num,
					     prev_bio_flags);
			bio = NULL;
		} else {
			return 0;
		}
	}
	if (this_compressed)
		nr = BIO_MAX_PAGES;
	else
		nr = bio_get_nr_vecs(bdev);

	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);

	bio_add_page(bio, page, page_size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret)
		*bio_ret = bio;
	else
		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);

	return ret;
}

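/*
 * Merge check, worked through (illustrative): for an uncompressed bio the
 * code above treats a new page as contiguous only when it begins exactly
 * at the current end of the bio, measured in 512-byte sectors:
 *
 *	contig = bio->bi_sector + (bio->bi_size >> 9) == sector;
 *
 * e.g. a bio that started at sector 1000 and already holds 8K of data
 * (bi_size >> 9 == 16) only accepts a page whose sector is 1016.
 * Compressed bios instead compare against the starting sector alone,
 * since the whole compressed extent is submitted as one unit.
 */
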
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
	}
}

static void set_page_extent_head(struct page *page, unsigned long len)
{
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}

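/*
 * Encoding sketch (illustrative): the head page of an extent buffer
 * stores a marker plus the buffer length in page->private, with the
 * length shifted left by 2 to keep it clear of the low marker bits:
 *
 *	private = EXTENT_PAGE_PRIVATE_FIRST_PAGE | (len << 2);
 *	len     = private >> 2;		recovering the length
 *
 * Non-head pages simply carry EXTENT_PAGE_PRIVATE.
 */
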
/*
 * basic readpage implementation. Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_io_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio, int mirror_num,
				   unsigned long *bio_flags)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	struct btrfs_ordered_extent *ordered;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t disk_io_size;
	size_t blocksize = inode->i_sb->s_blocksize;
	unsigned long this_bio_flag = 0;

	set_page_extent_mapped(page);

	end = page_end;
	while (1) {
		lock_extent(tree, start, end, GFP_NOFS);
		ordered = btrfs_lookup_ordered_extent(inode, start);
		if (!ordered)
			break;
		unlock_extent(tree, start, end, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
		char *userpage;
		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);

		if (zero_offset) {
			iosize = PAGE_CACHE_SIZE - zero_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + zero_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
		}
	}
	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			iosize = PAGE_CACHE_SIZE - page_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur,
				end - cur + 1, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			this_bio_flag = EXTENT_BIO_COMPRESSED;

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
			disk_io_size = em->block_len;
			sector = em->block_start >> 9;
		} else {
			sector = (em->block_start + extent_offset) >> 9;
			disk_io_size = iosize;
		}
		bdev = em->bdev;
		block_start = em->block_start;
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			block_start = EXTENT_MAP_HOLE;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + page_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end,
				   EXTENT_UPTODATE, 1, NULL)) {
			check_page_uptodate(tree, page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* we have an inline extent but it didn't get marked
		 * uptodate.  Error out.
		 */
		if (block_start == EXTENT_MAP_INLINE) {
			SetPageError(page);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			pnr -= page->index;
			ret = submit_extent_page(READ, tree, page,
					 sector, disk_io_size, page_offset,
					 bdev, bio, pnr,
					 end_bio_extent_readpage, mirror_num,
					 *bio_flags,
					 this_bio_flag);
			nr++;
			*bio_flags = this_bio_flag;
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
				      &bio_flags);
	if (bio)
		submit_one_bio(READ, bio, 0, bio_flags);
	return ret;
}

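/*
 * Batching sketch (illustrative): callers that read many pages keep the
 * bio and bio_flags alive across calls so contiguous pages merge into a
 * single bio, submitting only at the end, roughly:
 *
 *	struct bio *bio = NULL;
 *	unsigned long bio_flags = 0;
 *
 *	for each page:
 *		__extent_read_full_page(tree, page, get_extent,
 *					&bio, 0, &bio_flags);
 *	if (bio)
 *		submit_one_bio(READ, bio, 0, bio_flags);
 *
 * extent_read_full_page() above is the single-page special case of this,
 * and extent_readpages() below is the batched one.
 */
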
static noinline void update_nr_written(struct page *page,
				       struct writeback_control *wbc,
				       unsigned long nr_written)
{
	wbc->nr_to_write -= nr_written;
	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
		page->mapping->writeback_index = page->index + nr_written;
}

/*
 * the writepage semantics are similar to regular writepage. extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback. Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_io_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 delalloc_start;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	u64 unlock_start;
	sector_t sector;
	struct extent_state *cached_state = NULL;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t pg_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;
	int page_started;
	int compressed;
	int write_flags;
	unsigned long nr_written = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		write_flags = WRITE_SYNC_PLUG;
	else
		write_flags = WRITE;

	WARN_ON(!PageLocked(page));
	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
	if (page->index > end_index ||
	   (page->index == end_index && !pg_offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		char *userpage;

		userpage = kmap_atomic(page, KM_USER0);
		memset(userpage + pg_offset, 0,
		       PAGE_CACHE_SIZE - pg_offset);
		kunmap_atomic(userpage, KM_USER0);
		flush_dcache_page(page);
	}
	pg_offset = 0;

	set_page_extent_mapped(page);

	delalloc_start = start;
	delalloc_end = 0;
	page_started = 0;
	if (!epd->extent_locked) {
		u64 delalloc_to_write = 0;
		/*
		 * make sure the wbc mapping index is at least updated
		 * to this page.
		 */
		update_nr_written(page, wbc, 0);

		while (delalloc_end < page_end) {
			nr_delalloc = find_lock_delalloc_range(inode, tree,
						       page,
						       &delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
			if (nr_delalloc == 0) {
				delalloc_start = delalloc_end + 1;
				continue;
			}
			tree->ops->fill_delalloc(inode, page, delalloc_start,
						 delalloc_end, &page_started,
						 &nr_written);
			/*
			 * delalloc_end is already one less than the total
			 * length, so we don't subtract one from
			 * PAGE_CACHE_SIZE
			 */
			delalloc_to_write += (delalloc_end - delalloc_start +
					      PAGE_CACHE_SIZE) >>
					      PAGE_CACHE_SHIFT;
			delalloc_start = delalloc_end + 1;
		}
		if (wbc->nr_to_write < delalloc_to_write) {
			int thresh = 8192;

			if (delalloc_to_write < thresh * 2)
				thresh = delalloc_to_write;
			wbc->nr_to_write = min_t(u64, delalloc_to_write,
						 thresh);
		}

		/* did the fill delalloc function already unlock and start
		 * the IO?
		 */
		if (page_started) {
			ret = 0;
			/*
			 * we've unlocked the page, so we can't update
			 * the mapping's writeback index, just update
			 * nr_to_write.
			 */
			wbc->nr_to_write -= nr_written;
			goto done_unlocked;
		}
	}
	if (tree->ops && tree->ops->writepage_start_hook) {
		ret = tree->ops->writepage_start_hook(page, start,
						      page_end);
		if (ret == -EAGAIN) {
			redirty_page_for_writepage(wbc, page);
			update_nr_written(page, wbc, nr_written);
			unlock_page(page);
			ret = 0;
			goto done_unlocked;
		}
	}

	/*
	 * we don't want to touch the inode after unlocking the page,
	 * so we update the mapping writeback index now
	 */
	update_nr_written(page, wbc, nr_written + 1);

	end = page_end;
	if (last_byte <= start) {
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start,
							 page_end, NULL, 1);
		unlock_start = page_end + 1;
		goto done;
	}

	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			if (tree->ops && tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, cur,
							 page_end, NULL, 1);
			unlock_start = page_end + 1;
			break;
		}
		em = epd->get_extent(inode, page, pg_offset, cur,
				     end - cur + 1, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);
		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		free_extent_map(em);
		em = NULL;

		/*
		 * compressed and inline extents are written through other
		 * paths in the FS
		 */
		if (compressed || block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			/*
			 * end_io notification does not happen here for
			 * compressed extents
			 */
			if (!compressed && tree->ops &&
			    tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, cur,
							 cur + iosize - 1,
							 NULL, 1);
			else if (compressed) {
				/* we don't want to end_page_writeback on
				 * a compressed extent. this happens
				 * elsewhere
				 */
				nr++;
			}

			cur += iosize;
			pg_offset += iosize;
			unlock_start = cur;
			continue;
		}
		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
				   EXTENT_DIRTY, 0, NULL)) {
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}

		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
						cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret) {
			SetPageError(page);
		} else {
			unsigned long max_nr = end_index + 1;

			set_range_writeback(tree, cur, cur + iosize - 1);
			if (!PageWriteback(page)) {
				printk(KERN_ERR "btrfs warning page %lu not "
				       "writeback, cur %llu end %llu\n",
				       page->index, (unsigned long long)cur,
				       (unsigned long long)end);
			}

			ret = submit_extent_page(write_flags, tree, page,
						 sector, iosize, pg_offset,
						 bdev, &epd->bio, max_nr,
						 end_bio_extent_writepage,
						 0, 0, 0);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		pg_offset += iosize;
		nr++;
	}
done:
	if (nr == 0) {
		/* make sure the mapping tag for page dirty gets cleared */
		set_page_writeback(page);
		end_page_writeback(page);
	}
	unlock_page(page);

done_unlocked:

	/* drop our reference on any cached states */
	free_extent_state(cached_state);
	return 0;
}

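/*
 * Accounting example (illustrative): delalloc_end is inclusive, so the
 * loop above counts pages per delalloc range as
 *
 *	(delalloc_end - delalloc_start + PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT
 *
 * e.g. with 4K pages a range [0, 0x2fff] yields (0x2fff + 0x1000) >> 12
 * = 3 pages, with no off-by-one from the inclusive end.
 */
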
/**
 * extent_write_cache_pages - walk the list of dirty pages of the given
 * address space and write all of them.
 * @tree: the extent_io_tree the pages belong to
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to the writepage function
 * @flush_fn: function called to flush any bio we have queued up
 *
 * If a page is already under I/O, extent_write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().
 * fsync() and msync() need to guarantee that all the data which was dirty
 * at the time the call was made gets new I/O started against it.  If
 * wbc->sync_mode is WB_SYNC_ALL then we were called for data integrity and
 * we must wait for existing IO to complete.
 */
static int extent_write_cache_pages(struct extent_io_tree *tree,
				    struct address_space *mapping,
				    struct writeback_control *wbc,
				    writepage_t writepage, void *data,
				    void (*flush_fn)(void *))
{
	int ret = 0;
	int done = 0;
	int nr_to_write_done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int range_whole = 0;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && !nr_to_write_done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			      PAGECACHE_TAG_DIRTY, min(end - index,
				  (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
				tree->ops->write_cache_pages_lock_hook(page);
			else
				lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE) {
				if (PageWriteback(page))
					flush_fn(data);
				wait_on_page_writeback(page);
			}

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret)
				done = 1;

			/*
			 * the filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time
			 */
			nr_to_write_done = wbc->nr_to_write <= 0;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	return ret;
}

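/*
 * Deadlock-avoidance sketch (illustrative): for data-integrity writeback
 * the loop above flushes its own pending bio before waiting on a page,
 * because the page it is about to wait on may still be queued in
 * epd->bio and would otherwise never complete:
 *
 *	if (wbc->sync_mode != WB_SYNC_NONE) {
 *		if (PageWriteback(page))
 *			flush_fn(data);		flush our queued bio first
 *		wait_on_page_writeback(page);
 *	}
 */
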
static void flush_epd_write_bio(struct extent_page_data *epd)
{
	if (epd->bio) {
		if (epd->sync_io)
			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
		else
			submit_one_bio(WRITE, epd->bio, 0, 0);
		epd->bio = NULL;
	}
}

static noinline void flush_write_bio(void *data)
{
	struct extent_page_data *epd = data;
	flush_epd_write_bio(epd);
}

int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent,
			  struct writeback_control *wbc)
{
	int ret;
	struct address_space *mapping = page->mapping;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
	};
	struct writeback_control wbc_writepages = {
		.sync_mode = wbc->sync_mode,
		.older_than_this = NULL,
		.nr_to_write = 64,
		.range_start = page_offset(page) + PAGE_CACHE_SIZE,
		.range_end = (loff_t)-1,
	};

	ret = __extent_writepage(page, wbc, &epd);

	extent_write_cache_pages(tree, mapping, &wbc_writepages,
				 __extent_writepage, &epd, flush_write_bio);
	flush_epd_write_bio(&epd);
	return ret;
}

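/*
 * Design note (illustrative): after writing the locked page itself,
 * extent_write_full_page() opportunistically writes up to 64 more dirty
 * pages that follow it (wbc_writepages.range_start is one page past the
 * current page), turning a single ->writepage call into a small batch of
 * mostly-sequential IO.
 */
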
int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
			      u64 start, u64 end, get_extent_t *get_extent,
			      int mode)
{
	int ret = 0;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 1,
		.sync_io = mode == WB_SYNC_ALL,
	};
	struct writeback_control wbc_writepages = {
		.sync_mode = mode,
		.older_than_this = NULL,
		.nr_to_write = nr_pages * 2,
		.range_start = start,
		.range_end = end + 1,
	};

	while (start <= end) {
		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
		if (clear_page_dirty_for_io(page))
			ret = __extent_writepage(page, &wbc_writepages, &epd);
		else {
			if (tree->ops && tree->ops->writepage_end_io_hook)
				tree->ops->writepage_end_io_hook(page, start,
						 start + PAGE_CACHE_SIZE - 1,
						 NULL, 1);
			unlock_page(page);
		}
		page_cache_release(page);
		start += PAGE_CACHE_SIZE;
	}

	flush_epd_write_bio(&epd);
	return ret;
}

int extent_writepages(struct extent_io_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret = 0;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
	};

	ret = extent_write_cache_pages(tree, mapping, wbc,
				       __extent_writepage, &epd,
				       flush_write_bio);
	flush_epd_write_bio(&epd);
	return ret;
}

int extent_readpages(struct extent_io_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	unsigned long bio_flags = 0;

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			__extent_read_full_page(tree, page, get_extent,
						&bio, 0, &bio_flags);
		}
		page_cache_release(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		submit_one_bio(READ, bio, 0, bio_flags);
	return 0;
}

/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset)
{
	struct extent_state *cached_state = NULL;
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
	wait_on_page_writeback(page);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING,
			 1, 1, &cached_state, GFP_NOFS);
	return 0;
}

/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_io_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}

int extent_prepare_write(struct extent_io_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end - block_start + 1, 1);
		if (IS_ERR(em) || !em)
			goto err;

		cur_end = min(block_end, extent_map_end(em) - 1);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if ((em->block_start != EXTENT_MAP_HOLE &&
		     em->block_start != EXTENT_MAP_INLINE) &&
		    !isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1, NULL)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
					 sector, iosize, page_offset, em->bdev,
					 NULL, 1,
					 end_bio_extent_preparewrite, 0,
					 0, 0);
			if (ret && !err)
				err = ret;
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}

/*
 * a helper for releasepage, this tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */
int try_release_extent_state(struct extent_map_tree *map,
			     struct extent_io_tree *tree, struct page *page,
			     gfp_t mask)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	int ret = 1;

	if (test_range_bit(tree, start, end,
			   EXTENT_IOBITS, 0, NULL))
		ret = 0;
	else {
		if ((mask & GFP_NOFS) == GFP_NOFS)
			mask = GFP_NOFS;
		/*
		 * at this point we can safely clear everything except the
		 * locked bit and the nodatasum bit
		 */
		clear_extent_bit(tree, start, end,
				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
				 0, 0, NULL, mask);
	}
	return ret;
}

/*
 * a helper for releasepage. As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if ((mask & __GFP_WAIT) &&
	    page->mapping->host->i_size > 16 * 1024 * 1024) {
		u64 len;
		while (start <= end) {
			len = end - start + 1;
			write_lock(&map->lock);
			em = lookup_extent_mapping(map, start, len);
			if (!em || IS_ERR(em)) {
				write_unlock(&map->lock);
				break;
			}
			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
			    em->start != start) {
				write_unlock(&map->lock);
				free_extent_map(em);
				break;
			}
			if (!test_range_bit(tree, em->start,
					    extent_map_end(em) - 1,
					    EXTENT_LOCKED | EXTENT_WRITEBACK,
					    0, NULL)) {
				remove_extent_mapping(map, em);
				/* once for the rb tree */
				free_extent_map(em);
			}
			start = extent_map_end(em);
			write_unlock(&map->lock);

			/* once for us */
			free_extent_map(em);
		}
	}
	return try_release_extent_state(map, tree, page, mask);
}

sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	struct extent_state *cached_state = NULL;
	u64 start = iblock << inode->i_blkbits;
	sector_t sector = 0;
	size_t blksize = (1 << inode->i_blkbits);
	struct extent_map *em;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
			 0, &cached_state, GFP_NOFS);
	em = get_extent(inode, NULL, 0, start, blksize, 0);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
			     start + blksize - 1, &cached_state, GFP_NOFS);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start > EXTENT_MAP_LAST_BYTE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}

int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len, get_extent_t *get_extent)
{
	int ret;
	u64 off = start;
	u64 max = start + len;
	u32 flags = 0;
	u64 disko = 0;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	int end = 0;
	u64 em_start = 0, em_len = 0;
	unsigned long emflags;
	ret = 0;

	if (len == 0)
		return -EINVAL;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
			 &cached_state, GFP_NOFS);
	em = get_extent(inode, NULL, 0, off, max - off, 0);
	if (!em)
		goto out;
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}
	while (!end) {
		off = em->start + em->len;
		if (off >= max)
			end = 1;

		em_start = em->start;
		em_len = em->len;

		disko = 0;
		flags = 0;

		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
			end = 1;
			flags |= FIEMAP_EXTENT_LAST;
		} else if (em->block_start == EXTENT_MAP_HOLE) {
			flags |= FIEMAP_EXTENT_UNWRITTEN;
		} else if (em->block_start == EXTENT_MAP_INLINE) {
			flags |= (FIEMAP_EXTENT_DATA_INLINE |
				  FIEMAP_EXTENT_NOT_ALIGNED);
		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		} else {
			disko = em->block_start;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			flags |= FIEMAP_EXTENT_ENCODED;

		emflags = em->flags;
		free_extent_map(em);
		em = NULL;

		if (!end) {
			em = get_extent(inode, NULL, 0, off, max - off, 0);
			if (!em)
				goto out;
			if (IS_ERR(em)) {
				ret = PTR_ERR(em);
				goto out;
			}
			emflags = em->flags;
		}
		if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}

		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
					      em_len, flags);
		if (ret)
			goto out_free;
	}
out_free:
	free_extent_map(em);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
			     &cached_state, GFP_NOFS);
	return ret;
}

static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	if (!mapping)
		return NULL;

	/*
	 * extent_buffer_page is only called after pinning the page
	 * by increasing the reference count. So we know the page must
	 * be in the radix tree.
	 */
	rcu_read_lock();
	p = radix_tree_lookup(&mapping->page_tree, i);
	rcu_read_unlock();

	return p;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

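/*
 * Worked example (illustrative, assuming 4K pages): a buffer with
 * start = 0x1000 and len = 0x3000 covers bytes [0x1000, 0x3fff],
 * i.e. pages 1..3:
 *
 *	((0x1000 + 0x3000 + 0xfff) >> 12) - (0x1000 >> 12) = 4 - 1 = 3
 */
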
static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	eb->start = start;
	eb->len = len;
	spin_lock_init(&eb->lock);
	init_waitqueue_head(&eb->lock_wq);
	INIT_RCU_HEAD(&eb->rcu_head);

#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&eb->leak_list, &buffers);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
#if LEAK_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&leak_lock, flags);
	list_del(&eb->leak_list);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	kmem_cache_free(extent_buffer_cache, eb);
}

/*
 * Helper for releasing extent buffer page.
 */
static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
						unsigned long start_idx)
{
	unsigned long index;
	struct page *page;

	if (!eb->first_page)
		return;

	index = num_extent_pages(eb->start, eb->len);
	if (start_idx >= index)
		return;

	do {
		index--;
		page = extent_buffer_page(eb, index);
		if (page)
			page_cache_release(page);
	} while (index != start_idx);
}

/*
 * Helper for releasing the extent buffer.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
	btrfs_release_extent_buffer_page(eb, 0);
	__free_extent_buffer(eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct extent_buffer *exists = NULL;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;
	int ret;

	rcu_read_lock();
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_page_accessed(eb->first_page);
		return eb;
	}
	rcu_read_unlock();

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb)
		return NULL;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		set_page_extent_head(page0, len);
		uptodate = PageUptodate(page0);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto free_eb;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_extent_head(p, len);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto free_eb;

	spin_lock(&tree->buffer_lock);
	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
	if (ret == -EEXIST) {
		exists = radix_tree_lookup(&tree->buffer,
						start >> PAGE_CACHE_SHIFT);
		/* add one reference for the caller */
		atomic_inc(&exists->refs);
		spin_unlock(&tree->buffer_lock);
		radix_tree_preload_end();
		goto free_eb;
	}
	/* add one reference for the tree */
	atomic_inc(&eb->refs);
	spin_unlock(&tree->buffer_lock);
	radix_tree_preload_end();
	return eb;

free_eb:
	if (!atomic_dec_and_test(&eb->refs))
		return exists;
	btrfs_release_extent_buffer(eb);
	return exists;
}

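/*
 * Race-handling sketch (illustrative): alloc_extent_buffer() follows the
 * usual RCU lookup / preload / locked-insert pattern.  If two tasks race
 * to create the same buffer, radix_tree_insert() returns -EEXIST for the
 * loser, which then takes a reference on the winner's buffer and frees
 * its own copy:
 *
 *	radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
 *	spin_lock(&tree->buffer_lock);
 *	ret = radix_tree_insert(&tree->buffer, index, eb);
 *	if (ret == -EEXIST)
 *		... take a ref on the existing buffer, drop ours ...
 */
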
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_page_accessed(eb->first_page);
		return eb;
	}
	rcu_read_unlock();

	return NULL;
}

void free_extent_buffer(struct extent_buffer *eb)
{
	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	WARN_ON(1);
}

int clear_extent_buffer_dirty(struct extent_io_tree *tree,
			      struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageDirty(page))
			continue;

		lock_page(page);
		if (i == 0)
			set_page_extent_head(page, eb->len);
		else
			set_page_private(page, EXTENT_PAGE_PRIVATE);

		clear_page_dirty_for_io(page);
		spin_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&page->mapping->tree_lock);
		unlock_page(page);
	}
	return 0;
}

int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}

int set_extent_buffer_dirty(struct extent_io_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	int was_dirty = 0;

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++)
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
	return was_dirty;
}

int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
				 struct extent_buffer *eb,
				 struct extent_state **cached_state)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);

	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			      cached_state, GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (page)
			ClearPageUptodate(page);
	}
	return 0;
}

3291
d1310b2e
CM
3292int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3293 struct extent_buffer *eb)
3294{
3295 unsigned long i;
3296 struct page *page;
3297 unsigned long num_pages;
3298
3299 num_pages = num_extent_pages(eb->start, eb->len);
3300
3301 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3302 GFP_NOFS);
3303 for (i = 0; i < num_pages; i++) {
3304 page = extent_buffer_page(eb, i);
3305 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3306 ((i == num_pages - 1) &&
3307 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3308 check_page_uptodate(tree, page);
3309 continue;
3310 }
3311 SetPageUptodate(page);
3312 }
3313 return 0;
3314}
d1310b2e 3315
int extent_range_uptodate(struct extent_io_tree *tree,
			  u64 start, u64 end)
{
	struct page *page;
	int ret;
	int pg_uptodate = 1;
	int uptodate;
	unsigned long index;

	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
	if (ret)
		return 1;
	while (start <= end) {
		index = start >> PAGE_CACHE_SHIFT;
		page = find_get_page(tree->mapping, index);
		uptodate = PageUptodate(page);
		page_cache_release(page);
		if (!uptodate) {
			pg_uptodate = 0;
			break;
		}
		start += PAGE_CACHE_SIZE;
	}
	return pg_uptodate;
}

int extent_buffer_uptodate(struct extent_io_tree *tree,
			   struct extent_buffer *eb,
			   struct extent_state *cached_state)
{
	int ret = 0;
	unsigned long num_pages;
	unsigned long i;
	struct page *page;
	int pg_uptodate = 1;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 1;

	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			   EXTENT_UPTODATE, 1, cached_state);
	if (ret)
		return ret;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			pg_uptodate = 0;
			break;
		}
	}
	return pg_uptodate;
}

int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb,
			     u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	int inc_all_pages = 0;
	unsigned long num_pages;
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;

	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			   EXTENT_UPTODATE, 1, NULL)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!wait) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page))
			all_uptodate = 0;
	}
	if (all_uptodate) {
		if (start_i == 0)
			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
		goto unlock_exit;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (inc_all_pages)
			page_cache_get(page);
		if (!PageUptodate(page)) {
			if (start_i == 0)
				inc_all_pages = 1;
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num, &bio_flags);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (bio)
		submit_one_bio(READ, bio, mirror_num, bio_flags);

	if (ret || !wait)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}

	if (!ret)
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}

	if (start + min_len > eb->len) {
		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
		       "wanted %lu %lu\n", (unsigned long long)eb->start,
		       eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}

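/*
 * Constraint example (illustrative): the mapping must not cross a page
 * boundary, which is what the i != end_i check above enforces.  With 4K
 * pages, mapping 64 bytes at offset 4090 of a page-aligned buffer spans
 * pages 0 and 1 and fails with -EINVAL; a caller hitting that case has
 * to fall back to read_extent_buffer()/write_extent_buffer() instead.
 */
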
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;
	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

3702static void move_pages(struct page *dst_page, struct page *src_page,
3703 unsigned long dst_off, unsigned long src_off,
3704 unsigned long len)
3705{
3706 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3707 if (dst_page == src_page) {
3708 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3709 } else {
3710 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3711 char *p = dst_kaddr + dst_off + len;
3712 char *s = src_kaddr + src_off + len;
3713
3714 while (len--)
3715 *--p = *--s;
3716
3717 kunmap_atomic(src_kaddr, KM_USER1);
3718 }
3719 kunmap_atomic(dst_kaddr, KM_USER0);
3720}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}
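
/*
 * copy_pages() uses memcpy() even when dst_page == src_page, so its
 * callers must guarantee the two ranges do not overlap within a single
 * page: overlapping memcpy() is undefined behavior.  A defensive
 * variant would fall back to memmove() for the same-page case; the
 * function below is a sketch of that hardening, not part of this file.
 */
static void copy_pages_safe(struct page *dst_page, struct page *src_page,
			    unsigned long dst_off, unsigned long src_off,
			    unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page) {
		src_kaddr = kmap_atomic(src_page, KM_USER1);
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
		kunmap_atomic(src_kaddr, KM_USER1);
	} else {
		/* same page: the ranges may overlap, memmove() is required */
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}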

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memcpy bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memcpy bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
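
/*
 * A worked example of the double clamp on `cur` above (assuming 4K
 * pages and start_offset == 0): with src_offset == 10, dst_offset ==
 * 4090 and len == 100, the first pass copies min(100, 4096 - 10,
 * 4096 - 4090) == 6 bytes, after which the destination is page aligned
 * and larger steps follow.  A hypothetical non-overlapping caller:
 */
static void example_copy_within_eb(struct extent_buffer *eb,
				   unsigned long dst, unsigned long src,
				   unsigned long nr)
{
	/* only valid when [dst, dst + nr) does not overlap [src, src + nr) */
	memcpy_extent_buffer(eb, dst, src, nr);
}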

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		/* moving left: a forward copy cannot clobber unread bytes */
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
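
/*
 * A sketch of the overlapping case memmove_extent_buffer() exists for
 * (hypothetical caller and offsets, in the spirit of the item-shifting
 * code in ctree.c): sliding `nr` bytes right by `slot_size` to make
 * room, where source and destination overlap and the tail-first loop
 * above keeps the copy correct.
 */
static void example_make_room(struct extent_buffer *leaf,
			      unsigned long data_start,
			      unsigned long nr, unsigned long slot_size)
{
	memmove_extent_buffer(leaf, data_start + slot_size, data_start, nr);
}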

static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
		container_of(head, struct extent_buffer, rcu_head);

	btrfs_release_extent_buffer(eb);
}

int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;

	spin_lock(&tree->buffer_lock);
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (!eb)
		goto out;

	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		ret = 0;
		goto out;
	}

	/*
	 * Drop the refcount from 1 to 0 only if we hold the last
	 * reference; otherwise someone else is still using the buffer
	 * and it must stay in the tree.
	 */
	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
		ret = 0;
		goto out;
	}

	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
out:
	spin_unlock(&tree->buffer_lock);

	/*
	 * At this point the buffer is out of the tree, so once the last
	 * ref is gone it can safely be freed after a grace period.  The
	 * eb check guards the lookup-failure path above.
	 */
	if (eb && atomic_read(&eb->refs) == 0)
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
	return ret;
}
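
/*
 * A sketch of how try_release_extent_buffer() is typically wired up
 * (in the spirit of the btree address_space in disk-io.c; the callback
 * below is hypothetical): the VM invokes ->releasepage when it wants
 * to drop a page, and the page may only be released once its extent
 * buffer is gone.  gfp_flags is unused in this simplified version.
 */
static int example_btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return try_release_extent_buffer(tree, page);
}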