/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"

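/*
 * map_lookup is the in-memory mirror of a chunk item: one
 * btrfs_bio_stripe per stripe, hung off an extent_map through em->bdev.
 * map_lookup_size(n) is the allocation size of an n-stripe map, e.g.
 * map_lookup_size(4) == sizeof(struct map_lookup) +
 *			 4 * sizeof(struct btrfs_bio_stripe).
 */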
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

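/*
 * chunk allocation and device changes nest the fs-wide alloc_mutex
 * and chunk_mutex, always in that order.
 */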
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->alloc_mutex);
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->alloc_mutex);
	mutex_unlock(&root->fs_info->chunk_mutex);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while (!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev) {
				close_bdev_excl(dev->bdev);
				fs_devices->open_devices--;
			}
			list_del(&dev->dev_list);
			kfree(dev->name);
			kfree(dev);
		}
	}
	return 0;
}

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run = 0;

	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
loop:
	spin_lock(&device->io_lock);

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	pending = device->pending_bios;
	tail = device->pending_bio_tail;
	WARN_ON(pending && !tail);
	device->pending_bios = NULL;
	device->pending_bio_tail = NULL;

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (pending) {
		again = 1;
		device->running_pending = 1;
	} else {
		again = 0;
		device->running_pending = 0;
	}
	spin_unlock(&device->io_lock);

	while (pending) {
		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&device->dev_root->fs_info->nr_async_submits);
		submit_bio(cur->bi_rw, cur);
		num_run++;

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && num_run && bdi_write_congested(bdi)) {
			struct bio *old_head;

			spin_lock(&device->io_lock);
			old_head = device->pending_bios;
			device->pending_bios = pending;
			if (device->pending_bio_tail)
				tail->bi_next = old_head;
			else
				device->pending_bio_tail = tail;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}
	if (again)
		goto loop;
done:
	return 0;
}

void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

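/*
 * record a scanned device in the in-memory list.  The btrfs_fs_devices
 * entry for its fsid is created the first time that fsid is seen, and
 * the device with the highest generation becomes the "latest" copy
 * used at mount time.  Callers in this file hold uuid_mutex.
 */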
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

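/*
 * drop devices that were scanned but never found in the filesystem
 * metadata, so stale scan entries don't keep block devices open.
 */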
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
again:
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->in_fs_metadata) {
			struct block_device *bdev;
			list_del(&device->dev_list);
			list_del(&device->dev_alloc_list);
			fs_devices->num_devices--;
			if (device->bdev) {
				bdev = device->bdev;
				fs_devices->open_devices--;
				mutex_unlock(&uuid_mutex);
				close_bdev_excl(bdev);
				mutex_lock(&uuid_mutex);
			}
			kfree(device->name);
			kfree(device);
			goto again;
		}
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
			fs_devices->open_devices--;
		}
		device->bdev = NULL;
		device->in_fs_metadata = 0;
	}
	fs_devices->mounted = 0;
	mutex_unlock(&uuid_mutex);
	return 0;
}

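/*
 * open every scanned device for this fsid and remember the copy with
 * the highest generation as latest_bdev.  Devices that fail to open
 * or whose superblock doesn't match are skipped; -EIO is returned
 * only when nothing at all could be opened.
 */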
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 transid;
	u64 devid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	if (fs_devices->mounted)
		goto out;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev)
			continue;

		if (!device->name)
			continue;

		bdev = open_bdev_excl(device->name, flags, holder);

		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic)))
			goto error_brelse;

		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)
			goto error_brelse;

		transid = btrfs_super_generation(disk_super);
		if (!latest_transid || transid > latest_transid) {
			latest_devid = devid;
			latest_transid = transid;
			latest_bdev = bdev;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		fs_devices->open_devices++;
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_excl(bdev);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->mounted = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}

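/*
 * read the superblock from one device and, if it is a btrfs super,
 * register the device with device_list_add() so it is known before
 * the filesystem is mounted.
 */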
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk("device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				 last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}

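/*
 * remove the dev extent item found at @start and give the space back
 * to the device by shrinking bytes_used by the extent length.
 */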
int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}

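/*
 * carve num_bytes out of the device, recording a dev extent item that
 * points back at the chunk that owns it.
 */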
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret) {
		goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

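/*
 * return, in *offset, the first unused logical address after the last
 * chunk item for @objectid, or 0 if the chunk tree has no entries yet.
 */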
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

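/*
 * devids are handed out sequentially: the next free one is one past
 * the highest existing DEV_ITEM key offset, or 1 for an empty tree.
 */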
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

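/*
 * delete the dev item of a (by now zero sized) device from the chunk
 * root, unhook it from the in-memory lists and fix up latest_bdev and
 * the superblock device count.  Runs its own transaction.
 */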
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct block_device *bdev = device->bdev;
	struct btrfs_device *next_dev;
	struct btrfs_key key;
	u64 total_bytes;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	list_del_init(&device->dev_list);
	list_del_init(&device->dev_alloc_list);
	fs_devices = root->fs_info->fs_devices;

	next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
			      dev_list);
	if (bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_dev->bdev;
	if (bdev == fs_devices->latest_bdev)
		fs_devices->latest_bdev = next_dev->bdev;

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes - 1);
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

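/*
 * the "btrfs device delete" path: verify the RAID profile can survive
 * with one device less, shrink the device to zero, remove its dev item
 * and wipe the superblock magic so the disk stops being detected as
 * part of the FS.  The magic path "missing" picks a device that is in
 * the metadata but has no backing bdev.
 */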
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
		printk("btrfs: unable to go below four devices on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
		printk("btrfs: unable to go below two devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *cur;
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		list_for_each(cur, devices) {
			tmp = list_entry(cur, struct btrfs_device, dev_list);
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk("btrfs: no missing devices found to remove\n");
			goto out;
		}

	} else {
		bdev = open_bdev_excl(device_path, 0,
				      root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
		if (!bh) {
			ret = -EIO;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic))) {
			ret = -ENOENT;
			goto error_brelse;
		}
		if (memcmp(disk_super->fsid, root->fs_info->fsid,
			   BTRFS_FSID_SIZE)) {
			ret = -ENOENT;
			goto error_brelse;
		}
		devid = le64_to_cpu(disk_super->dev_item.devid);
		device = btrfs_find_device(root, devid, NULL);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}

	}
	root->fs_info->fs_devices->num_devices--;
	root->fs_info->fs_devices->open_devices--;

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	if (bh) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);

		brelse(bh);
	}

	if (device->bdev) {
		/* one close for the device struct or super_block */
		close_bdev_excl(device->bdev);
	}
	if (bdev) {
		/* one close for us */
		close_bdev_excl(bdev);
	}
	kfree(device->name);
	kfree(device);
	ret = 0;
	goto out;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		close_bdev_excl(bdev);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}

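/*
 * the "btrfs device add" path: open the new disk, insert its dev item
 * inside a transaction and grow the superblock totals to cover it.
 */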
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	u64 total_bytes;
	int ret = 0;

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
	/* open_bdev_excl() returns an ERR_PTR, never NULL, on failure */
	if (IS_ERR(bdev)) {
		return PTR_ERR(bdev);
	}

	mutex_lock(&root->fs_info->volume_mutex);

	trans = btrfs_start_transaction(root, 1);
	lock_chunks(root);
	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto out;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto out_close_bdev;
	}

	device->barriers = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto out_close_bdev;
	}
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;

	ret = btrfs_add_device(trans, root, device);
	if (ret)
		goto out_close_bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
out:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->volume_mutex);

	return ret;

out_close_bdev:
	close_bdev_excl(bdev);
	goto out;
}

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

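/*
 * system chunks are duplicated in the superblock's sys_chunk_array;
 * deleting one means sliding the tail of the array down over it with
 * memmove() and shrinking the recorded array size.
 */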
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

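/*
 * relocating a chunk is a two step process: copy all the live extents
 * out of it, then drop its dev extents, its chunk item, its mapping
 * and (for system chunks) its sys_chunk_array entry.
 */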
int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	printk("btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	spin_unlock(&em_tree->lock);

	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

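/*
 * balance: briefly shrink and re-grow each nearly-full device by up to
 * 1MB (or 10% of the device, whichever is smaller) to make room, then
 * relocate every chunk from the highest key downwards so the allocator
 * writes it out again.  Chunk zero is special and is never moved.
 */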
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *cur;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}

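/*
 * append a copy of a SYSTEM chunk item to the superblock's
 * sys_chunk_array, which is what lets the chunk tree itself be read
 * at mount time.
 */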
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

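/*
 * logical bytes a chunk provides for a given stripe size: mirrored
 * profiles (RAID1/DUP) give one stripe's worth, RAID10 gives one per
 * pair of sub stripes, plain striping one per stripe.
 */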
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}

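/*
 * allocate a new chunk: pick a stripe size for the block group type,
 * collect enough devices with that much free space onto a private
 * list, then create the dev extents, the chunk item and the in-memory
 * extent_map that mirrors it.  If the first pass can't find
 * num_stripes devices, the stripe count or size is lowered and the
 * search retried.
 */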
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = extent_root->fs_info->fs_devices->open_devices;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
			    extent_root->fs_info->fs_devices->open_devices);
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = extent_root->fs_info->fs_devices->open_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free,
						   &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);
	path = NULL;

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
			     chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

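/*
 * how many copies of the range exist: num_stripes for DUP/RAID1,
 * sub_stripes for RAID10, otherwise one.
 */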
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

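/*
 * map a logical extent onto its physical stripes: look up the chunk's
 * extent_map, clamp *length so a bio never crosses a stripe boundary,
 * and fill the multi_bio with every mirror for writes or one live
 * mirror for reads.
 */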
f2d8d74d
CM
1874static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1875 u64 logical, u64 *length,
1876 struct btrfs_multi_bio **multi_ret,
1877 int mirror_num, struct page *unplug_page)
0b86a832
CM
1878{
1879 struct extent_map *em;
1880 struct map_lookup *map;
1881 struct extent_map_tree *em_tree = &map_tree->map_tree;
1882 u64 offset;
593060d7
CM
1883 u64 stripe_offset;
1884 u64 stripe_nr;
1885 int stripes_allocated = 8;
1886 int stripes_required = 1;
1887 int stripe_index;
1888 int i;
1889 int num_stripes;
1890 int max_errors = 0;
1891 struct btrfs_multi_bio *multi = NULL;
1892
1893 if (multi_ret && !(rw & (1 << BIO_RW))) {
1894 stripes_allocated = 1;
1895 }
1896again:
1897 if (multi_ret) {
1898 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
1899 GFP_NOFS);
1900 if (!multi)
1901 return -ENOMEM;
1902
1903 atomic_set(&multi->error, 0);
1904 }
1905
1906 spin_lock(&em_tree->lock);
1907 em = lookup_extent_mapping(em_tree, logical, *length);
1908 spin_unlock(&em_tree->lock);
1909
1910 if (!em && unplug_page)
1911 return 0;
1912
1913 if (!em) {
1914 printk("unable to find logical %Lu len %Lu\n", logical, *length);
1915 BUG();
1916 }
1917
1918 BUG_ON(em->start > logical || em->start + em->len < logical);
1919 map = (struct map_lookup *)em->bdev;
1920 offset = logical - em->start;
1921
1922 if (mirror_num > map->num_stripes)
1923 mirror_num = 0;
1924
1925 /* if our multi bio struct is too small, back off and try again */
1926 if (rw & (1 << BIO_RW)) {
1927 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
1928 BTRFS_BLOCK_GROUP_DUP)) {
1929 stripes_required = map->num_stripes;
1930 max_errors = 1;
1931 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1932 stripes_required = map->sub_stripes;
1933 max_errors = 1;
1934 }
1935 }
1936 if (multi_ret && rw == WRITE &&
1937 stripes_allocated < stripes_required) {
1938 stripes_allocated = map->num_stripes;
1939 free_extent_map(em);
1940 kfree(multi);
1941 goto again;
1942 }
1943 stripe_nr = offset;
1944 /*
1945 * stripe_nr counts the total number of stripes we have to stride
1946 * to get to this block
1947 */
1948 do_div(stripe_nr, map->stripe_len);
1949
1950 stripe_offset = stripe_nr * map->stripe_len;
1951 BUG_ON(offset < stripe_offset);
1952
1953 /* stripe_offset is the offset of this block in its stripe */
1954 stripe_offset = offset - stripe_offset;
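/*
 * Worked example (illustrative numbers, not in the original source):
 * with stripe_len == 64K and offset == 200K, do_div() leaves
 * stripe_nr == 3, the stripe starts at 3 * 64K == 192K, and
 * stripe_offset becomes 200K - 192K == 8K into that stripe.
 */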
1955
1956 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
1957 BTRFS_BLOCK_GROUP_RAID10 |
1958 BTRFS_BLOCK_GROUP_DUP)) {
1959 /* we limit the length of each bio to what fits in a stripe */
1960 *length = min_t(u64, em->len - offset,
1961 map->stripe_len - stripe_offset);
1962 } else {
1963 *length = em->len - offset;
1964 }
1965
1966 if (!multi_ret && !unplug_page)
1967 goto out;
1968
1969 num_stripes = 1;
1970 stripe_index = 0;
1971 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1972 if (unplug_page || (rw & (1 << BIO_RW)))
1973 num_stripes = map->num_stripes;
1974 else if (mirror_num)
1975 stripe_index = mirror_num - 1;
1976 else {
1977 stripe_index = find_live_mirror(map, 0,
1978 map->num_stripes,
1979 current->pid % map->num_stripes);
1980 }
1981
1982 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1983 if (rw & (1 << BIO_RW))
1984 num_stripes = map->num_stripes;
1985 else if (mirror_num)
1986 stripe_index = mirror_num - 1;
1987
1988 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1989 int factor = map->num_stripes / map->sub_stripes;
1990
1991 stripe_index = do_div(stripe_nr, factor);
1992 stripe_index *= map->sub_stripes;
1993
1994 if (unplug_page || (rw & (1 << BIO_RW)))
1995 num_stripes = map->sub_stripes;
1996 else if (mirror_num)
1997 stripe_index += mirror_num - 1;
1998 else {
1999 stripe_index = find_live_mirror(map, stripe_index,
2000 map->sub_stripes, stripe_index +
2001 current->pid % map->sub_stripes);
2002 }
2003 } else {
2004 /*
2005 * after this do_div call, stripe_nr is the number of stripes
2006 * on this device we have to walk to find the data, and
2007 * stripe_index is the number of our device in the stripe array
2008 */
2009 stripe_index = do_div(stripe_nr, map->num_stripes);
2010 }
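/*
 * Index math example (illustrative numbers): a RAID10 chunk with
 * num_stripes == 4 and sub_stripes == 2 has factor == 2. For
 * stripe_nr == 5, do_div() leaves stripe_nr == 2 with remainder 1, so
 * stripe_index == 1 * sub_stripes == 2 and the block lives on the
 * mirror pair stripes[2]/stripes[3], at stripe 2 on each device.
 * For plain striping the else branch above just rotates through the
 * array: stripe_index == stripe_nr % num_stripes.
 */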
2011 BUG_ON(stripe_index >= map->num_stripes);
2012
2013 for (i = 0; i < num_stripes; i++) {
2014 if (unplug_page) {
2015 struct btrfs_device *device;
2016 struct backing_dev_info *bdi;
2017
2018 device = map->stripes[stripe_index].dev;
2019 if (device->bdev) {
2020 bdi = blk_get_backing_dev_info(device->bdev);
2021 if (bdi->unplug_io_fn) {
2022 bdi->unplug_io_fn(bdi, unplug_page);
2023 }
2024 }
2025 } else {
2026 multi->stripes[i].physical =
2027 map->stripes[stripe_index].physical +
2028 stripe_offset + stripe_nr * map->stripe_len;
2029 multi->stripes[i].dev = map->stripes[stripe_index].dev;
2030 }
2031 stripe_index++;
2032 }
2033 if (multi_ret) {
2034 *multi_ret = multi;
2035 multi->num_stripes = num_stripes;
2036 multi->max_errors = max_errors;
2037 }
2038out:
2039 free_extent_map(em);
2040 return 0;
2041}
2042
2043int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2044 u64 logical, u64 *length,
2045 struct btrfs_multi_bio **multi_ret, int mirror_num)
2046{
2047 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2048 mirror_num, NULL);
2049}
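/*
 * Typical caller pattern (sketch only; btrfs_map_bio() below is the
 * real in-tree user):
 *
 *	struct btrfs_multi_bio *multi = NULL;
 *	u64 map_length = length;
 *
 *	ret = btrfs_map_block(map_tree, rw, logical, &map_length,
 *			      &multi, 0);
 *	for (i = 0; i < multi->num_stripes; i++)
 *		... submit to multi->stripes[i].dev at
 *		    multi->stripes[i].physical ...
 *	kfree(multi); (the multi-device write path hands it to the
 *	endio instead)
 *
 * map_length comes back clamped to one stripe, so a bio longer than
 * that must not cross the returned boundary.
 */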
2050
2051int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2052 u64 logical, struct page *page)
2053{
2054 u64 length = PAGE_CACHE_SIZE;
2055 return __btrfs_map_block(map_tree, READ, logical, &length,
2056 NULL, 0, page);
2057}
2058
2059
2060#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
2061static void end_bio_multi_stripe(struct bio *bio, int err)
2062#else
2063static int end_bio_multi_stripe(struct bio *bio,
2064 unsigned int bytes_done, int err)
2065#endif
2066{
2067 struct btrfs_multi_bio *multi = bio->bi_private;
2068
2069#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2070 if (bio->bi_size)
2071 return 1;
2072#endif
2073 if (err)
2074 atomic_inc(&multi->error);
2075
2076 if (atomic_dec_and_test(&multi->stripes_pending)) {
2077 bio->bi_private = multi->private;
2078 bio->bi_end_io = multi->end_io;
2079 /* only send an error to the higher layers if it is
2080 * beyond the tolerance of the multi-bio
2081 */
2082 if (atomic_read(&multi->error) > multi->max_errors) {
2083 err = -EIO;
2084 } else if (err) {
2085 /*
2086 * this bio is actually up to date, we didn't
2087 * go over the max number of errors
2088 */
2089 set_bit(BIO_UPTODATE, &bio->bi_flags);
2090 err = 0;
2091 }
2092 kfree(multi);
2093
2094#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2095 bio_endio(bio, bio->bi_size, err);
2096#else
2097 bio_endio(bio, err);
2098#endif
2099 } else {
2100 bio_put(bio);
2101 }
2102#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2103 return 0;
2104#endif
2105}
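/*
 * Error tolerance example (illustrative): a RAID1 write fans out to
 * two mirrors with max_errors == 1 (set in __btrfs_map_block). If
 * one mirror fails and the other completes, multi->error == 1 is
 * within tolerance, so the bio is marked uptodate and err is cleared;
 * only when both mirrors fail (error == 2 > max_errors) does -EIO
 * reach the higher layers.
 */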
2106
2107struct async_sched {
2108 struct bio *bio;
2109 int rw;
2110 struct btrfs_fs_info *info;
2111 struct btrfs_work work;
2112};
2113
2114/*
2115 * see run_scheduled_bios for a description of why bios are collected for
2116 * async submit.
2117 *
2118 * This will add one bio to the pending list for a device and make sure
2119 * the work struct is scheduled.
2120 */
2121int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
2122 int rw, struct bio *bio)
2123{
2124 int should_queue = 1;
2125
2126 /* don't bother with additional async steps for reads, right now */
2127 if (!(rw & (1 << BIO_RW))) {
2128 submit_bio(rw, bio);
2129 return 0;
2130 }
2131
2132 /*
2133 * nr_async_submits allows us to reliably return congestion to the
2134 * higher layers. Otherwise, the async bio makes it appear we have
2135 * made progress against dirty pages when we've really just put it
2136 * on a queue for later
2137 */
2138 atomic_inc(&root->fs_info->nr_async_submits);
2139 bio->bi_next = NULL;
2140 bio->bi_rw |= rw;
2141
2142 spin_lock(&device->io_lock);
2143
2144 if (device->pending_bio_tail)
2145 device->pending_bio_tail->bi_next = bio;
2146
2147 device->pending_bio_tail = bio;
2148 if (!device->pending_bios)
2149 device->pending_bios = bio;
2150 if (device->running_pending)
2151 should_queue = 0;
2152
2153 spin_unlock(&device->io_lock);
2154
2155 if (should_queue)
2156 btrfs_queue_worker(&root->fs_info->submit_workers,
2157 &device->work);
2158 return 0;
2159}
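/*
 * Queueing example (illustrative): two async writes to the same
 * device end up as a singly linked FIFO through bi_next:
 *
 *	device->pending_bios -> bioA -> bioB <- device->pending_bio_tail
 *
 * Only the first of them schedules device->work; with running_pending
 * set, later bios just join the list and the already-queued worker
 * drains them.
 */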
2160
2161 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2162 int mirror_num, int async_submit)
2163{
2164 struct btrfs_mapping_tree *map_tree;
2165 struct btrfs_device *dev;
2166 struct bio *first_bio = bio;
2167 u64 logical = bio->bi_sector << 9;
2168 u64 length = 0;
2169 u64 map_length;
2170 struct btrfs_multi_bio *multi = NULL;
2171 int ret;
2172 int dev_nr = 0;
2173 int total_devs = 1;
2174
2175 length = bio->bi_size;
2176 map_tree = &root->fs_info->mapping_tree;
2177 map_length = length;
2178
2179 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
2180 mirror_num);
2181 BUG_ON(ret);
2182
2183 total_devs = multi->num_stripes;
2184 if (map_length < length) {
2185 printk("mapping failed logical %Lu bio len %Lu "
2186 "len %Lu\n", logical, length, map_length);
2187 BUG();
2188 }
2189 multi->end_io = first_bio->bi_end_io;
2190 multi->private = first_bio->bi_private;
2191 atomic_set(&multi->stripes_pending, multi->num_stripes);
2192
2193 while(dev_nr < total_devs) {
2194 if (total_devs > 1) {
2195 if (dev_nr < total_devs - 1) {
2196 bio = bio_clone(first_bio, GFP_NOFS);
2197 BUG_ON(!bio);
2198 } else {
2199 bio = first_bio;
2200 }
2201 bio->bi_private = multi;
2202 bio->bi_end_io = end_bio_multi_stripe;
2203 }
2204 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
2205 dev = multi->stripes[dev_nr].dev;
2206 if (dev && dev->bdev) {
2207 bio->bi_bdev = dev->bdev;
2208 if (async_submit)
2209 schedule_bio(root, dev, rw, bio);
2210 else
2211 submit_bio(rw, bio);
2212 } else {
2213 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
2214 bio->bi_sector = logical >> 9;
2215#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2216 bio_endio(bio, bio->bi_size, -EIO);
2217#else
2218 bio_endio(bio, -EIO);
2219#endif
2220 }
2221 dev_nr++;
2222 }
2223 if (total_devs == 1)
2224 kfree(multi);
2225 return 0;
2226}
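/*
 * Fan-out example (illustrative): a write into a 2-mirror RAID1 chunk
 * maps to total_devs == 2. dev_nr 0 gets a clone of first_bio and
 * dev_nr 1 gets first_bio itself; both point at the shared multi with
 * stripes_pending == 2, and end_bio_multi_stripe() restores the
 * original bi_end_io/bi_private and completes the caller's bio only
 * when the last copy finishes.
 */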
2227
2228struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
2229 u8 *uuid)
2230{
2231 struct list_head *head = &root->fs_info->fs_devices->devices;
2232
2233 return __find_device(head, devid, uuid);
2234}
2235
2236static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
2237 u64 devid, u8 *dev_uuid)
2238{
2239 struct btrfs_device *device;
2240 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2241
2242 device = kzalloc(sizeof(*device), GFP_NOFS);
if (!device)
return NULL;
2243 list_add(&device->dev_list,
2244 &fs_devices->devices);
2245 list_add(&device->dev_alloc_list,
2246 &fs_devices->alloc_list);
2247 device->barriers = 1;
2248 device->dev_root = root->fs_info->dev_root;
2249 device->devid = devid;
2250 device->work.func = pending_bios_fn;
2251 fs_devices->num_devices++;
2252 spin_lock_init(&device->io_lock);
2253 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
2254 return device;
2255}
2256
2257
2258static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
2259 struct extent_buffer *leaf,
2260 struct btrfs_chunk *chunk)
2261{
2262 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2263 struct map_lookup *map;
2264 struct extent_map *em;
2265 u64 logical;
2266 u64 length;
2267 u64 devid;
2268 u8 uuid[BTRFS_UUID_SIZE];
2269 int num_stripes;
2270 int ret;
2271 int i;
2272
2273 logical = key->offset;
2274 length = btrfs_chunk_length(leaf, chunk);
2275
2276 spin_lock(&map_tree->map_tree.lock);
2277 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
2278 spin_unlock(&map_tree->map_tree.lock);
2279
2280 /* already mapped? */
2281 if (em && em->start <= logical && em->start + em->len > logical) {
2282 free_extent_map(em);
2283 return 0;
2284 } else if (em) {
2285 free_extent_map(em);
2286 }
2287
2291
2292 em = alloc_extent_map(GFP_NOFS);
2293 if (!em)
2294 return -ENOMEM;
2295 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2296 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2297 if (!map) {
2298 free_extent_map(em);
2299 return -ENOMEM;
2300 }
2301
2302 em->bdev = (struct block_device *)map;
2303 em->start = logical;
2304 em->len = length;
2305 em->block_start = 0;
2306
2307 map->num_stripes = num_stripes;
2308 map->io_width = btrfs_chunk_io_width(leaf, chunk);
2309 map->io_align = btrfs_chunk_io_align(leaf, chunk);
2310 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
2311 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
2312 map->type = btrfs_chunk_type(leaf, chunk);
2313 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
2314 for (i = 0; i < num_stripes; i++) {
2315 map->stripes[i].physical =
2316 btrfs_stripe_offset_nr(leaf, chunk, i);
2317 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
2318 read_extent_buffer(leaf, uuid, (unsigned long)
2319 btrfs_stripe_dev_uuid_nr(chunk, i),
2320 BTRFS_UUID_SIZE);
2321 map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
2322
2323 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
2324 kfree(map);
2325 free_extent_map(em);
2326 return -EIO;
2327 }
2328 if (!map->stripes[i].dev) {
2329 map->stripes[i].dev =
2330 add_missing_dev(root, devid, uuid);
2331 if (!map->stripes[i].dev) {
2332 kfree(map);
2333 free_extent_map(em);
2334 return -EIO;
2335 }
2336 }
2337 map->stripes[i].dev->in_fs_metadata = 1;
2338 }
2339
2340 spin_lock(&map_tree->map_tree.lock);
2341 ret = add_extent_mapping(&map_tree->map_tree, em);
2342 spin_unlock(&map_tree->map_tree.lock);
2343 BUG_ON(ret);
2344 free_extent_map(em);
2345
2346 return 0;
2347}
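/*
 * Mapping sketch (illustrative): a chunk item keyed at logical offset L
 * with length N becomes an extent_map covering [L, L + N), and the
 * struct map_lookup rides in em->bdev, the same convention used when
 * the chunk was first allocated; __btrfs_map_block() casts it back out.
 */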
2348
2349static int fill_device_from_item(struct extent_buffer *leaf,
2350 struct btrfs_dev_item *dev_item,
2351 struct btrfs_device *device)
2352{
2353 unsigned long ptr;
2354
2355 device->devid = btrfs_device_id(leaf, dev_item);
2356 device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2357 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2358 device->type = btrfs_device_type(leaf, dev_item);
2359 device->io_align = btrfs_device_io_align(leaf, dev_item);
2360 device->io_width = btrfs_device_io_width(leaf, dev_item);
2361 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
2362
2363 ptr = (unsigned long)btrfs_device_uuid(dev_item);
2364 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
2365
2366 return 0;
2367}
2368
2369 static int read_one_dev(struct btrfs_root *root,
2370 struct extent_buffer *leaf,
2371 struct btrfs_dev_item *dev_item)
2372{
2373 struct btrfs_device *device;
2374 u64 devid;
2375 int ret;
2376 u8 dev_uuid[BTRFS_UUID_SIZE];
2377
2378 devid = btrfs_device_id(leaf, dev_item);
2379 read_extent_buffer(leaf, dev_uuid,
2380 (unsigned long)btrfs_device_uuid(dev_item),
2381 BTRFS_UUID_SIZE);
2382 device = btrfs_find_device(root, devid, dev_uuid);
2383 if (!device) {
2384 printk("warning devid %Lu missing\n", devid);
2385 device = add_missing_dev(root, devid, dev_uuid);
2386 if (!device)
2387 return -ENOMEM;
2388 }
2389
2390 fill_device_from_item(leaf, dev_item, device);
2391 device->dev_root = root->fs_info->dev_root;
2392 device->in_fs_metadata = 1;
2393 ret = 0;
2394#if 0
2395 ret = btrfs_open_device(device);
2396 if (ret) {
2397 kfree(device);
2398 }
2399#endif
2400 return ret;
2401}
2402
2403int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
2404{
2405 struct btrfs_dev_item *dev_item;
2406
2407 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
2408 dev_item);
2409 return read_one_dev(root, buf, dev_item);
2410}
2411
2412int btrfs_read_sys_array(struct btrfs_root *root)
2413{
2414 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2415 struct extent_buffer *sb;
2416 struct btrfs_disk_key *disk_key;
2417 struct btrfs_chunk *chunk;
2418 u8 *ptr;
2419 unsigned long sb_ptr;
2420 int ret = 0;
2421 u32 num_stripes;
2422 u32 array_size;
2423 u32 len = 0;
2424 u32 cur;
2425 struct btrfs_key key;
2426
2427 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
2428 BTRFS_SUPER_INFO_SIZE);
2429 if (!sb)
2430 return -ENOMEM;
2431 btrfs_set_buffer_uptodate(sb);
2432 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
2433 array_size = btrfs_super_sys_array_size(super_copy);
2434
2435 ptr = super_copy->sys_chunk_array;
2436 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
2437 cur = 0;
2438
2439 while (cur < array_size) {
2440 disk_key = (struct btrfs_disk_key *)ptr;
2441 btrfs_disk_key_to_cpu(&key, disk_key);
2442
2443 len = sizeof(*disk_key); ptr += len;
2444 sb_ptr += len;
2445 cur += len;
2446
2447 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2448 chunk = (struct btrfs_chunk *)sb_ptr;
2449 ret = read_one_chunk(root, &key, sb, chunk);
2450 if (ret)
2451 break;
2452 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
2453 len = btrfs_chunk_item_size(num_stripes);
2454 } else {
2455 ret = -EIO;
2456 break;
2457 }
2458 ptr += len;
2459 sb_ptr += len;
2460 cur += len;
2461 }
2462 free_extent_buffer(sb);
2463 return ret;
2464}
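/*
 * Layout of super_copy->sys_chunk_array as parsed above (every entry
 * must be a chunk item or the loop bails with -EIO):
 *
 *	[btrfs_disk_key][btrfs_chunk + N stripes][btrfs_disk_key][...]
 *
 * where each chunk entry occupies btrfs_chunk_item_size(num_stripes)
 * bytes and cur walks the array until array_size is consumed.
 */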
2465
2466int btrfs_read_chunk_tree(struct btrfs_root *root)
2467{
2468 struct btrfs_path *path;
2469 struct extent_buffer *leaf;
2470 struct btrfs_key key;
2471 struct btrfs_key found_key;
2472 int ret;
2473 int slot;
2474
2475 root = root->fs_info->chunk_root;
2476
2477 path = btrfs_alloc_path();
2478 if (!path)
2479 return -ENOMEM;
2480
2481 /* first we search for all of the device items, and then we
2482 * read in all of the chunk items. This way we can create chunk
2483 * mappings that reference all of the devices that are found;
2484 * see the worked example after this function */
2485 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2486 key.offset = 0;
2487 key.type = 0;
2488again:
2489 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2490 while(1) {
2491 leaf = path->nodes[0];
2492 slot = path->slots[0];
2493 if (slot >= btrfs_header_nritems(leaf)) {
2494 ret = btrfs_next_leaf(root, path);
2495 if (ret == 0)
2496 continue;
2497 if (ret < 0)
2498 goto error;
2499 break;
2500 }
2501 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2502 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2503 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
2504 break;
2505 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
2506 struct btrfs_dev_item *dev_item;
2507 dev_item = btrfs_item_ptr(leaf, slot,
2508 struct btrfs_dev_item);
2509 ret = read_one_dev(root, leaf, dev_item);
2510 BUG_ON(ret);
2511 }
2512 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
2513 struct btrfs_chunk *chunk;
2514 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2515 ret = read_one_chunk(root, &found_key, leaf, chunk);
2516 }
2517 path->slots[0]++;
2518 }
2519 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2520 key.objectid = 0;
2521 btrfs_release_path(root, path);
2522 goto again;
2523 }
2524
2525 btrfs_free_path(path);
2526 ret = 0;
2527error:
2528 return ret;
2529}
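/*
 * Walk-through of the two passes above (illustrative): the first
 * search starts at (BTRFS_DEV_ITEMS_OBJECTID, 0, 0) and consumes every
 * BTRFS_DEV_ITEM_KEY; once found_key.objectid moves past the device
 * items the loop breaks, key.objectid is reset to 0, the path is
 * released, and the second pass picks up each BTRFS_CHUNK_ITEM_KEY so
 * read_one_chunk() can resolve stripes against devices that are
 * already known.
 */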