fs/btrfs/inode.c (Btrfs: patch queue: page_mkwrite)
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/module.h>
20#include <linux/buffer_head.h>
21#include <linux/fs.h>
22#include <linux/pagemap.h>
23#include <linux/highmem.h>
24#include <linux/time.h>
25#include <linux/init.h>
26#include <linux/string.h>
27#include <linux/smp_lock.h>
28#include <linux/backing-dev.h>
29#include <linux/mpage.h>
30#include <linux/swap.h>
31#include <linux/writeback.h>
32#include <linux/statfs.h>
33#include <linux/compat.h>
34#include <linux/bit_spinlock.h>
35#include "ctree.h"
36#include "disk-io.h"
37#include "transaction.h"
38#include "btrfs_inode.h"
39#include "ioctl.h"
40#include "print-tree.h"
41
42struct btrfs_iget_args {
43 u64 ino;
44 struct btrfs_root *root;
45};
46
47static struct inode_operations btrfs_dir_inode_operations;
48static struct inode_operations btrfs_symlink_inode_operations;
49static struct inode_operations btrfs_dir_ro_inode_operations;
50static struct inode_operations btrfs_file_inode_operations;
51static struct address_space_operations btrfs_aops;
52static struct address_space_operations btrfs_symlink_aops;
53static struct file_operations btrfs_dir_file_operations;
54
55static struct kmem_cache *btrfs_inode_cachep;
56struct kmem_cache *btrfs_trans_handle_cachep;
57struct kmem_cache *btrfs_transaction_cachep;
58struct kmem_cache *btrfs_bit_radix_cachep;
59struct kmem_cache *btrfs_path_cachep;
60
61#define S_SHIFT 12
62static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
63 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
64 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
65 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
66 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
67 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
68 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
69 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
70};
71
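/*
 * read the inode item for this inode out of the btree and copy its
 * fields into the VFS inode, then pick the address space and inode
 * operations based on the file type
 */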
72void btrfs_read_locked_inode(struct inode *inode)
73{
74 struct btrfs_path *path;
75 struct btrfs_inode_item *inode_item;
76 struct btrfs_root *root = BTRFS_I(inode)->root;
77 struct btrfs_key location;
78 u64 alloc_group_block;
79 int ret;
80
81 path = btrfs_alloc_path();
82 BUG_ON(!path);
83 mutex_lock(&root->fs_info->fs_mutex);
84
85 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
86 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
87 if (ret) {
88 btrfs_free_path(path);
89 goto make_bad;
90 }
91 inode_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
92 path->slots[0],
93 struct btrfs_inode_item);
94
95 inode->i_mode = btrfs_inode_mode(inode_item);
96 inode->i_nlink = btrfs_inode_nlink(inode_item);
97 inode->i_uid = btrfs_inode_uid(inode_item);
98 inode->i_gid = btrfs_inode_gid(inode_item);
99 inode->i_size = btrfs_inode_size(inode_item);
100 inode->i_atime.tv_sec = btrfs_timespec_sec(&inode_item->atime);
101 inode->i_atime.tv_nsec = btrfs_timespec_nsec(&inode_item->atime);
102 inode->i_mtime.tv_sec = btrfs_timespec_sec(&inode_item->mtime);
103 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(&inode_item->mtime);
104 inode->i_ctime.tv_sec = btrfs_timespec_sec(&inode_item->ctime);
105 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(&inode_item->ctime);
106 inode->i_blocks = btrfs_inode_nblocks(inode_item);
107 inode->i_generation = btrfs_inode_generation(inode_item);
108 alloc_group_block = btrfs_inode_block_group(inode_item);
109 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
110 alloc_group_block);
111
112 btrfs_free_path(path);
113 inode_item = NULL;
114
115 mutex_unlock(&root->fs_info->fs_mutex);
116
117 switch (inode->i_mode & S_IFMT) {
118#if 0
119 default:
120 init_special_inode(inode, inode->i_mode,
121 btrfs_inode_rdev(inode_item));
122 break;
123#endif
124 case S_IFREG:
125 inode->i_mapping->a_ops = &btrfs_aops;
126 inode->i_fop = &btrfs_file_operations;
127 inode->i_op = &btrfs_file_inode_operations;
128 break;
129 case S_IFDIR:
130 inode->i_fop = &btrfs_dir_file_operations;
131 if (root == root->fs_info->tree_root)
132 inode->i_op = &btrfs_dir_ro_inode_operations;
133 else
134 inode->i_op = &btrfs_dir_inode_operations;
135 break;
136 case S_IFLNK:
137 inode->i_op = &btrfs_symlink_inode_operations;
138 inode->i_mapping->a_ops = &btrfs_symlink_aops;
139 break;
140 }
141 return;
142
143make_bad:
144 btrfs_release_path(root, path);
145 btrfs_free_path(path);
146 mutex_unlock(&root->fs_info->fs_mutex);
147 make_bad_inode(inode);
148}
149
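/* copy the relevant VFS inode fields into an on-disk btrfs inode item */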
150static void fill_inode_item(struct btrfs_inode_item *item,
151 struct inode *inode)
152{
153 btrfs_set_inode_uid(item, inode->i_uid);
154 btrfs_set_inode_gid(item, inode->i_gid);
155 btrfs_set_inode_size(item, inode->i_size);
156 btrfs_set_inode_mode(item, inode->i_mode);
157 btrfs_set_inode_nlink(item, inode->i_nlink);
158 btrfs_set_timespec_sec(&item->atime, inode->i_atime.tv_sec);
159 btrfs_set_timespec_nsec(&item->atime, inode->i_atime.tv_nsec);
160 btrfs_set_timespec_sec(&item->mtime, inode->i_mtime.tv_sec);
161 btrfs_set_timespec_nsec(&item->mtime, inode->i_mtime.tv_nsec);
162 btrfs_set_timespec_sec(&item->ctime, inode->i_ctime.tv_sec);
163 btrfs_set_timespec_nsec(&item->ctime, inode->i_ctime.tv_nsec);
164 btrfs_set_inode_nblocks(item, inode->i_blocks);
165 btrfs_set_inode_generation(item, inode->i_generation);
166 btrfs_set_inode_block_group(item,
167 BTRFS_I(inode)->block_group->key.objectid);
168}
169
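/* find the inode item in the btree and update it from the in-memory inode */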
170static int btrfs_update_inode(struct btrfs_trans_handle *trans,
171 struct btrfs_root *root,
172 struct inode *inode)
173{
174 struct btrfs_inode_item *inode_item;
175 struct btrfs_path *path;
176 int ret;
177
178 path = btrfs_alloc_path();
179 BUG_ON(!path);
180 ret = btrfs_lookup_inode(trans, root, path,
181 &BTRFS_I(inode)->location, 1);
182 if (ret) {
183 if (ret > 0)
184 ret = -ENOENT;
185 goto failed;
186 }
187
188 inode_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
189 path->slots[0],
190 struct btrfs_inode_item);
191
192 fill_inode_item(inode_item, inode);
193 btrfs_mark_buffer_dirty(path->nodes[0]);
194 ret = 0;
195failed:
196 btrfs_release_path(root, path);
197 btrfs_free_path(path);
198 return ret;
199}
200
201
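/*
 * remove the dir item and the matching dir index item for this name,
 * then fix up the directory size and the victim inode's link count
 */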
202static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
203 struct btrfs_root *root,
204 struct inode *dir,
205 struct dentry *dentry)
206{
207 struct btrfs_path *path;
208 const char *name = dentry->d_name.name;
209 int name_len = dentry->d_name.len;
210 int ret = 0;
211 u64 objectid;
212 struct btrfs_dir_item *di;
213
214 path = btrfs_alloc_path();
215 BUG_ON(!path);
216 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
217 name, name_len, -1);
218 if (IS_ERR(di)) {
219 ret = PTR_ERR(di);
220 goto err;
221 }
222 if (!di) {
223 ret = -ENOENT;
224 goto err;
225 }
226 objectid = btrfs_disk_key_objectid(&di->location);
227 ret = btrfs_delete_one_dir_name(trans, root, path, di);
228 BUG_ON(ret);
229 btrfs_release_path(root, path);
230
231 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
232 objectid, name, name_len, -1);
233 if (IS_ERR(di)) {
234 ret = PTR_ERR(di);
235 goto err;
236 }
237 if (!di) {
238 ret = -ENOENT;
239 goto err;
240 }
241 ret = btrfs_delete_one_dir_name(trans, root, path, di);
242 BUG_ON(ret);
243
244 dentry->d_inode->i_ctime = dir->i_ctime;
245err:
246 btrfs_free_path(path);
247 if (!ret) {
248 dir->i_size -= name_len * 2;
249 btrfs_update_inode(trans, root, dir);
250 drop_nlink(dentry->d_inode);
251 btrfs_update_inode(trans, root, dentry->d_inode);
252 dir->i_sb->s_dirt = 1;
253 }
254 return ret;
255}
256
257static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
258{
259 struct btrfs_root *root;
260 struct btrfs_trans_handle *trans;
261 int ret;
262
263 root = BTRFS_I(dir)->root;
264 mutex_lock(&root->fs_info->fs_mutex);
265 trans = btrfs_start_transaction(root, 1);
266 btrfs_set_trans_block_group(trans, dir);
267 ret = btrfs_unlink_trans(trans, root, dir, dentry);
268 btrfs_end_transaction(trans, root);
269 mutex_unlock(&root->fs_info->fs_mutex);
270 btrfs_btree_balance_dirty(root);
271 return ret;
272}
273
274static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
275{
276 struct inode *inode = dentry->d_inode;
277 int err;
278 int ret;
279 struct btrfs_root *root = BTRFS_I(dir)->root;
280 struct btrfs_path *path;
281 struct btrfs_key key;
282 struct btrfs_trans_handle *trans;
283 struct btrfs_key found_key;
284 int found_type;
285 struct btrfs_leaf *leaf;
286 char *goodnames = "..";
287
288 path = btrfs_alloc_path();
289 BUG_ON(!path);
290 mutex_lock(&root->fs_info->fs_mutex);
291 trans = btrfs_start_transaction(root, 1);
292 btrfs_set_trans_block_group(trans, dir);
293 key.objectid = inode->i_ino;
294 key.offset = (u64)-1;
295 key.flags = (u32)-1;
296 while(1) {
297 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
298 if (ret < 0) {
299 err = ret;
300 goto out;
301 }
302 BUG_ON(ret == 0);
303 if (path->slots[0] == 0) {
304 err = -ENOENT;
305 goto out;
306 }
307 path->slots[0]--;
308 leaf = btrfs_buffer_leaf(path->nodes[0]);
309 btrfs_disk_key_to_cpu(&found_key,
310 &leaf->items[path->slots[0]].key);
311 found_type = btrfs_key_type(&found_key);
312 if (found_key.objectid != inode->i_ino) {
313 err = -ENOENT;
314 goto out;
315 }
316 if ((found_type != BTRFS_DIR_ITEM_KEY &&
317 found_type != BTRFS_DIR_INDEX_KEY) ||
318 (!btrfs_match_dir_item_name(root, path, goodnames, 2) &&
319 !btrfs_match_dir_item_name(root, path, goodnames, 1))) {
320 err = -ENOTEMPTY;
321 goto out;
322 }
323 ret = btrfs_del_item(trans, root, path);
324 BUG_ON(ret);
325
326 if (found_type == BTRFS_DIR_ITEM_KEY && found_key.offset == 1)
327 break;
328 btrfs_release_path(root, path);
329 }
330 ret = 0;
331 btrfs_release_path(root, path);
332
333 /* now the directory is empty */
334 err = btrfs_unlink_trans(trans, root, dir, dentry);
335 if (!err) {
336 inode->i_size = 0;
337 }
338out:
339 btrfs_release_path(root, path);
340 btrfs_free_path(path);
341 mutex_unlock(&root->fs_info->fs_mutex);
342 ret = btrfs_end_transaction(trans, root);
343 btrfs_btree_balance_dirty(root);
344 if (ret && !err)
345 err = ret;
346 return err;
347}
348
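/* remove the inode item from the btree once the inode is being freed */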
349static int btrfs_free_inode(struct btrfs_trans_handle *trans,
350 struct btrfs_root *root,
351 struct inode *inode)
352{
353 struct btrfs_path *path;
354 int ret;
355
356 clear_inode(inode);
357
358 path = btrfs_alloc_path();
359 BUG_ON(!path);
360 ret = btrfs_lookup_inode(trans, root, path,
361 &BTRFS_I(inode)->location, -1);
362 BUG_ON(ret);
363 ret = btrfs_del_item(trans, root, path);
364 BUG_ON(ret);
365 btrfs_free_path(path);
366 return ret;
367}
368
369/*
370 * truncates go from a high offset to a low offset. So, walk
371 * from hi to lo in the node and issue readaheads. Stop when you find
372 * keys from a different objectid
373 */
374static void reada_truncate(struct btrfs_root *root, struct btrfs_path *path,
375 u64 objectid)
376{
377 struct btrfs_node *node;
378 int i;
379 int nritems;
380 u64 item_objectid;
381 u64 blocknr;
382 int slot;
383 int ret;
384
385 if (!path->nodes[1])
386 return;
387 node = btrfs_buffer_node(path->nodes[1]);
388 slot = path->slots[1];
389 if (slot == 0)
390 return;
391 nritems = btrfs_header_nritems(&node->header);
392 for (i = slot - 1; i >= 0; i--) {
393 item_objectid = btrfs_disk_key_objectid(&node->ptrs[i].key);
394 if (item_objectid != objectid)
395 break;
396 blocknr = btrfs_node_blockptr(node, i);
397 ret = readahead_tree_block(root, blocknr);
398 if (ret)
399 break;
400 }
401}
402
403/*
404 * this can truncate away extent items, csum items and directory items.
405 * It starts at a high offset and removes keys until it can't find
406 * any higher than i_size.
407 *
408 * csum items that cross the new i_size are truncated to the new size
409 * as well.
410 */
411static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
412 struct btrfs_root *root,
413 struct inode *inode)
414{
415 int ret;
416 struct btrfs_path *path;
417 struct btrfs_key key;
418 struct btrfs_disk_key *found_key;
419 u32 found_type;
420 struct btrfs_leaf *leaf;
421 struct btrfs_file_extent_item *fi;
422 u64 extent_start = 0;
423 u64 extent_num_blocks = 0;
424 u64 item_end = 0;
425 int found_extent;
426 int del_item;
427
428 path = btrfs_alloc_path();
429 BUG_ON(!path);
430 /* FIXME, add redo link to tree so we don't leak on crash */
431 key.objectid = inode->i_ino;
432 key.offset = (u64)-1;
433 key.flags = (u32)-1;
434 while(1) {
435 btrfs_init_path(path);
436 fi = NULL;
437 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
438 if (ret < 0) {
439 goto error;
440 }
441 if (ret > 0) {
442 BUG_ON(path->slots[0] == 0);
443 path->slots[0]--;
444 }
445 reada_truncate(root, path, inode->i_ino);
446 leaf = btrfs_buffer_leaf(path->nodes[0]);
447 found_key = &leaf->items[path->slots[0]].key;
448 found_type = btrfs_disk_key_type(found_key);
449
450 if (btrfs_disk_key_objectid(found_key) != inode->i_ino)
451 break;
452 if (found_type != BTRFS_CSUM_ITEM_KEY &&
453 found_type != BTRFS_DIR_ITEM_KEY &&
454 found_type != BTRFS_DIR_INDEX_KEY &&
455 found_type != BTRFS_EXTENT_DATA_KEY)
456 break;
457
458 item_end = btrfs_disk_key_offset(found_key);
459 if (found_type == BTRFS_EXTENT_DATA_KEY) {
460 fi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
461 path->slots[0],
462 struct btrfs_file_extent_item);
463 if (btrfs_file_extent_type(fi) !=
464 BTRFS_FILE_EXTENT_INLINE) {
465 item_end += btrfs_file_extent_num_blocks(fi) <<
466 inode->i_blkbits;
467 }
468 }
469 if (found_type == BTRFS_CSUM_ITEM_KEY) {
470 ret = btrfs_csum_truncate(trans, root, path,
471 inode->i_size);
472 BUG_ON(ret);
473 }
474 if (item_end < inode->i_size) {
475 if (found_type) {
476 btrfs_set_key_type(&key, found_type - 1);
477 continue;
478 }
479 break;
480 }
481 if (btrfs_disk_key_offset(found_key) >= inode->i_size)
482 del_item = 1;
483 else
484 del_item = 0;
485 found_extent = 0;
486
487 /* FIXME, shrink the extent if the ref count is only 1 */
488 if (found_type == BTRFS_EXTENT_DATA_KEY &&
489 btrfs_file_extent_type(fi) !=
490 BTRFS_FILE_EXTENT_INLINE) {
491 u64 num_dec;
492 if (!del_item) {
493 u64 orig_num_blocks =
494 btrfs_file_extent_num_blocks(fi);
495 extent_num_blocks = inode->i_size -
496 btrfs_disk_key_offset(found_key) +
497 root->blocksize - 1;
498 extent_num_blocks >>= inode->i_blkbits;
499 btrfs_set_file_extent_num_blocks(fi,
500 extent_num_blocks);
501 inode->i_blocks -= (orig_num_blocks -
502 extent_num_blocks) << 3;
503 mark_buffer_dirty(path->nodes[0]);
504 } else {
505 extent_start =
506 btrfs_file_extent_disk_blocknr(fi);
507 extent_num_blocks =
508 btrfs_file_extent_disk_num_blocks(fi);
509 /* FIXME blocksize != 4096 */
510 num_dec = btrfs_file_extent_num_blocks(fi) << 3;
511 if (extent_start != 0) {
512 found_extent = 1;
513 inode->i_blocks -= num_dec;
514 }
515 }
516 }
517 if (del_item) {
518 ret = btrfs_del_item(trans, root, path);
519 BUG_ON(ret);
520 } else {
521 break;
522 }
523 btrfs_release_path(root, path);
524 if (found_extent) {
525 ret = btrfs_free_extent(trans, root, extent_start,
526 extent_num_blocks, 0);
527 BUG_ON(ret);
528 }
529 }
530 ret = 0;
531error:
532 btrfs_release_path(root, path);
533 btrfs_free_path(path);
534 inode->i_sb->s_dirt = 1;
535 return ret;
536}
537
538/*
539 * taken from block_truncate_page, but does cow as it zeros out
540 * any bytes left in the last page in the file.
541 */
542static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
543{
544 struct inode *inode = mapping->host;
545 unsigned blocksize = 1 << inode->i_blkbits;
546 pgoff_t index = from >> PAGE_CACHE_SHIFT;
547 unsigned offset = from & (PAGE_CACHE_SIZE-1);
548 struct page *page;
549 char *kaddr;
550 int ret = 0;
551 struct btrfs_root *root = BTRFS_I(inode)->root;
552 u64 alloc_hint = 0;
553 struct btrfs_key ins;
554 struct btrfs_trans_handle *trans;
555
556 if ((offset & (blocksize - 1)) == 0)
557 goto out;
558
559 ret = -ENOMEM;
560 page = grab_cache_page(mapping, index);
561 if (!page)
562 goto out;
563
564 if (!PageUptodate(page)) {
565 ret = btrfs_readpage(NULL, page);
566 lock_page(page);
567 if (!PageUptodate(page)) {
568 ret = -EIO;
569 goto out;
570 }
571 }
572 mutex_lock(&root->fs_info->fs_mutex);
573 trans = btrfs_start_transaction(root, 1);
574 btrfs_set_trans_block_group(trans, inode);
575
576 ret = btrfs_drop_extents(trans, root, inode,
577 page->index << PAGE_CACHE_SHIFT,
578 (page->index + 1) << PAGE_CACHE_SHIFT,
579 &alloc_hint);
580 BUG_ON(ret);
581 ret = btrfs_alloc_extent(trans, root, inode->i_ino, 1,
582 alloc_hint, (u64)-1, &ins, 1);
583 BUG_ON(ret);
584 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
585 page->index << PAGE_CACHE_SHIFT,
586 ins.objectid, 1, 1);
587 BUG_ON(ret);
588 SetPageChecked(page);
589 kaddr = kmap(page);
590 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
591 flush_dcache_page(page);
592 btrfs_csum_file_block(trans, root, inode->i_ino,
593 page->index << PAGE_CACHE_SHIFT,
594 kaddr, PAGE_CACHE_SIZE);
595 kunmap(page);
596 btrfs_end_transaction(trans, root);
597 mutex_unlock(&root->fs_info->fs_mutex);
598
599 set_page_dirty(page);
600 unlock_page(page);
601 page_cache_release(page);
602out:
603 return ret;
604}
605
606static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
607{
608 struct inode *inode = dentry->d_inode;
609 int err;
610
611 err = inode_change_ok(inode, attr);
612 if (err)
613 return err;
614
615 if (S_ISREG(inode->i_mode) &&
616 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
617 struct btrfs_trans_handle *trans;
618 struct btrfs_root *root = BTRFS_I(inode)->root;
619 u64 mask = root->blocksize - 1;
620 u64 pos = (inode->i_size + mask) & ~mask;
621 u64 hole_size;
622
623 if (attr->ia_size <= pos)
624 goto out;
625
626 btrfs_truncate_page(inode->i_mapping, inode->i_size);
627
628 hole_size = (attr->ia_size - pos + mask) & ~mask;
629 hole_size >>= inode->i_blkbits;
630
631 mutex_lock(&root->fs_info->fs_mutex);
632 trans = btrfs_start_transaction(root, 1);
633 btrfs_set_trans_block_group(trans, inode);
634 err = btrfs_insert_file_extent(trans, root, inode->i_ino,
635 pos, 0, 0, hole_size);
636 BUG_ON(err);
637 btrfs_end_transaction(trans, root);
638 mutex_unlock(&root->fs_info->fs_mutex);
639 }
640out:
641 err = inode_setattr(inode, attr);
642
643 return err;
644}
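/*
 * called when the inode is dropped with no remaining links: truncate
 * everything owned by the inode out of the tree and free the inode item
 */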
645void btrfs_delete_inode(struct inode *inode)
646{
647 struct btrfs_trans_handle *trans;
648 struct btrfs_root *root = BTRFS_I(inode)->root;
649 int ret;
650
651 truncate_inode_pages(&inode->i_data, 0);
652 if (is_bad_inode(inode)) {
653 goto no_delete;
654 }
655 inode->i_size = 0;
656 mutex_lock(&root->fs_info->fs_mutex);
657 trans = btrfs_start_transaction(root, 1);
658 btrfs_set_trans_block_group(trans, inode);
659 ret = btrfs_truncate_in_trans(trans, root, inode);
660 BUG_ON(ret);
661 btrfs_free_inode(trans, root, inode);
662 btrfs_end_transaction(trans, root);
663 mutex_unlock(&root->fs_info->fs_mutex);
664 btrfs_btree_balance_dirty(root);
665 return;
666no_delete:
667 clear_inode(inode);
668}
669
670/*
671 * this returns the key found in the dir entry in the location pointer.
672 * If no dir entries were found, location->objectid is 0.
673 */
674static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
675 struct btrfs_key *location)
676{
677 const char *name = dentry->d_name.name;
678 int namelen = dentry->d_name.len;
679 struct btrfs_dir_item *di;
680 struct btrfs_path *path;
681 struct btrfs_root *root = BTRFS_I(dir)->root;
682 int ret;
683
684 path = btrfs_alloc_path();
685 BUG_ON(!path);
686 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
687 namelen, 0);
688 if (!di || IS_ERR(di)) {
689 location->objectid = 0;
690 ret = 0;
691 goto out;
692 }
693 btrfs_disk_key_to_cpu(location, &di->location);
694out:
695 btrfs_release_path(root, path);
696 btrfs_free_path(path);
697 return ret;
698}
699
700/*
701 * when we hit a tree root in a directory, the btrfs part of the inode
702 * needs to be changed to reflect the root directory of the tree root. This
703 * is kind of like crossing a mount point.
704 */
705static int fixup_tree_root_location(struct btrfs_root *root,
706 struct btrfs_key *location,
707 struct btrfs_root **sub_root)
708{
709 struct btrfs_path *path;
710 struct btrfs_root_item *ri;
711
712 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
713 return 0;
714 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
715 return 0;
716
717 path = btrfs_alloc_path();
718 BUG_ON(!path);
719 mutex_lock(&root->fs_info->fs_mutex);
720
721 *sub_root = btrfs_read_fs_root(root->fs_info, location);
722 if (IS_ERR(*sub_root))
723 return PTR_ERR(*sub_root);
724
725 ri = &(*sub_root)->root_item;
726 location->objectid = btrfs_root_dirid(ri);
727 location->flags = 0;
728 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
729 location->offset = 0;
730
731 btrfs_free_path(path);
732 mutex_unlock(&root->fs_info->fs_mutex);
733 return 0;
734}
735
736static int btrfs_init_locked_inode(struct inode *inode, void *p)
737{
738 struct btrfs_iget_args *args = p;
739 inode->i_ino = args->ino;
740 BTRFS_I(inode)->root = args->root;
741 return 0;
742}
743
744static int btrfs_find_actor(struct inode *inode, void *opaque)
745{
746 struct btrfs_iget_args *args = opaque;
747 return (args->ino == inode->i_ino &&
748 args->root == BTRFS_I(inode)->root);
749}
750
751struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
752 struct btrfs_root *root)
753{
754 struct inode *inode;
755 struct btrfs_iget_args args;
756 args.ino = objectid;
757 args.root = root;
758
759 inode = iget5_locked(s, objectid, btrfs_find_actor,
760 btrfs_init_locked_inode,
761 (void *)&args);
762 return inode;
763}
764
765static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
766 struct nameidata *nd)
767{
768 struct inode * inode;
769 struct btrfs_inode *bi = BTRFS_I(dir);
770 struct btrfs_root *root = bi->root;
771 struct btrfs_root *sub_root = root;
772 struct btrfs_key location;
773 int ret;
774
775 if (dentry->d_name.len > BTRFS_NAME_LEN)
776 return ERR_PTR(-ENAMETOOLONG);
777 mutex_lock(&root->fs_info->fs_mutex);
778 ret = btrfs_inode_by_name(dir, dentry, &location);
779 mutex_unlock(&root->fs_info->fs_mutex);
780 if (ret < 0)
781 return ERR_PTR(ret);
782 inode = NULL;
783 if (location.objectid) {
784 ret = fixup_tree_root_location(root, &location, &sub_root);
785 if (ret < 0)
786 return ERR_PTR(ret);
787 if (ret > 0)
788 return ERR_PTR(-ENOENT);
789 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
790 sub_root);
791 if (!inode)
792 return ERR_PTR(-EACCES);
793 if (inode->i_state & I_NEW) {
794 /* the inode and parent dir are two different roots */
795 if (sub_root != root) {
796 igrab(inode);
797 sub_root->inode = inode;
798 }
799 BTRFS_I(inode)->root = sub_root;
800 memcpy(&BTRFS_I(inode)->location, &location,
801 sizeof(location));
802 btrfs_read_locked_inode(inode);
803 unlock_new_inode(inode);
804 }
805 }
806 return d_splice_alias(inode, dentry);
807}
808
809/*
810 * readahead one full node of leaves as long as their keys include
811 * the objectid supplied
812 */
813static void reada_leaves(struct btrfs_root *root, struct btrfs_path *path,
814 u64 objectid)
815{
816 struct btrfs_node *node;
817 int i;
818 u32 nritems;
819 u64 item_objectid;
820 u64 blocknr;
821 int slot;
822 int ret;
823
824 if (!path->nodes[1])
825 return;
826 node = btrfs_buffer_node(path->nodes[1]);
827 slot = path->slots[1];
828 nritems = btrfs_header_nritems(&node->header);
829 for (i = slot + 1; i < nritems; i++) {
830 item_objectid = btrfs_disk_key_objectid(&node->ptrs[i].key);
831 if (item_objectid != objectid)
832 break;
833 blocknr = btrfs_node_blockptr(node, i);
834 ret = readahead_tree_block(root, blocknr);
835 if (ret)
836 break;
837 }
838}
839static unsigned char btrfs_filetype_table[] = {
840 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
841};
842
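/*
 * walk the dir index (or dir item) keys for this directory starting at
 * f_pos and feed each name found to filldir
 */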
843static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
844{
845 struct inode *inode = filp->f_path.dentry->d_inode;
846 struct btrfs_root *root = BTRFS_I(inode)->root;
847 struct btrfs_item *item;
848 struct btrfs_dir_item *di;
849 struct btrfs_key key;
850 struct btrfs_path *path;
851 int ret;
852 u32 nritems;
853 struct btrfs_leaf *leaf;
854 int slot;
855 int advance;
856 unsigned char d_type;
857 int over = 0;
858 u32 di_cur;
859 u32 di_total;
860 u32 di_len;
861 int key_type = BTRFS_DIR_INDEX_KEY;
862
863 /* FIXME, use a real flag for deciding about the key type */
864 if (root->fs_info->tree_root == root)
865 key_type = BTRFS_DIR_ITEM_KEY;
866 mutex_lock(&root->fs_info->fs_mutex);
867 key.objectid = inode->i_ino;
868 key.flags = 0;
869 btrfs_set_key_type(&key, key_type);
870 key.offset = filp->f_pos;
871 path = btrfs_alloc_path();
872 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
873 if (ret < 0)
874 goto err;
875 advance = 0;
876 reada_leaves(root, path, inode->i_ino);
877 while(1) {
878 leaf = btrfs_buffer_leaf(path->nodes[0]);
879 nritems = btrfs_header_nritems(&leaf->header);
880 slot = path->slots[0];
881 if (advance || slot >= nritems) {
882 if (slot >= nritems -1) {
883 reada_leaves(root, path, inode->i_ino);
884 ret = btrfs_next_leaf(root, path);
885 if (ret)
886 break;
887 leaf = btrfs_buffer_leaf(path->nodes[0]);
888 nritems = btrfs_header_nritems(&leaf->header);
889 slot = path->slots[0];
890 } else {
891 slot++;
892 path->slots[0]++;
893 }
894 }
895 advance = 1;
896 item = leaf->items + slot;
897 if (btrfs_disk_key_objectid(&item->key) != key.objectid)
898 break;
899 if (btrfs_disk_key_type(&item->key) != key_type)
900 break;
901 if (btrfs_disk_key_offset(&item->key) < filp->f_pos)
902 continue;
903 filp->f_pos = btrfs_disk_key_offset(&item->key);
904 advance = 1;
905 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
906 di_cur = 0;
907 di_total = btrfs_item_size(leaf->items + slot);
908 while(di_cur < di_total) {
909 d_type = btrfs_filetype_table[btrfs_dir_type(di)];
910 over = filldir(dirent, (const char *)(di + 1),
911 btrfs_dir_name_len(di),
912 btrfs_disk_key_offset(&item->key),
913 btrfs_disk_key_objectid(&di->location),
914 d_type);
915 if (over)
916 goto nopos;
917 di_len = btrfs_dir_name_len(di) + sizeof(*di);
918 di_cur += di_len;
919 di = (struct btrfs_dir_item *)((char *)di + di_len);
920 }
921 }
922 filp->f_pos++;
923nopos:
924 ret = 0;
925err:
926 btrfs_release_path(root, path);
927 btrfs_free_path(path);
928 mutex_unlock(&root->fs_info->fs_mutex);
929 return ret;
930}
931
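/*
 * for a synchronous write, force everything out by committing the
 * running transaction
 */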
932int btrfs_write_inode(struct inode *inode, int wait)
933{
934 struct btrfs_root *root = BTRFS_I(inode)->root;
935 struct btrfs_trans_handle *trans;
936 int ret = 0;
937
938 if (wait) {
939 mutex_lock(&root->fs_info->fs_mutex);
940 trans = btrfs_start_transaction(root, 1);
941 btrfs_set_trans_block_group(trans, inode);
942 ret = btrfs_commit_transaction(trans, root);
943 mutex_unlock(&root->fs_info->fs_mutex);
944 }
945 return ret;
946}
947
948/*
949 * This is somewhat expensive, updating the tree every time the
950 * inode changes. But, it is most likely to find the inode in cache.
951 * FIXME, needs more benchmarking... there are no reasons other than performance
952 * to keep or drop this code.
953 */
954void btrfs_dirty_inode(struct inode *inode)
955{
956 struct btrfs_root *root = BTRFS_I(inode)->root;
957 struct btrfs_trans_handle *trans;
958
959 mutex_lock(&root->fs_info->fs_mutex);
960 trans = btrfs_start_transaction(root, 1);
961 btrfs_set_trans_block_group(trans, inode);
962 btrfs_update_inode(trans, root, inode);
963 btrfs_end_transaction(trans, root);
964 mutex_unlock(&root->fs_info->fs_mutex);
965 btrfs_btree_balance_dirty(root);
966}
967
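/*
 * allocate a new VFS inode, choose a block group for it and insert the
 * matching inode item into the tree
 */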
968static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
969 struct btrfs_root *root,
970 u64 objectid,
971 struct btrfs_block_group_cache *group,
972 int mode)
973{
974 struct inode *inode;
975 struct btrfs_inode_item inode_item;
976 struct btrfs_key *location;
977 int ret;
978 int owner;
979
980 inode = new_inode(root->fs_info->sb);
981 if (!inode)
982 return ERR_PTR(-ENOMEM);
983
984 BTRFS_I(inode)->root = root;
985 if (mode & S_IFDIR)
986 owner = 0;
987 else
988 owner = 1;
989 group = btrfs_find_block_group(root, group, 0, 0, owner);
990 BTRFS_I(inode)->block_group = group;
991
992 inode->i_uid = current->fsuid;
993 inode->i_gid = current->fsgid;
994 inode->i_mode = mode;
995 inode->i_ino = objectid;
996 inode->i_blocks = 0;
997 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
998 fill_inode_item(&inode_item, inode);
999 location = &BTRFS_I(inode)->location;
1000 location->objectid = objectid;
1001 location->flags = 0;
1002 location->offset = 0;
1003 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1004
1005 ret = btrfs_insert_inode(trans, root, objectid, &inode_item);
1006 BUG_ON(ret);
1007
1008 insert_inode_hash(inode);
1009 return inode;
1010}
1011
1012static inline u8 btrfs_inode_type(struct inode *inode)
1013{
1014 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
1015}
1016
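/*
 * insert a dir item for this name in the parent directory and bump the
 * parent's size to account for the new entry
 */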
1017static int btrfs_add_link(struct btrfs_trans_handle *trans,
1018 struct dentry *dentry, struct inode *inode)
1019{
1020 int ret;
1021 struct btrfs_key key;
1022 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
1023 key.objectid = inode->i_ino;
1024 key.flags = 0;
1025 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1026 key.offset = 0;
1027
1028 ret = btrfs_insert_dir_item(trans, root,
1029 dentry->d_name.name, dentry->d_name.len,
1030 dentry->d_parent->d_inode->i_ino,
1031 &key, btrfs_inode_type(inode));
1032 if (ret == 0) {
1033 dentry->d_parent->d_inode->i_size += dentry->d_name.len * 2;
1034 ret = btrfs_update_inode(trans, root,
1035 dentry->d_parent->d_inode);
1036 }
1037 return ret;
1038}
1039
1040static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
1041 struct dentry *dentry, struct inode *inode)
1042{
1043 int err = btrfs_add_link(trans, dentry, inode);
1044 if (!err) {
1045 d_instantiate(dentry, inode);
1046 return 0;
1047 }
1048 if (err > 0)
1049 err = -EEXIST;
1050 return err;
1051}
1052
1053static int btrfs_create(struct inode *dir, struct dentry *dentry,
1054 int mode, struct nameidata *nd)
1055{
1056 struct btrfs_trans_handle *trans;
1057 struct btrfs_root *root = BTRFS_I(dir)->root;
1058 struct inode *inode;
1059 int err;
1060 int drop_inode = 0;
1061 u64 objectid;
1062
1063 mutex_lock(&root->fs_info->fs_mutex);
1064 trans = btrfs_start_transaction(root, 1);
1065 btrfs_set_trans_block_group(trans, dir);
1066
1067 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1068 if (err) {
1069 err = -ENOSPC;
1070 goto out_unlock;
1071 }
1072
1073 inode = btrfs_new_inode(trans, root, objectid,
1074 BTRFS_I(dir)->block_group, mode);
1075 err = PTR_ERR(inode);
1076 if (IS_ERR(inode))
1077 goto out_unlock;
1078
1079 btrfs_set_trans_block_group(trans, inode);
1080 err = btrfs_add_nondir(trans, dentry, inode);
1081 if (err)
1082 drop_inode = 1;
1083 else {
1084 inode->i_mapping->a_ops = &btrfs_aops;
1085 inode->i_fop = &btrfs_file_operations;
1086 inode->i_op = &btrfs_file_inode_operations;
1087 }
1088 dir->i_sb->s_dirt = 1;
1089 btrfs_update_inode_block_group(trans, inode);
1090 btrfs_update_inode_block_group(trans, dir);
1091out_unlock:
1092 btrfs_end_transaction(trans, root);
1093 mutex_unlock(&root->fs_info->fs_mutex);
1094
1095 if (drop_inode) {
1096 inode_dec_link_count(inode);
1097 iput(inode);
1098 }
1099 btrfs_btree_balance_dirty(root);
1100 return err;
1101}
1102
1103static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
1104 struct dentry *dentry)
1105{
1106 struct btrfs_trans_handle *trans;
1107 struct btrfs_root *root = BTRFS_I(dir)->root;
1108 struct inode *inode = old_dentry->d_inode;
1109 int err;
1110 int drop_inode = 0;
1111
1112 if (inode->i_nlink == 0)
1113 return -ENOENT;
1114
1115 inc_nlink(inode);
1116 mutex_lock(&root->fs_info->fs_mutex);
1117 trans = btrfs_start_transaction(root, 1);
1118 btrfs_set_trans_block_group(trans, dir);
1119 atomic_inc(&inode->i_count);
1120 err = btrfs_add_nondir(trans, dentry, inode);
1121 if (err)
1122 drop_inode = 1;
1123 dir->i_sb->s_dirt = 1;
1124 btrfs_update_inode_block_group(trans, dir);
1125 btrfs_update_inode(trans, root, inode);
1126
1127 btrfs_end_transaction(trans, root);
1128 mutex_unlock(&root->fs_info->fs_mutex);
1129
1130 if (drop_inode) {
1131 inode_dec_link_count(inode);
1132 iput(inode);
1133 }
1134 btrfs_btree_balance_dirty(root);
1135 return err;
1136}
1137
1138static int btrfs_make_empty_dir(struct btrfs_trans_handle *trans,
1139 struct btrfs_root *root,
1140 u64 objectid, u64 dirid)
1141{
1142 int ret;
1143 char buf[2];
1144 struct btrfs_key key;
1145
1146 buf[0] = '.';
1147 buf[1] = '.';
1148
1149 key.objectid = objectid;
1150 key.offset = 0;
1151 key.flags = 0;
1152 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1153
1154 ret = btrfs_insert_dir_item(trans, root, buf, 1, objectid,
1155 &key, BTRFS_FT_DIR);
1156 if (ret)
1157 goto error;
1158 key.objectid = dirid;
1159 ret = btrfs_insert_dir_item(trans, root, buf, 2, objectid,
1160 &key, BTRFS_FT_DIR);
1161 if (ret)
1162 goto error;
1163error:
1164 return ret;
1165}
1166
1167static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1168{
1169 struct inode *inode;
1170 struct btrfs_trans_handle *trans;
1171 struct btrfs_root *root = BTRFS_I(dir)->root;
1172 int err = 0;
1173 int drop_on_err = 0;
1174 u64 objectid;
1175
1176 mutex_lock(&root->fs_info->fs_mutex);
1177 trans = btrfs_start_transaction(root, 1);
1178 btrfs_set_trans_block_group(trans, dir);
1179 if (IS_ERR(trans)) {
1180 err = PTR_ERR(trans);
1181 goto out_unlock;
1182 }
1183
1184 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1185 if (err) {
1186 err = -ENOSPC;
1187 goto out_unlock;
1188 }
1189
1190 inode = btrfs_new_inode(trans, root, objectid,
1191 BTRFS_I(dir)->block_group, S_IFDIR | mode);
1192 if (IS_ERR(inode)) {
1193 err = PTR_ERR(inode);
1194 goto out_fail;
1195 }
1196 drop_on_err = 1;
1197 inode->i_op = &btrfs_dir_inode_operations;
1198 inode->i_fop = &btrfs_dir_file_operations;
1199 btrfs_set_trans_block_group(trans, inode);
1200
1201 err = btrfs_make_empty_dir(trans, root, inode->i_ino, dir->i_ino);
1202 if (err)
1203 goto out_fail;
1204
1205 inode->i_size = 6;
1206 err = btrfs_update_inode(trans, root, inode);
1207 if (err)
1208 goto out_fail;
1209 err = btrfs_add_link(trans, dentry, inode);
1210 if (err)
1211 goto out_fail;
1212 d_instantiate(dentry, inode);
1213 drop_on_err = 0;
1214 dir->i_sb->s_dirt = 1;
1215 btrfs_update_inode_block_group(trans, inode);
1216 btrfs_update_inode_block_group(trans, dir);
1217
1218out_fail:
1219 btrfs_end_transaction(trans, root);
1220out_unlock:
1221 mutex_unlock(&root->fs_info->fs_mutex);
1222 if (drop_on_err)
1223 iput(inode);
1224 btrfs_btree_balance_dirty(root);
1225 return err;
1226}
1227
1228/*
1229 * FIBMAP and others want to pass in a fake buffer head. They need to
1230 * use BTRFS_GET_BLOCK_NO_DIRECT to make sure we don't try to memcpy
1231 * any packed file data into the fake bh
1232 */
1233#define BTRFS_GET_BLOCK_NO_CREATE 0
1234#define BTRFS_GET_BLOCK_CREATE 1
1235#define BTRFS_GET_BLOCK_NO_DIRECT 2
1236
1237/*
1238 * FIXME: create==1 does not work.
1239 */
1240static int btrfs_get_block_lock(struct inode *inode, sector_t iblock,
1241 struct buffer_head *result, int create)
1242{
1243 int ret;
1244 int err = 0;
1245 u64 blocknr;
1246 u64 extent_start = 0;
1247 u64 extent_end = 0;
1248 u64 objectid = inode->i_ino;
1249 u32 found_type;
1250 u64 alloc_hint = 0;
1251 struct btrfs_path *path;
1252 struct btrfs_root *root = BTRFS_I(inode)->root;
1253 struct btrfs_file_extent_item *item;
1254 struct btrfs_leaf *leaf;
1255 struct btrfs_disk_key *found_key;
1256 struct btrfs_trans_handle *trans = NULL;
1257
1258 path = btrfs_alloc_path();
1259 BUG_ON(!path);
1260 if (create & BTRFS_GET_BLOCK_CREATE) {
1261 /*
1262 * danger! this only works if the page is properly up
1263 * to date somehow
1264 */
1265 trans = btrfs_start_transaction(root, 1);
1266 if (!trans) {
1267 err = -ENOMEM;
1268 goto out;
1269 }
1270 ret = btrfs_drop_extents(trans, root, inode,
1271 iblock << inode->i_blkbits,
1272 (iblock + 1) << inode->i_blkbits,
1273 &alloc_hint);
1274 BUG_ON(ret);
1275 }
1276
1277 ret = btrfs_lookup_file_extent(NULL, root, path,
1278 objectid,
1279 iblock << inode->i_blkbits, 0);
1280 if (ret < 0) {
1281 err = ret;
1282 goto out;
1283 }
1284
1285 if (ret != 0) {
1286 if (path->slots[0] == 0) {
1287 btrfs_release_path(root, path);
1288 goto not_found;
1289 }
1290 path->slots[0]--;
1291 }
1292
1293 item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
1294 struct btrfs_file_extent_item);
1295 leaf = btrfs_buffer_leaf(path->nodes[0]);
1296 blocknr = btrfs_file_extent_disk_blocknr(item);
1297 blocknr += btrfs_file_extent_offset(item);
1298
1299 /* are we inside the extent that was found? */
1300 found_key = &leaf->items[path->slots[0]].key;
1301 found_type = btrfs_disk_key_type(found_key);
1302 if (btrfs_disk_key_objectid(found_key) != objectid ||
1303 found_type != BTRFS_EXTENT_DATA_KEY) {
1304 extent_end = 0;
1305 extent_start = 0;
1306 goto not_found;
1307 }
1308 found_type = btrfs_file_extent_type(item);
1309 extent_start = btrfs_disk_key_offset(&leaf->items[path->slots[0]].key);
1310 if (found_type == BTRFS_FILE_EXTENT_REG) {
1311 extent_start = extent_start >> inode->i_blkbits;
1312 extent_end = extent_start + btrfs_file_extent_num_blocks(item);
1313 err = 0;
1314 if (btrfs_file_extent_disk_blocknr(item) == 0)
1315 goto out;
1316 if (iblock >= extent_start && iblock < extent_end) {
1317 btrfs_map_bh_to_logical(root, result, blocknr +
1318 iblock - extent_start);
1319 goto out;
1320 }
1321 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
1322 char *ptr;
1323 char *map;
1324 u32 size;
1325
1326 if (create & BTRFS_GET_BLOCK_NO_DIRECT) {
1327 err = -EINVAL;
1328 goto out;
1329 }
1330 size = btrfs_file_extent_inline_len(leaf->items +
1331 path->slots[0]);
1332 extent_end = (extent_start + size) >> inode->i_blkbits;
1333 extent_start >>= inode->i_blkbits;
1334 if (iblock < extent_start || iblock > extent_end) {
1335 goto not_found;
1336 }
1337 ptr = btrfs_file_extent_inline_start(item);
1338 map = kmap(result->b_page);
1339 memcpy(map, ptr, size);
1340 memset(map + size, 0, PAGE_CACHE_SIZE - size);
1341 flush_dcache_page(result->b_page);
1342 kunmap(result->b_page);
1343 set_buffer_uptodate(result);
1344 SetPageChecked(result->b_page);
1345 btrfs_map_bh_to_logical(root, result, 0);
1346 }
1347not_found:
1348 if (create & BTRFS_GET_BLOCK_CREATE) {
1349 struct btrfs_key ins;
1350 ret = btrfs_alloc_extent(trans, root, inode->i_ino,
1351 1, alloc_hint, (u64)-1,
1352 &ins, 1);
1353 BUG_ON(ret);
1354 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
1355 iblock << inode->i_blkbits,
1356 ins.objectid, ins.offset,
1357 ins.offset);
1358 BUG_ON(ret);
1359 btrfs_map_bh_to_logical(root, result, ins.objectid);
1360 }
1361out:
1362 if (trans)
1363 err = btrfs_end_transaction(trans, root);
1364 btrfs_free_path(path);
1365 return err;
1366}
1367
1368int btrfs_get_block(struct inode *inode, sector_t iblock,
1369 struct buffer_head *result, int create)
1370{
1371 int err;
1372 struct btrfs_root *root = BTRFS_I(inode)->root;
1373 mutex_lock(&root->fs_info->fs_mutex);
1374 err = btrfs_get_block_lock(inode, iblock, result, create);
1375 mutex_unlock(&root->fs_info->fs_mutex);
1376 return err;
1377}
1378
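/*
 * same as btrfs_get_block, but also look up the checksum item for this
 * block and stash the crc in b_private so the read completion handler
 * can verify the data; a missing csum is treated as a preallocated hole
 */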
1379int btrfs_get_block_csum(struct inode *inode, sector_t iblock,
1380 struct buffer_head *result, int create)
1381{
1382 int ret;
1383 struct btrfs_root *root = BTRFS_I(inode)->root;
1384 struct page *page = result->b_page;
1385 u64 offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(result);
1386 struct btrfs_csum_item *item;
1387 struct btrfs_path *path = NULL;
1388
1389 mutex_lock(&root->fs_info->fs_mutex);
1390 ret = btrfs_get_block_lock(inode, iblock, result, create);
1391 if (ret)
1392 goto out;
1393
1394 path = btrfs_alloc_path();
1395 item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, offset, 0);
1396 if (IS_ERR(item)) {
1397 ret = PTR_ERR(item);
1398 /* a csum that isn't present is a preallocated region. */
1399 if (ret == -ENOENT || ret == -EFBIG)
1400 ret = 0;
1401 result->b_private = 0;
1402 goto out;
1403 }
1404 memcpy((char *)&result->b_private, &item->csum, BTRFS_CRC32_SIZE);
1405printk("get_block_sum file %lu offset %llu csum %X\n", inode->i_ino, (unsigned long long)offset, *(int *)(&item->csum));
1406out:
1407 if (path)
1408 btrfs_free_path(path);
1409 mutex_unlock(&root->fs_info->fs_mutex);
1410 return ret;
1411}
1412
1413static int btrfs_get_block_bmap(struct inode *inode, sector_t iblock,
1414 struct buffer_head *result, int create)
1415{
1416 struct btrfs_root *root = BTRFS_I(inode)->root;
1417 mutex_lock(&root->fs_info->fs_mutex);
1418 btrfs_get_block_lock(inode, iblock, result, BTRFS_GET_BLOCK_NO_DIRECT);
1419 mutex_unlock(&root->fs_info->fs_mutex);
1420 return 0;
1421}
1422
1423static sector_t btrfs_bmap(struct address_space *as, sector_t block)
1424{
1425 return generic_block_bmap(as, block, btrfs_get_block_bmap);
1426}
1427
1428static int btrfs_prepare_write(struct file *file, struct page *page,
1429 unsigned from, unsigned to)
1430{
1431 return block_prepare_write(page, from, to, btrfs_get_block);
1432}
1433
1434static void buffer_io_error(struct buffer_head *bh)
1435{
1436 char b[BDEVNAME_SIZE];
1437
1438 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
1439 bdevname(bh->b_bdev, b),
1440 (unsigned long long)bh->b_blocknr);
1441}
1442
1443/*
1444 * I/O completion handler for block_read_full_page() - pages
1445 * which come unlocked at the end of I/O.
1446 */
1447static void btrfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
1448{
1449 unsigned long flags;
1450 struct buffer_head *first;
1451 struct buffer_head *tmp;
1452 struct page *page;
1453 int page_uptodate = 1;
1454 struct inode *inode;
1455 int ret;
1456
1457 BUG_ON(!buffer_async_read(bh));
1458
1459 page = bh->b_page;
1460 inode = page->mapping->host;
1461 if (uptodate) {
1462 void *kaddr;
1463 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1464 if (bh->b_private) {
1465 char csum[BTRFS_CRC32_SIZE];
1466 kaddr = kmap_atomic(page, KM_IRQ0);
1467 ret = btrfs_csum_data(root, kaddr + bh_offset(bh),
1468 bh->b_size, csum);
1469 BUG_ON(ret);
1470 if (memcmp(csum, &bh->b_private, BTRFS_CRC32_SIZE)) {
1471 u64 offset;
1472 offset = (page->index << PAGE_CACHE_SHIFT) +
1473 bh_offset(bh);
1474 printk("btrfs csum failed ino %lu off %llu\n",
1475 page->mapping->host->i_ino,
1476 (unsigned long long)offset);
1477 memset(kaddr + bh_offset(bh), 1, bh->b_size);
1478 flush_dcache_page(page);
1479printk("bad verify file %lu offset %llu bh_private %lX csum %X\n", inode->i_ino, (unsigned long long)offset, (unsigned long)(bh->b_private), *(int *)csum);
1480 }
1481 kunmap_atomic(kaddr, KM_IRQ0);
1482 }
1483 set_buffer_uptodate(bh);
1484 } else {
1485 clear_buffer_uptodate(bh);
1486 if (printk_ratelimit())
1487 buffer_io_error(bh);
1488 SetPageError(page);
1489 }
1490
1491 /*
1492 * Be _very_ careful from here on. Bad things can happen if
1493 * two buffer heads end IO at almost the same time and both
1494 * decide that the page is now completely done.
1495 */
1496 first = page_buffers(page);
1497 local_irq_save(flags);
1498 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
1499 clear_buffer_async_read(bh);
1500 unlock_buffer(bh);
1501 tmp = bh;
1502 do {
1503 if (!buffer_uptodate(tmp))
1504 page_uptodate = 0;
1505 if (buffer_async_read(tmp)) {
1506 BUG_ON(!buffer_locked(tmp));
1507 goto still_busy;
1508 }
1509 tmp = tmp->b_this_page;
1510 } while (tmp != bh);
1511 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
1512 local_irq_restore(flags);
1513
1514 /*
1515 * If none of the buffers had errors and they are all
1516 * uptodate then we can set the page uptodate.
1517 */
1518 if (page_uptodate && !PageError(page))
1519 SetPageUptodate(page);
1520 unlock_page(page);
1521 return;
1522
1523still_busy:
1524 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
1525 local_irq_restore(flags);
1526 return;
1527}
1528
1529/*
1530 * Generic "read page" function for block devices that have the normal
1531 * get_block functionality. This is most of the block device filesystems.
1532 * Reads the page asynchronously --- the unlock_buffer() and
1533 * set/clear_buffer_uptodate() functions propagate buffer state into the
1534 * page struct once IO has completed.
1535 */
1536int btrfs_readpage(struct file *file, struct page *page)
1537{
1538 struct inode *inode = page->mapping->host;
1539 sector_t iblock, lblock;
1540 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1541 unsigned int blocksize;
1542 int nr, i;
1543 int fully_mapped = 1;
1544
1545 BUG_ON(!PageLocked(page));
1546 blocksize = 1 << inode->i_blkbits;
1547 if (!page_has_buffers(page))
1548 create_empty_buffers(page, blocksize, 0);
1549 head = page_buffers(page);
1550
1551 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1552 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1553 bh = head;
1554 nr = 0;
1555 i = 0;
1556
1557 do {
1558 if (buffer_uptodate(bh))
1559 continue;
1560
1561 if (!buffer_mapped(bh)) {
1562 int err = 0;
1563
1564 fully_mapped = 0;
1565 if (iblock < lblock) {
1566 WARN_ON(bh->b_size != blocksize);
1567 err = btrfs_get_block_csum(inode, iblock,
1568 bh, 0);
1569 if (err)
1570 SetPageError(page);
1571 }
1572 if (!buffer_mapped(bh)) {
1573 void *kaddr = kmap_atomic(page, KM_USER0);
1574 memset(kaddr + i * blocksize, 0, blocksize);
1575 flush_dcache_page(page);
1576 kunmap_atomic(kaddr, KM_USER0);
1577 if (!err)
1578 set_buffer_uptodate(bh);
1579 continue;
1580 }
1581 /*
1582 * get_block() might have updated the buffer
1583 * synchronously
1584 */
1585 if (buffer_uptodate(bh))
1586 continue;
1587 }
1588 arr[nr++] = bh;
1589 } while (i++, iblock++, (bh = bh->b_this_page) != head);
1590
1591 if (fully_mapped)
1592 SetPageMappedToDisk(page);
1593
1594 if (!nr) {
1595 /*
1596 * All buffers are uptodate - we can set the page uptodate
1597 * as well. But not if get_block() returned an error.
1598 */
1599 if (!PageError(page))
1600 SetPageUptodate(page);
1601 unlock_page(page);
1602 return 0;
1603 }
1604
1605 /* Stage two: lock the buffers */
1606 for (i = 0; i < nr; i++) {
1607 bh = arr[i];
1608 lock_buffer(bh);
1609 bh->b_end_io = btrfs_end_buffer_async_read;
1610 set_buffer_async_read(bh);
1611 }
1612
1613 /*
1614 * Stage 3: start the IO. Check for uptodateness
1615 * inside the buffer lock in case another process reading
1616 * the underlying blockdev brought it uptodate (the sct fix).
1617 */
1618 for (i = 0; i < nr; i++) {
1619 bh = arr[i];
1620 if (buffer_uptodate(bh))
1621 btrfs_end_buffer_async_read(bh, 1);
1622 else
1623 submit_bh(READ, bh);
1624 }
1625 return 0;
1626}
1627
1628/*
1629 * Aside from a tiny bit of packed file data handling, this is the
1630 * same as the generic code.
1631 *
1632 * While block_write_full_page is writing back the dirty buffers under
1633 * the page lock, whoever dirtied the buffers may decide to clean them
1634 * again at any time. We handle that by only looking at the buffer
1635 * state inside lock_buffer().
1636 *
1637 * If block_write_full_page() is called for regular writeback
1638 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1639 * locked buffer. This only can happen if someone has written the buffer
1640 * directly, with submit_bh(). At the address_space level PageWriteback
1641 * prevents this contention from occurring.
1642 */
1643static int __btrfs_write_full_page(struct inode *inode, struct page *page,
1644 struct writeback_control *wbc)
1645{
1646 int err;
1647 sector_t block;
1648 sector_t last_block;
1649 struct buffer_head *bh, *head;
1650 const unsigned blocksize = 1 << inode->i_blkbits;
1651 int nr_underway = 0;
1652 struct btrfs_root *root = BTRFS_I(inode)->root;
1653
1654 BUG_ON(!PageLocked(page));
1655
1656 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1657
1658 if (!page_has_buffers(page)) {
1659 create_empty_buffers(page, blocksize,
1660 (1 << BH_Dirty)|(1 << BH_Uptodate));
1661 }
1662
1663 /*
1664 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1665 * here, and the (potentially unmapped) buffers may become dirty at
1666 * any time. If a buffer becomes dirty here after we've inspected it
1667 * then we just miss that fact, and the page stays dirty.
1668 *
1669 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1670 * handle that here by just cleaning them.
1671 */
1672
1673 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1674 head = page_buffers(page);
1675 bh = head;
1676
1677 /*
1678 * Get all the dirty buffers mapped to disk addresses and
1679 * handle any aliases from the underlying blockdev's mapping.
1680 */
1681 do {
1682 if (block > last_block) {
1683 /*
1684 * mapped buffers outside i_size will occur, because
1685 * this page can be outside i_size when there is a
1686 * truncate in progress.
1687 */
1688 /*
1689 * The buffer was zeroed by block_write_full_page()
1690 */
1691 clear_buffer_dirty(bh);
1692 set_buffer_uptodate(bh);
1693 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1694 WARN_ON(bh->b_size != blocksize);
1695 err = btrfs_get_block(inode, block, bh, 0);
1696 if (err) {
1697 goto recover;
1698 }
1699 if (buffer_new(bh)) {
1700 /* blockdev mappings never come here */
1701 clear_buffer_new(bh);
1702 }
1703 }
1704 bh = bh->b_this_page;
1705 block++;
1706 } while (bh != head);
1707
1708 do {
1709 if (!buffer_mapped(bh))
1710 continue;
1711 /*
1712 * If it's a fully non-blocking write attempt and we cannot
1713 * lock the buffer then redirty the page. Note that this can
1714 * potentially cause a busy-wait loop from pdflush and kswapd
1715 * activity, but those code paths have their own higher-level
1716 * throttling.
1717 */
1718 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1719 lock_buffer(bh);
1720 } else if (test_set_buffer_locked(bh)) {
1721 redirty_page_for_writepage(wbc, page);
1722 continue;
1723 }
1724 if (test_clear_buffer_dirty(bh) && bh->b_blocknr != 0) {
1725 struct btrfs_trans_handle *trans;
1726 int ret;
1727 u64 off = page->index << PAGE_CACHE_SHIFT;
1728 char *kaddr;
1729
1730 off += bh_offset(bh);
1731 mutex_lock(&root->fs_info->fs_mutex);
1732 trans = btrfs_start_transaction(root, 1);
1733 btrfs_set_trans_block_group(trans, inode);
1734 kaddr = kmap(page);
1735 ret = btrfs_csum_file_block(trans, root, inode->i_ino,
1736 off, kaddr + bh_offset(bh),
1737 bh->b_size);
1738 kunmap(page);
1739 BUG_ON(ret);
1740 ret = btrfs_end_transaction(trans, root);
1741 BUG_ON(ret);
1742 mutex_unlock(&root->fs_info->fs_mutex);
1743 mark_buffer_async_write(bh);
1744 } else {
1745 unlock_buffer(bh);
1746 }
1747 } while ((bh = bh->b_this_page) != head);
1748
1749 /*
1750 * The page and its buffers are protected by PageWriteback(), so we can
1751 * drop the bh refcounts early.
1752 */
1753 BUG_ON(PageWriteback(page));
1754 set_page_writeback(page);
1755
1756 do {
1757 struct buffer_head *next = bh->b_this_page;
1758 if (buffer_async_write(bh)) {
1759 submit_bh(WRITE, bh);
1760 nr_underway++;
1761 }
1762 bh = next;
1763 } while (bh != head);
1764 unlock_page(page);
1765
1766 err = 0;
1767done:
1768 if (nr_underway == 0) {
1769 /*
1770 * The page was marked dirty, but the buffers were
1771 * clean. Someone wrote them back by hand with
1772 * ll_rw_block/submit_bh. A rare case.
1773 */
1774 int uptodate = 1;
1775 do {
1776 if (!buffer_uptodate(bh)) {
1777 uptodate = 0;
1778 break;
1779 }
1780 bh = bh->b_this_page;
1781 } while (bh != head);
1782 if (uptodate)
1783 SetPageUptodate(page);
1784 end_page_writeback(page);
1785 }
1786 return err;
1787
1788recover:
1789 /*
1790 * ENOSPC, or some other error. We may already have added some
1791 * blocks to the file, so we need to write these out to avoid
1792 * exposing stale data.
1793 * The page is currently locked and not marked for writeback
1794 */
1795 bh = head;
1796 /* Recovery: lock and submit the mapped buffers */
1797 do {
1798 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1799 lock_buffer(bh);
1800 mark_buffer_async_write(bh);
1801 } else {
1802 /*
1803 * The buffer may have been set dirty during
1804 * attachment to a dirty page.
1805 */
1806 clear_buffer_dirty(bh);
1807 }
1808 } while ((bh = bh->b_this_page) != head);
1809 SetPageError(page);
1810 BUG_ON(PageWriteback(page));
1811 set_page_writeback(page);
1812 do {
1813 struct buffer_head *next = bh->b_this_page;
1814 if (buffer_async_write(bh)) {
1815 clear_buffer_dirty(bh);
1816 submit_bh(WRITE, bh);
1817 nr_underway++;
1818 }
1819 bh = next;
1820 } while (bh != head);
1821 unlock_page(page);
1822 goto done;
1823}
1824
1825static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
1826{
1827 struct inode * const inode = page->mapping->host;
1828 loff_t i_size = i_size_read(inode);
1829 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
1830 unsigned offset;
1831 void *kaddr;
1832
1833 /* Is the page fully inside i_size? */
1834 if (page->index < end_index)
1835 return __btrfs_write_full_page(inode, page, wbc);
1836
1837 /* Is the page fully outside i_size? (truncate in progress) */
1838 offset = i_size & (PAGE_CACHE_SIZE-1);
1839 if (page->index >= end_index+1 || !offset) {
1840 /*
1841 * The page may have dirty, unmapped buffers. For example,
1842 * they may have been added in ext3_writepage(). Make them
1843 * freeable here, so the page does not leak.
1844 */
1845 block_invalidatepage(page, 0);
1846 unlock_page(page);
1847 return 0; /* don't care */
1848 }
1849
1850 /*
1851 * The page straddles i_size. It must be zeroed out on each and every
1852 * writepage invocation because it may be mmapped. "A file is mapped
1853 * in multiples of the page size. For a file that is not a multiple of
1854 * the page size, the remaining memory is zeroed when mapped, and
1855 * writes to that region are not written out to the file."
1856 */
1857 kaddr = kmap_atomic(page, KM_USER0);
1858 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
1859 flush_dcache_page(page);
1860 kunmap_atomic(kaddr, KM_USER0);
1861 return __btrfs_write_full_page(inode, page, wbc);
1862}
1863
1864/*
1865 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
1866 * called from a page fault handler when a page is first dirtied. Hence we must
1867 * be careful to check for EOF conditions here. We set the page up correctly
1868 * for a written page which means we get ENOSPC checking when writing into
1869 * holes and correct delalloc and unwritten extent mapping on filesystems that
1870 * support these features.
1871 *
1872 * We are not allowed to take the i_mutex here so we have to play games to
1873 * protect against truncate races as the page could now be beyond EOF. Because
1874 * vmtruncate() writes the inode size before removing pages, once we have the
1875 * page lock we can determine safely if the page is beyond EOF. If it is not
1876 * beyond EOF, then the page is guaranteed safe against truncation until we
1877 * unlock the page.
1878 */
1879int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
1880{
1881 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1882 unsigned long end;
1883 loff_t size;
1884 int ret = -EINVAL;
1885
1886 lock_page(page);
1887 wait_on_page_writeback(page);
1888printk("btrfs_page_mkwrite %lu %lu\n", page->mapping->host->i_ino, page->index);
1889 size = i_size_read(inode);
1890 if ((page->mapping != inode->i_mapping) ||
1891 ((page->index << PAGE_CACHE_SHIFT) > size)) {
1892 /* page got truncated out from underneath us */
1893 goto out_unlock;
1894 }
1895
1896 /* page is wholly or partially inside EOF */
1897 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
1898 end = size & ~PAGE_CACHE_MASK;
1899 else
1900 end = PAGE_CACHE_SIZE;
1901
1902 ret = btrfs_prepare_write(NULL, page, 0, end);
1903 if (!ret)
1904 ret = btrfs_commit_write(NULL, page, 0, end);
1905
1906out_unlock:
1907 unlock_page(page);
1908 return ret;
1909}
1910
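/*
 * Usage sketch (not part of this file): assuming btrfs_page_mkwrite() is
 * wired up as the vm_ops->page_mkwrite handler for btrfs mappings elsewhere
 * in this patch set, it runs on the first store to a clean page of a shared,
 * writable mapping.  From userspace, something like this triggers it and
 * then the writepage path on msync:
 *
 *	int fd = open("file", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 *	p[0] = 1;			-> fault + btrfs_page_mkwrite()
 *	msync(p, 4096, MS_SYNC);	-> writeback via btrfs_writepage()
 */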
1911static void btrfs_truncate(struct inode *inode)
1912{
1913 struct btrfs_root *root = BTRFS_I(inode)->root;
1914 int ret;
1915 struct btrfs_trans_handle *trans;
1916
1917 if (!S_ISREG(inode->i_mode))
1918 return;
1919 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1920 return;
1921
1922 btrfs_truncate_page(inode->i_mapping, inode->i_size);
1923
1924 mutex_lock(&root->fs_info->fs_mutex);
1925 trans = btrfs_start_transaction(root, 1);
1926 btrfs_set_trans_block_group(trans, inode);
1927
1928 /* FIXME, add redo link to tree so we don't leak on crash */
1929 ret = btrfs_truncate_in_trans(trans, root, inode);
1930 BUG_ON(ret);
1931 btrfs_update_inode(trans, root, inode);
1932 ret = btrfs_end_transaction(trans, root);
1933 BUG_ON(ret);
1934 mutex_unlock(&root->fs_info->fs_mutex);
1935 btrfs_btree_balance_dirty(root);
1936}
1937
1938int btrfs_commit_write(struct file *file, struct page *page,
1939 unsigned from, unsigned to)
1940{
1941 struct inode *inode = page->mapping->host;
1942 struct buffer_head *bh;
1943 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1944
1945 SetPageUptodate(page);
1946 bh = page_buffers(page);
1947 set_buffer_uptodate(bh);
1948 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
1949 set_page_dirty(page);
1950 }
1951 if (pos > inode->i_size) {
1952 i_size_write(inode, pos);
1953 mark_inode_dirty(inode);
1954 }
1955 return 0;
1956}
1957
1958static int create_subvol(struct btrfs_root *root, char *name, int namelen)
1959{
1960 struct btrfs_trans_handle *trans;
1961 struct btrfs_key key;
1962 struct btrfs_root_item root_item;
1963 struct btrfs_inode_item *inode_item;
1964 struct buffer_head *subvol;
1965 struct btrfs_leaf *leaf;
1966 struct btrfs_root *new_root;
1967 struct inode *inode;
1968 struct inode *dir;
1969 int ret;
1970 u64 objectid;
1971 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
1972
1973 mutex_lock(&root->fs_info->fs_mutex);
1974 trans = btrfs_start_transaction(root, 1);
1975 BUG_ON(!trans);
1976
1977 	subvol = btrfs_alloc_free_block(trans, root, 0);
1978 	if (subvol == NULL) {
1979 		btrfs_end_transaction(trans, root);
		mutex_unlock(&root->fs_info->fs_mutex);
		return -ENOSPC;
	}
1980 leaf = btrfs_buffer_leaf(subvol);
1981 btrfs_set_header_nritems(&leaf->header, 0);
1982 btrfs_set_header_level(&leaf->header, 0);
1983 btrfs_set_header_blocknr(&leaf->header, bh_blocknr(subvol));
1984 btrfs_set_header_generation(&leaf->header, trans->transid);
1985 btrfs_set_header_owner(&leaf->header, root->root_key.objectid);
1986 memcpy(leaf->header.fsid, root->fs_info->disk_super->fsid,
1987 sizeof(leaf->header.fsid));
1988 mark_buffer_dirty(subvol);
1989
1990 inode_item = &root_item.inode;
1991 memset(inode_item, 0, sizeof(*inode_item));
1992 btrfs_set_inode_generation(inode_item, 1);
1993 btrfs_set_inode_size(inode_item, 3);
1994 btrfs_set_inode_nlink(inode_item, 1);
1995 btrfs_set_inode_nblocks(inode_item, 1);
1996 btrfs_set_inode_mode(inode_item, S_IFDIR | 0755);
1997
1998 btrfs_set_root_blocknr(&root_item, bh_blocknr(subvol));
1999 btrfs_set_root_refs(&root_item, 1);
2000 brelse(subvol);
2001 subvol = NULL;
2002
2003 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2004 0, &objectid);
2005 BUG_ON(ret);
2006
2007 btrfs_set_root_dirid(&root_item, new_dirid);
2008
2009 key.objectid = objectid;
2010 key.offset = 1;
2011 key.flags = 0;
2012 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2013 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2014 &root_item);
2015 BUG_ON(ret);
2016
2017 /*
2018 * insert the directory item
2019 */
2020 key.offset = (u64)-1;
2021 dir = root->fs_info->sb->s_root->d_inode;
2022 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2023 name, namelen, dir->i_ino, &key,
2024 BTRFS_FT_DIR);
2025 BUG_ON(ret);
2026
2027 ret = btrfs_commit_transaction(trans, root);
2028 BUG_ON(ret);
2029
2030 new_root = btrfs_read_fs_root(root->fs_info, &key);
2031 BUG_ON(!new_root);
2032
2033 trans = btrfs_start_transaction(new_root, 1);
2034 BUG_ON(!trans);
2035
2036 inode = btrfs_new_inode(trans, new_root, new_dirid,
2037 BTRFS_I(dir)->block_group, S_IFDIR | 0700);
2038 inode->i_op = &btrfs_dir_inode_operations;
2039 inode->i_fop = &btrfs_dir_file_operations;
2040 	new_root->inode = inode;
2041
2042 ret = btrfs_make_empty_dir(trans, new_root, new_dirid, new_dirid);
2043 BUG_ON(ret);
2044
2045 inode->i_nlink = 1;
2046 inode->i_size = 6;
2047 ret = btrfs_update_inode(trans, new_root, inode);
2048 BUG_ON(ret);
2049
2050 ret = btrfs_commit_transaction(trans, new_root);
2051 BUG_ON(ret);
2052
2053 mutex_unlock(&root->fs_info->fs_mutex);
2054 btrfs_btree_balance_dirty(root);
2055 return 0;
2056}
2057
2058static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
2059{
2060 struct btrfs_trans_handle *trans;
2061 struct btrfs_key key;
2062 struct btrfs_root_item new_root_item;
2063 int ret;
2064 u64 objectid;
2065
2066 if (!root->ref_cows)
2067 return -EINVAL;
2068
2069 mutex_lock(&root->fs_info->fs_mutex);
2070 trans = btrfs_start_transaction(root, 1);
2071 BUG_ON(!trans);
2072
2073 ret = btrfs_update_inode(trans, root, root->inode);
2074 BUG_ON(ret);
2075
2076 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2077 0, &objectid);
2078 BUG_ON(ret);
2079
2080 memcpy(&new_root_item, &root->root_item,
2081 sizeof(new_root_item));
2082
2083 key.objectid = objectid;
2084 key.offset = 1;
2085 key.flags = 0;
2086 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2087 btrfs_set_root_blocknr(&new_root_item, bh_blocknr(root->node));
2088
2089 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2090 &new_root_item);
2091 BUG_ON(ret);
2092
2093 /*
2094 * insert the directory item
2095 */
2096 key.offset = (u64)-1;
2097 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2098 name, namelen,
2099 root->fs_info->sb->s_root->d_inode->i_ino,
2100 &key, BTRFS_FT_DIR);
2101
2102 BUG_ON(ret);
2103
2104 ret = btrfs_inc_root_ref(trans, root);
2105 BUG_ON(ret);
2106
2107 ret = btrfs_commit_transaction(trans, root);
2108 BUG_ON(ret);
2109 mutex_unlock(&root->fs_info->fs_mutex);
2110 btrfs_btree_balance_dirty(root);
2111 return 0;
2112}
2113
2114int btrfs_ioctl(struct inode *inode, struct file *filp, unsigned int
2115 cmd, unsigned long arg)
2116{
2117 struct btrfs_root *root = BTRFS_I(inode)->root;
2118 struct btrfs_ioctl_vol_args vol_args;
2119 int ret = 0;
2120 struct btrfs_dir_item *di;
2121 int namelen;
2122 struct btrfs_path *path;
2123 u64 root_dirid;
2124
2125 switch (cmd) {
2126 case BTRFS_IOC_SNAP_CREATE:
2127 if (copy_from_user(&vol_args,
2128 (struct btrfs_ioctl_vol_args __user *)arg,
2129 sizeof(vol_args)))
2130 return -EFAULT;
2131 namelen = strlen(vol_args.name);
2132 if (namelen > BTRFS_VOL_NAME_MAX)
2133 return -EINVAL;
2134 if (strchr(vol_args.name, '/'))
2135 return -EINVAL;
2136 path = btrfs_alloc_path();
2137 if (!path)
2138 return -ENOMEM;
2139 		root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
2140 mutex_lock(&root->fs_info->fs_mutex);
2141 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
2142 path, root_dirid,
2143 vol_args.name, namelen, 0);
2144 mutex_unlock(&root->fs_info->fs_mutex);
2145 btrfs_free_path(path);
2146 if (di && !IS_ERR(di))
2147 return -EEXIST;
2148
2149 if (root == root->fs_info->tree_root)
2150 ret = create_subvol(root, vol_args.name, namelen);
2151 else
2152 ret = create_snapshot(root, vol_args.name, namelen);
2153 WARN_ON(ret);
2154 break;
2155 default:
2156 return -ENOTTY;
2157 }
2158 return ret;
2159}
2160
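/*
 * Usage sketch (not part of this file; assumes BTRFS_IOC_SNAP_CREATE and
 * struct btrfs_ioctl_vol_args are exported to userspace through the btrfs
 * ioctl header): snapshot and subvolume creation are driven by this ioctl
 * on a file descriptor inside the filesystem, e.g.
 *
 *	struct btrfs_ioctl_vol_args args;
 *	int fd = open("/mnt/btrfs", O_RDONLY);
 *
 *	memset(&args, 0, sizeof(args));
 *	strncpy(args.name, "snap1", BTRFS_VOL_NAME_MAX);
 *	ioctl(fd, BTRFS_IOC_SNAP_CREATE, &args);
 *
 * When the fd belongs to the tree root the handler creates a new empty
 * subvolume named "snap1"; otherwise it snapshots the fd's own subvolume
 * under that name.
 */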
2161#ifdef CONFIG_COMPAT
2162long btrfs_compat_ioctl(struct file *file, unsigned int cmd,
2163 unsigned long arg)
2164{
2165 struct inode *inode = file->f_path.dentry->d_inode;
2166 int ret;
2167 lock_kernel();
2168 ret = btrfs_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
2169 unlock_kernel();
2170 return ret;
2171
2172}
2173#endif
2174
2175/*
2176 * Called inside transaction, so use GFP_NOFS
2177 */
2178struct inode *btrfs_alloc_inode(struct super_block *sb)
2179{
2180 struct btrfs_inode *ei;
2181
2182 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
2183 if (!ei)
2184 return NULL;
2185 return &ei->vfs_inode;
2186}
2187
2188void btrfs_destroy_inode(struct inode *inode)
2189{
2190 WARN_ON(!list_empty(&inode->i_dentry));
2191 WARN_ON(inode->i_data.nrpages);
2192
2193 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
2194}
2195
2196static void init_once(void * foo, struct kmem_cache * cachep,
2197 unsigned long flags)
2198{
2199 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
2200
2201 inode_init_once(&ei->vfs_inode);
2202}
2203
2204void btrfs_destroy_cachep(void)
2205{
2206 if (btrfs_inode_cachep)
2207 kmem_cache_destroy(btrfs_inode_cachep);
2208 if (btrfs_trans_handle_cachep)
2209 kmem_cache_destroy(btrfs_trans_handle_cachep);
2210 if (btrfs_transaction_cachep)
2211 kmem_cache_destroy(btrfs_transaction_cachep);
2212 if (btrfs_bit_radix_cachep)
2213 kmem_cache_destroy(btrfs_bit_radix_cachep);
2214 if (btrfs_path_cachep)
2215 kmem_cache_destroy(btrfs_path_cachep);
2216}
2217
2218int btrfs_init_cachep(void)
2219{
2220 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
2221 sizeof(struct btrfs_inode),
2222 0, (SLAB_RECLAIM_ACCOUNT|
2223 SLAB_MEM_SPREAD),
2224 init_once, NULL);
2225 if (!btrfs_inode_cachep)
2226 goto fail;
2227 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
2228 sizeof(struct btrfs_trans_handle),
2229 0, (SLAB_RECLAIM_ACCOUNT|
2230 SLAB_MEM_SPREAD),
2231 NULL, NULL);
2232 if (!btrfs_trans_handle_cachep)
2233 goto fail;
2234 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
2235 sizeof(struct btrfs_transaction),
2236 0, (SLAB_RECLAIM_ACCOUNT|
2237 SLAB_MEM_SPREAD),
2238 NULL, NULL);
2239 if (!btrfs_transaction_cachep)
2240 goto fail;
2241 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
2242 					 sizeof(struct btrfs_path),
2243 0, (SLAB_RECLAIM_ACCOUNT|
2244 SLAB_MEM_SPREAD),
2245 NULL, NULL);
2246 if (!btrfs_path_cachep)
2247 goto fail;
2248 btrfs_bit_radix_cachep = kmem_cache_create("btrfs_radix",
2249 256,
2250 0, (SLAB_RECLAIM_ACCOUNT|
2251 SLAB_MEM_SPREAD |
2252 SLAB_DESTROY_BY_RCU),
2253 NULL, NULL);
2254 if (!btrfs_bit_radix_cachep)
2255 goto fail;
2256 return 0;
2257fail:
2258 btrfs_destroy_cachep();
2259 return -ENOMEM;
2260}
2261
2262static int btrfs_getattr(struct vfsmount *mnt,
2263 struct dentry *dentry, struct kstat *stat)
2264{
2265 struct inode *inode = dentry->d_inode;
2266 generic_fillattr(inode, stat);
2267 stat->blksize = 256 * 1024;
2268 return 0;
2269}
2270
2271static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
2272 struct inode * new_dir,struct dentry *new_dentry)
2273{
2274 struct btrfs_trans_handle *trans;
2275 struct btrfs_root *root = BTRFS_I(old_dir)->root;
2276 struct inode *new_inode = new_dentry->d_inode;
2277 struct inode *old_inode = old_dentry->d_inode;
2278 struct timespec ctime = CURRENT_TIME;
2279 struct btrfs_path *path;
2280 struct btrfs_dir_item *di;
2281 int ret;
2282
2283 if (S_ISDIR(old_inode->i_mode) && new_inode &&
2284 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
2285 return -ENOTEMPTY;
2286 }
2287 mutex_lock(&root->fs_info->fs_mutex);
2288 trans = btrfs_start_transaction(root, 1);
2289 btrfs_set_trans_block_group(trans, new_dir);
2290 path = btrfs_alloc_path();
2291 if (!path) {
2292 ret = -ENOMEM;
2293 goto out_fail;
2294 }
2295
2296 	inc_nlink(old_dentry->d_inode);
2297 old_dir->i_ctime = old_dir->i_mtime = ctime;
2298 new_dir->i_ctime = new_dir->i_mtime = ctime;
2299 old_inode->i_ctime = ctime;
2300 if (S_ISDIR(old_inode->i_mode) && old_dir != new_dir) {
2301 struct btrfs_key *location = &BTRFS_I(new_dir)->location;
2302 u64 old_parent_oid;
2303 di = btrfs_lookup_dir_item(trans, root, path, old_inode->i_ino,
2304 "..", 2, -1);
2305 if (IS_ERR(di)) {
2306 ret = PTR_ERR(di);
2307 goto out_fail;
2308 }
2309 if (!di) {
2310 ret = -ENOENT;
2311 goto out_fail;
2312 }
2313 old_parent_oid = btrfs_disk_key_objectid(&di->location);
2314 ret = btrfs_del_item(trans, root, path);
2315 if (ret) {
2316 ret = -EIO;
2317 goto out_fail;
2318 }
2319 btrfs_release_path(root, path);
2320
2321 di = btrfs_lookup_dir_index_item(trans, root, path,
2322 old_inode->i_ino,
2323 old_parent_oid,
2324 "..", 2, -1);
2325 if (IS_ERR(di)) {
2326 ret = PTR_ERR(di);
2327 goto out_fail;
2328 }
2329 if (!di) {
2330 ret = -ENOENT;
2331 goto out_fail;
2332 }
2333 ret = btrfs_del_item(trans, root, path);
2334 if (ret) {
2335 ret = -EIO;
2336 goto out_fail;
2337 }
2338 btrfs_release_path(root, path);
2339
2340 ret = btrfs_insert_dir_item(trans, root, "..", 2,
2341 old_inode->i_ino, location,
2342 BTRFS_FT_DIR);
2343 if (ret)
2344 goto out_fail;
2345 }
2346
2347
2348 ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
2349 if (ret)
2350 goto out_fail;
2351
2352 if (new_inode) {
2353 new_inode->i_ctime = CURRENT_TIME;
2354 ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
2355 if (ret)
2356 goto out_fail;
2357 if (S_ISDIR(new_inode->i_mode))
2358 clear_nlink(new_inode);
2359 else
2360 drop_nlink(new_inode);
2361 btrfs_update_inode(trans, root, new_inode);
2362 }
2363 ret = btrfs_add_link(trans, new_dentry, old_inode);
2364 if (ret)
2365 goto out_fail;
2366
2367out_fail:
2368 btrfs_free_path(path);
2369 btrfs_end_transaction(trans, root);
2370 mutex_unlock(&root->fs_info->fs_mutex);
2371 return ret;
2372}
2373
2374static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
2375 const char *symname)
2376{
2377 struct btrfs_trans_handle *trans;
2378 struct btrfs_root *root = BTRFS_I(dir)->root;
2379 struct btrfs_path *path;
2380 struct btrfs_key key;
2381 struct inode *inode;
2382 int err;
2383 int drop_inode = 0;
2384 u64 objectid;
2385 int name_len;
2386 int datasize;
2387 char *ptr;
2388 struct btrfs_file_extent_item *ei;
2389
2390 name_len = strlen(symname) + 1;
2391 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
2392 return -ENAMETOOLONG;
2393 mutex_lock(&root->fs_info->fs_mutex);
2394 trans = btrfs_start_transaction(root, 1);
2395 btrfs_set_trans_block_group(trans, dir);
2396
2397 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2398 if (err) {
2399 err = -ENOSPC;
2400 goto out_unlock;
2401 }
2402
2403 inode = btrfs_new_inode(trans, root, objectid,
2404 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
2405 err = PTR_ERR(inode);
2406 if (IS_ERR(inode))
2407 goto out_unlock;
2408
2409 btrfs_set_trans_block_group(trans, inode);
2410 err = btrfs_add_nondir(trans, dentry, inode);
2411 if (err)
2412 drop_inode = 1;
2413 else {
2414 inode->i_mapping->a_ops = &btrfs_aops;
2415 inode->i_fop = &btrfs_file_operations;
2416 inode->i_op = &btrfs_file_inode_operations;
2417 }
2418 dir->i_sb->s_dirt = 1;
2419 btrfs_update_inode_block_group(trans, inode);
2420 btrfs_update_inode_block_group(trans, dir);
2421 if (drop_inode)
2422 goto out_unlock;
2423
2424 path = btrfs_alloc_path();
2425 BUG_ON(!path);
2426 key.objectid = inode->i_ino;
2427 key.offset = 0;
2428 key.flags = 0;
2429 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
2430 datasize = btrfs_file_extent_calc_inline_size(name_len);
2431 err = btrfs_insert_empty_item(trans, root, path, &key,
2432 datasize);
2433 BUG_ON(err);
2434 ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
2435 path->slots[0], struct btrfs_file_extent_item);
2436 btrfs_set_file_extent_generation(ei, trans->transid);
2437 btrfs_set_file_extent_type(ei,
2438 BTRFS_FILE_EXTENT_INLINE);
2439 ptr = btrfs_file_extent_inline_start(ei);
2440 btrfs_memcpy(root, path->nodes[0]->b_data,
2441 ptr, symname, name_len);
2442 mark_buffer_dirty(path->nodes[0]);
2443 btrfs_free_path(path);
2444 inode->i_op = &btrfs_symlink_inode_operations;
2445 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2446 inode->i_size = name_len - 1;
2447 btrfs_update_inode(trans, root, inode);
2448 err = 0;
2449
2450out_unlock:
2451 btrfs_end_transaction(trans, root);
2452 mutex_unlock(&root->fs_info->fs_mutex);
2453
2454 if (drop_inode) {
2455 inode_dec_link_count(inode);
2456 iput(inode);
2457 }
2458 btrfs_btree_balance_dirty(root);
2459 return err;
2460}
2461
2462static struct inode_operations btrfs_dir_inode_operations = {
2463 .lookup = btrfs_lookup,
2464 .create = btrfs_create,
2465 .unlink = btrfs_unlink,
2466 .link = btrfs_link,
2467 .mkdir = btrfs_mkdir,
2468 .rmdir = btrfs_rmdir,
2469 .rename = btrfs_rename,
2470 .symlink = btrfs_symlink,
2471 .setattr = btrfs_setattr,
2472};
2473
2474static struct inode_operations btrfs_dir_ro_inode_operations = {
2475 .lookup = btrfs_lookup,
2476};
2477
2478static struct file_operations btrfs_dir_file_operations = {
2479 .llseek = generic_file_llseek,
2480 .read = generic_read_dir,
2481 .readdir = btrfs_readdir,
2482 .ioctl = btrfs_ioctl,
2483#ifdef CONFIG_COMPAT
2484 .compat_ioctl = btrfs_compat_ioctl,
2485#endif
2486};
2487
2488static struct address_space_operations btrfs_aops = {
2489 .readpage = btrfs_readpage,
2490 .writepage = btrfs_writepage,
2491 .sync_page = block_sync_page,
2492 .prepare_write = btrfs_prepare_write,
2493 .commit_write = btrfs_commit_write,
2494 .bmap = btrfs_bmap,
2495};
2496
2497static struct address_space_operations btrfs_symlink_aops = {
2498 .readpage = btrfs_readpage,
2499 .writepage = btrfs_writepage,
2500};
2501
2502static struct inode_operations btrfs_file_inode_operations = {
2503 .truncate = btrfs_truncate,
2504 .getattr = btrfs_getattr,
2505 .setattr = btrfs_setattr,
2506};
2507
2508static struct inode_operations btrfs_symlink_inode_operations = {
2509 .readlink = generic_readlink,
2510 .follow_link = page_follow_link_light,
2511 .put_link = page_put_link,
2512};