fs/block_dev.c - blame listing at "[PATCH] BLOCK: Remove dependence on existence of blockdev_superblock [try #6]" (net-next-2.6.git)
1/*
2 * linux/fs/block_dev.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
6 */
7
8#include <linux/init.h>
9#include <linux/mm.h>
10#include <linux/fcntl.h>
11#include <linux/slab.h>
12#include <linux/kmod.h>
13#include <linux/major.h>
14#include <linux/smp_lock.h>
15#include <linux/highmem.h>
16#include <linux/blkdev.h>
17#include <linux/module.h>
18#include <linux/blkpg.h>
19#include <linux/buffer_head.h>
20#include <linux/mpage.h>
21#include <linux/mount.h>
22#include <linux/uio.h>
23#include <linux/namei.h>
24#include <asm/uaccess.h>
25#include "internal.h"
26
27struct bdev_inode {
28 struct block_device bdev;
29 struct inode vfs_inode;
30};
31
32static inline struct bdev_inode *BDEV_I(struct inode *inode)
33{
34 return container_of(inode, struct bdev_inode, vfs_inode);
35}
36
37inline struct block_device *I_BDEV(struct inode *inode)
38{
39 return &BDEV_I(inode)->bdev;
40}
41
42EXPORT_SYMBOL(I_BDEV);
43
44static sector_t max_block(struct block_device *bdev)
45{
46 sector_t retval = ~((sector_t)0);
47 loff_t sz = i_size_read(bdev->bd_inode);
48
49 if (sz) {
50 unsigned int size = block_size(bdev);
51 unsigned int sizebits = blksize_bits(size);
52 retval = (sz >> sizebits);
53 }
54 return retval;
55}
56
57/* Kill _all_ buffers, dirty or not.. */
58static void kill_bdev(struct block_device *bdev)
59{
60 invalidate_bdev(bdev, 1);
61 truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
62}
63
64int set_blocksize(struct block_device *bdev, int size)
65{
66 /* Size must be a power of two, and between 512 and PAGE_SIZE */
67 if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
68 return -EINVAL;
69
70 /* Size cannot be smaller than the size supported by the device */
71 if (size < bdev_hardsect_size(bdev))
72 return -EINVAL;
73
74 /* Don't change the size if it is same as current */
75 if (bdev->bd_block_size != size) {
76 sync_blockdev(bdev);
77 bdev->bd_block_size = size;
78 bdev->bd_inode->i_blkbits = blksize_bits(size);
79 kill_bdev(bdev);
80 }
81 return 0;
82}
83
84EXPORT_SYMBOL(set_blocksize);
85
86int sb_set_blocksize(struct super_block *sb, int size)
87{
88 if (set_blocksize(sb->s_bdev, size))
89 return 0;
90 /* If we get here, we know size is power of two
 91 * and its value is between 512 and PAGE_SIZE */
92 sb->s_blocksize = size;
 93 sb->s_blocksize_bits = blksize_bits(size);
94 return sb->s_blocksize;
95}
96
97EXPORT_SYMBOL(sb_set_blocksize);
98
99int sb_min_blocksize(struct super_block *sb, int size)
100{
101 int minsize = bdev_hardsect_size(sb->s_bdev);
102 if (size < minsize)
103 size = minsize;
104 return sb_set_blocksize(sb, size);
105}
106
107EXPORT_SYMBOL(sb_min_blocksize);
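/*
 * Example (editorial sketch, not in the original file): a filesystem's
 * fill_super routine typically calls sb_min_blocksize() before reading its
 * on-disk superblock, then switches to the real block size with
 * sb_set_blocksize().  All names below are hypothetical.
 */
#if 0
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	int blocksize = sb_min_blocksize(sb, 1024);
	int disk_blocksize = 4096;	/* pretend this was read from disk */

	if (!blocksize)
		return -EINVAL;		/* device sector size too large */
	if (blocksize != disk_blocksize &&
	    !sb_set_blocksize(sb, disk_blocksize))
		return -EINVAL;
	/* ... read the superblock at the new block size ... */
	return 0;
}
#endif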
108
109static int
110blkdev_get_block(struct inode *inode, sector_t iblock,
111 struct buffer_head *bh, int create)
112{
113 if (iblock >= max_block(I_BDEV(inode))) {
114 if (create)
115 return -EIO;
116
117 /*
118 * for reads, we're just trying to fill a partial page.
119 * return a hole, they will have to call get_block again
120 * before they can fill it, and they will get -EIO at that
121 * time
122 */
123 return 0;
124 }
125 bh->b_bdev = I_BDEV(inode);
126 bh->b_blocknr = iblock;
127 set_buffer_mapped(bh);
128 return 0;
129}
130
131static int
132blkdev_get_blocks(struct inode *inode, sector_t iblock,
133 struct buffer_head *bh, int create)
134{
135 sector_t end_block = max_block(I_BDEV(inode));
136 unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
137
138 if ((iblock + max_blocks) > end_block) {
139 max_blocks = end_block - iblock;
140 if ((long)max_blocks <= 0) {
141 if (create)
142 return -EIO; /* write fully beyond EOF */
143 /*
144 * It is a read which is fully beyond EOF. We return
145 * a !buffer_mapped buffer
146 */
147 max_blocks = 0;
148 }
149 }
150
151 bh->b_bdev = I_BDEV(inode);
152 bh->b_blocknr = iblock;
153 bh->b_size = max_blocks << inode->i_blkbits;
154 if (max_blocks)
155 set_buffer_mapped(bh);
156 return 0;
157}
158
159static ssize_t
160blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
161 loff_t offset, unsigned long nr_segs)
162{
163 struct file *file = iocb->ki_filp;
164 struct inode *inode = file->f_mapping->host;
165
166 return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
167 iov, offset, nr_segs, blkdev_get_blocks, NULL);
168}
169
170static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
171{
172 return block_write_full_page(page, blkdev_get_block, wbc);
173}
174
175static int blkdev_readpage(struct file * file, struct page * page)
176{
177 return block_read_full_page(page, blkdev_get_block);
178}
179
180static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
181{
182 return block_prepare_write(page, from, to, blkdev_get_block);
183}
184
185static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
186{
187 return block_commit_write(page, from, to);
188}
189
190/*
191 * private llseek:
192 * for a block special file file->f_dentry->d_inode->i_size is zero
193 * so we compute the size by hand (just as in block_read/write above)
194 */
195static loff_t block_llseek(struct file *file, loff_t offset, int origin)
196{
197 struct inode *bd_inode = file->f_mapping->host;
198 loff_t size;
199 loff_t retval;
200
201 mutex_lock(&bd_inode->i_mutex);
202 size = i_size_read(bd_inode);
203
204 switch (origin) {
205 case 2:
206 offset += size;
207 break;
208 case 1:
209 offset += file->f_pos;
210 }
211 retval = -EINVAL;
212 if (offset >= 0 && offset <= size) {
213 if (offset != file->f_pos) {
214 file->f_pos = offset;
215 }
216 retval = offset;
217 }
218 mutex_unlock(&bd_inode->i_mutex);
219 return retval;
220}
221
222/*
223 * Filp is never NULL; the only case when ->fsync() is called with
224 * NULL first argument is nfsd_sync_dir() and that's not a directory.
225 */
226
227static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
228{
229 return sync_blockdev(I_BDEV(filp->f_mapping->host));
230}
231
232/*
233 * pseudo-fs
234 */
235
236static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
237static kmem_cache_t * bdev_cachep __read_mostly;
238
239static struct inode *bdev_alloc_inode(struct super_block *sb)
240{
241 struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
242 if (!ei)
243 return NULL;
244 return &ei->vfs_inode;
245}
246
247static void bdev_destroy_inode(struct inode *inode)
248{
249 struct bdev_inode *bdi = BDEV_I(inode);
250
251 bdi->bdev.bd_inode_backing_dev_info = NULL;
252 kmem_cache_free(bdev_cachep, bdi);
253}
254
255static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
256{
257 struct bdev_inode *ei = (struct bdev_inode *) foo;
258 struct block_device *bdev = &ei->bdev;
259
260 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
261 SLAB_CTOR_CONSTRUCTOR)
262 {
263 memset(bdev, 0, sizeof(*bdev));
264 mutex_init(&bdev->bd_mutex);
265 mutex_init(&bdev->bd_mount_mutex);
266 INIT_LIST_HEAD(&bdev->bd_inodes);
267 INIT_LIST_HEAD(&bdev->bd_list);
268#ifdef CONFIG_SYSFS
269 INIT_LIST_HEAD(&bdev->bd_holder_list);
270#endif
271 inode_init_once(&ei->vfs_inode);
272 }
273}
274
275static inline void __bd_forget(struct inode *inode)
276{
277 list_del_init(&inode->i_devices);
278 inode->i_bdev = NULL;
279 inode->i_mapping = &inode->i_data;
280}
281
282static void bdev_clear_inode(struct inode *inode)
283{
284 struct block_device *bdev = &BDEV_I(inode)->bdev;
285 struct list_head *p;
286 spin_lock(&bdev_lock);
287 while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
288 __bd_forget(list_entry(p, struct inode, i_devices));
289 }
290 list_del_init(&bdev->bd_list);
291 spin_unlock(&bdev_lock);
292}
293
294static struct super_operations bdev_sops = {
295 .statfs = simple_statfs,
296 .alloc_inode = bdev_alloc_inode,
297 .destroy_inode = bdev_destroy_inode,
298 .drop_inode = generic_delete_inode,
299 .clear_inode = bdev_clear_inode,
300};
301
302static int bd_get_sb(struct file_system_type *fs_type,
303 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
304{
305 return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
306}
307
308static struct file_system_type bd_type = {
309 .name = "bdev",
310 .get_sb = bd_get_sb,
311 .kill_sb = kill_anon_super,
312};
313
314static struct vfsmount *bd_mnt __read_mostly;
315struct super_block *blockdev_superblock;
316
317void __init bdev_cache_init(void)
318{
319 int err;
320 bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
321 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
322 SLAB_MEM_SPREAD|SLAB_PANIC),
323 init_once, NULL);
324 err = register_filesystem(&bd_type);
325 if (err)
326 panic("Cannot register bdev pseudo-fs");
327 bd_mnt = kern_mount(&bd_type);
328 err = PTR_ERR(bd_mnt);
329 if (IS_ERR(bd_mnt))
330 panic("Cannot create bdev pseudo-fs");
331 blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */
332}
333
334/*
335 * Most likely _very_ bad one - but then it's hardly critical for small
336 * /dev and can be fixed when somebody needs a really large one.
337 * Keep in mind that it will be fed through icache hash function too.
338 */
339static inline unsigned long hash(dev_t dev)
340{
341 return MAJOR(dev)+MINOR(dev);
342}
343
344static int bdev_test(struct inode *inode, void *data)
345{
346 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
347}
348
349static int bdev_set(struct inode *inode, void *data)
350{
351 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
352 return 0;
353}
354
355static LIST_HEAD(all_bdevs);
356
357struct block_device *bdget(dev_t dev)
358{
359 struct block_device *bdev;
360 struct inode *inode;
361
362 inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
363 bdev_test, bdev_set, &dev);
364
365 if (!inode)
366 return NULL;
367
368 bdev = &BDEV_I(inode)->bdev;
369
370 if (inode->i_state & I_NEW) {
371 bdev->bd_contains = NULL;
372 bdev->bd_inode = inode;
373 bdev->bd_block_size = (1 << inode->i_blkbits);
374 bdev->bd_part_count = 0;
375 bdev->bd_invalidated = 0;
376 inode->i_mode = S_IFBLK;
377 inode->i_rdev = dev;
378 inode->i_bdev = bdev;
379 inode->i_data.a_ops = &def_blk_aops;
380 mapping_set_gfp_mask(&inode->i_data, GFP_USER);
381 inode->i_data.backing_dev_info = &default_backing_dev_info;
382 spin_lock(&bdev_lock);
383 list_add(&bdev->bd_list, &all_bdevs);
384 spin_unlock(&bdev_lock);
385 unlock_new_inode(inode);
386 }
387 return bdev;
388}
389
390EXPORT_SYMBOL(bdget);
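/*
 * Example (editorial sketch): bdget() only pins the bdev inode, it does not
 * open the device.  Callers that merely need the structure pair it with
 * bdput():
 */
#if 0
static long example_cached_pages(dev_t dev)
{
	struct block_device *bdev = bdget(dev);
	long pages;

	if (!bdev)
		return -ENOMEM;
	pages = bdev->bd_inode->i_mapping->nrpages;
	bdput(bdev);
	return pages;
}
#endif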
391
392long nr_blockdev_pages(void)
393{
394 struct list_head *p;
395 long ret = 0;
396 spin_lock(&bdev_lock);
397 list_for_each(p, &all_bdevs) {
398 struct block_device *bdev;
399 bdev = list_entry(p, struct block_device, bd_list);
400 ret += bdev->bd_inode->i_mapping->nrpages;
401 }
402 spin_unlock(&bdev_lock);
403 return ret;
404}
405
406void bdput(struct block_device *bdev)
407{
408 iput(bdev->bd_inode);
409}
410
411EXPORT_SYMBOL(bdput);
412
413static struct block_device *bd_acquire(struct inode *inode)
414{
415 struct block_device *bdev;
416
417 spin_lock(&bdev_lock);
418 bdev = inode->i_bdev;
419 if (bdev) {
420 atomic_inc(&bdev->bd_inode->i_count);
421 spin_unlock(&bdev_lock);
422 return bdev;
423 }
424 spin_unlock(&bdev_lock);
425
426 bdev = bdget(inode->i_rdev);
427 if (bdev) {
428 spin_lock(&bdev_lock);
429 if (!inode->i_bdev) {
430 /*
431 * We take an additional bd_inode->i_count for inode,
432 * and it's released in clear_inode() of inode.
433 * So, we can access it via ->i_mapping always
434 * without igrab().
435 */
436 atomic_inc(&bdev->bd_inode->i_count);
437 inode->i_bdev = bdev;
438 inode->i_mapping = bdev->bd_inode->i_mapping;
439 list_add(&inode->i_devices, &bdev->bd_inodes);
440 }
441 spin_unlock(&bdev_lock);
442 }
443 return bdev;
444}
445
446/* Call when you free inode */
447
448void bd_forget(struct inode *inode)
449{
450 struct block_device *bdev = NULL;
451
452 spin_lock(&bdev_lock);
453 if (inode->i_bdev) {
454 if (inode->i_sb != blockdev_superblock)
455 bdev = inode->i_bdev;
456 __bd_forget(inode);
457 }
458 spin_unlock(&bdev_lock);
459
460 if (bdev)
461 iput(bdev->bd_inode);
462}
463
464int bd_claim(struct block_device *bdev, void *holder)
465{
466 int res;
467 spin_lock(&bdev_lock);
468
469 /* first decide result */
470 if (bdev->bd_holder == holder)
471 res = 0; /* already a holder */
472 else if (bdev->bd_holder != NULL)
473 res = -EBUSY; /* held by someone else */
474 else if (bdev->bd_contains == bdev)
475 res = 0; /* is a whole device which isn't held */
476
477 else if (bdev->bd_contains->bd_holder == bd_claim)
478 res = 0; /* is a partition of a device that is being partitioned */
479 else if (bdev->bd_contains->bd_holder != NULL)
480 res = -EBUSY; /* is a partition of a held device */
481 else
482 res = 0; /* is a partition of an un-held device */
483
484 /* now impose change */
485 if (res==0) {
486 /* note that for a whole device bd_holders
487 * will be incremented twice, and bd_holder will
488 * be set to bd_claim before being set to holder
489 */
490 bdev->bd_contains->bd_holders ++;
491 bdev->bd_contains->bd_holder = bd_claim;
492 bdev->bd_holders++;
493 bdev->bd_holder = holder;
494 }
495 spin_unlock(&bdev_lock);
496 return res;
497}
498
499EXPORT_SYMBOL(bd_claim);
500
501void bd_release(struct block_device *bdev)
502{
503 spin_lock(&bdev_lock);
504 if (!--bdev->bd_contains->bd_holders)
505 bdev->bd_contains->bd_holder = NULL;
506 if (!--bdev->bd_holders)
507 bdev->bd_holder = NULL;
508 spin_unlock(&bdev_lock);
509}
510
511EXPORT_SYMBOL(bd_release);
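/*
 * Example (editorial sketch): bd_claim()/bd_release() bracket exclusive use
 * of an already-opened device; the holder cookie is only compared for
 * equality, so any stable pointer works.
 */
#if 0
static int example_use_exclusively(struct block_device *bdev, void *holder)
{
	int err = bd_claim(bdev, holder);

	if (err)
		return err;	/* -EBUSY: someone else holds it */
	/* ... bdev is ours until bd_release() ... */
	bd_release(bdev);
	return 0;
}
#endif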
512
513#ifdef CONFIG_SYSFS
514/*
515 * Functions for bd_claim_by_kobject / bd_release_from_kobject
516 *
517 * If a kobject is passed to bd_claim_by_kobject()
518 * and the kobject has a parent directory,
519 * following symlinks are created:
520 * o from the kobject to the claimed bdev
521 * o from "holders" directory of the bdev to the parent of the kobject
522 * bd_release_from_kobject() removes these symlinks.
523 *
524 * Example:
525 * If /dev/dm-0 maps to /dev/sda, kobject corresponding to
526 * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
527 * /sys/block/dm-0/slaves/sda --> /sys/block/sda
528 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
529 */
530
531static struct kobject *bdev_get_kobj(struct block_device *bdev)
532{
533 if (bdev->bd_contains != bdev)
534 return kobject_get(&bdev->bd_part->kobj);
535 else
536 return kobject_get(&bdev->bd_disk->kobj);
537}
538
539static struct kobject *bdev_get_holder(struct block_device *bdev)
540{
541 if (bdev->bd_contains != bdev)
542 return kobject_get(bdev->bd_part->holder_dir);
543 else
544 return kobject_get(bdev->bd_disk->holder_dir);
545}
546
547static int add_symlink(struct kobject *from, struct kobject *to)
548{
549 if (!from || !to)
550 return 0;
551 return sysfs_create_link(from, to, kobject_name(to));
552}
553
554static void del_symlink(struct kobject *from, struct kobject *to)
555{
556 if (!from || !to)
557 return;
558 sysfs_remove_link(from, kobject_name(to));
559}
560
561/*
562 * 'struct bd_holder' contains pointers to kobjects symlinked by
563 * bd_claim_by_kobject.
564 * It's connected to bd_holder_list which is protected by bdev->bd_sem.
565 */
566struct bd_holder {
567 struct list_head list; /* chain of holders of the bdev */
568 int count; /* references from the holder */
569 struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */
570 struct kobject *hdev; /* e.g. "/block/dm-0" */
571 struct kobject *hdir; /* e.g. "/block/sda/holders" */
572 struct kobject *sdev; /* e.g. "/block/sda" */
573};
574
575/*
576 * Get references of related kobjects at once.
577 * Returns 1 on success. 0 on failure.
578 *
579 * Should call bd_holder_release_dirs() after successful use.
580 */
581static int bd_holder_grab_dirs(struct block_device *bdev,
582 struct bd_holder *bo)
583{
584 if (!bdev || !bo)
585 return 0;
586
587 bo->sdir = kobject_get(bo->sdir);
588 if (!bo->sdir)
589 return 0;
590
591 bo->hdev = kobject_get(bo->sdir->parent);
592 if (!bo->hdev)
593 goto fail_put_sdir;
594
595 bo->sdev = bdev_get_kobj(bdev);
596 if (!bo->sdev)
597 goto fail_put_hdev;
598
599 bo->hdir = bdev_get_holder(bdev);
600 if (!bo->hdir)
601 goto fail_put_sdev;
602
603 return 1;
604
605fail_put_sdev:
606 kobject_put(bo->sdev);
607fail_put_hdev:
608 kobject_put(bo->hdev);
609fail_put_sdir:
610 kobject_put(bo->sdir);
611
612 return 0;
613}
614
615/* Put references of related kobjects at once. */
616static void bd_holder_release_dirs(struct bd_holder *bo)
617{
618 kobject_put(bo->hdir);
619 kobject_put(bo->sdev);
620 kobject_put(bo->hdev);
621 kobject_put(bo->sdir);
622}
623
624static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
625{
626 struct bd_holder *bo;
627
628 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
629 if (!bo)
630 return NULL;
631
632 bo->count = 1;
633 bo->sdir = kobj;
634
635 return bo;
636}
637
638static void free_bd_holder(struct bd_holder *bo)
639{
640 kfree(bo);
641}
642
643/**
644 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
645 *
646 * @bdev: block device to be bd_claimed
647 * @bo: preallocated and initialized by alloc_bd_holder()
648 *
649 * If there is no matching entry with @bo in @bdev->bd_holder_list,
650 * add @bo to the list, create symlinks.
651 *
652 * Returns 0 if symlinks are created or already there.
653 * Returns -ve if something fails and @bo can be freed.
654 */
655static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
656{
657 struct bd_holder *tmp;
658 int ret;
659
660 if (!bo)
661 return -EINVAL;
662
663 list_for_each_entry(tmp, &bdev->bd_holder_list, list) {
664 if (tmp->sdir == bo->sdir) {
665 tmp->count++;
666 /* We've already done what we need to do here. */
667 free_bd_holder(bo);
668 return 0;
669 }
670 }
671
672 if (!bd_holder_grab_dirs(bdev, bo))
673 return -EBUSY;
674
675 ret = add_symlink(bo->sdir, bo->sdev);
676 if (ret == 0) {
677 ret = add_symlink(bo->hdir, bo->hdev);
678 if (ret)
679 del_symlink(bo->sdir, bo->sdev);
680 }
681 if (ret == 0)
682 list_add_tail(&bo->list, &bdev->bd_holder_list);
683 return ret;
684}
685
686/**
687 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
688 *
689 * @bdev: block device to be bd_claimed
690 * @kobj: holder's kobject
691 *
692 * If there is matching entry with @kobj in @bdev->bd_holder_list
693 * and no other bd_claim() from the same kobject,
694 * remove the struct bd_holder from the list, delete symlinks for it.
695 *
696 * Returns a pointer to the struct bd_holder when it's removed from the list
697 * and ready to be freed.
698 * Returns NULL if matching claim isn't found or there is other bd_claim()
699 * by the same kobject.
700 */
701static struct bd_holder *del_bd_holder(struct block_device *bdev,
702 struct kobject *kobj)
703{
704 struct bd_holder *bo;
705
706 list_for_each_entry(bo, &bdev->bd_holder_list, list) {
707 if (bo->sdir == kobj) {
708 bo->count--;
709 BUG_ON(bo->count < 0);
710 if (!bo->count) {
711 list_del(&bo->list);
712 del_symlink(bo->sdir, bo->sdev);
713 del_symlink(bo->hdir, bo->hdev);
714 bd_holder_release_dirs(bo);
715 return bo;
716 }
717 break;
718 }
719 }
720
721 return NULL;
722}
723
724/**
725 * bd_claim_by_kobject - bd_claim() with additional kobject signature
726 *
727 * @bdev: block device to be claimed
728 * @holder: holder's signature
729 * @kobj: holder's kobject
730 *
731 * Do bd_claim() and if it succeeds, create sysfs symlinks between
732 * the bdev and the holder's kobject.
733 * Use bd_release_from_kobject() when releasing the claimed bdev.
734 *
735 * Returns 0 on success. (same as bd_claim())
736 * Returns errno on failure.
737 */
738static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
739 struct kobject *kobj)
740{
741 int res;
742 struct bd_holder *bo;
743
744 if (!kobj)
745 return -EINVAL;
746
747 bo = alloc_bd_holder(kobj);
748 if (!bo)
749 return -ENOMEM;
750
751 mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION);
752 res = bd_claim(bdev, holder);
753 if (res == 0)
754 res = add_bd_holder(bdev, bo);
755 if (res)
756 free_bd_holder(bo);
757 mutex_unlock(&bdev->bd_mutex);
758
759 return res;
760}
761
762/**
763 * bd_release_from_kobject - bd_release() with additional kobject signature
764 *
765 * @bdev: block device to be released
766 * @kobj: holder's kobject
767 *
768 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
769 */
770static void bd_release_from_kobject(struct block_device *bdev,
771 struct kobject *kobj)
772{
773 struct bd_holder *bo;
774
775 if (!kobj)
776 return;
777
778 mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION);
779 bd_release(bdev);
780 if ((bo = del_bd_holder(bdev, kobj)))
781 free_bd_holder(bo);
782 mutex_unlock(&bdev->bd_mutex);
783}
784
785/**
786 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
787 *
788 * @bdev: block device to be claimed
789 * @holder: holder's signature
790 * @disk: holder's gendisk
791 *
792 * Call bd_claim_by_kobject() with getting @disk->slave_dir.
793 */
794int bd_claim_by_disk(struct block_device *bdev, void *holder,
795 struct gendisk *disk)
796{
797 return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
798}
799EXPORT_SYMBOL_GPL(bd_claim_by_disk);
800
801/**
802 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
803 *
804 * @bdev: block device to be released
805 * @disk: holder's gendisk
806 *
807 * Call bd_release_from_kobject() and put @disk->slave_dir.
808 */
809void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
810{
811 bd_release_from_kobject(bdev, disk->slave_dir);
812 kobject_put(disk->slave_dir);
813}
814EXPORT_SYMBOL_GPL(bd_release_from_disk);
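/*
 * Example (editorial sketch): a stacking driver (device-mapper style) claims
 * each component device against its own gendisk so the slaves/holders
 * symlinks described above appear.  Simplified, hypothetical code:
 */
#if 0
static int example_add_component(struct gendisk *upper, void *holder,
				 struct block_device *lower)
{
	int err = bd_claim_by_disk(lower, holder, upper);

	if (err)
		return err;
	/* ... route I/O from 'upper' to 'lower' ... */
	bd_release_from_disk(lower, upper);
	return 0;
}
#endif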
815#endif
816
817/*
818 * Tries to open block device by device number. Use it ONLY if you
819 * really do not have anything better - i.e. when you are behind a
820 * truly sucky interface and all you are given is a device number. _Never_
821 * to be used for internal purposes. If you ever need it - reconsider
822 * your API.
823 */
824struct block_device *open_by_devnum(dev_t dev, unsigned mode)
825{
826 struct block_device *bdev = bdget(dev);
827 int err = -ENOMEM;
828 int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
829 if (bdev)
830 err = blkdev_get(bdev, mode, flags);
831 return err ? ERR_PTR(err) : bdev;
832}
833
834EXPORT_SYMBOL(open_by_devnum);
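/*
 * Example (editorial sketch): the RAID autostart path is the sort of caller
 * this interface exists for -- all it has is a dev_t from a table.  A
 * simplified probe:
 */
#if 0
static int example_probe(dev_t dev)
{
	struct block_device *bdev = open_by_devnum(dev, FMODE_READ);

	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... read a superblock or label from bdev ... */
	return blkdev_put(bdev);
}
#endif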
835
836static int
837blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags);
838
839struct block_device *open_partition_by_devnum(dev_t dev, unsigned mode)
840{
841 struct block_device *bdev = bdget(dev);
842 int err = -ENOMEM;
843 int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
844 if (bdev)
845 err = blkdev_get_partition(bdev, mode, flags);
846 return err ? ERR_PTR(err) : bdev;
847}
848
849EXPORT_SYMBOL(open_partition_by_devnum);
850
851
852/*
853 * This routine checks whether a removable media has been changed,
854 * and invalidates all buffer-cache-entries in that case. This
855 * is a relatively slow routine, so we have to try to minimize using
856 * it. Thus it is called only upon a 'mount' or 'open'. This
857 * is the best way of combining speed and utility, I think.
858 * People changing diskettes in the middle of an operation deserve
859 * to lose :-)
860 */
861int check_disk_change(struct block_device *bdev)
862{
863 struct gendisk *disk = bdev->bd_disk;
864 struct block_device_operations * bdops = disk->fops;
865
866 if (!bdops->media_changed)
867 return 0;
868 if (!bdops->media_changed(bdev->bd_disk))
869 return 0;
870
871 if (__invalidate_device(bdev))
872 printk("VFS: busy inodes on changed media.\n");
873
874 if (bdops->revalidate_disk)
875 bdops->revalidate_disk(bdev->bd_disk);
876 if (bdev->bd_disk->minors > 1)
877 bdev->bd_invalidated = 1;
878 return 1;
879}
880
881EXPORT_SYMBOL(check_disk_change);
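/*
 * Example (editorial sketch): removable-media drivers call this from their
 * open routine so a swapped medium drops stale cached data before use.
 * Hypothetical driver code:
 */
#if 0
static int example_media_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = inode->i_bdev;

	check_disk_change(bdev);	/* may invalidate buffers + flag rescan */
	if (!get_capacity(bdev->bd_disk))
		return -ENOMEDIUM;
	return 0;
}
#endif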
882
883void bd_set_size(struct block_device *bdev, loff_t size)
884{
885 unsigned bsize = bdev_hardsect_size(bdev);
886
887 bdev->bd_inode->i_size = size;
888 while (bsize < PAGE_CACHE_SIZE) {
889 if (size & bsize)
890 break;
891 bsize <<= 1;
892 }
893 bdev->bd_block_size = bsize;
894 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
895}
896EXPORT_SYMBOL(bd_set_size);
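/*
 * Worked example (editorial note, assuming 512-byte sectors and 4 KiB
 * pages): for a 4194304-byte device the loop above doubles bsize up to
 * PAGE_CACHE_SIZE, so bd_block_size becomes 4096; for a 4194816-byte device
 * (8193 sectors) bit 9 of the size is set, the very first test breaks out,
 * and bd_block_size stays 512.
 */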
897
898static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
899{
900 int ret = 0;
901 struct inode *bd_inode = bdev->bd_inode;
902 struct gendisk *disk = bdev->bd_disk;
903
904 mutex_lock_nested(&bdev->bd_mutex, subclass);
905 lock_kernel();
906 if (!--bdev->bd_openers) {
907 sync_blockdev(bdev);
908 kill_bdev(bdev);
909 }
910 if (bdev->bd_contains == bdev) {
911 if (disk->fops->release)
912 ret = disk->fops->release(bd_inode, NULL);
913 } else {
914 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
915 subclass + 1);
916 bdev->bd_contains->bd_part_count--;
917 mutex_unlock(&bdev->bd_contains->bd_mutex);
918 }
919 if (!bdev->bd_openers) {
920 struct module *owner = disk->fops->owner;
921
922 put_disk(disk);
923 module_put(owner);
924
925 if (bdev->bd_contains != bdev) {
926 kobject_put(&bdev->bd_part->kobj);
927 bdev->bd_part = NULL;
928 }
929 bdev->bd_disk = NULL;
930 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
931 if (bdev != bdev->bd_contains)
932 __blkdev_put(bdev->bd_contains, subclass + 1);
933 bdev->bd_contains = NULL;
934 }
935 unlock_kernel();
936 mutex_unlock(&bdev->bd_mutex);
937 bdput(bdev);
938 return ret;
939}
940
941int blkdev_put(struct block_device *bdev)
942{
943 return __blkdev_put(bdev, BD_MUTEX_NORMAL);
944}
945EXPORT_SYMBOL(blkdev_put);
946
947int blkdev_put_partition(struct block_device *bdev)
948{
949 return __blkdev_put(bdev, BD_MUTEX_PARTITION);
950}
951EXPORT_SYMBOL(blkdev_put_partition);
952
953static int
954blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags);
955
956static int
957do_open(struct block_device *bdev, struct file *file, unsigned int subclass)
958{
959 struct module *owner = NULL;
960 struct gendisk *disk;
961 int ret = -ENXIO;
962 int part;
963
964 file->f_mapping = bdev->bd_inode->i_mapping;
965 lock_kernel();
966 disk = get_gendisk(bdev->bd_dev, &part);
967 if (!disk) {
968 unlock_kernel();
969 bdput(bdev);
970 return ret;
971 }
972 owner = disk->fops->owner;
973
974 mutex_lock_nested(&bdev->bd_mutex, subclass);
975
976 if (!bdev->bd_openers) {
977 bdev->bd_disk = disk;
978 bdev->bd_contains = bdev;
979 if (!part) {
980 struct backing_dev_info *bdi;
981 if (disk->fops->open) {
982 ret = disk->fops->open(bdev->bd_inode, file);
983 if (ret)
984 goto out_first;
985 }
986 if (!bdev->bd_openers) {
987 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
988 bdi = blk_get_backing_dev_info(bdev);
989 if (bdi == NULL)
990 bdi = &default_backing_dev_info;
991 bdev->bd_inode->i_data.backing_dev_info = bdi;
992 }
993 if (bdev->bd_invalidated)
994 rescan_partitions(disk, bdev);
995 } else {
996 struct hd_struct *p;
997 struct block_device *whole;
998 whole = bdget_disk(disk, 0);
999 ret = -ENOMEM;
1000 if (!whole)
1001 goto out_first;
1002 ret = blkdev_get_whole(whole, file->f_mode, file->f_flags);
1003 if (ret)
1004 goto out_first;
1005 bdev->bd_contains = whole;
1006 mutex_lock_nested(&whole->bd_mutex, BD_MUTEX_WHOLE);
1007 whole->bd_part_count++;
1008 p = disk->part[part - 1];
1009 bdev->bd_inode->i_data.backing_dev_info =
1010 whole->bd_inode->i_data.backing_dev_info;
1011 if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
1012 whole->bd_part_count--;
1013 mutex_unlock(&whole->bd_mutex);
1014 ret = -ENXIO;
1015 goto out_first;
1016 }
1017 kobject_get(&p->kobj);
1018 bdev->bd_part = p;
1019 bd_set_size(bdev, (loff_t) p->nr_sects << 9);
1020 mutex_unlock(&whole->bd_mutex);
1021 }
1022 } else {
1023 put_disk(disk);
1024 module_put(owner);
1025 if (bdev->bd_contains == bdev) {
1026 if (bdev->bd_disk->fops->open) {
1027 ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
1028 if (ret)
1029 goto out;
1030 }
1031 if (bdev->bd_invalidated)
1032 rescan_partitions(bdev->bd_disk, bdev);
1033 } else {
1034 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
1035 BD_MUTEX_WHOLE);
1036 bdev->bd_contains->bd_part_count++;
1037 mutex_unlock(&bdev->bd_contains->bd_mutex);
1038 }
1039 }
1040 bdev->bd_openers++;
1041 mutex_unlock(&bdev->bd_mutex);
1042 unlock_kernel();
1043 return 0;
1044
1045out_first:
1046 bdev->bd_disk = NULL;
1047 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1048 if (bdev != bdev->bd_contains)
1049 __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE);
1050 bdev->bd_contains = NULL;
1051 put_disk(disk);
1052 module_put(owner);
1053out:
1054 mutex_unlock(&bdev->bd_mutex);
1055 unlock_kernel();
1056 if (ret)
1057 bdput(bdev);
1058 return ret;
1059}
1060
1061int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
1062{
1063 /*
1064 * This crockload is due to bad choice of ->open() type.
1065 * It will go away.
1066 * For now, block device ->open() routine must _not_
1067 * examine anything in 'inode' argument except ->i_rdev.
1068 */
1069 struct file fake_file = {};
1070 struct dentry fake_dentry = {};
1071 fake_file.f_mode = mode;
1072 fake_file.f_flags = flags;
1073 fake_file.f_dentry = &fake_dentry;
1074 fake_dentry.d_inode = bdev->bd_inode;
1075
1076 return do_open(bdev, &fake_file, BD_MUTEX_NORMAL);
1077}
1078
1079EXPORT_SYMBOL(blkdev_get);
1080
1081static int
1082blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags)
1083{
1084 /*
1085 * This crockload is due to bad choice of ->open() type.
1086 * It will go away.
1087 * For now, block device ->open() routine must _not_
1088 * examine anything in 'inode' argument except ->i_rdev.
1089 */
1090 struct file fake_file = {};
1091 struct dentry fake_dentry = {};
1092 fake_file.f_mode = mode;
1093 fake_file.f_flags = flags;
1094 fake_file.f_dentry = &fake_dentry;
1095 fake_dentry.d_inode = bdev->bd_inode;
1096
1097 return do_open(bdev, &fake_file, BD_MUTEX_WHOLE);
1098}
1099
1100static int
1101blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags)
1102{
1103 /*
1104 * This crockload is due to bad choice of ->open() type.
1105 * It will go away.
1106 * For now, block device ->open() routine must _not_
1107 * examine anything in 'inode' argument except ->i_rdev.
1108 */
1109 struct file fake_file = {};
1110 struct dentry fake_dentry = {};
1111 fake_file.f_mode = mode;
1112 fake_file.f_flags = flags;
1113 fake_file.f_dentry = &fake_dentry;
1114 fake_dentry.d_inode = bdev->bd_inode;
1115
1116 return do_open(bdev, &fake_file, BD_MUTEX_PARTITION);
1117}
1118
1119static int blkdev_open(struct inode * inode, struct file * filp)
1120{
1121 struct block_device *bdev;
1122 int res;
1123
1124 /*
1125 * Preserve backwards compatibility and allow large file access
1126 * even if userspace doesn't ask for it explicitly. Some mkfs
1127 * binary needs it. We might want to drop this workaround
1128 * during an unstable branch.
1129 */
1130 filp->f_flags |= O_LARGEFILE;
1131
1132 bdev = bd_acquire(inode);
1133
1134 res = do_open(bdev, filp, BD_MUTEX_NORMAL);
1135 if (res)
1136 return res;
1137
1138 if (!(filp->f_flags & O_EXCL) )
1139 return 0;
1140
1141 if (!(res = bd_claim(bdev, filp)))
1142 return 0;
1143
1144 blkdev_put(bdev);
1145 return res;
1146}
1147
1148static int blkdev_close(struct inode * inode, struct file * filp)
1149{
1150 struct block_device *bdev = I_BDEV(filp->f_mapping->host);
1151 if (bdev->bd_holder == filp)
1152 bd_release(bdev);
1153 return blkdev_put(bdev);
1154}
1155
1156static ssize_t blkdev_file_write(struct file *file, const char __user *buf,
1157 size_t count, loff_t *ppos)
1158{
1159 struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
1160
1161 return generic_file_write_nolock(file, &local_iov, 1, ppos);
1162}
1163
1164static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf,
1165 size_t count, loff_t pos)
1166{
1167 struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
1168
1169 return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
1170}
1171
1172static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1173{
1174 return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
1175}
1176
1177const struct address_space_operations def_blk_aops = {
1178 .readpage = blkdev_readpage,
1179 .writepage = blkdev_writepage,
1180 .sync_page = block_sync_page,
1181 .prepare_write = blkdev_prepare_write,
1182 .commit_write = blkdev_commit_write,
1183 .writepages = generic_writepages,
1184 .direct_IO = blkdev_direct_IO,
1185};
1186
1187const struct file_operations def_blk_fops = {
1188 .open = blkdev_open,
1189 .release = blkdev_close,
1190 .llseek = block_llseek,
1191 .read = generic_file_read,
1192 .write = blkdev_file_write,
1193 .aio_read = generic_file_aio_read,
1194 .aio_write = blkdev_file_aio_write,
1195 .mmap = generic_file_mmap,
1196 .fsync = block_fsync,
1197 .unlocked_ioctl = block_ioctl,
1198#ifdef CONFIG_COMPAT
1199 .compat_ioctl = compat_blkdev_ioctl,
1200#endif
1201 .readv = generic_file_readv,
1202 .writev = generic_file_write_nolock,
1203 .sendfile = generic_file_sendfile,
1204 .splice_read = generic_file_splice_read,
1205 .splice_write = generic_file_splice_write,
1206};
1207
1208int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
1209{
1210 int res;
1211 mm_segment_t old_fs = get_fs();
1212 set_fs(KERNEL_DS);
1213 res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
1214 set_fs(old_fs);
1215 return res;
1216}
1217
1218EXPORT_SYMBOL(ioctl_by_bdev);
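/*
 * Example (editorial sketch): in-kernel users call ioctl_by_bdev() to drive
 * the device's ioctl path without a user-space fd.  This mirrors what isofs
 * does to find the last session of a multisession CD (needs <linux/cdrom.h>):
 */
#if 0
static int example_last_session(struct block_device *bdev, unsigned long *lba)
{
	struct cdrom_multisession ms_info;
	int err;

	ms_info.addr_format = CDROM_LBA;
	err = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long)&ms_info);
	if (!err && ms_info.xa_flag)
		*lba = (unsigned long)ms_info.addr.lba;
	return err;
}
#endif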
1219
1220/**
1221 * lookup_bdev - lookup a struct block_device by name
1222 *
1223 * @path: special file representing the block device
1224 *
1225 * Get a reference to the blockdevice at @path in the current
1226 * namespace if possible and return it. Return ERR_PTR(error)
1227 * otherwise.
1228 */
1229struct block_device *lookup_bdev(const char *path)
1230{
1231 struct block_device *bdev;
1232 struct inode *inode;
1233 struct nameidata nd;
1234 int error;
1235
1236 if (!path || !*path)
1237 return ERR_PTR(-EINVAL);
1238
1239 error = path_lookup(path, LOOKUP_FOLLOW, &nd);
1240 if (error)
1241 return ERR_PTR(error);
1242
1243 inode = nd.dentry->d_inode;
1244 error = -ENOTBLK;
1245 if (!S_ISBLK(inode->i_mode))
1246 goto fail;
1247 error = -EACCES;
1248 if (nd.mnt->mnt_flags & MNT_NODEV)
1249 goto fail;
1250 error = -ENOMEM;
1251 bdev = bd_acquire(inode);
1252 if (!bdev)
1253 goto fail;
1254out:
1255 path_release(&nd);
1256 return bdev;
1257fail:
1258 bdev = ERR_PTR(error);
1259 goto out;
1260}
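/*
 * Example (editorial sketch): lookup_bdev() only translates a path into a
 * pinned struct block_device; it neither opens nor claims the device.  A
 * caller that just wants the dev_t drops the reference with bdput():
 */
#if 0
static dev_t example_name_to_devt(const char *path)
{
	struct block_device *bdev = lookup_bdev(path);
	dev_t dev;

	if (IS_ERR(bdev))
		return 0;
	dev = bdev->bd_dev;
	bdput(bdev);
	return dev;
}
#endif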
1261
1262/**
1263 * open_bdev_excl - open a block device by name and set it up for use
1264 *
1265 * @path: special file representing the block device
1266 * @flags: %MS_RDONLY for opening read-only
1267 * @holder: owner for exclusion
1268 *
1269 * Open the blockdevice described by the special file at @path, claim it
1270 * for the @holder.
1271 */
1272struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
1273{
1274 struct block_device *bdev;
1275 mode_t mode = FMODE_READ;
1276 int error = 0;
1277
1278 bdev = lookup_bdev(path);
1279 if (IS_ERR(bdev))
1280 return bdev;
1281
1282 if (!(flags & MS_RDONLY))
1283 mode |= FMODE_WRITE;
1284 error = blkdev_get(bdev, mode, 0);
1285 if (error)
1286 return ERR_PTR(error);
1287 error = -EACCES;
1288 if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
1289 goto blkdev_put;
1290 error = bd_claim(bdev, holder);
1291 if (error)
1292 goto blkdev_put;
1293
1294 return bdev;
1295
1296blkdev_put:
1297 blkdev_put(bdev);
1298 return ERR_PTR(error);
1299}
1300
1301EXPORT_SYMBOL(open_bdev_excl);
1302
1303/**
1304 * close_bdev_excl - release a blockdevice opened by open_bdev_excl()
1305 *
1306 * @bdev: blockdevice to close
1307 *
1308 * This is the counterpart to open_bdev_excl().
1309 */
1310void close_bdev_excl(struct block_device *bdev)
1311{
1312 bd_release(bdev);
1313 blkdev_put(bdev);
1314}
1315
1316EXPORT_SYMBOL(close_bdev_excl);
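/*
 * Example (editorial sketch): mount code is the usual consumer of the pair
 * above -- open and claim the device named on the command line, keep it for
 * the lifetime of the mount, and close it at umount.  Simplified:
 */
#if 0
static struct block_device *example_open_for_mount(const char *dev_name,
						   int flags, void *fs_cookie)
{
	struct block_device *bdev = open_bdev_excl(dev_name, flags, fs_cookie);

	if (IS_ERR(bdev))
		return bdev;
	/* ... read filesystem metadata from bdev ... */
	/* later, at umount time: close_bdev_excl(bdev); */
	return bdev;
}
#endif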