/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <asm/uaccess.h>

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);

static sector_t max_block(struct block_device *bdev)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = i_size_read(bdev->bd_inode);

	if (sz) {
		unsigned int size = block_size(bdev);
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/* Kill _all_ buffers, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
	invalidate_bdev(bdev, 1);
	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_hardsect_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know the size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_hardsect_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

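/*
 * Usage sketch (illustrative, not part of this file): a filesystem's
 * fill_super() typically negotiates its block size with the helpers
 * above; "myfs_fill_super" and the 1024-byte preference are made up.
 *
 *	static int myfs_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;
 *		...
 *		return 0;
 *	}
 */
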
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	if (iblock >= max_block(I_BDEV(inode))) {
		if (create)
			return -EIO;

		/*
		 * for reads, we're just trying to fill a partial page.
		 * return a hole, they will have to call get_block again
		 * before they can fill it, and they will get -EIO at that
		 * time
		 */
		return 0;
	}
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

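/*
 * Unlike a filesystem's get_block, which must consult on-disk metadata,
 * a block device is its own backing store: logical block N of the page
 * cache maps 1:1 to physical block N of the device.  That is why the
 * helpers above and below reduce to simple bounds checks.
 */
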
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	sector_t end_block = max_block(I_BDEV(inode));
	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;

	if ((iblock + max_blocks) > end_block) {
		max_blocks = end_block - iblock;
		if ((long)max_blocks <= 0) {
			if (create)
				return -EIO;	/* write fully beyond EOF */
			/*
			 * It is a read which is fully beyond EOF. We return
			 * a !buffer_mapped buffer
			 */
			max_blocks = 0;
		}
	}

	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	bh->b_size = max_blocks << inode->i_blkbits;
	if (max_blocks)
		set_buffer_mapped(bh);
	return 0;
}

static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
				iov, offset, nr_segs, blkdev_get_blocks, NULL);
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, blkdev_get_block);
}

static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_commit_write(page, from, to);
}

/*
 * private llseek:
 * for a block special file file->f_dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *bd_inode = file->f_mapping->host;
	loff_t size;
	loff_t retval;

	mutex_lock(&bd_inode->i_mutex);
	size = i_size_read(bd_inode);

	switch (origin) {
		case 2:		/* SEEK_END */
			offset += size;
			break;
		case 1:		/* SEEK_CUR */
			offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= size) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
		}
		retval = offset;
	}
	mutex_unlock(&bd_inode->i_mutex);
	return retval;
}

/*
 * Filp is never NULL; the only case when ->fsync() is called with
 * NULL first argument is nfsd_sync_dir() and that's not a directory.
 */

static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	return sync_blockdev(I_BDEV(filp->f_mapping->host));
}

/*
 * pseudo-fs
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static kmem_cache_t * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
	struct bdev_inode *bdi = BDEV_I(inode);

	bdi->bdev.bd_inode_backing_dev_info = NULL;
	kmem_cache_free(bdev_cachep, bdi);
}

static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
	{
		memset(bdev, 0, sizeof(*bdev));
		mutex_init(&bdev->bd_mutex);
		mutex_init(&bdev->bd_mount_mutex);
		INIT_LIST_HEAD(&bdev->bd_inodes);
		INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
		INIT_LIST_HEAD(&bdev->bd_holder_list);
#endif
		inode_init_once(&ei->vfs_inode);
	}
}

static inline void __bd_forget(struct inode *inode)
{
	list_del_init(&inode->i_devices);
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
}

static void bdev_clear_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	struct list_head *p;
	spin_lock(&bdev_lock);
	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
		__bd_forget(list_entry(p, struct inode, i_devices));
	}
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
}

static struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.clear_inode = bdev_clear_inode,
};

static int bd_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
}

static struct file_system_type bd_type = {
	.name = "bdev",
	.get_sb = bd_get_sb,
	.kill_sb = kill_anon_super,
};

static struct vfsmount *bd_mnt __read_mostly;
struct super_block *blockdev_superblock;

void __init bdev_cache_init(void)
{
	int err;
	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			init_once, NULL);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	err = PTR_ERR(bd_mnt);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}

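/*
 * E.g. dev_t (8,1) and dev_t (7,2) both hash to 9; such collisions are
 * harmless, since iget5_locked() disambiguates via bdev_test() below and
 * they merely lengthen one icache hash chain.
 */
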
static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		inode->i_data.backing_dev_info = &default_backing_dev_info;
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);

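/*
 * Usage sketch (illustrative): bdget() only pins the bdev inode, it does
 * not open the device; a typical caller pairs it with bdput().  The
 * MKDEV values here are made up:
 *
 *	struct block_device *bdev = bdget(MKDEV(8, 0));
 *	if (!bdev)
 *		return -ENOMEM;
 *	...
 *	bdput(bdev);
 */
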
long nr_blockdev_pages(void)
{
	struct list_head *p;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each(p, &all_bdevs) {
		struct block_device *bdev;
		bdev = list_entry(p, struct block_device, bd_list);
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		atomic_inc(&bdev->bd_inode->i_count);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional bd_inode->i_count for inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			atomic_inc(&bdev->bd_inode->i_count);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
			list_add(&inode->i_devices, &bdev->bd_inodes);
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

/* Call when you free inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (inode->i_bdev) {
		if (inode->i_sb != blockdev_superblock)
			bdev = inode->i_bdev;
		__bd_forget(inode);
	}
	spin_unlock(&bdev_lock);

	if (bdev)
		iput(bdev->bd_inode);
}

int bd_claim(struct block_device *bdev, void *holder)
{
	int res;
	spin_lock(&bdev_lock);

	/* first decide result */
	if (bdev->bd_holder == holder)
		res = 0;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		res = -EBUSY;	 /* held by someone else */
	else if (bdev->bd_contains == bdev)
		res = 0;	 /* is a whole device which isn't held */

	else if (bdev->bd_contains->bd_holder == bd_claim)
		res = 0; 	 /* is a partition of a device that is being partitioned */
	else if (bdev->bd_contains->bd_holder != NULL)
		res = -EBUSY;	 /* is a partition of a held device */
	else
		res = 0;	 /* is a partition of an un-held device */

	/* now impose change */
	if (res==0) {
		/* note that for a whole device bd_holders
		 * will be incremented twice, and bd_holder will
		 * be set to bd_claim before being set to holder
		 */
		bdev->bd_contains->bd_holders ++;
		bdev->bd_contains->bd_holder = bd_claim;
		bdev->bd_holders++;
		bdev->bd_holder = holder;
	}
	spin_unlock(&bdev_lock);
	return res;
}

EXPORT_SYMBOL(bd_claim);

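/*
 * Usage sketch (illustrative): any unique, stable pointer serves as the
 * holder cookie; filesystems conventionally pass their super_block, and
 * blkdev_open() below passes the struct file:
 *
 *	if (bd_claim(bdev, filp))
 *		goto out_put;		(-EBUSY: held by someone else)
 *	...
 *	bd_release(bdev);
 */
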
void bd_release(struct block_device *bdev)
{
	spin_lock(&bdev_lock);
	if (!--bdev->bd_contains->bd_holders)
		bdev->bd_contains->bd_holder = NULL;
	if (!--bdev->bd_holders)
		bdev->bd_holder = NULL;
	spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);

#ifdef CONFIG_SYSFS
/*
 * Functions for bd_claim_by_kobject / bd_release_from_kobject
 *
 * If a kobject is passed to bd_claim_by_kobject()
 * and the kobject has a parent directory,
 * the following symlinks are created:
 * o from the kobject to the claimed bdev
 * o from the "holders" directory of the bdev to the parent of the kobject
 * bd_release_from_kobject() removes these symlinks.
 *
 * Example:
 * If /dev/dm-0 maps to /dev/sda, the kobject corresponding to
 * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 */

static struct kobject *bdev_get_kobj(struct block_device *bdev)
{
	if (bdev->bd_contains != bdev)
		return kobject_get(&bdev->bd_part->kobj);
	else
		return kobject_get(&bdev->bd_disk->kobj);
}

static struct kobject *bdev_get_holder(struct block_device *bdev)
{
	if (bdev->bd_contains != bdev)
		return kobject_get(bdev->bd_part->holder_dir);
	else
		return kobject_get(bdev->bd_disk->holder_dir);
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	if (!from || !to)
		return 0;
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	if (!from || !to)
		return;
	sysfs_remove_link(from, kobject_name(to));
}

/*
 * 'struct bd_holder' contains pointers to kobjects symlinked by
 * bd_claim_by_kobject.
 * It's connected to bd_holder_list which is protected by bdev->bd_mutex.
 */
struct bd_holder {
	struct list_head list;	/* chain of holders of the bdev */
	int count;		/* references from the holder */
	struct kobject *sdir;	/* holder object, e.g. "/block/dm-0/slaves" */
	struct kobject *hdev;	/* e.g. "/block/dm-0" */
	struct kobject *hdir;	/* e.g. "/block/sda/holders" */
	struct kobject *sdev;	/* e.g. "/block/sda" */
};

/*
 * Get references of related kobjects at once.
 * Returns 1 on success. 0 on failure.
 *
 * Should call bd_holder_release_dirs() after successful use.
 */
static int bd_holder_grab_dirs(struct block_device *bdev,
			struct bd_holder *bo)
{
	if (!bdev || !bo)
		return 0;

	bo->sdir = kobject_get(bo->sdir);
	if (!bo->sdir)
		return 0;

	bo->hdev = kobject_get(bo->sdir->parent);
	if (!bo->hdev)
		goto fail_put_sdir;

	bo->sdev = bdev_get_kobj(bdev);
	if (!bo->sdev)
		goto fail_put_hdev;

	bo->hdir = bdev_get_holder(bdev);
	if (!bo->hdir)
		goto fail_put_sdev;

	return 1;

fail_put_sdev:
	kobject_put(bo->sdev);
fail_put_hdev:
	kobject_put(bo->hdev);
fail_put_sdir:
	kobject_put(bo->sdir);

	return 0;
}

/* Put references of related kobjects at once. */
static void bd_holder_release_dirs(struct bd_holder *bo)
{
	kobject_put(bo->hdir);
	kobject_put(bo->sdev);
	kobject_put(bo->hdev);
	kobject_put(bo->sdir);
}

static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
{
	struct bd_holder *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	bo->count = 1;
	bo->sdir = kobj;

	return bo;
}

static void free_bd_holder(struct bd_holder *bo)
{
	kfree(bo);
}

/**
 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
 *
 * @bdev: block device to be bd_claimed
 * @bo: preallocated and initialized by alloc_bd_holder()
 *
 * If there is no matching entry with @bo in @bdev->bd_holder_list,
 * add @bo to the list and create the symlinks.
 *
 * Returns 0 if symlinks are created or already there.
 * Returns -ve if something fails and @bo can be freed.
 */
static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
{
	struct bd_holder *tmp;
	int ret;

	if (!bo)
		return -EINVAL;

	list_for_each_entry(tmp, &bdev->bd_holder_list, list) {
		if (tmp->sdir == bo->sdir) {
			tmp->count++;
			/* We've already done what we need to do here. */
			free_bd_holder(bo);
			return 0;
		}
	}

	if (!bd_holder_grab_dirs(bdev, bo))
		return -EBUSY;

	ret = add_symlink(bo->sdir, bo->sdev);
	if (ret == 0) {
		ret = add_symlink(bo->hdir, bo->hdev);
		if (ret)
			del_symlink(bo->sdir, bo->sdev);
	}
	if (ret == 0)
		list_add_tail(&bo->list, &bdev->bd_holder_list);
	return ret;
}

/**
 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
 *
 * @bdev: block device to be bd_claimed
 * @kobj: holder's kobject
 *
 * If there is a matching entry with @kobj in @bdev->bd_holder_list
 * and no other bd_claim() from the same kobject,
 * remove the struct bd_holder from the list and delete its symlinks.
 *
 * Returns a pointer to the struct bd_holder when it's removed from the list
 * and ready to be freed.
 * Returns NULL if no matching claim is found or there is another bd_claim()
 * by the same kobject.
 */
static struct bd_holder *del_bd_holder(struct block_device *bdev,
					struct kobject *kobj)
{
	struct bd_holder *bo;

	list_for_each_entry(bo, &bdev->bd_holder_list, list) {
		if (bo->sdir == kobj) {
			bo->count--;
			BUG_ON(bo->count < 0);
			if (!bo->count) {
				list_del(&bo->list);
				del_symlink(bo->sdir, bo->sdev);
				del_symlink(bo->hdir, bo->hdev);
				bd_holder_release_dirs(bo);
				return bo;
			}
			break;
		}
	}

	return NULL;
}

/**
 * bd_claim_by_kobject - bd_claim() with additional kobject signature
 *
 * @bdev: block device to be claimed
 * @holder: holder's signature
 * @kobj: holder's kobject
 *
 * Do bd_claim() and if it succeeds, create sysfs symlinks between
 * the bdev and the holder's kobject.
 * Use bd_release_from_kobject() when releasing the claimed bdev.
 *
 * Returns 0 on success. (same as bd_claim())
 * Returns errno on failure.
 */
static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
				struct kobject *kobj)
{
	int res;
	struct bd_holder *bo;

	if (!kobj)
		return -EINVAL;

	bo = alloc_bd_holder(kobj);
	if (!bo)
		return -ENOMEM;

	mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION);
	res = bd_claim(bdev, holder);
	if (res == 0)
		res = add_bd_holder(bdev, bo);
	if (res)
		free_bd_holder(bo);
	mutex_unlock(&bdev->bd_mutex);

	return res;
}

/**
 * bd_release_from_kobject - bd_release() with additional kobject signature
 *
 * @bdev: block device to be released
 * @kobj: holder's kobject
 *
 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
 */
static void bd_release_from_kobject(struct block_device *bdev,
					struct kobject *kobj)
{
	struct bd_holder *bo;

	if (!kobj)
		return;

	mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION);
	bd_release(bdev);
	if ((bo = del_bd_holder(bdev, kobj)))
		free_bd_holder(bo);
	mutex_unlock(&bdev->bd_mutex);
}

/**
 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
 *
 * @bdev: block device to be claimed
 * @holder: holder's signature
 * @disk: holder's gendisk
 *
 * Call bd_claim_by_kobject() after taking a reference on @disk->slave_dir.
 */
int bd_claim_by_disk(struct block_device *bdev, void *holder,
			struct gendisk *disk)
{
	return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
}
EXPORT_SYMBOL_GPL(bd_claim_by_disk);

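/*
 * Usage sketch (illustrative): a stacking driver such as device-mapper
 * would claim the devices it sits on roughly like this, so that the
 * "slaves"/"holders" symlinks described above show up in sysfs; "md"
 * names a hypothetical per-target structure:
 *
 *	err = bd_claim_by_disk(slave_bdev, md, md->disk);
 *	...
 *	bd_release_from_disk(slave_bdev, md->disk);
 */
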
/**
 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
 *
 * @bdev: block device to be released
 * @disk: holder's gendisk
 *
 * Call bd_release_from_kobject() and put @disk->slave_dir.
 */
void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
{
	bd_release_from_kobject(bdev, disk->slave_dir);
	kobject_put(disk->slave_dir);
}
EXPORT_SYMBOL_GPL(bd_release_from_disk);
#endif

/*
 * Tries to open block device by device number. Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number. _Never_
 * to be used for internal purposes. If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, unsigned mode)
{
	struct block_device *bdev = bdget(dev);
	int err = -ENOMEM;
	int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
	if (bdev)
		err = blkdev_get(bdev, mode, flags);
	return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);

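/*
 * Usage sketch (illustrative): failure comes back encoded in the pointer,
 * so callers must test with IS_ERR(), never against NULL:
 *
 *	struct block_device *bdev = open_by_devnum(dev, FMODE_READ);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev);
 */
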
static int
blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags);

struct block_device *open_partition_by_devnum(dev_t dev, unsigned mode)
{
	struct block_device *bdev = bdget(dev);
	int err = -ENOMEM;
	int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
	if (bdev)
		err = blkdev_get_partition(bdev, mode, flags);
	return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_partition_by_devnum);

/*
 * This routine checks whether removable media has been changed,
 * and invalidates all buffer-cache entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device_operations * bdops = disk->fops;

	if (!bdops->media_changed)
		return 0;
	if (!bdops->media_changed(bdev->bd_disk))
		return 0;

	if (__invalidate_device(bdev))
		printk("VFS: busy inodes on changed media.\n");

	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	if (bdev->bd_disk->minors > 1)
		bdev->bd_invalidated = 1;
	return 1;
}

EXPORT_SYMBOL(check_disk_change);

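/*
 * Usage sketch (illustrative): a removable-media driver calls this from
 * its open() method so that a media swap is noticed before any I/O is
 * issued; "floppy_open" here is hypothetical:
 *
 *	static int floppy_open(struct inode *inode, struct file *filp)
 *	{
 *		check_disk_change(inode->i_bdev);
 *		...
 *	}
 */
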
void bd_set_size(struct block_device *bdev, loff_t size)
{
	unsigned bsize = bdev_hardsect_size(bdev);

	bdev->bd_inode->i_size = size;
	while (bsize < PAGE_CACHE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_block_size = bsize;
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);

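/*
 * Worked example: the loop above picks the largest power-of-two block
 * size (capped at PAGE_CACHE_SIZE) that still divides the device size.
 * For a 512-byte hardsect device of 3 GB + 1 KB (size = 0xC0000400),
 * bsize stops at 1024 because bit 0x400 is set in the size.
 */
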
static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
{
	int ret = 0;
	struct inode *bd_inode = bdev->bd_inode;
	struct gendisk *disk = bdev->bd_disk;

	mutex_lock_nested(&bdev->bd_mutex, subclass);
	lock_kernel();
	if (!--bdev->bd_openers) {
		sync_blockdev(bdev);
		kill_bdev(bdev);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			ret = disk->fops->release(bd_inode, NULL);
	} else {
		mutex_lock_nested(&bdev->bd_contains->bd_mutex,
				  subclass + 1);
		bdev->bd_contains->bd_part_count--;
		mutex_unlock(&bdev->bd_contains->bd_mutex);
	}
	if (!bdev->bd_openers) {
		struct module *owner = disk->fops->owner;

		put_disk(disk);
		module_put(owner);

		if (bdev->bd_contains != bdev) {
			kobject_put(&bdev->bd_part->kobj);
			bdev->bd_part = NULL;
		}
		bdev->bd_disk = NULL;
		bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
		if (bdev != bdev->bd_contains)
			__blkdev_put(bdev->bd_contains, subclass + 1);
		bdev->bd_contains = NULL;
	}
	unlock_kernel();
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}

int blkdev_put(struct block_device *bdev)
{
	return __blkdev_put(bdev, BD_MUTEX_NORMAL);
}
EXPORT_SYMBOL(blkdev_put);

int blkdev_put_partition(struct block_device *bdev)
{
	return __blkdev_put(bdev, BD_MUTEX_PARTITION);
}
EXPORT_SYMBOL(blkdev_put_partition);

static int
blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags);

static int
do_open(struct block_device *bdev, struct file *file, unsigned int subclass)
{
	struct module *owner = NULL;
	struct gendisk *disk;
	int ret = -ENXIO;
	int part;

	file->f_mapping = bdev->bd_inode->i_mapping;
	lock_kernel();
	disk = get_gendisk(bdev->bd_dev, &part);
	if (!disk) {
		unlock_kernel();
		bdput(bdev);
		return ret;
	}
	owner = disk->fops->owner;

	mutex_lock_nested(&bdev->bd_mutex, subclass);

	if (!bdev->bd_openers) {
		bdev->bd_disk = disk;
		bdev->bd_contains = bdev;
		if (!part) {
			struct backing_dev_info *bdi;
			if (disk->fops->open) {
				ret = disk->fops->open(bdev->bd_inode, file);
				if (ret)
					goto out_first;
			}
			if (!bdev->bd_openers) {
				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
				bdi = blk_get_backing_dev_info(bdev);
				if (bdi == NULL)
					bdi = &default_backing_dev_info;
				bdev->bd_inode->i_data.backing_dev_info = bdi;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(disk, bdev);
		} else {
			struct hd_struct *p;
			struct block_device *whole;
			whole = bdget_disk(disk, 0);
			ret = -ENOMEM;
			if (!whole)
				goto out_first;
			ret = blkdev_get_whole(whole, file->f_mode, file->f_flags);
			if (ret)
				goto out_first;
			bdev->bd_contains = whole;
			mutex_lock_nested(&whole->bd_mutex, BD_MUTEX_WHOLE);
			whole->bd_part_count++;
			p = disk->part[part - 1];
			bdev->bd_inode->i_data.backing_dev_info =
				whole->bd_inode->i_data.backing_dev_info;
			if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
				whole->bd_part_count--;
				mutex_unlock(&whole->bd_mutex);
				ret = -ENXIO;
				goto out_first;
			}
			kobject_get(&p->kobj);
			bdev->bd_part = p;
			bd_set_size(bdev, (loff_t) p->nr_sects << 9);
			mutex_unlock(&whole->bd_mutex);
		}
	} else {
		put_disk(disk);
		module_put(owner);
		if (bdev->bd_contains == bdev) {
			if (bdev->bd_disk->fops->open) {
				ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
				if (ret)
					goto out;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(bdev->bd_disk, bdev);
		} else {
			mutex_lock_nested(&bdev->bd_contains->bd_mutex,
					  BD_MUTEX_WHOLE);
			bdev->bd_contains->bd_part_count++;
			mutex_unlock(&bdev->bd_contains->bd_mutex);
		}
	}
	bdev->bd_openers++;
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();
	return 0;

out_first:
	bdev->bd_disk = NULL;
	bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE);
	bdev->bd_contains = NULL;
	put_disk(disk);
	module_put(owner);
out:
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();
	if (ret)
		bdput(bdev);
	return ret;
}

int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
{
	/*
	 * This crockload is due to bad choice of ->open() type.
	 * It will go away.
	 * For now, block device ->open() routine must _not_
	 * examine anything in 'inode' argument except ->i_rdev.
	 */
	struct file fake_file = {};
	struct dentry fake_dentry = {};
	fake_file.f_mode = mode;
	fake_file.f_flags = flags;
	fake_file.f_dentry = &fake_dentry;
	fake_dentry.d_inode = bdev->bd_inode;

	return do_open(bdev, &fake_file, BD_MUTEX_NORMAL);
}

EXPORT_SYMBOL(blkdev_get);

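/*
 * blkdev_get() above and the two variants below differ only in the
 * lockdep subclass handed to do_open(): BD_MUTEX_NORMAL, BD_MUTEX_WHOLE
 * and BD_MUTEX_PARTITION keep the legitimate partition -> whole-disk
 * bd_mutex nesting from being flagged as a self-deadlock.
 */
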
static int
blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags)
{
	/*
	 * This crockload is due to bad choice of ->open() type.
	 * It will go away.
	 * For now, block device ->open() routine must _not_
	 * examine anything in 'inode' argument except ->i_rdev.
	 */
	struct file fake_file = {};
	struct dentry fake_dentry = {};
	fake_file.f_mode = mode;
	fake_file.f_flags = flags;
	fake_file.f_dentry = &fake_dentry;
	fake_dentry.d_inode = bdev->bd_inode;

	return do_open(bdev, &fake_file, BD_MUTEX_WHOLE);
}

static int
blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags)
{
	/*
	 * This crockload is due to bad choice of ->open() type.
	 * It will go away.
	 * For now, block device ->open() routine must _not_
	 * examine anything in 'inode' argument except ->i_rdev.
	 */
	struct file fake_file = {};
	struct dentry fake_dentry = {};
	fake_file.f_mode = mode;
	fake_file.f_flags = flags;
	fake_file.f_dentry = &fake_dentry;
	fake_dentry.d_inode = bdev->bd_inode;

	return do_open(bdev, &fake_file, BD_MUTEX_PARTITION);
}

static int blkdev_open(struct inode * inode, struct file * filp)
{
	struct block_device *bdev;
	int res;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binaries need it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	bdev = bd_acquire(inode);

	res = do_open(bdev, filp, BD_MUTEX_NORMAL);
	if (res)
		return res;

	if (!(filp->f_flags & O_EXCL))
		return 0;

	if (!(res = bd_claim(bdev, filp)))
		return 0;

	blkdev_put(bdev);
	return res;
}

static int blkdev_close(struct inode * inode, struct file * filp)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	if (bdev->bd_holder == filp)
		bd_release(bdev);
	return blkdev_put(bdev);
}

static ssize_t blkdev_file_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

	return generic_file_write_nolock(file, &local_iov, 1, ppos);
}

static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf,
				     size_t count, loff_t pos)
{
	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };

	return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
}

const struct address_space_operations def_blk_aops = {
	.readpage = blkdev_readpage,
	.writepage = blkdev_writepage,
	.sync_page = block_sync_page,
	.prepare_write = blkdev_prepare_write,
	.commit_write = blkdev_commit_write,
	.writepages = generic_writepages,
	.direct_IO = blkdev_direct_IO,
};

const struct file_operations def_blk_fops = {
	.open = blkdev_open,
	.release = blkdev_close,
	.llseek = block_llseek,
	.read = generic_file_read,
	.write = blkdev_file_write,
	.aio_read = generic_file_aio_read,
	.aio_write = blkdev_file_aio_write,
	.mmap = generic_file_mmap,
	.fsync = block_fsync,
	.unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_blkdev_ioctl,
#endif
	.readv = generic_file_readv,
	.writev = generic_file_write_nolock,
	.sendfile = generic_file_sendfile,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
	int res;
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
	set_fs(old_fs);
	return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);

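/*
 * Usage sketch (illustrative): the set_fs(KERNEL_DS) dance above lets
 * in-kernel callers pass a kernel pointer where the ioctl expects a
 * __user one, e.g. to query the size of a device in 512-byte sectors:
 *
 *	unsigned long nsects;
 *	if (ioctl_by_bdev(bdev, BLKGETSIZE, (unsigned long)&nsects) == 0)
 *		...
 */
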
/**
 * lookup_bdev - lookup a struct block_device by name
 *
 * @path: special file representing the block device
 *
 * Get a reference to the blockdevice at @path in the current
 * namespace if possible and return it. Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *path)
{
	struct block_device *bdev;
	struct inode *inode;
	struct nameidata nd;
	int error;

	if (!path || !*path)
		return ERR_PTR(-EINVAL);

	error = path_lookup(path, LOOKUP_FOLLOW, &nd);
	if (error)
		return ERR_PTR(error);

	inode = nd.dentry->d_inode;
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (nd.mnt->mnt_flags & MNT_NODEV)
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_release(&nd);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}

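/*
 * Usage sketch (illustrative): resolve a device node to its bdev, then
 * drop the reference with bdput(); the path is made up:
 *
 *	struct block_device *bdev = lookup_bdev("/dev/sda1");
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	bdput(bdev);
 */
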
/**
 * open_bdev_excl - open a block device by name and set it up for use
 *
 * @path: special file representing the block device
 * @flags: %MS_RDONLY for opening read-only
 * @holder: owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder.
 */
struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
{
	struct block_device *bdev;
	mode_t mode = FMODE_READ;
	int error = 0;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;
	error = blkdev_get(bdev, mode, 0);
	if (error)
		return ERR_PTR(error);
	error = -EACCES;
	if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
		goto blkdev_put;
	error = bd_claim(bdev, holder);
	if (error)
		goto blkdev_put;

	return bdev;

blkdev_put:
	blkdev_put(bdev);
	return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_excl);

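/*
 * Usage sketch (illustrative): this pair is how filesystems usually grab
 * their backing device at mount time, with the super_block as the holder
 * cookie:
 *
 *	struct block_device *bdev = open_bdev_excl("/dev/sda1", MS_RDONLY, sb);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	close_bdev_excl(bdev);
 */
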
/**
 * close_bdev_excl - release a blockdevice opened by open_bdev_excl()
 *
 * @bdev: blockdevice to close
 *
 * This is the counterpart to open_bdev_excl().
 */
void close_bdev_excl(struct block_device *bdev)
{
	bd_release(bdev);
	blkdev_put(bdev);
}

EXPORT_SYMBOL(close_bdev_excl);