1/*
2 * linux/fs/block_dev.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
6 */
7
8#include <linux/init.h>
9#include <linux/mm.h>
10#include <linux/fcntl.h>
11#include <linux/slab.h>
12#include <linux/kmod.h>
13#include <linux/major.h>
1da177e4 14#include <linux/smp_lock.h>
7db9cfd3 15#include <linux/device_cgroup.h>
16#include <linux/highmem.h>
17#include <linux/blkdev.h>
18#include <linux/module.h>
19#include <linux/blkpg.h>
20#include <linux/buffer_head.h>
585d3bc0 21#include <linux/pagevec.h>
811d736f 22#include <linux/writeback.h>
23#include <linux/mpage.h>
24#include <linux/mount.h>
25#include <linux/uio.h>
26#include <linux/namei.h>
1368c4f2 27#include <linux/log2.h>
2e1483c9 28#include <linux/kmemleak.h>
1da177e4 29#include <asm/uaccess.h>
07f3f05c 30#include "internal.h"
31
32struct bdev_inode {
33 struct block_device bdev;
34 struct inode vfs_inode;
35};
36
37static const struct address_space_operations def_blk_aops;
38
39static inline struct bdev_inode *BDEV_I(struct inode *inode)
40{
41 return container_of(inode, struct bdev_inode, vfs_inode);
42}
43
44inline struct block_device *I_BDEV(struct inode *inode)
45{
46 return &BDEV_I(inode)->bdev;
47}
48
49EXPORT_SYMBOL(I_BDEV);
50
51static sector_t max_block(struct block_device *bdev)
52{
53 sector_t retval = ~((sector_t)0);
54 loff_t sz = i_size_read(bdev->bd_inode);
55
56 if (sz) {
57 unsigned int size = block_size(bdev);
58 unsigned int sizebits = blksize_bits(size);
59 retval = (sz >> sizebits);
60 }
61 return retval;
62}
63
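/*
 * Worked example: for a 1 GiB device (i_size_read() == 1073741824) using a
 * 4096-byte block size, blksize_bits(4096) == 12 and max_block() returns
 * 1073741824 >> 12 == 262144, i.e. the number of blocks and therefore the
 * first block index that is out of range.
 */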
f9a14399 64/* Kill _all_ buffers and pagecache, dirty or not. */
65static void kill_bdev(struct block_device *bdev)
66{
67 if (bdev->bd_inode->i_mapping->nrpages == 0)
68 return;
69 invalidate_bh_lrus();
70 truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
71}
72
73int set_blocksize(struct block_device *bdev, int size)
74{
75 /* Size must be a power of two, and between 512 and PAGE_SIZE */
1368c4f2 76 if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
77 return -EINVAL;
78
79 /* Size cannot be smaller than the size supported by the device */
e1defc4f 80 if (size < bdev_logical_block_size(bdev))
81 return -EINVAL;
82
83 /* Don't change the size if it is same as current */
84 if (bdev->bd_block_size != size) {
85 sync_blockdev(bdev);
86 bdev->bd_block_size = size;
87 bdev->bd_inode->i_blkbits = blksize_bits(size);
88 kill_bdev(bdev);
89 }
90 return 0;
91}
92
93EXPORT_SYMBOL(set_blocksize);
94
95int sb_set_blocksize(struct super_block *sb, int size)
96{
97 if (set_blocksize(sb->s_bdev, size))
98 return 0;
 99 /* If we get here, we know size is a power of two
 100 * and its value is between 512 and PAGE_SIZE */
101 sb->s_blocksize = size;
38885bd4 102 sb->s_blocksize_bits = blksize_bits(size);
103 return sb->s_blocksize;
104}
105
106EXPORT_SYMBOL(sb_set_blocksize);
107
108int sb_min_blocksize(struct super_block *sb, int size)
109{
e1defc4f 110 int minsize = bdev_logical_block_size(sb->s_bdev);
111 if (size < minsize)
112 size = minsize;
113 return sb_set_blocksize(sb, size);
114}
115
116EXPORT_SYMBOL(sb_min_blocksize);
117
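/*
 * Usage sketch (illustrative only; myfs_fill_super() and MYFS_BLOCK_SIZE are
 * hypothetical): a filesystem's fill_super callback typically negotiates the
 * block size before reading its on-disk superblock:
 *
 *	static int myfs_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		if (!sb_min_blocksize(sb, MYFS_BLOCK_SIZE))
 *			return -EINVAL;
 *		...
 *	}
 *
 * sb_min_blocksize() rounds the request up to the device's logical block size
 * and returns 0 when set_blocksize() rejects the result.
 */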
118static int
119blkdev_get_block(struct inode *inode, sector_t iblock,
120 struct buffer_head *bh, int create)
121{
122 if (iblock >= max_block(I_BDEV(inode))) {
123 if (create)
124 return -EIO;
125
126 /*
127 * for reads, we're just trying to fill a partial page.
128 * return a hole, they will have to call get_block again
129 * before they can fill it, and they will get -EIO at that
130 * time
131 */
132 return 0;
133 }
134 bh->b_bdev = I_BDEV(inode);
135 bh->b_blocknr = iblock;
136 set_buffer_mapped(bh);
137 return 0;
138}
139
140static int
141blkdev_get_blocks(struct inode *inode, sector_t iblock,
142 struct buffer_head *bh, int create)
143{
144 sector_t end_block = max_block(I_BDEV(inode));
145 unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
146
147 if ((iblock + max_blocks) > end_block) {
148 max_blocks = end_block - iblock;
149 if ((long)max_blocks <= 0) {
150 if (create)
151 return -EIO; /* write fully beyond EOF */
152 /*
153 * It is a read which is fully beyond EOF. We return
154 * a !buffer_mapped buffer
155 */
156 max_blocks = 0;
157 }
158 }
159
160 bh->b_bdev = I_BDEV(inode);
161 bh->b_blocknr = iblock;
162 bh->b_size = max_blocks << inode->i_blkbits;
163 if (max_blocks)
164 set_buffer_mapped(bh);
165 return 0;
166}
167
168static ssize_t
169blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
170 loff_t offset, unsigned long nr_segs)
171{
172 struct file *file = iocb->ki_filp;
173 struct inode *inode = file->f_mapping->host;
174
175 return blockdev_direct_IO_no_locking_newtrunc(rw, iocb, inode,
176 I_BDEV(inode), iov, offset, nr_segs,
177 blkdev_get_blocks, NULL);
178}
179
180int __sync_blockdev(struct block_device *bdev, int wait)
181{
182 if (!bdev)
183 return 0;
184 if (!wait)
185 return filemap_flush(bdev->bd_inode->i_mapping);
186 return filemap_write_and_wait(bdev->bd_inode->i_mapping);
187}
188
189/*
190 * Write out and wait upon all the dirty data associated with a block
191 * device via its mapping. Does not take the superblock lock.
192 */
193int sync_blockdev(struct block_device *bdev)
194{
5cee5815 195 return __sync_blockdev(bdev, 1);
196}
197EXPORT_SYMBOL(sync_blockdev);
198
199/*
200 * Write out and wait upon all dirty data associated with this
201 * device. Filesystem data as well as the underlying block
202 * device. Takes the superblock lock.
203 */
204int fsync_bdev(struct block_device *bdev)
205{
206 struct super_block *sb = get_super(bdev);
207 if (sb) {
60b0680f 208 int res = sync_filesystem(sb);
209 drop_super(sb);
210 return res;
211 }
212 return sync_blockdev(bdev);
213}
47e4491b 214EXPORT_SYMBOL(fsync_bdev);
215
216/**
217 * freeze_bdev -- lock a filesystem and force it into a consistent state
218 * @bdev: blockdevice to lock
219 *
220 * If a superblock is found on this device, we take the s_umount semaphore
221 * on it to make sure nobody unmounts until the snapshot creation is done.
222 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 223 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 224 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 225 * counts down in thaw_bdev(). When it reaches 0, thaw_bdev() actually
 226 * unfreezes.
227 */
228struct super_block *freeze_bdev(struct block_device *bdev)
229{
230 struct super_block *sb;
231 int error = 0;
232
233 mutex_lock(&bdev->bd_fsfreeze_mutex);
234 if (++bdev->bd_fsfreeze_count > 1) {
235 /*
236 * We don't even need to grab a reference - the first call
 237 * to freeze_bdev grabs an active reference and only the last
238 * thaw_bdev drops it.
239 */
585d3bc0 240 sb = get_super(bdev);
241 drop_super(sb);
242 mutex_unlock(&bdev->bd_fsfreeze_mutex);
243 return sb;
244 }
245
246 sb = get_active_super(bdev);
247 if (!sb)
248 goto out;
249 error = freeze_super(sb);
250 if (error) {
251 deactivate_super(sb);
252 bdev->bd_fsfreeze_count--;
585d3bc0 253 mutex_unlock(&bdev->bd_fsfreeze_mutex);
18e9e510 254 return ERR_PTR(error);
585d3bc0 255 }
18e9e510 256 deactivate_super(sb);
4504230a 257 out:
258 sync_blockdev(bdev);
259 mutex_unlock(&bdev->bd_fsfreeze_mutex);
4fadd7bb 260 return sb; /* thaw_bdev releases s->s_umount */
261}
262EXPORT_SYMBOL(freeze_bdev);
263
264/**
265 * thaw_bdev -- unlock filesystem
266 * @bdev: blockdevice to unlock
267 * @sb: associated superblock
268 *
269 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
270 */
271int thaw_bdev(struct block_device *bdev, struct super_block *sb)
272{
4504230a 273 int error = -EINVAL;
274
275 mutex_lock(&bdev->bd_fsfreeze_mutex);
4504230a 276 if (!bdev->bd_fsfreeze_count)
18e9e510 277 goto out;
278
279 error = 0;
280 if (--bdev->bd_fsfreeze_count > 0)
18e9e510 281 goto out;
282
283 if (!sb)
18e9e510 284 goto out;
4504230a 285
286 error = thaw_super(sb);
287 if (error) {
288 bdev->bd_fsfreeze_count++;
289 mutex_unlock(&bdev->bd_fsfreeze_mutex);
290 return error;
291 }
292out:
293 mutex_unlock(&bdev->bd_fsfreeze_mutex);
294 return 0;
295}
296EXPORT_SYMBOL(thaw_bdev);
297
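/*
 * Usage sketch (illustrative; take_snapshot() stands in for whatever the
 * caller does while the device is frozen):
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() returns NULL when no filesystem is mounted on the device;
 * passing that NULL to thaw_bdev() is fine, it still drops bd_fsfreeze_count.
 */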
298static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
299{
300 return block_write_full_page(page, blkdev_get_block, wbc);
301}
302
303static int blkdev_readpage(struct file * file, struct page * page)
304{
305 return block_read_full_page(page, blkdev_get_block);
306}
307
308static int blkdev_write_begin(struct file *file, struct address_space *mapping,
309 loff_t pos, unsigned len, unsigned flags,
310 struct page **pagep, void **fsdata)
1da177e4 311{
6272b5a5 312 *pagep = NULL;
313 return block_write_begin_newtrunc(file, mapping, pos, len, flags,
314 pagep, fsdata, blkdev_get_block);
315}
316
317static int blkdev_write_end(struct file *file, struct address_space *mapping,
318 loff_t pos, unsigned len, unsigned copied,
319 struct page *page, void *fsdata)
1da177e4 320{
321 int ret;
322 ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
323
324 unlock_page(page);
325 page_cache_release(page);
326
327 return ret;
328}
329
330/*
331 * private llseek:
0f7fc9e4 332 * for a block special file file->f_path.dentry->d_inode->i_size is zero
333 * so we compute the size by hand (just as in block_read/write above)
334 */
335static loff_t block_llseek(struct file *file, loff_t offset, int origin)
336{
337 struct inode *bd_inode = file->f_mapping->host;
338 loff_t size;
339 loff_t retval;
340
1b1dcc1b 341 mutex_lock(&bd_inode->i_mutex);
342 size = i_size_read(bd_inode);
343
344 switch (origin) {
345 case 2:
346 offset += size;
347 break;
348 case 1:
349 offset += file->f_pos;
350 }
351 retval = -EINVAL;
352 if (offset >= 0 && offset <= size) {
353 if (offset != file->f_pos) {
354 file->f_pos = offset;
355 }
356 retval = offset;
357 }
1b1dcc1b 358 mutex_unlock(&bd_inode->i_mutex);
359 return retval;
360}
361
7ea80859 362int blkdev_fsync(struct file *filp, int datasync)
1da177e4 363{
364 struct inode *bd_inode = filp->f_mapping->host;
365 struct block_device *bdev = I_BDEV(bd_inode);
366 int error;
367
368 /*
369 * There is no need to serialise calls to blkdev_issue_flush with
370 * i_mutex and doing so causes performance issues with concurrent
371 * O_SYNC writers to a block device.
372 */
373 mutex_unlock(&bd_inode->i_mutex);
374
7407cf35 375 error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
376 if (error == -EOPNOTSUPP)
377 error = 0;
378
379 mutex_lock(&bd_inode->i_mutex);
380
ab0a9735 381 return error;
1da177e4 382}
b1dd3b28 383EXPORT_SYMBOL(blkdev_fsync);
384
385/*
386 * pseudo-fs
387 */
388
389static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
e18b890b 390static struct kmem_cache * bdev_cachep __read_mostly;
391
392static struct inode *bdev_alloc_inode(struct super_block *sb)
393{
e94b1766 394 struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
395 if (!ei)
396 return NULL;
397 return &ei->vfs_inode;
398}
399
400static void bdev_destroy_inode(struct inode *inode)
401{
402 struct bdev_inode *bdi = BDEV_I(inode);
403
404 kmem_cache_free(bdev_cachep, bdi);
405}
406
51cc5068 407static void init_once(void *foo)
408{
409 struct bdev_inode *ei = (struct bdev_inode *) foo;
410 struct block_device *bdev = &ei->bdev;
411
412 memset(bdev, 0, sizeof(*bdev));
413 mutex_init(&bdev->bd_mutex);
414 INIT_LIST_HEAD(&bdev->bd_inodes);
415 INIT_LIST_HEAD(&bdev->bd_list);
641dc636 416#ifdef CONFIG_SYSFS
a35afb83 417 INIT_LIST_HEAD(&bdev->bd_holder_list);
641dc636 418#endif
a35afb83 419 inode_init_once(&ei->vfs_inode);
420 /* Initialize mutex for freeze. */
421 mutex_init(&bdev->bd_fsfreeze_mutex);
422}
423
424static inline void __bd_forget(struct inode *inode)
425{
426 list_del_init(&inode->i_devices);
427 inode->i_bdev = NULL;
428 inode->i_mapping = &inode->i_data;
429}
430
431static void bdev_clear_inode(struct inode *inode)
432{
433 struct block_device *bdev = &BDEV_I(inode)->bdev;
434 struct list_head *p;
435 spin_lock(&bdev_lock);
436 while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
437 __bd_forget(list_entry(p, struct inode, i_devices));
438 }
439 list_del_init(&bdev->bd_list);
440 spin_unlock(&bdev_lock);
441}
442
ee9b6d61 443static const struct super_operations bdev_sops = {
444 .statfs = simple_statfs,
445 .alloc_inode = bdev_alloc_inode,
446 .destroy_inode = bdev_destroy_inode,
447 .drop_inode = generic_delete_inode,
448 .clear_inode = bdev_clear_inode,
449};
450
451static int bd_get_sb(struct file_system_type *fs_type,
452 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
1da177e4 453{
454e2398 454 return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
455}
456
457static struct file_system_type bd_type = {
458 .name = "bdev",
459 .get_sb = bd_get_sb,
460 .kill_sb = kill_anon_super,
461};
462
c2acf7b9 463struct super_block *blockdev_superblock __read_mostly;
464
465void __init bdev_cache_init(void)
466{
467 int err;
468 struct vfsmount *bd_mnt;
469
1da177e4 470 bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
471 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
472 SLAB_MEM_SPREAD|SLAB_PANIC),
20c2df83 473 init_once);
474 err = register_filesystem(&bd_type);
475 if (err)
476 panic("Cannot register bdev pseudo-fs");
477 bd_mnt = kern_mount(&bd_type);
478 if (IS_ERR(bd_mnt))
479 panic("Cannot create bdev pseudo-fs");
480 /*
481 * This vfsmount structure is only used to obtain the
482 * blockdev_superblock, so tell kmemleak not to report it.
483 */
484 kmemleak_not_leak(bd_mnt);
485 blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */
486}
487
488/*
489 * Most likely _very_ bad one - but then it's hardly critical for small
 490 * /dev and can be fixed when somebody needs a really large one.
491 * Keep in mind that it will be fed through icache hash function too.
492 */
493static inline unsigned long hash(dev_t dev)
494{
495 return MAJOR(dev)+MINOR(dev);
496}
497
498static int bdev_test(struct inode *inode, void *data)
499{
500 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
501}
502
503static int bdev_set(struct inode *inode, void *data)
504{
505 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
506 return 0;
507}
508
509static LIST_HEAD(all_bdevs);
510
511struct block_device *bdget(dev_t dev)
512{
513 struct block_device *bdev;
514 struct inode *inode;
515
c2acf7b9 516 inode = iget5_locked(blockdev_superblock, hash(dev),
517 bdev_test, bdev_set, &dev);
518
519 if (!inode)
520 return NULL;
521
522 bdev = &BDEV_I(inode)->bdev;
523
524 if (inode->i_state & I_NEW) {
525 bdev->bd_contains = NULL;
526 bdev->bd_inode = inode;
527 bdev->bd_block_size = (1 << inode->i_blkbits);
528 bdev->bd_part_count = 0;
529 bdev->bd_invalidated = 0;
530 inode->i_mode = S_IFBLK;
531 inode->i_rdev = dev;
532 inode->i_bdev = bdev;
533 inode->i_data.a_ops = &def_blk_aops;
534 mapping_set_gfp_mask(&inode->i_data, GFP_USER);
535 inode->i_data.backing_dev_info = &default_backing_dev_info;
536 spin_lock(&bdev_lock);
537 list_add(&bdev->bd_list, &all_bdevs);
538 spin_unlock(&bdev_lock);
539 unlock_new_inode(inode);
540 }
541 return bdev;
542}
543
544EXPORT_SYMBOL(bdget);
545
546/**
547 * bdgrab -- Grab a reference to an already referenced block device
548 * @bdev: Block device to grab a reference to.
549 */
550struct block_device *bdgrab(struct block_device *bdev)
551{
552 atomic_inc(&bdev->bd_inode->i_count);
553 return bdev;
554}
555
556long nr_blockdev_pages(void)
557{
203a2935 558 struct block_device *bdev;
559 long ret = 0;
560 spin_lock(&bdev_lock);
203a2935 561 list_for_each_entry(bdev, &all_bdevs, bd_list) {
562 ret += bdev->bd_inode->i_mapping->nrpages;
563 }
564 spin_unlock(&bdev_lock);
565 return ret;
566}
567
568void bdput(struct block_device *bdev)
569{
570 iput(bdev->bd_inode);
571}
572
573EXPORT_SYMBOL(bdput);
574
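/*
 * Note on reference pairing: bdget() and bdgrab() only pin the block-device
 * inode, they do not open the device; every successful call must eventually
 * be balanced by bdput(). A minimal sketch:
 *
 *	struct block_device *bdev = bdget(dev);
 *	if (!bdev)
 *		return -ENOMEM;
 *	...
 *	bdput(bdev);
 *
 * Opening the device for I/O additionally requires blkdev_get(), which takes
 * over this reference (see __blkdev_get() further down).
 */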
575static struct block_device *bd_acquire(struct inode *inode)
576{
577 struct block_device *bdev;
09d967c6 578
579 spin_lock(&bdev_lock);
580 bdev = inode->i_bdev;
581 if (bdev) {
582 atomic_inc(&bdev->bd_inode->i_count);
583 spin_unlock(&bdev_lock);
584 return bdev;
585 }
586 spin_unlock(&bdev_lock);
09d967c6 587
588 bdev = bdget(inode->i_rdev);
589 if (bdev) {
590 spin_lock(&bdev_lock);
591 if (!inode->i_bdev) {
592 /*
593 * We take an additional bd_inode->i_count for inode,
594 * and it's released in clear_inode() of inode.
595 * So, we can access it via ->i_mapping always
596 * without igrab().
597 */
598 atomic_inc(&bdev->bd_inode->i_count);
599 inode->i_bdev = bdev;
600 inode->i_mapping = bdev->bd_inode->i_mapping;
601 list_add(&inode->i_devices, &bdev->bd_inodes);
602 }
603 spin_unlock(&bdev_lock);
604 }
605 return bdev;
606}
607
608/* Call when you free inode */
609
610void bd_forget(struct inode *inode)
611{
612 struct block_device *bdev = NULL;
613
1da177e4 614 spin_lock(&bdev_lock);
09d967c6 615 if (inode->i_bdev) {
c2acf7b9 616 if (!sb_is_blkdev_sb(inode->i_sb))
09d967c6 617 bdev = inode->i_bdev;
1da177e4 618 __bd_forget(inode);
09d967c6 619 }
1da177e4 620 spin_unlock(&bdev_lock);
621
622 if (bdev)
623 iput(bdev->bd_inode);
624}
625
626/**
627 * bd_may_claim - test whether a block device can be claimed
628 * @bdev: block device of interest
629 * @whole: whole block device containing @bdev, may equal @bdev
630 * @holder: holder trying to claim @bdev
631 *
 632 * Test whether @bdev can be claimed by @holder.
633 *
634 * CONTEXT:
635 * spin_lock(&bdev_lock).
636 *
637 * RETURNS:
638 * %true if @bdev can be claimed, %false otherwise.
639 */
640static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
641 void *holder)
1da177e4 642{
1da177e4 643 if (bdev->bd_holder == holder)
1a3cbbc5 644 return true; /* already a holder */
1da177e4 645 else if (bdev->bd_holder != NULL)
1a3cbbc5 646 return false; /* held by someone else */
1da177e4 647 else if (bdev->bd_contains == bdev)
1a3cbbc5 648 return true; /* is a whole device which isn't held */
1da177e4 649
650 else if (whole->bd_holder == bd_claim)
651 return true; /* is a partition of a device that is being partitioned */
652 else if (whole->bd_holder != NULL)
653 return false; /* is a partition of a held device */
1da177e4 654 else
1a3cbbc5
TH
655 return true; /* is a partition of an un-held device */
656}
657
658/**
659 * bd_prepare_to_claim - prepare to claim a block device
660 * @bdev: block device of interest
661 * @whole: the whole device containing @bdev, may equal @bdev
662 * @holder: holder trying to claim @bdev
663 *
664 * Prepare to claim @bdev. This function fails if @bdev is already
665 * claimed by another holder and waits if another claiming is in
666 * progress. This function doesn't actually claim. On successful
667 * return, the caller has ownership of bd_claiming and bd_holder[s].
668 *
669 * CONTEXT:
670 * spin_lock(&bdev_lock). Might release bdev_lock, sleep and regrab
671 * it multiple times.
672 *
673 * RETURNS:
674 * 0 if @bdev can be claimed, -EBUSY otherwise.
675 */
676static int bd_prepare_to_claim(struct block_device *bdev,
677 struct block_device *whole, void *holder)
678{
679retry:
680 /* if someone else claimed, fail */
681 if (!bd_may_claim(bdev, whole, holder))
682 return -EBUSY;
683
684 /* if someone else is claiming, wait for it to finish */
685 if (whole->bd_claiming && whole->bd_claiming != holder) {
686 wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
687 DEFINE_WAIT(wait);
688
689 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
690 spin_unlock(&bdev_lock);
691 schedule();
692 finish_wait(wq, &wait);
693 spin_lock(&bdev_lock);
694 goto retry;
695 }
696
697 /* yay, all mine */
698 return 0;
699}
700
701/**
702 * bd_start_claiming - start claiming a block device
703 * @bdev: block device of interest
704 * @holder: holder trying to claim @bdev
705 *
706 * @bdev is about to be opened exclusively. Check @bdev can be opened
707 * exclusively and mark that an exclusive open is in progress. Each
708 * successful call to this function must be matched with a call to
709 * either bd_finish_claiming() or bd_abort_claiming() (which do not
710 * fail).
711 *
712 * This function is used to gain exclusive access to the block device
713 * without actually causing other exclusive open attempts to fail. It
714 * should be used when the open sequence itself requires exclusive
715 * access but may subsequently fail.
716 *
717 * CONTEXT:
718 * Might sleep.
719 *
720 * RETURNS:
721 * Pointer to the block device containing @bdev on success, ERR_PTR()
722 * value on failure.
723 */
724static struct block_device *bd_start_claiming(struct block_device *bdev,
725 void *holder)
726{
727 struct gendisk *disk;
728 struct block_device *whole;
729 int partno, err;
730
731 might_sleep();
732
733 /*
734 * @bdev might not have been initialized properly yet, look up
735 * and grab the outer block device the hard way.
736 */
737 disk = get_gendisk(bdev->bd_dev, &partno);
738 if (!disk)
739 return ERR_PTR(-ENXIO);
740
741 whole = bdget_disk(disk, 0);
cf342570 742 module_put(disk->fops->owner);
6b4517a7
TH
743 put_disk(disk);
744 if (!whole)
745 return ERR_PTR(-ENOMEM);
746
747 /* prepare to claim, if successful, mark claiming in progress */
748 spin_lock(&bdev_lock);
749
750 err = bd_prepare_to_claim(bdev, whole, holder);
751 if (err == 0) {
752 whole->bd_claiming = holder;
753 spin_unlock(&bdev_lock);
754 return whole;
755 } else {
756 spin_unlock(&bdev_lock);
757 bdput(whole);
758 return ERR_PTR(err);
759 }
760}
761
762/* releases bdev_lock */
763static void __bd_abort_claiming(struct block_device *whole, void *holder)
764{
765 BUG_ON(whole->bd_claiming != holder);
766 whole->bd_claiming = NULL;
767 wake_up_bit(&whole->bd_claiming, 0);
768
769 spin_unlock(&bdev_lock);
770 bdput(whole);
771}
772
773/**
774 * bd_abort_claiming - abort claiming a block device
775 * @whole: whole block device returned by bd_start_claiming()
776 * @holder: holder trying to claim @bdev
777 *
 778 * Abort a claim started by bd_start_claiming(). Note that
779 * @whole is not the block device to be claimed but the whole device
780 * returned by bd_start_claiming().
781 *
782 * CONTEXT:
783 * Grabs and releases bdev_lock.
784 */
785static void bd_abort_claiming(struct block_device *whole, void *holder)
786{
787 spin_lock(&bdev_lock);
788 __bd_abort_claiming(whole, holder); /* releases bdev_lock */
789}
790
791/* increment holders when we have a legitimate claim. requires bdev_lock */
792static void __bd_claim(struct block_device *bdev, struct block_device *whole,
793 void *holder)
794{
795 /* note that for a whole device bd_holders
796 * will be incremented twice, and bd_holder will
797 * be set to bd_claim before being set to holder
798 */
799 whole->bd_holders++;
800 whole->bd_holder = bd_claim;
801 bdev->bd_holders++;
802 bdev->bd_holder = holder;
803}
804
805/**
806 * bd_finish_claiming - finish claiming a block device
807 * @bdev: block device of interest (passed to bd_start_claiming())
808 * @whole: whole block device returned by bd_start_claiming()
809 * @holder: holder trying to claim @bdev
810 *
 811 * Finish a claim started by bd_start_claiming().
812 *
813 * CONTEXT:
814 * Grabs and releases bdev_lock.
815 */
816static void bd_finish_claiming(struct block_device *bdev,
817 struct block_device *whole, void *holder)
818{
819 spin_lock(&bdev_lock);
820 BUG_ON(!bd_may_claim(bdev, whole, holder));
821 __bd_claim(bdev, whole, holder);
822 __bd_abort_claiming(whole, holder); /* not actually an abort */
823}
824
825/**
826 * bd_claim - claim a block device
827 * @bdev: block device to claim
828 * @holder: holder trying to claim @bdev
829 *
b0018361 830 * Try to claim @bdev which must have been opened successfully.
831 *
832 * CONTEXT:
833 * Might sleep.
834 *
835 * RETURNS:
836 * 0 if successful, -EBUSY if @bdev is already claimed.
837 */
838int bd_claim(struct block_device *bdev, void *holder)
839{
840 struct block_device *whole = bdev->bd_contains;
841 int res;
842
843 might_sleep();
844
845 spin_lock(&bdev_lock);
6b4517a7 846 res = bd_prepare_to_claim(bdev, whole, holder);
847 if (res == 0)
848 __bd_claim(bdev, whole, holder);
849 spin_unlock(&bdev_lock);
6b4517a7 850
851 return res;
852}
853EXPORT_SYMBOL(bd_claim);
854
855void bd_release(struct block_device *bdev)
856{
857 spin_lock(&bdev_lock);
858 if (!--bdev->bd_contains->bd_holders)
859 bdev->bd_contains->bd_holder = NULL;
860 if (!--bdev->bd_holders)
861 bdev->bd_holder = NULL;
862 spin_unlock(&bdev_lock);
863}
864
865EXPORT_SYMBOL(bd_release);
866
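/*
 * Usage sketch (illustrative; "my_data" is a stand-in for the caller's own
 * holder cookie): a driver that has opened @bdev and wants exclusive
 * ownership does:
 *
 *	err = bd_claim(bdev, my_data);
 *	if (err)
 *		return err;	(-EBUSY: held by someone else)
 *	...
 *	bd_release(bdev);
 *
 * The same holder may claim the device more than once; bd_holders counts the
 * claims and each successful bd_claim() needs a matching bd_release().
 */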
867#ifdef CONFIG_SYSFS
868/*
869 * Functions for bd_claim_by_kobject / bd_release_from_kobject
870 *
871 * If a kobject is passed to bd_claim_by_kobject()
872 * and the kobject has a parent directory,
 873 * the following symlinks are created:
874 * o from the kobject to the claimed bdev
875 * o from "holders" directory of the bdev to the parent of the kobject
876 * bd_release_from_kobject() removes these symlinks.
877 *
878 * Example:
879 * If /dev/dm-0 maps to /dev/sda, kobject corresponding to
880 * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
881 * /sys/block/dm-0/slaves/sda --> /sys/block/sda
882 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
883 */
884
4d7dd8fd 885static int add_symlink(struct kobject *from, struct kobject *to)
886{
887 if (!from || !to)
888 return 0;
889 return sysfs_create_link(from, to, kobject_name(to));
890}
891
892static void del_symlink(struct kobject *from, struct kobject *to)
893{
894 if (!from || !to)
895 return;
896 sysfs_remove_link(from, kobject_name(to));
897}
898
899/*
900 * 'struct bd_holder' contains pointers to kobjects symlinked by
901 * bd_claim_by_kobject.
902 * It's connected to bd_holder_list which is protected by bdev->bd_sem.
903 */
904struct bd_holder {
905 struct list_head list; /* chain of holders of the bdev */
906 int count; /* references from the holder */
907 struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */
908 struct kobject *hdev; /* e.g. "/block/dm-0" */
909 struct kobject *hdir; /* e.g. "/block/sda/holders" */
910 struct kobject *sdev; /* e.g. "/block/sda" */
911};
912
913/*
914 * Get references of related kobjects at once.
915 * Returns 1 on success. 0 on failure.
916 *
917 * Should call bd_holder_release_dirs() after successful use.
918 */
919static int bd_holder_grab_dirs(struct block_device *bdev,
920 struct bd_holder *bo)
921{
922 if (!bdev || !bo)
923 return 0;
924
925 bo->sdir = kobject_get(bo->sdir);
926 if (!bo->sdir)
927 return 0;
928
929 bo->hdev = kobject_get(bo->sdir->parent);
930 if (!bo->hdev)
931 goto fail_put_sdir;
932
0762b8bd 933 bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj);
934 if (!bo->sdev)
935 goto fail_put_hdev;
936
4c46501d 937 bo->hdir = kobject_get(bdev->bd_part->holder_dir);
938 if (!bo->hdir)
939 goto fail_put_sdev;
940
941 return 1;
942
943fail_put_sdev:
944 kobject_put(bo->sdev);
945fail_put_hdev:
946 kobject_put(bo->hdev);
947fail_put_sdir:
948 kobject_put(bo->sdir);
949
950 return 0;
951}
952
953/* Put references of related kobjects at once. */
954static void bd_holder_release_dirs(struct bd_holder *bo)
955{
956 kobject_put(bo->hdir);
957 kobject_put(bo->sdev);
958 kobject_put(bo->hdev);
959 kobject_put(bo->sdir);
960}
961
962static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
963{
964 struct bd_holder *bo;
965
966 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
967 if (!bo)
968 return NULL;
969
970 bo->count = 1;
971 bo->sdir = kobj;
972
973 return bo;
974}
975
976static void free_bd_holder(struct bd_holder *bo)
977{
978 kfree(bo);
979}
980
981/**
982 * find_bd_holder - find matching struct bd_holder from the block device
983 *
984 * @bdev: struct block device to be searched
985 * @bo: target struct bd_holder
986 *
987 * Returns matching entry with @bo in @bdev->bd_holder_list.
988 * If found, increment the reference count and return the pointer.
989 * If not found, returns NULL.
990 */
991static struct bd_holder *find_bd_holder(struct block_device *bdev,
992 struct bd_holder *bo)
993{
994 struct bd_holder *tmp;
995
996 list_for_each_entry(tmp, &bdev->bd_holder_list, list)
997 if (tmp->sdir == bo->sdir) {
998 tmp->count++;
999 return tmp;
1000 }
1001
1002 return NULL;
1003}
1004
1005/**
1006 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
1007 *
1008 * @bdev: block device to be bd_claimed
1009 * @bo: preallocated and initialized by alloc_bd_holder()
1010 *
df6c0cd9 1011 * Add @bo to @bdev->bd_holder_list, create symlinks.
641dc636 1012 *
1013 * Returns 0 if symlinks are created.
1014 * Returns -ve if something fails.
1015 */
1016static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
1017{
4e91672c 1018 int err;
1019
1020 if (!bo)
4d7dd8fd 1021 return -EINVAL;
641dc636 1022
641dc636 1023 if (!bd_holder_grab_dirs(bdev, bo))
4d7dd8fd 1024 return -EBUSY;
641dc636 1025
1026 err = add_symlink(bo->sdir, bo->sdev);
1027 if (err)
1028 return err;
1029
1030 err = add_symlink(bo->hdir, bo->hdev);
1031 if (err) {
1032 del_symlink(bo->sdir, bo->sdev);
1033 return err;
4d7dd8fd 1034 }
1035
1036 list_add_tail(&bo->list, &bdev->bd_holder_list);
1037 return 0;
1038}
1039
1040/**
1041 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
1042 *
1043 * @bdev: block device to be bd_claimed
1044 * @kobj: holder's kobject
1045 *
1046 * If there is matching entry with @kobj in @bdev->bd_holder_list
1047 * and no other bd_claim() from the same kobject,
1048 * remove the struct bd_holder from the list, delete symlinks for it.
1049 *
1050 * Returns a pointer to the struct bd_holder when it's removed from the list
1051 * and ready to be freed.
 1052 * Returns NULL if a matching claim isn't found or there is another bd_claim()
1053 * by the same kobject.
1054 */
1055static struct bd_holder *del_bd_holder(struct block_device *bdev,
1056 struct kobject *kobj)
1057{
1058 struct bd_holder *bo;
1059
1060 list_for_each_entry(bo, &bdev->bd_holder_list, list) {
1061 if (bo->sdir == kobj) {
1062 bo->count--;
1063 BUG_ON(bo->count < 0);
1064 if (!bo->count) {
1065 list_del(&bo->list);
1066 del_symlink(bo->sdir, bo->sdev);
1067 del_symlink(bo->hdir, bo->hdev);
1068 bd_holder_release_dirs(bo);
1069 return bo;
1070 }
1071 break;
1072 }
1073 }
1074
1075 return NULL;
1076}
1077
1078/**
1079 * bd_claim_by_kobject - bd_claim() with additional kobject signature
1080 *
1081 * @bdev: block device to be claimed
1082 * @holder: holder's signature
1083 * @kobj: holder's kobject
1084 *
1085 * Do bd_claim() and if it succeeds, create sysfs symlinks between
1086 * the bdev and the holder's kobject.
 1087 * Use bd_release_from_kobject() when releasing the claimed bdev.
1088 *
1089 * Returns 0 on success. (same as bd_claim())
1090 * Returns errno on failure.
1091 */
1092static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
1093 struct kobject *kobj)
1094{
4e91672c 1095 int err;
df6c0cd9 1096 struct bd_holder *bo, *found;
1097
1098 if (!kobj)
1099 return -EINVAL;
1100
1101 bo = alloc_bd_holder(kobj);
1102 if (!bo)
1103 return -ENOMEM;
1104
2e7b651d 1105 mutex_lock(&bdev->bd_mutex);
df6c0cd9 1106
1107 err = bd_claim(bdev, holder);
1108 if (err)
4210df28 1109 goto fail;
1110
1111 found = find_bd_holder(bdev, bo);
1112 if (found)
4210df28 1113 goto fail;
1114
1115 err = add_bd_holder(bdev, bo);
1116 if (err)
1117 bd_release(bdev);
1118 else
1119 bo = NULL;
1120fail:
b4cf1b72 1121 mutex_unlock(&bdev->bd_mutex);
4210df28 1122 free_bd_holder(bo);
4e91672c 1123 return err;
1124}
1125
1126/**
1127 * bd_release_from_kobject - bd_release() with additional kobject signature
1128 *
1129 * @bdev: block device to be released
1130 * @kobj: holder's kobject
1131 *
1132 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
1133 */
1134static void bd_release_from_kobject(struct block_device *bdev,
1135 struct kobject *kobj)
1136{
1137 if (!kobj)
1138 return;
1139
2e7b651d 1140 mutex_lock(&bdev->bd_mutex);
641dc636 1141 bd_release(bdev);
4210df28 1142 free_bd_holder(del_bd_holder(bdev, kobj));
b4cf1b72 1143 mutex_unlock(&bdev->bd_mutex);
1144}
1145
1146/**
1147 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
1148 *
1149 * @bdev: block device to be claimed
1150 * @holder: holder's signature
1151 * @disk: holder's gendisk
1152 *
1153 * Call bd_claim_by_kobject() with getting @disk->slave_dir.
1154 */
1155int bd_claim_by_disk(struct block_device *bdev, void *holder,
1156 struct gendisk *disk)
1157{
1158 return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
1159}
1160EXPORT_SYMBOL_GPL(bd_claim_by_disk);
1161
1162/**
1163 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
1164 *
1165 * @bdev: block device to be claimed
1166 * @disk: holder's gendisk
1167 *
1168 * Call bd_release_from_kobject() and put @disk->slave_dir.
1169 */
1170void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
1171{
1172 bd_release_from_kobject(bdev, disk->slave_dir);
1173 kobject_put(disk->slave_dir);
1174}
1175EXPORT_SYMBOL_GPL(bd_release_from_disk);
1176#endif
1177
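/*
 * Usage sketch (illustrative; component_bdev, my_private and stacked_disk are
 * stand-ins for the caller's own objects): a stacking driver such as dm or md
 * claims its component device and gets the "slaves"/"holders" symlinks in one
 * call:
 *
 *	err = bd_claim_by_disk(component_bdev, my_private, stacked_disk);
 *	if (err)
 *		goto fail;
 *	...
 *	bd_release_from_disk(component_bdev, stacked_disk);
 */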
1178/*
1179 * Tries to open block device by device number. Use it ONLY if you
1180 * really do not have anything better - i.e. when you are behind a
1181 * truly sucky interface and all you are given is a device number. _Never_
1182 * to be used for internal purposes. If you ever need it - reconsider
1183 * your API.
1184 */
aeb5d727 1185struct block_device *open_by_devnum(dev_t dev, fmode_t mode)
1186{
1187 struct block_device *bdev = bdget(dev);
1188 int err = -ENOMEM;
1da177e4 1189 if (bdev)
572c4892 1190 err = blkdev_get(bdev, mode);
1191 return err ? ERR_PTR(err) : bdev;
1192}
1193
1194EXPORT_SYMBOL(open_by_devnum);
1195
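/*
 * Usage sketch (illustrative): a caller that really is handed nothing but
 * major/minor numbers opens and later closes the device like this:
 *
 *	struct block_device *bdev = open_by_devnum(MKDEV(major, minor), FMODE_READ);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ);
 */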
1196/**
1197 * flush_disk - invalidates all buffer-cache entries on a disk
1198 *
1199 * @bdev: struct block device to be flushed
1200 *
1201 * Invalidates all buffer-cache entries on a disk. It should be called
1202 * when a disk has been changed -- either by a media change or online
1203 * resize.
1204 */
1205static void flush_disk(struct block_device *bdev)
1206{
1207 if (__invalidate_device(bdev)) {
1208 char name[BDEVNAME_SIZE] = "";
1209
1210 if (bdev->bd_disk)
1211 disk_name(bdev->bd_disk, 0, name);
1212 printk(KERN_WARNING "VFS: busy inodes on changed media or "
1213 "resized disk %s\n", name);
1214 }
1215
1216 if (!bdev->bd_disk)
1217 return;
1218 if (disk_partitionable(bdev->bd_disk))
1219 bdev->bd_invalidated = 1;
1220}
1221
c3279d14 1222/**
57d1b536 1223 * check_disk_size_change - checks for disk size change and adjusts bdev size.
1224 * @disk: struct gendisk to check
1225 * @bdev: struct bdev to adjust.
1226 *
1227 * This routine checks to see if the bdev size does not match the disk size
1228 * and adjusts it if it differs.
1229 */
1230void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
1231{
1232 loff_t disk_size, bdev_size;
1233
1234 disk_size = (loff_t)get_capacity(disk) << 9;
1235 bdev_size = i_size_read(bdev->bd_inode);
1236 if (disk_size != bdev_size) {
1237 char name[BDEVNAME_SIZE];
1238
1239 disk_name(disk, 0, name);
1240 printk(KERN_INFO
1241 "%s: detected capacity change from %lld to %lld\n",
1242 name, bdev_size, disk_size);
1243 i_size_write(bdev->bd_inode, disk_size);
608aeef1 1244 flush_disk(bdev);
1245 }
1246}
1247EXPORT_SYMBOL(check_disk_size_change);
1248
0c002c2f 1249/**
57d1b536 1250 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
1251 * @disk: struct gendisk to be revalidated
1252 *
1253 * This routine is a wrapper for lower-level driver's revalidate_disk
1254 * call-backs. It is used to do common pre and post operations needed
1255 * for all revalidate_disk operations.
1256 */
1257int revalidate_disk(struct gendisk *disk)
1258{
c3279d14 1259 struct block_device *bdev;
1260 int ret = 0;
1261
1262 if (disk->fops->revalidate_disk)
1263 ret = disk->fops->revalidate_disk(disk);
1264
1265 bdev = bdget_disk(disk, 0);
1266 if (!bdev)
1267 return ret;
1268
1269 mutex_lock(&bdev->bd_mutex);
1270 check_disk_size_change(disk, bdev);
1271 mutex_unlock(&bdev->bd_mutex);
1272 bdput(bdev);
1273 return ret;
1274}
1275EXPORT_SYMBOL(revalidate_disk);
1276
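/*
 * Usage sketch (illustrative; new_sectors is hypothetical): a driver that
 * learns its backing storage has grown updates the capacity and then lets the
 * generic code adjust the bdev inode via check_disk_size_change():
 *
 *	set_capacity(disk, new_sectors);
 *	revalidate_disk(disk);
 */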
1277/*
1278 * This routine checks whether a removable media has been changed,
1279 * and invalidates all buffer-cache-entries in that case. This
1280 * is a relatively slow routine, so we have to try to minimize using
1281 * it. Thus it is called only upon a 'mount' or 'open'. This
1282 * is the best way of combining speed and utility, I think.
1283 * People changing diskettes in the middle of an operation deserve
1284 * to lose :-)
1285 */
1286int check_disk_change(struct block_device *bdev)
1287{
1288 struct gendisk *disk = bdev->bd_disk;
83d5cde4 1289 const struct block_device_operations *bdops = disk->fops;
1290
1291 if (!bdops->media_changed)
1292 return 0;
1293 if (!bdops->media_changed(bdev->bd_disk))
1294 return 0;
1295
56ade44b 1296 flush_disk(bdev);
1297 if (bdops->revalidate_disk)
1298 bdops->revalidate_disk(bdev->bd_disk);
1299 return 1;
1300}
1301
1302EXPORT_SYMBOL(check_disk_change);
1303
1304void bd_set_size(struct block_device *bdev, loff_t size)
1305{
e1defc4f 1306 unsigned bsize = bdev_logical_block_size(bdev);
1307
1308 bdev->bd_inode->i_size = size;
1309 while (bsize < PAGE_CACHE_SIZE) {
1310 if (size & bsize)
1311 break;
1312 bsize <<= 1;
1313 }
1314 bdev->bd_block_size = bsize;
1315 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
1316}
1317EXPORT_SYMBOL(bd_set_size);
1318
9a1c3542 1319static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
37be4124 1320
1321/*
1322 * bd_mutex locking:
1323 *
1324 * mutex_lock(part->bd_mutex)
1325 * mutex_lock_nested(whole->bd_mutex, 1)
1326 */
1327
572c4892 1328static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1da177e4 1329{
1da177e4 1330 struct gendisk *disk;
7db9cfd3 1331 int ret;
cf771cb5 1332 int partno;
1333 int perm = 0;
1334
572c4892 1335 if (mode & FMODE_READ)
fe6e9c1f 1336 perm |= MAY_READ;
572c4892 1337 if (mode & FMODE_WRITE)
1338 perm |= MAY_WRITE;
1339 /*
1340 * hooks: /n/, see "layering violations".
1341 */
1342 ret = devcgroup_inode_permission(bdev->bd_inode, perm);
1343 if (ret != 0) {
1344 bdput(bdev);
7db9cfd3 1345 return ret;
82666020 1346 }
7db9cfd3 1347
1da177e4 1348 lock_kernel();
d3374825 1349 restart:
0762b8bd 1350
89f97496 1351 ret = -ENXIO;
cf771cb5 1352 disk = get_gendisk(bdev->bd_dev, &partno);
1353 if (!disk)
1354 goto out_unlock_kernel;
1da177e4 1355
6796bf54 1356 mutex_lock_nested(&bdev->bd_mutex, for_part);
1357 if (!bdev->bd_openers) {
1358 bdev->bd_disk = disk;
1359 bdev->bd_contains = bdev;
cf771cb5 1360 if (!partno) {
1da177e4 1361 struct backing_dev_info *bdi;
1362
1363 ret = -ENXIO;
1364 bdev->bd_part = disk_get_part(disk, partno);
1365 if (!bdev->bd_part)
1366 goto out_clear;
1367
1da177e4 1368 if (disk->fops->open) {
572c4892 1369 ret = disk->fops->open(bdev, mode);
1370 if (ret == -ERESTARTSYS) {
1371 /* Lost a race with 'disk' being
1372 * deleted, try again.
1373 * See md.c
1374 */
1375 disk_put_part(bdev->bd_part);
1376 bdev->bd_part = NULL;
1377 module_put(disk->fops->owner);
1378 put_disk(disk);
1379 bdev->bd_disk = NULL;
1380 mutex_unlock(&bdev->bd_mutex);
1381 goto restart;
1382 }
1da177e4 1383 if (ret)
0762b8bd 1384 goto out_clear;
1385 }
1386 if (!bdev->bd_openers) {
1387 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
1388 bdi = blk_get_backing_dev_info(bdev);
1389 if (bdi == NULL)
1390 bdi = &default_backing_dev_info;
1391 bdev->bd_inode->i_data.backing_dev_info = bdi;
1392 }
1393 if (bdev->bd_invalidated)
1394 rescan_partitions(disk, bdev);
1395 } else {
1396 struct block_device *whole;
1397 whole = bdget_disk(disk, 0);
1398 ret = -ENOMEM;
1399 if (!whole)
0762b8bd 1400 goto out_clear;
37be4124 1401 BUG_ON(for_part);
572c4892 1402 ret = __blkdev_get(whole, mode, 1);
1da177e4 1403 if (ret)
0762b8bd 1404 goto out_clear;
1da177e4 1405 bdev->bd_contains = whole;
1406 bdev->bd_inode->i_data.backing_dev_info =
1407 whole->bd_inode->i_data.backing_dev_info;
89f97496 1408 bdev->bd_part = disk_get_part(disk, partno);
e71bf0d0 1409 if (!(disk->flags & GENHD_FL_UP) ||
89f97496 1410 !bdev->bd_part || !bdev->bd_part->nr_sects) {
1da177e4 1411 ret = -ENXIO;
0762b8bd 1412 goto out_clear;
1da177e4 1413 }
89f97496 1414 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
1415 }
1416 } else {
0762b8bd 1417 module_put(disk->fops->owner);
960cc0f4 1418 put_disk(disk);
0762b8bd 1419 disk = NULL;
1420 if (bdev->bd_contains == bdev) {
1421 if (bdev->bd_disk->fops->open) {
572c4892 1422 ret = bdev->bd_disk->fops->open(bdev, mode);
1da177e4 1423 if (ret)
0762b8bd 1424 goto out_unlock_bdev;
1425 }
1426 if (bdev->bd_invalidated)
1427 rescan_partitions(bdev->bd_disk, bdev);
1428 }
1429 }
1430 bdev->bd_openers++;
1431 if (for_part)
1432 bdev->bd_part_count++;
c039e313 1433 mutex_unlock(&bdev->bd_mutex);
1434 unlock_kernel();
1435 return 0;
1436
0762b8bd 1437 out_clear:
89f97496 1438 disk_put_part(bdev->bd_part);
1da177e4 1439 bdev->bd_disk = NULL;
0762b8bd 1440 bdev->bd_part = NULL;
1441 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1442 if (bdev != bdev->bd_contains)
572c4892 1443 __blkdev_put(bdev->bd_contains, mode, 1);
1da177e4 1444 bdev->bd_contains = NULL;
0762b8bd 1445 out_unlock_bdev:
c039e313 1446 mutex_unlock(&bdev->bd_mutex);
0762b8bd 1447 out_unlock_kernel:
1da177e4 1448 unlock_kernel();
0762b8bd 1449
1450 if (disk)
1451 module_put(disk->fops->owner);
1452 put_disk(disk);
1453 bdput(bdev);
1454
1455 return ret;
1456}
1457
572c4892 1458int blkdev_get(struct block_device *bdev, fmode_t mode)
1da177e4 1459{
572c4892 1460 return __blkdev_get(bdev, mode, 0);
37be4124 1461}
1462EXPORT_SYMBOL(blkdev_get);
1463
1464static int blkdev_open(struct inode * inode, struct file * filp)
1465{
6b4517a7 1466 struct block_device *whole = NULL;
1467 struct block_device *bdev;
1468 int res;
1469
1470 /*
1471 * Preserve backwards compatibility and allow large file access
1472 * even if userspace doesn't ask for it explicitly. Some mkfs
1473 * binary needs it. We might want to drop this workaround
1474 * during an unstable branch.
1475 */
1476 filp->f_flags |= O_LARGEFILE;
1477
1478 if (filp->f_flags & O_NDELAY)
1479 filp->f_mode |= FMODE_NDELAY;
1480 if (filp->f_flags & O_EXCL)
1481 filp->f_mode |= FMODE_EXCL;
1482 if ((filp->f_flags & O_ACCMODE) == 3)
1483 filp->f_mode |= FMODE_WRITE_IOCTL;
1484
1da177e4 1485 bdev = bd_acquire(inode);
1486 if (bdev == NULL)
1487 return -ENOMEM;
1da177e4 1488
1489 if (filp->f_mode & FMODE_EXCL) {
1490 whole = bd_start_claiming(bdev, filp);
1491 if (IS_ERR(whole)) {
1492 bdput(bdev);
1493 return PTR_ERR(whole);
1494 }
1495 }
1496
1497 filp->f_mapping = bdev->bd_inode->i_mapping;
1498
1499 res = blkdev_get(bdev, filp->f_mode);
1da177e4 1500
1501 if (whole) {
1502 if (res == 0)
b0018361 1503 bd_finish_claiming(bdev, whole, filp);
1504 else
1505 bd_abort_claiming(whole, filp);
ebbefc01 1506 }
1da177e4 1507
1508 return res;
1509}
1510
9a1c3542 1511static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1512{
1513 int ret = 0;
2e7b651d 1514 struct gendisk *disk = bdev->bd_disk;
37be4124 1515 struct block_device *victim = NULL;
2e7b651d 1516
6796bf54 1517 mutex_lock_nested(&bdev->bd_mutex, for_part);
2e7b651d 1518 lock_kernel();
1519 if (for_part)
1520 bdev->bd_part_count--;
1521
1522 if (!--bdev->bd_openers) {
1523 sync_blockdev(bdev);
1524 kill_bdev(bdev);
1525 }
1526 if (bdev->bd_contains == bdev) {
1527 if (disk->fops->release)
9a1c3542 1528 ret = disk->fops->release(disk, mode);
1529 }
1530 if (!bdev->bd_openers) {
1531 struct module *owner = disk->fops->owner;
1532
1533 put_disk(disk);
1534 module_put(owner);
1535 disk_put_part(bdev->bd_part);
1536 bdev->bd_part = NULL;
1537 bdev->bd_disk = NULL;
1538 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1539 if (bdev != bdev->bd_contains)
1540 victim = bdev->bd_contains;
1541 bdev->bd_contains = NULL;
1542 }
1543 unlock_kernel();
1544 mutex_unlock(&bdev->bd_mutex);
1545 bdput(bdev);
37be4124 1546 if (victim)
9a1c3542 1547 __blkdev_put(victim, mode, 1);
1548 return ret;
1549}
1550
9a1c3542 1551int blkdev_put(struct block_device *bdev, fmode_t mode)
37be4124 1552{
9a1c3542 1553 return __blkdev_put(bdev, mode, 0);
37be4124 1554}
1555EXPORT_SYMBOL(blkdev_put);
1556
1557static int blkdev_close(struct inode * inode, struct file * filp)
1558{
1559 struct block_device *bdev = I_BDEV(filp->f_mapping->host);
1560 if (bdev->bd_holder == filp)
1561 bd_release(bdev);
9a1c3542 1562 return blkdev_put(bdev, filp->f_mode);
1563}
1564
bb93e3a5 1565static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1da177e4 1566{
1567 struct block_device *bdev = I_BDEV(file->f_mapping->host);
1568 fmode_t mode = file->f_mode;
1569
1570 /*
1571 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
 1572 * to update it before every ioctl.
1573 */
56b26add 1574 if (file->f_flags & O_NDELAY)
1575 mode |= FMODE_NDELAY;
1576 else
1577 mode &= ~FMODE_NDELAY;
1578
56b26add 1579 return blkdev_ioctl(bdev, mode, cmd, arg);
1580}
1581
1582/*
1583 * Write data to the block device. Only intended for the block device itself
1584 * and the raw driver which basically is a fake block device.
1585 *
1586 * Does not take i_mutex for the write and thus is not for general purpose
1587 * use.
1588 */
1589ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
1590 unsigned long nr_segs, loff_t pos)
1591{
1592 struct file *file = iocb->ki_filp;
1593 ssize_t ret;
1594
1595 BUG_ON(iocb->ki_pos != pos);
1596
1597 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
1598 if (ret > 0 || ret == -EIOCBQUEUED) {
1599 ssize_t err;
1600
1601 err = generic_write_sync(file, pos, ret);
1602 if (err < 0 && ret > 0)
1603 ret = err;
1604 }
1605 return ret;
1606}
1607EXPORT_SYMBOL_GPL(blkdev_aio_write);
1608
1609/*
1610 * Try to release a page associated with block device when the system
1611 * is under memory pressure.
1612 */
1613static int blkdev_releasepage(struct page *page, gfp_t wait)
1614{
1615 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
1616
1617 if (super && super->s_op->bdev_try_to_free_page)
1618 return super->s_op->bdev_try_to_free_page(super, page, wait);
1619
1620 return try_to_free_buffers(page);
1621}
1622
4c54ac62 1623static const struct address_space_operations def_blk_aops = {
1624 .readpage = blkdev_readpage,
1625 .writepage = blkdev_writepage,
1626 .sync_page = block_sync_page,
1627 .write_begin = blkdev_write_begin,
1628 .write_end = blkdev_write_end,
1da177e4 1629 .writepages = generic_writepages,
87d8fe1e 1630 .releasepage = blkdev_releasepage,
1631 .direct_IO = blkdev_direct_IO,
1632};
1633
4b6f5d20 1634const struct file_operations def_blk_fops = {
1635 .open = blkdev_open,
1636 .release = blkdev_close,
1637 .llseek = block_llseek,
1638 .read = do_sync_read,
1639 .write = do_sync_write,
1da177e4 1640 .aio_read = generic_file_aio_read,
eef99380 1641 .aio_write = blkdev_aio_write,
1da177e4 1642 .mmap = generic_file_mmap,
b1dd3b28 1643 .fsync = blkdev_fsync,
bb93e3a5 1644 .unlocked_ioctl = block_ioctl,
1645#ifdef CONFIG_COMPAT
1646 .compat_ioctl = compat_blkdev_ioctl,
1647#endif
1648 .splice_read = generic_file_splice_read,
1649 .splice_write = generic_file_splice_write,
1650};
1651
1652int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
1653{
1654 int res;
1655 mm_segment_t old_fs = get_fs();
1656 set_fs(KERNEL_DS);
56b26add 1657 res = blkdev_ioctl(bdev, 0, cmd, arg);
1658 set_fs(old_fs);
1659 return res;
1660}
1661
1662EXPORT_SYMBOL(ioctl_by_bdev);
1663
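/*
 * Usage sketch (illustrative): because of the set_fs(KERNEL_DS) dance above,
 * the ioctl argument may point into kernel memory, e.g. to read the device
 * size in bytes:
 *
 *	u64 bytes;
 *	int err = ioctl_by_bdev(bdev, BLKGETSIZE64, (unsigned long)&bytes);
 *
 * Prefer the in-kernel helpers (such as i_size_read() on bdev->bd_inode)
 * where they exist; this wrapper is for ioctls that have no such helper.
 */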
1664/**
1665 * lookup_bdev - lookup a struct block_device by name
94e2959e 1666 * @pathname: special file representing the block device
1da177e4 1667 *
57d1b536 1668 * Get a reference to the blockdevice at @pathname in the current
1669 * namespace if possible and return it. Return ERR_PTR(error)
1670 * otherwise.
1671 */
421748ec 1672struct block_device *lookup_bdev(const char *pathname)
1673{
1674 struct block_device *bdev;
1675 struct inode *inode;
421748ec 1676 struct path path;
1677 int error;
1678
421748ec 1679 if (!pathname || !*pathname)
1680 return ERR_PTR(-EINVAL);
1681
421748ec 1682 error = kern_path(pathname, LOOKUP_FOLLOW, &path);
1683 if (error)
1684 return ERR_PTR(error);
1685
421748ec 1686 inode = path.dentry->d_inode;
1687 error = -ENOTBLK;
1688 if (!S_ISBLK(inode->i_mode))
1689 goto fail;
1690 error = -EACCES;
421748ec 1691 if (path.mnt->mnt_flags & MNT_NODEV)
1692 goto fail;
1693 error = -ENOMEM;
1694 bdev = bd_acquire(inode);
1695 if (!bdev)
1696 goto fail;
1697out:
421748ec 1698 path_put(&path);
1699 return bdev;
1700fail:
1701 bdev = ERR_PTR(error);
1702 goto out;
1703}
d5686b44 1704EXPORT_SYMBOL(lookup_bdev);
1705
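/*
 * Usage sketch (illustrative): resolving a device path from inside the kernel
 * and dropping the reference again:
 *
 *	struct block_device *bdev = lookup_bdev("/dev/sda1");
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	bdput(bdev);
 *
 * lookup_bdev() only takes a reference on the bdev inode (via bd_acquire());
 * it does not open the device.
 */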
1706/**
30c40d2c 1707 * open_bdev_exclusive - open a block device by name and set it up for use
1708 *
1709 * @path: special file representing the block device
30c40d2c 1710 * @mode: FMODE_... combination to be used
1711 * @holder: owner for exclusion
1712 *
1713 * Open the blockdevice described by the special file at @path, claim it
1714 * for the @holder.
1715 */
30c40d2c 1716struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
1da177e4 1717{
1718 struct block_device *bdev, *whole;
1719 int error;
1720
1721 bdev = lookup_bdev(path);
1722 if (IS_ERR(bdev))
1723 return bdev;
1724
1725 whole = bd_start_claiming(bdev, holder);
1726 if (IS_ERR(whole)) {
1727 bdput(bdev);
1728 return whole;
1729 }
1730
572c4892 1731 error = blkdev_get(bdev, mode);
1da177e4 1732 if (error)
1733 goto out_abort_claiming;
1734
1da177e4 1735 error = -EACCES;
30c40d2c 1736 if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
6b4517a7 1737 goto out_blkdev_put;
1da177e4 1738
b0018361 1739 bd_finish_claiming(bdev, whole, holder);
1da177e4 1740 return bdev;
1741
1742out_blkdev_put:
9a1c3542 1743 blkdev_put(bdev, mode);
1744out_abort_claiming:
1745 bd_abort_claiming(whole, holder);
1da177e4
LT
1746 return ERR_PTR(error);
1747}
1748
30c40d2c 1749EXPORT_SYMBOL(open_bdev_exclusive);
1750
1751/**
30c40d2c 1752 * close_bdev_exclusive - close a blockdevice opened by open_bdev_exclusive()
1753 *
1754 * @bdev: blockdevice to close
30c40d2c 1755 * @mode: mode, must match that used to open.
1da177e4 1756 *
30c40d2c 1757 * This is the counterpart to open_bdev_exclusive().
1da177e4 1758 */
30c40d2c 1759void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
1760{
1761 bd_release(bdev);
30c40d2c 1762 blkdev_put(bdev, mode);
1763}
1764
30c40d2c 1765EXPORT_SYMBOL(close_bdev_exclusive);
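/*
 * Usage sketch (illustrative): this is the pattern the generic mount path
 * uses to hold a device exclusively for the lifetime of a superblock, with
 * the filesystem type as the holder cookie:
 *
 *	bdev = open_bdev_exclusive(dev_name, mode, fs_type);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	close_bdev_exclusive(bdev, mode);
 */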
1766
1767int __invalidate_device(struct block_device *bdev)
1768{
1769 struct super_block *sb = get_super(bdev);
1770 int res = 0;
1771
1772 if (sb) {
1773 /*
1774 * no need to lock the super, get_super holds the
1775 * read mutex so the filesystem cannot go away
1776 * under us (->put_super runs with the write lock
 1777 * held).
1778 */
1779 shrink_dcache_sb(sb);
1780 res = invalidate_inodes(sb);
1781 drop_super(sb);
1782 }
f98393a6 1783 invalidate_bdev(bdev);
1784 return res;
1785}
1786EXPORT_SYMBOL(__invalidate_device);