/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		WARN_ON_ONCE(write_inode_now(inode, true));
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(kill_bdev);

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();	/* make sure all lru add caches are flushed */
	invalidate_mapping_pages(mapping, 0, -1);
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
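
/*
 * Editorial example (a sketch, not part of the original file): a filesystem
 * typically calls sb_min_blocksize() early in its fill_super callback, then
 * sb_set_blocksize() once it knows its on-disk block size.  The function
 * name example_fill_super and the 4096-byte size are hypothetical.
 *
 *	static int example_fill_super(struct super_block *sb, void *data,
 *				      int silent)
 *	{
 *		if (!sb_min_blocksize(sb, 512))
 *			return -EINVAL;
 *		... read the on-disk superblock, e.g. with sb_bread() ...
 *		if (sb_set_blocksize(sb, 4096) != 4096)
 *			return -EINVAL;
 *		return 0;
 *	}
 */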

static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
				    blkdev_get_block, NULL, NULL,
				    DIO_SKIP_DIO_COUNT);
}

int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously.  It counts up in freeze_bdev() and
 * down in thaw_bdev(); when it reaches 0, thaw_bdev() actually unfreezes.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1) {
		/*
		 * We don't even need to grab a reference - the first call
		 * to freeze_bdev grabs an active reference and only the last
		 * thaw_bdev drops it.
		 */
		sb = get_super(bdev);
		drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}

	sb = get_active_super(bdev);
	if (!sb)
		goto out;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	if (error) {
		deactivate_super(sb);
		bdev->bd_fsfreeze_count--;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return ERR_PTR(error);
	}
	deactivate_super(sb);
 out:
	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return sb;	/* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error) {
		bdev->bd_fsfreeze_count++;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return error;
	}
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return 0;
}
EXPORT_SYMBOL(thaw_bdev);
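
/*
 * Editorial example (a sketch, not part of the original file): the intended
 * pairing of freeze_bdev() and thaw_bdev(), as used by snapshot code.
 * take_snapshot() is a hypothetical placeholder and error handling is
 * abbreviated.
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	take_snapshot(bdev);	... device is now in a consistent state ...
 *	thaw_bdev(bdev, sb);	... sb may be NULL if no fs was mounted ...
 */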

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	page_cache_release(page);

	return ret;
}

/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = file->f_mapping->host;
	loff_t retval;

	mutex_lock(&bd_inode->i_mutex);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	mutex_unlock(&bd_inode->i_mutex);
	return retval;
}

int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = filp->f_mapping->host;
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = filemap_write_and_wait_range(filp->f_mapping, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to start reading from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	if (!ops->rw_page)
		return -EOPNOTSUPP;
	return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
}
EXPORT_SYMBOL_GPL(bdev_read_page);

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	if (!ops->rw_page)
		return -EOPNOTSUPP;
	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
	if (result)
		end_page_writeback(page);
	else
		unlock_page(page);
	return result;
}
EXPORT_SYMBOL_GPL(bdev_write_page);
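
/*
 * Editorial example (a sketch, not part of the original file): how a
 * ->writepage implementation might try the rw_page fast path before falling
 * back to a bio-based write, honouring the locking contract documented
 * above.  fallback_writepage() is a hypothetical helper.
 *
 *	int ret = bdev_write_page(bdev, sector, page, wbc);
 *
 *	if (ret == -EOPNOTSUPP)
 *		return fallback_writepage(page, wbc);
 *	if (ret)
 *		unlock_page(page);	... failed submission leaves it locked ...
 *	return ret;
 */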

/**
 * bdev_direct_access() - Get the address for directly-accessible memory
 * @bdev: The device containing the memory
 * @sector: The offset within the device
 * @addr: Where to put the address of the memory
 * @pfn: The Page Frame Number for the memory
 * @size: The number of bytes requested
 *
 * If a block device is made up of directly addressable memory, this function
 * will tell the caller the PFN and the address of the memory.  The address
 * may be directly dereferenced within the kernel without the need to call
 * ioremap(), kmap() or similar.  The PFN is suitable for inserting into
 * page tables.
 *
 * Return: negative errno if an error occurs, otherwise the number of bytes
 * accessible at this address.
 */
long bdev_direct_access(struct block_device *bdev, sector_t sector,
			void **addr, unsigned long *pfn, long size)
{
	long avail;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (size < 0)
		return size;
	if (!ops->direct_access)
		return -EOPNOTSUPP;
	if ((sector + DIV_ROUND_UP(size, 512)) >
					part_nr_sects_read(bdev->bd_part))
		return -ERANGE;
	sector += get_start_sect(bdev);
	if (sector % (PAGE_SIZE / 512))
		return -EINVAL;
	avail = ops->direct_access(bdev, sector, addr, pfn, size);
	if (!avail)
		return -ERANGE;
	return min(avail, size);
}
EXPORT_SYMBOL_GPL(bdev_direct_access);
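
/*
 * Editorial example (a sketch, not part of the original file): a DAX-style
 * caller mapping a file block to a kernel-addressable pointer.  The
 * block-to-sector conversion is schematic, and any persistence guarantees
 * for the direct store are the caller's problem.
 *
 *	void *addr;
 *	unsigned long pfn;
 *	long avail;
 *
 *	avail = bdev_direct_access(bdev, block << (blkbits - 9),
 *				   &addr, &pfn, PAGE_SIZE);
 *	if (avail < 0)
 *		return avail;
 *	memcpy(addr, data, min(avail, (long)len));	... no bio involved ...
 */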

/*
 * pseudo-fs
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct bdev_inode *bdi = BDEV_I(inode);

	kmem_cache_free(bdev_cachep, bdi);
}

static void bdev_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, bdev_i_callback);
}

static void init_once(void *foo)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
	INIT_LIST_HEAD(&bdev->bd_inodes);
	INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	inode_init_once(&ei->vfs_inode);
	/* Initialize mutex for freeze. */
	mutex_init(&bdev->bd_fsfreeze_mutex);
}

static inline void __bd_forget(struct inode *inode)
{
	list_del_init(&inode->i_devices);
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
}

static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	struct list_head *p;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	spin_lock(&bdev_lock);
	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
		__bd_forget(list_entry(p, struct inode, i_devices));
	}
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static struct dentry *bd_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.mount		= bd_mount,
	.kill_sb	= kill_anon_super,
};

static struct super_block *blockdev_superblock __read_mostly;

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

/*
 * Most likely a _very_ bad one - but then it's hardly critical for a small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL;
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	ihold(bdev->bd_inode);
	return bdev;
}
EXPORT_SYMBOL(bdgrab);

long nr_blockdev_pages(void)
{
	struct block_device *bdev;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each_entry(bdev, &all_bdevs, bd_list) {
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);
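
/*
 * Editorial note (a sketch, not part of the original file): bdget()/bdput()
 * bracket a reference to the block_device's backing inode, as seen in
 * revalidate_disk() below.  The typical pattern is:
 *
 *	struct block_device *bdev = bdget(dev);
 *	if (!bdev)
 *		return -ENOMEM;
 *	... bdev->bd_inode is usable, but bdev->bd_disk is only valid
 *	    once the device has been opened with blkdev_get() ...
 *	bdput(bdev);
 */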

static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		ihold(bdev->bd_inode);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional reference to bd_inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			ihold(bdev->bd_inode);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
			list_add(&inode->i_devices, &bdev->bd_inodes);
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

int sb_is_blkdev_sb(struct super_block *sb)
{
	return sb == blockdev_superblock;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (!sb_is_blkdev_sb(inode->i_sb))
		bdev = inode->i_bdev;
	__bd_forget(inode);
	spin_unlock(&bdev_lock);

	if (bdev)
		iput(bdev->bd_inode);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (bdev->bd_contains == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - prepare to claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Prepare to claim @bdev.  This function fails if @bdev is already
 * claimed by another holder and waits if another claiming is in
 * progress.  This function doesn't actually claim.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab
 * it multiple times.
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
static int bd_prepare_to_claim(struct block_device *bdev,
			       struct block_device *whole, void *holder)
{
retry:
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder))
		return -EBUSY;

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		spin_lock(&bdev_lock);
		goto retry;
	}

	/* yay, all mine */
	return 0;
}

/**
 * bd_start_claiming - start claiming a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * @bdev is about to be opened exclusively.  Check @bdev can be opened
 * exclusively and mark that an exclusive open is in progress.  Each
 * successful call to this function must be matched with a call to
 * either bd_finish_claiming() or bd_abort_claiming() (which do not
 * fail).
 *
 * This function is used to gain exclusive access to the block device
 * without actually causing other exclusive open attempts to fail. It
 * should be used when the open sequence itself requires exclusive
 * access but may subsequently fail.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to the block device containing @bdev on success, ERR_PTR()
 * value on failure.
 */
static struct block_device *bd_start_claiming(struct block_device *bdev,
					      void *holder)
{
	struct gendisk *disk;
	struct block_device *whole;
	int partno, err;

	might_sleep();

	/*
	 * @bdev might not have been initialized properly yet, look up
	 * and grab the outer block device the hard way.
	 */
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		return ERR_PTR(-ENXIO);

	/*
	 * Normally, @bdev should equal what's returned from bdget_disk()
	 * if partno is 0; however, some drivers (floppy) use multiple
	 * bdev's for the same physical device and @bdev may be one of the
	 * aliases.  Keep @bdev if partno is 0.  This means claimer
	 * tracking is broken for those devices but it has always been that
	 * way.
	 */
	if (partno)
		whole = bdget_disk(disk, 0);
	else
		whole = bdgrab(bdev);

	module_put(disk->fops->owner);
	put_disk(disk);
	if (!whole)
		return ERR_PTR(-ENOMEM);

	/* prepare to claim, if successful, mark claiming in progress */
	spin_lock(&bdev_lock);

	err = bd_prepare_to_claim(bdev, whole, holder);
	if (err == 0) {
		whole->bd_claiming = holder;
		spin_unlock(&bdev_lock);
		return whole;
	} else {
		spin_unlock(&bdev_lock);
		bdput(whole);
		return ERR_PTR(err);
	}
}
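
/*
 * Editorial note (a sketch, abbreviated from blkdev_get() below, not part of
 * the original file): bd_start_claiming() only marks a claim in progress;
 * the claim is completed or abandoned under bdev_lock after the open itself
 * has succeeded or failed:
 *
 *	whole = bd_start_claiming(bdev, holder);
 *	res = __blkdev_get(bdev, mode, 0);
 *	spin_lock(&bdev_lock);
 *	if (!res) {
 *		whole->bd_holders++;		... claim succeeds ...
 *		whole->bd_holder = bd_may_claim;
 *	}
 *	whole->bd_claiming = NULL;		... tell waiters we're done ...
 *	wake_up_bit(&whole->bd_claiming, 0);
 *	spin_unlock(&bdev_lock);
 */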

#ifdef CONFIG_SYSFS
struct bd_holder_disk {
	struct list_head	list;
	struct gendisk		*disk;
	int			refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
						  struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)
			return holder;
	return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	sysfs_remove_link(from, kobject_name(to));
}

/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory.  Hold on to it.
	 */
	kobject_get(bdev->bd_part->holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);

/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_mutex);

	holder = bd_find_holder_disk(bdev, disk);

	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
		del_symlink(bdev->bd_part->holder_dir,
			    &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_part->holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}

	mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
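
/*
 * Editorial example (a sketch, not part of the original file): a stacking
 * driver such as dm pairs these calls around its use of a claimed component
 * device.  my_table and my_gendisk are hypothetical names.
 *
 *	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				 my_table);
 *	err = bd_link_disk_holder(bdev, my_gendisk);
 *	...
 *	bd_unlink_disk_holder(bdev, my_gendisk);
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */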
#endif

/**
 * flush_disk - invalidates all buffer-cache entries on a disk
 *
 * @bdev:      struct block device to be flushed
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Invalidates all buffer-cache entries on a disk. It should be called
 * when a disk has been changed -- either by a media change or online
 * resize.
 */
static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
	if (__invalidate_device(bdev, kill_dirty)) {
		char name[BDEVNAME_SIZE] = "";

		if (bdev->bd_disk)
			disk_name(bdev->bd_disk, 0, name);
		printk(KERN_WARNING "VFS: busy inodes on changed media or "
		       "resized disk %s\n", name);
	}

	if (!bdev->bd_disk)
		return;
	if (disk_part_scan_enabled(bdev->bd_disk))
		bdev->bd_invalidated = 1;
}

/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 *
 * This routine checks whether the bdev size matches the disk size and
 * adjusts @bdev if they differ.
 */
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
	loff_t disk_size, bdev_size;

	disk_size = (loff_t)get_capacity(disk) << 9;
	bdev_size = i_size_read(bdev->bd_inode);
	if (disk_size != bdev_size) {
		char name[BDEVNAME_SIZE];

		disk_name(disk, 0, name);
		printk(KERN_INFO
		       "%s: detected capacity change from %lld to %lld\n",
		       name, bdev_size, disk_size);
		i_size_write(bdev->bd_inode, disk_size);
		flush_disk(bdev, false);
	}
}
EXPORT_SYMBOL(check_disk_size_change);

/**
 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
 * @disk: struct gendisk to be revalidated
 *
 * This routine is a wrapper for lower-level driver's revalidate_disk
 * call-backs.  It is used to do common pre and post operations needed
 * for all revalidate_disk operations.
 */
int revalidate_disk(struct gendisk *disk)
{
	struct block_device *bdev;
	int ret = 0;

	if (disk->fops->revalidate_disk)
		ret = disk->fops->revalidate_disk(disk);

	bdev = bdget_disk(disk, 0);
	if (!bdev)
		return ret;

	mutex_lock(&bdev->bd_mutex);
	check_disk_size_change(disk, bdev);
	bdev->bd_invalidated = 0;
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
EXPORT_SYMBOL(revalidate_disk);

/*
 * This routine checks whether a removable media has been changed,
 * and invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	const struct block_device_operations *bdops = disk->fops;
	unsigned int events;

	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
				   DISK_EVENT_EJECT_REQUEST);
	if (!(events & DISK_EVENT_MEDIA_CHANGE))
		return 0;

	flush_disk(bdev, true);
	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	return 1;
}

EXPORT_SYMBOL(check_disk_change);

void bd_set_size(struct block_device *bdev, loff_t size)
{
	unsigned bsize = bdev_logical_block_size(bdev);

	mutex_lock(&bdev->bd_inode->i_mutex);
	i_size_write(bdev->bd_inode, size);
	mutex_unlock(&bdev->bd_inode->i_mutex);
	while (bsize < PAGE_CACHE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_block_size = bsize;
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
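
/*
 * Editorial worked example (not part of the original file): starting from
 * the logical block size, the loop above doubles bsize while it still
 * divides @size evenly, capped at PAGE_CACHE_SIZE -- i.e. it picks the
 * lowest set bit of @size at or above the logical block size.  For a
 * 512-byte-sector device of 1000448 bytes (977 * 1024), bd_block_size
 * becomes 1024; any size that is a multiple of PAGE_CACHE_SIZE yields
 * PAGE_CACHE_SIZE itself.
 */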

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */

static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk;
	struct module *owner;
	int ret;
	int partno;
	int perm = 0;

	if (mode & FMODE_READ)
		perm |= MAY_READ;
	if (mode & FMODE_WRITE)
		perm |= MAY_WRITE;
	/*
	 * hooks: /n/, see "layering violations".
	 */
	if (!for_part) {
		ret = devcgroup_inode_permission(bdev->bd_inode, perm);
		if (ret != 0) {
			bdput(bdev);
			return ret;
		}
	}

 restart:

	ret = -ENXIO;
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		goto out;
	owner = disk->fops->owner;

	disk_block_events(disk);
	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (!bdev->bd_openers) {
		bdev->bd_disk = disk;
		bdev->bd_queue = disk->queue;
		bdev->bd_contains = bdev;
		if (!partno) {
			ret = -ENXIO;
			bdev->bd_part = disk_get_part(disk, partno);
			if (!bdev->bd_part)
				goto out_clear;

			ret = 0;
			if (disk->fops->open) {
				ret = disk->fops->open(bdev, mode);
				if (ret == -ERESTARTSYS) {
					/* Lost a race with 'disk' being
					 * deleted, try again.
					 * See md.c
					 */
					disk_put_part(bdev->bd_part);
					bdev->bd_part = NULL;
					bdev->bd_disk = NULL;
					bdev->bd_queue = NULL;
					mutex_unlock(&bdev->bd_mutex);
					disk_unblock_events(disk);
					put_disk(disk);
					module_put(owner);
					goto restart;
				}
			}

			if (!ret)
				bd_set_size(bdev, (loff_t)get_capacity(disk) << 9);

			/*
			 * If the device is invalidated, rescan partition
			 * if open succeeded or failed with -ENOMEDIUM.
			 * The latter is necessary to prevent ghost
			 * partitions on a removed medium.
			 */
			if (bdev->bd_invalidated) {
				if (!ret)
					rescan_partitions(disk, bdev);
				else if (ret == -ENOMEDIUM)
					invalidate_partitions(disk, bdev);
			}
			if (ret)
				goto out_clear;
		} else {
			struct block_device *whole;
			whole = bdget_disk(disk, 0);
			ret = -ENOMEM;
			if (!whole)
				goto out_clear;
			BUG_ON(for_part);
			ret = __blkdev_get(whole, mode, 1);
			if (ret)
				goto out_clear;
			bdev->bd_contains = whole;
			bdev->bd_part = disk_get_part(disk, partno);
			if (!(disk->flags & GENHD_FL_UP) ||
			    !bdev->bd_part || !bdev->bd_part->nr_sects) {
				ret = -ENXIO;
				goto out_clear;
			}
			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
			/*
			 * If the partition is not aligned on a page
			 * boundary, we can't do dax I/O to it.
			 */
			if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) ||
			    (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
				bdev->bd_inode->i_flags &= ~S_DAX;
		}
	} else {
		if (bdev->bd_contains == bdev) {
			ret = 0;
			if (bdev->bd_disk->fops->open)
				ret = bdev->bd_disk->fops->open(bdev, mode);
			/* the same as first opener case, read comment there */
			if (bdev->bd_invalidated) {
				if (!ret)
					rescan_partitions(bdev->bd_disk, bdev);
				else if (ret == -ENOMEDIUM)
					invalidate_partitions(bdev->bd_disk, bdev);
			}
			if (ret)
				goto out_unlock_bdev;
		}
		/* only one opener holds refs to the module and disk */
		put_disk(disk);
		module_put(owner);
	}
	bdev->bd_openers++;
	if (for_part)
		bdev->bd_part_count++;
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
	return 0;

 out_clear:
	disk_put_part(bdev->bd_part);
	bdev->bd_disk = NULL;
	bdev->bd_part = NULL;
	bdev->bd_queue = NULL;
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, mode, 1);
	bdev->bd_contains = NULL;
 out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
	put_disk(disk);
	module_put(owner);
 out:
	bdput(bdev);

	return ret;
}

/**
 * blkdev_get - open a block device
 * @bdev: block_device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open @bdev with @mode.  If @mode includes %FMODE_EXCL, @bdev is
 * open with exclusive access.  Specifying %FMODE_EXCL with %NULL
 * @holder is invalid.  Exclusive opens may nest for the same @holder.
 *
 * On success, the reference count of @bdev is unchanged.  On failure,
 * @bdev is put.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
{
	struct block_device *whole = NULL;
	int res;

	WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);

	if ((mode & FMODE_EXCL) && holder) {
		whole = bd_start_claiming(bdev, holder);
		if (IS_ERR(whole)) {
			bdput(bdev);
			return PTR_ERR(whole);
		}
	}

	res = __blkdev_get(bdev, mode, 0);

	if (whole) {
		struct gendisk *disk = whole->bd_disk;

		/* finish claiming */
		mutex_lock(&bdev->bd_mutex);
		spin_lock(&bdev_lock);

		if (!res) {
			BUG_ON(!bd_may_claim(bdev, whole, holder));
			/*
			 * Note that for a whole device bd_holders
			 * will be incremented twice, and bd_holder
			 * will be set to bd_may_claim before being
			 * set to holder
			 */
			whole->bd_holders++;
			whole->bd_holder = bd_may_claim;
			bdev->bd_holders++;
			bdev->bd_holder = holder;
		}

		/* tell others that we're done */
		BUG_ON(whole->bd_claiming != holder);
		whole->bd_claiming = NULL;
		wake_up_bit(&whole->bd_claiming, 0);

		spin_unlock(&bdev_lock);

		/*
		 * Block event polling for write claims if requested.  Any
		 * write holder makes the write_holder state stick until
		 * all are released.  This is good enough and tracking
		 * individual writeable reference is too fragile given the
		 * way @mode is used in blkdev_get/put().
		 */
		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			disk_block_events(disk);
		}

		mutex_unlock(&bdev->bd_mutex);
		bdput(whole);
	}

	return res;
}
EXPORT_SYMBOL(blkdev_get);

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the blockdevice described by the device file at @path.  @mode
 * and @holder are identical to blkdev_get().
 *
 * On success, the returned block_device has reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
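
/*
 * Editorial example (a sketch, not part of the original file): a typical
 * exclusive open as done at mount time, where the filesystem type is
 * commonly used as @holder so re-mounts by the same fs nest while other
 * claimants get -EBUSY.  example_fs_type is hypothetical.
 *
 *	bdev = blkdev_get_by_path("/dev/sda1",
 *				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				  &example_fs_type);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */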

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the blockdevice described by device number @dev.  @mode and
 * @holder are identical to blkdev_get().
 *
 * Use it ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a
 * device number.  _Never_ to be used for internal purposes.  If you
 * ever need it - reconsider your API.
 *
 * On success, the returned block_device has reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = bdget(dev);
	if (!bdev)
		return ERR_PTR(-ENOMEM);

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_dev);

static int blkdev_open(struct inode * inode, struct file * filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binary needs it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = bd_acquire(inode);
	if (bdev == NULL)
		return -ENOMEM;

	filp->f_mapping = bdev->bd_inode->i_mapping;

	return blkdev_get(bdev, filp->f_mode, filp);
}

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		WARN_ON_ONCE(bdev->bd_holders);
		sync_blockdev(bdev);
		kill_bdev(bdev);
		/*
		 * ->release can cause the queue to disappear, so flush all
		 * dirty data before.
		 */
		bdev_write_inode(bdev->bd_inode);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			disk->fops->release(disk, mode);
	}
	if (!bdev->bd_openers) {
		struct module *owner = disk->fops->owner;

		disk_put_part(bdev->bd_part);
		bdev->bd_part = NULL;
		bdev->bd_disk = NULL;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;

		put_disk(disk);
		module_put(owner);
	}
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	if (victim)
		__blkdev_put(victim, mode, 1);
}

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	mutex_lock(&bdev->bd_mutex);

	if (mode & FMODE_EXCL) {
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  bd_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);

		/* bd_contains might point to self, check in a separate step */
		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!bdev->bd_contains->bd_holders)
			bdev->bd_contains->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock event polling if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(bdev->bd_disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);

	mutex_unlock(&bdev->bd_mutex);

	__blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode * inode, struct file * filp)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	blkdev_put(bdev, filp->f_mode);
	return 0;
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = file->f_mapping->host;
	loff_t size = i_size_read(bd_inode);
	struct blk_plug plug;
	ssize_t ret;

	if (bdev_read_only(I_BDEV(bd_inode)))
		return -EPERM;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	iov_iter_truncate(from, size - iocb->ki_pos);

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0) {
		ssize_t err;
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_write_iter);

ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = file->f_mapping->host;
	loff_t size = i_size_read(bd_inode);
	loff_t pos = iocb->ki_pos;

	if (pos >= size)
		return 0;

	size -= pos;
	iov_iter_truncate(to, size);
	return generic_file_read_iter(iocb, to);
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);

/*
 * Try to release a page associated with block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);

	return try_to_free_buffers(page);
}

static const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.readpages	= blkdev_readpages,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= generic_writepages,
	.releasepage	= blkdev_releasepage,
	.direct_IO	= blkdev_direct_IO,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
	int res;
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	res = blkdev_ioctl(bdev, 0, cmd, arg);
	set_fs(old_fs);
	return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);

/**
 * lookup_bdev  - lookup a struct block_device by name
 * @pathname:	special file representing the block device
 *
 * Get a reference to the blockdevice at @pathname in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *pathname)
{
	struct block_device *bdev;
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return ERR_PTR(-EINVAL);

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return ERR_PTR(error);

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (path.mnt->mnt_flags & MNT_NODEV)
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_put(&path);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}
EXPORT_SYMBOL(lookup_bdev);

int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		func(I_BDEV(inode), arg);

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}
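
/*
 * Editorial example (a sketch, not part of the original file): sync(2)-style
 * code walks every open block device via iterate_bdevs(); the callback runs
 * without inode_sb_list_lock held, so it may block.  example_sync_one is a
 * hypothetical callback.
 *
 *	static void example_sync_one(struct block_device *bdev, void *arg)
 *	{
 *		int wait = *(int *)arg;
 *
 *		__sync_blockdev(bdev, wait);
 *	}
 *
 *	int wait = 1;
 *	iterate_bdevs(example_sync_one, &wait);
 */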