/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
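/*
 * An illustrative user-space sketch (not part of this driver), assuming
 * the usual /dev/raw/rawctl control node created below and the
 * struct raw_config_request / RAW_SETBIND definitions from the
 * <linux/raw.h> uapi header:
 *
 *	struct raw_config_request rq = {
 *		.raw_minor   = 1,	// bind /dev/raw/raw1 ...
 *		.block_major = 8,	// ... to block device (8, 1),
 *		.block_minor = 1,	//     e.g. /dev/sda1
 *	};
 *	int fd = open("/dev/raw/rawctl", O_RDWR);
 *	ioctl(fd, RAW_SETBIND, &rq);	// zero major/minor unbinds again
 *	close(fd);
 */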

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
static struct raw_device_data *raw_devices;
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

static int max_raw_minors = MAX_RAW_MINORS;

module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");

/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	mutex_lock(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;
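	/*
	 * Take a reference on the bdev inode and claim the block device
	 * exclusively (holder == raw_open), so other exclusive users such
	 * as filesystems cannot grab it while it is open through raw.
	 */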
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
	if (err)
		goto out;
	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
	if (err)
		goto out1;
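	/*
	 * Raw I/O always goes through the block device's mapping with
	 * O_DIRECT forced on, which is what provides the unbuffered
	 * semantics.  The first opener also repoints the character
	 * inode's i_mapping at the bdev mapping; raw_release() switches
	 * it back on the last close.
	 */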
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		file_inode(filp)->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	return 0;

out1:
	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
out:
	mutex_unlock(&raw_mutex);
	return err;
}

/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0)
		/* Here inode->i_mapping == bdev->bd_inode->i_mapping */
		inode->i_mapping = &inode->i_data;
	mutex_unlock(&raw_mutex);

	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
	return 0;
}

/*
 * Forward ioctls to the underlying block device.
 */
static long
raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
{
	struct block_device *bdev = filp->private_data;
	return blkdev_ioctl(bdev, 0, command, arg);
}

static int bind_set(int number, u64 major, u64 minor)
{
	dev_t dev = MKDEV(major, minor);
	struct raw_device_data *rawdev;
	int err = 0;

	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;

	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -EINVAL;

	rawdev = &raw_devices[number];

	/*
	 * This is like making block devices, so demand the
	 * same capability
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * For now, we don't need to check that the underlying
	 * block device is present or not: we can do that when
	 * the raw device is opened.  Just check that the
	 * major/minor numbers make sense.
	 */

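	/*
	 * dev == 0 requests an unbind; a zero major with a nonzero minor
	 * is not a valid block device.
	 */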
	if (MAJOR(dev) == 0 && dev != 0)
		return -EINVAL;

	mutex_lock(&raw_mutex);
	if (rawdev->inuse) {
		mutex_unlock(&raw_mutex);
		return -EBUSY;
	}
	if (rawdev->binding) {
		bdput(rawdev->binding);
		module_put(THIS_MODULE);
	}
	if (!dev) {
		/* unbind */
		rawdev->binding = NULL;
		device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
	} else {
		rawdev->binding = bdget(dev);
		if (rawdev->binding == NULL) {
			err = -ENOMEM;
		} else {
			dev_t raw = MKDEV(RAW_MAJOR, number);
			__module_get(THIS_MODULE);
			device_destroy(raw_class, raw);
			device_create(raw_class, NULL, raw, NULL,
				      "raw%d", number);
		}
	}
	mutex_unlock(&raw_mutex);
	return err;
}

static int bind_get(int number, dev_t *dev)
{
	struct raw_device_data *rawdev;
	struct block_device *bdev;

	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;

	rawdev = &raw_devices[number];

	mutex_lock(&raw_mutex);
	bdev = rawdev->binding;
	*dev = bdev ? bdev->bd_dev : 0;
	mutex_unlock(&raw_mutex);
	return 0;
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static long raw_ctl_ioctl(struct file *filp, unsigned int command,
			  unsigned long arg)
{
	struct raw_config_request rq;
	dev_t dev;
	int err;

	switch (command) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}

#ifdef CONFIG_COMPAT
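/*
 * On at least some 32-bit ABIs (e.g. i386) 64-bit fields are only 4-byte
 * aligned, so raw_config_request is laid out differently than in a 64-bit
 * kernel; a matching structure and handler are needed for compat ioctls.
 */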
struct raw32_config_request {
	compat_int_t	raw_minor;
	compat_u64	block_major;
	compat_u64	block_minor;
};

static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct raw32_config_request __user *user_req = compat_ptr(arg);
	struct raw32_config_request rq;
	dev_t dev;
	int err = 0;

	switch (cmd) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user(user_req, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}
#endif

static const struct file_operations raw_fops = {
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.fsync		= blkdev_fsync,
	.open		= raw_open,
	.release	= raw_release,
	.unlocked_ioctl = raw_ioctl,
	.llseek		= default_llseek,
	.owner		= THIS_MODULE,
};

static const struct file_operations raw_ctl_fops = {
	.unlocked_ioctl = raw_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= raw_ctl_compat_ioctl,
#endif
	.open		= raw_open,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct cdev raw_cdev;

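/* Place the device nodes under /dev/raw/ (rawctl, raw1, ...). */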
static char *raw_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}

static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;

	if (max_raw_minors < 1 || max_raw_minors > 65536) {
		printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
			" between 1 and 65536), using %d\n", MAX_RAW_MINORS);
		max_raw_minors = MAX_RAW_MINORS;
	}

	raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors);
	if (!raw_devices) {
		printk(KERN_ERR "Not enough memory for raw device structures\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = register_chrdev_region(dev, max_raw_minors, "raw");
	if (ret)
		goto error;

	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
	if (ret)
		goto error_region;

	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}
	raw_class->devnode = raw_devnode;
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");

	return 0;

error_region:
	unregister_chrdev_region(dev, max_raw_minors);
error:
	vfree(raw_devices);
	return ret;
}

static void __exit raw_exit(void)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
	class_destroy(raw_class);
	cdev_del(&raw_cdev);
	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
}

module_init(raw_init);
module_exit(raw_exit);
MODULE_LICENSE("GPL");