/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *     Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "vme_user.h"

static const char driver_name[] = "vme_user";

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 *		It is expected that all VME bus drivers will use the
 *		same interface.  For interface documentation see
 *		http://www.vmelinux.org/.
 *
 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
 * We'll run with this for now as far as possible, however it probably makes
 * sense to get rid of the old mappings and just do everything dynamically.
 *
 * So for now we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/. A driver providing a saner interface can be
 * written as an alternative later.
 *
 * The vmelinux.org driver never supported slave images; the devices reserved
 * for slaves were repurposed to support all 8 master images on the UniverseII!
 * We shall support 4 masters and 4 slaves with this driver.
 */
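/*
 * For illustration, a minimal hypothetical userspace sketch (not part of
 * this driver) using the nodes above: master images are accessed through
 * /dev/bus/vme/m*, slave buffers through /dev/bus/vme/s*, and bus-wide
 * operations such as interrupt generation through /dev/bus/vme/ctl.
 *
 *	int master_fd = open("/dev/bus/vme/m0", O_RDWR);
 *	int ctl_fd = open("/dev/bus/vme/ctl", O_RDWR);
 */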
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;		/* Number of current users */
	int mmap_count;		/* Number of current mmap's */
};
static struct image_desc image[VME_DEVS];

struct driver_stats {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long irqs;
	unsigned long berrs;
	unsigned long dmaerrors;
	unsigned long timeouts;
	unsigned long external;
};
static struct driver_stats statistics;

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */

static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};

static int vme_user_open(struct inode *, struct file *);
static int vme_user_release(struct inode *, struct file *);
static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vme_user_write(struct file *, const char __user *, size_t,
	loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
static int vme_user_mmap(struct file *file, struct vm_area_struct *vma);

static void vme_user_vm_open(struct vm_area_struct *vma);
static void vme_user_vm_close(struct vm_area_struct *vma);

static int vme_user_match(struct vme_dev *);
static int vme_user_probe(struct vme_dev *);
static int vme_user_remove(struct vme_dev *);

static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
	.mmap = vme_user_mmap,
};

struct vme_user_vma_priv {
	unsigned int minor;
	atomic_t refcnt;
};

static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};

/*
 * Reset all the statistic counters
 */
static void reset_counters(void)
{
	statistics.reads = 0;
	statistics.writes = 0;
	statistics.ioctls = 0;
	statistics.irqs = 0;
	statistics.berrs = 0;
	statistics.dmaerrors = 0;
	statistics.timeouts = 0;
	statistics.external = 0;
}

static int vme_user_open(struct inode *inode, struct file *file)
{
	int err;
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	/* Only allow the device to be opened if a resource is allocated */
	if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
		pr_err("No resources allocated for device\n");
		err = -EINVAL;
		goto err_res;
	}

	/* Increment user count */
	image[minor].users++;

	mutex_unlock(&image[minor].mutex);

	return 0;

err_res:
	mutex_unlock(&image[minor].mutex);

	return err;
}

static int vme_user_release(struct inode *inode, struct file *file)
{
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);

	/* Decrement user count */
	image[minor].users--;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go VME -> buffer -> user space. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly into the user space buffers.
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
	loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
			image[minor].kern_buf, count, *ppos);
		if (copied < 0)
			return copied;

		retval = __copy_to_user(buf, image[minor].kern_buf,
			(unsigned long)copied);
		if (retval != 0) {
			copied = copied - retval;
			pr_info("User copy failed\n");
			return -EINVAL;
		}

	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go user space -> buffer -> VME. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly from the user space buffers out to VME.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
			(unsigned long)count);
		if (retval != 0)
			copied = count - retval;
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
			image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		retval = count - retval;
		pr_warn("Partial copy to userspace\n");
	} else {
		retval = count;
	}

	/* Return number of bytes successfully read */
	return retval;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = count - retval;
		pr_warn("Partial copy from userspace\n");
	} else {
		retval = count;
	}

	/* Return number of bytes successfully written */
	return retval;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);
	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	size_t image_size;
	loff_t res;

	if (minor == CONTROL_MINOR)
		return -EINVAL;

	mutex_lock(&image[minor].mutex);
	image_size = vme_get_size(image[minor].resource);
	res = fixed_size_llseek(file, off, whence, image_size);
	mutex_unlock(&image[minor].mutex);

	return res;
}
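
/*
 * A minimal sketch of the resulting userspace read path (hypothetical
 * userspace code, for illustration only): seek within the window and read.
 * Reads that would run past the end of the image are shortened, and a read
 * starting beyond the image returns 0.
 *
 *	off_t pos = lseek(master_fd, 0x1000, SEEK_SET);
 *	ssize_t n = read(master_fd, buf, sizeof(buf));
 *	// n may be less than sizeof(buf) near the end of the window
 */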

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level. However, we should use the definitions that have
 * already been established.
 */
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	statistics.ioctls++;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(struct vme_irq_id));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			return vme_irq_generate(vme_user_bridge,
						irq_req.level,
						irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&master.enable, &master.vme_addr,
				&master.size, &master.aspace,
				&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
				sizeof(struct vme_master));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&slave.enable, &slave.vme_addr,
				&slave.size, &pci_addr, &slave.aspace,
				&slave.cycle);

			copied = copy_to_user(argp, &slave,
				sizeof(struct vme_slave));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);
		}
		break;
	}

	return -EINVAL;
}
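
/*
 * A minimal sketch of how these ioctls might be driven from userspace
 * (hypothetical userspace code, assuming only the structures declared in
 * vme_user.h): configure a master window on an m* node, then raise an
 * interrupt via the control node.
 *
 *	struct vme_master master = {
 *		.enable = 1,
 *		.vme_addr = 0x10000,
 *		.size = 0x10000,
 *		.aspace = VME_A32,
 *		.cycle = VME_SCT,
 *		.dwidth = VME_D32,
 *	};
 *	ioctl(master_fd, VME_SET_MASTER, &master);
 *
 *	struct vme_irq_id irq = { .level = 3, .statid = 0x20 };
 *	ioctl(ctl_fd, VME_IRQ_GEN, &irq);
 */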

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct inode *inode = file_inode(file);
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	ret = vme_user_ioctl(inode, file, cmd, arg);
	mutex_unlock(&image[minor].mutex);

	return ret;
}

static void vme_user_vm_open(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;

	atomic_inc(&vma_priv->refcnt);
}

static void vme_user_vm_close(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
	unsigned int minor = vma_priv->minor;

	if (!atomic_dec_and_test(&vma_priv->refcnt))
		return;

	mutex_lock(&image[minor].mutex);
	image[minor].mmap_count--;
	mutex_unlock(&image[minor].mutex);

	kfree(vma_priv);
}

static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
	int err;
	struct vme_user_vma_priv *vma_priv;

	mutex_lock(&image[minor].mutex);

	err = vme_master_mmap(image[minor].resource, vma);
	if (err) {
		mutex_unlock(&image[minor].mutex);
		return err;
	}

	vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
	if (vma_priv == NULL) {
		mutex_unlock(&image[minor].mutex);
		return -ENOMEM;
	}

	vma_priv->minor = minor;
	atomic_set(&vma_priv->refcnt, 1);
	vma->vm_ops = &vme_user_vm_ops;
	vma->vm_private_data = vma_priv;

	image[minor].mmap_count++;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);

	if (type[minor] == MASTER_MINOR)
		return vme_user_master_mmap(minor, vma);

	return -ENODEV;
}
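
/*
 * A minimal sketch of mapping a master window from userspace (hypothetical
 * userspace code): only master images support mmap(), and while a mapping
 * exists VME_SET_MASTER on that window is refused with -EPERM.
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       master_fd, 0);
 *	if (p != MAP_FAILED) {
 *		// direct access to the VME window
 *		munmap(p, len);
 *	}
 */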

/*
 * Free a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		pr_debug("UniverseII: Releasing buffer at %pad\n",
			 &image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource, image[num].size_buf,
			image[num].kern_buf, image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

#ifdef VME_DEBUG
	} else {
		pr_debug("UniverseII: Buffer not allocated\n");
#endif
	}
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};

static int __init vme_user_init(void)
{
	int retval = 0;

	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval != 0)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

static int vme_user_match(struct vme_dev *vdev)
{
	int i;

	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);

	for (i = 0; i < bus_num; i++)
		if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
			return 1;

	return 0;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Initialise statistics counters */
	reset_counters();

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
		driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting major number %d for driver\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		dev_warn(&vdev->dev, "cdev_add failed\n");
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128 kB each) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &image[i].pci_buf);
		if (image[i].kern_buf == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			err = -ENOMEM;
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
					MKDEV(VME_MAJOR, i), NULL, name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and
	 * buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static int vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}

MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);
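
/*
 * For example, an illustrative module load line (assuming the first VME
 * bridge is registered as bus 0):
 *
 *	modprobe vme_user bus=0
 */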

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);