This source file includes following definitions.
- dead_write_lines
- dead_read_lines
- dead_frob_lines
- dead_onearg
- dead_initstate
- dead_state
- dead_write
- dead_read
- is_parport
- parport_probe
- parport_bus_init
- parport_bus_exit
- driver_check
- attach_driver_chain
- driver_detach
- detach_driver_chain
- get_lowlevel_driver
- port_check
- port_detect
- __parport_register_driver
- port_detach
- parport_unregister_driver
- free_port
- parport_get_port
- parport_del_port
- parport_put_port
- parport_register_port
- parport_announce_port
- parport_remove_port
- parport_register_device
- free_pardevice
- parport_register_dev_model
- parport_unregister_device
- parport_find_number
- parport_find_base
- parport_claim
- parport_claim_or_block
- parport_release
- parport_irq_handler
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 #undef PARPORT_DEBUG_SHARING
19
20 #include <linux/module.h>
21 #include <linux/string.h>
22 #include <linux/threads.h>
23 #include <linux/parport.h>
24 #include <linux/delay.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/ioport.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/sched/signal.h>
31 #include <linux/kmod.h>
32 #include <linux/device.h>
33
34 #include <linux/spinlock.h>
35 #include <linux/mutex.h>
36 #include <asm/irq.h>
37
38 #undef PARPORT_PARANOID
39
40 #define PARPORT_DEFAULT_TIMESLICE (HZ/5)
41
42 unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
43 int parport_default_spintime = DEFAULT_SPIN_TIME;
44
45 static LIST_HEAD(portlist);
46 static DEFINE_SPINLOCK(parportlist_lock);
47
48
49 static LIST_HEAD(all_ports);
50 static DEFINE_SPINLOCK(full_list_lock);
51
52 static LIST_HEAD(drivers);
53
54 static DEFINE_MUTEX(registration_lock);
55
56
57 static void dead_write_lines(struct parport *p, unsigned char b){}
58 static unsigned char dead_read_lines(struct parport *p) { return 0; }
59 static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
60 unsigned char c) { return 0; }
61 static void dead_onearg(struct parport *p){}
62 static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
63 static void dead_state(struct parport *p, struct parport_state *s) { }
64 static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
65 { return 0; }
66 static size_t dead_read(struct parport *p, void *b, size_t l, int f)
67 { return 0; }
68 static struct parport_operations dead_ops = {
69 .write_data = dead_write_lines,
70 .read_data = dead_read_lines,
71
72 .write_control = dead_write_lines,
73 .read_control = dead_read_lines,
74 .frob_control = dead_frob_lines,
75
76 .read_status = dead_read_lines,
77
78 .enable_irq = dead_onearg,
79 .disable_irq = dead_onearg,
80
81 .data_forward = dead_onearg,
82 .data_reverse = dead_onearg,
83
84 .init_state = dead_initstate,
85 .save_state = dead_state,
86 .restore_state = dead_state,
87
88 .epp_write_data = dead_write,
89 .epp_read_data = dead_read,
90 .epp_write_addr = dead_write,
91 .epp_read_addr = dead_read,
92
93 .ecp_write_data = dead_write,
94 .ecp_read_data = dead_read,
95 .ecp_write_addr = dead_write,
96
97 .compat_write_data = dead_write,
98 .nibble_read_data = dead_read,
99 .byte_read_data = dead_read,
100
101 .owner = NULL,
102 };
103
/* Device type used for the port devices themselves on the parport bus;
 * is_parport() compares against this to tell ports apart from the
 * pardevice children that also live on the bus. */
static struct device_type parport_device_type = {
	.name = "parport",
};
107
108 static int is_parport(struct device *dev)
109 {
110 return dev->type == &parport_device_type;
111 }
112
113 static int parport_probe(struct device *dev)
114 {
115 struct parport_driver *drv;
116
117 if (is_parport(dev))
118 return -ENODEV;
119
120 drv = to_parport_driver(dev->driver);
121 if (!drv->probe) {
122
123 struct pardevice *par_dev = to_pardevice(dev);
124
125 if (strcmp(par_dev->name, drv->name))
126 return -ENODEV;
127 return 0;
128 }
129
130 return drv->probe(to_pardevice(dev));
131 }
132
/* The parport bus: both ports and the devices hanging off them are
 * registered on it; parport_probe() dispatches device probing. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
137
/* Register the parport bus with the driver core; called from module
 * init.  Returns 0 on success or a negative errno from bus_register(). */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}
142
/* Tear down the parport bus; counterpart of parport_bus_init(). */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
147
148
149
150
151
152
153
154 static int driver_check(struct device_driver *dev_drv, void *_port)
155 {
156 struct parport *port = _port;
157 struct parport_driver *drv = to_parport_driver(dev_drv);
158
159 if (drv->match_port)
160 drv->match_port(port);
161 return 0;
162 }
163
164
/*
 * Notify all registered drivers that a new port has arrived.
 *
 * NOTE(review): callers appear to hold registration_lock (see
 * parport_announce_port) — confirm before adding new call sites.
 */
static void attach_driver_chain(struct parport *port)
{
	/* Legacy drivers on the private list get attach() directly. */
	struct parport_driver *drv;

	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * Device-model drivers are walked via the bus; driver_check()
	 * calls each driver's match_port() hook for this port.
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
180
181 static int driver_detach(struct device_driver *_drv, void *_port)
182 {
183 struct parport *port = _port;
184 struct parport_driver *drv = to_parport_driver(_drv);
185
186 if (drv->detach)
187 drv->detach(port);
188 return 0;
189 }
190
191
/*
 * Notify all registered drivers that @port is going away.
 *
 * NOTE(review): legacy drivers' detach is called unconditionally here
 * (no NULL check, unlike driver_detach) — presumably legacy drivers
 * must supply detach; verify against registration requirements.
 * Callers appear to hold registration_lock (see parport_remove_port).
 */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;

	/* Legacy drivers on the private list. */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * Device-model drivers are walked via the bus; driver_detach()
	 * calls each driver's detach() hook for this port.
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
206
207
/* Ask kmod to load the lowlevel hardware driver; as a side effect of
 * loading, it registers any ports it finds. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no module actually named "parport_lowlevel"; it is
	 * expected to be a module alias set up in modprobe configuration.
	 */
	request_module("parport_lowlevel");
}
216
217
218
219
220
221
222
223 static int port_check(struct device *dev, void *dev_drv)
224 {
225 struct parport_driver *drv = dev_drv;
226
227
228 if (is_parport(dev))
229 drv->match_port(to_parport_dev(dev));
230 return 0;
231 }
232
233
234
235
236
237
/*
 * Iterator for bus_for_each_dev(): report whether any port device
 * exists on the bus.  Returning 1 stops the iteration at the first
 * port found; @dev_drv is unused.
 */
static int port_detect(struct device *dev, void *dev_drv)
{
	return is_parport(dev) ? 1 : 0;
}
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278 int __parport_register_driver(struct parport_driver *drv, struct module *owner,
279 const char *mod_name)
280 {
281 if (list_empty(&portlist))
282 get_lowlevel_driver();
283
284 if (drv->devmodel) {
285
286 int ret;
287
288
289 drv->driver.name = drv->name;
290 drv->driver.bus = &parport_bus_type;
291 drv->driver.owner = owner;
292 drv->driver.mod_name = mod_name;
293 ret = driver_register(&drv->driver);
294 if (ret)
295 return ret;
296
297
298
299
300
301 ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
302 port_detect);
303 if (!ret)
304 get_lowlevel_driver();
305
306 mutex_lock(®istration_lock);
307 if (drv->match_port)
308 bus_for_each_dev(&parport_bus_type, NULL, drv,
309 port_check);
310 mutex_unlock(®istration_lock);
311 } else {
312 struct parport *port;
313
314 drv->devmodel = false;
315
316 mutex_lock(®istration_lock);
317 list_for_each_entry(port, &portlist, list)
318 drv->attach(port);
319 list_add(&drv->list, &drivers);
320 mutex_unlock(®istration_lock);
321 }
322
323 return 0;
324 }
325 EXPORT_SYMBOL(__parport_register_driver);
326
327 static int port_detach(struct device *dev, void *_drv)
328 {
329 struct parport_driver *drv = _drv;
330
331 if (is_parport(dev) && drv->detach)
332 drv->detach(to_parport_dev(dev));
333
334 return 0;
335 }
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354 void parport_unregister_driver(struct parport_driver *drv)
355 {
356 struct parport *port;
357
358 mutex_lock(®istration_lock);
359 if (drv->devmodel) {
360 bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
361 driver_unregister(&drv->driver);
362 } else {
363 list_del_init(&drv->list);
364 list_for_each_entry(port, &portlist, list)
365 drv->detach(port);
366 }
367 mutex_unlock(®istration_lock);
368 }
369 EXPORT_SYMBOL(parport_unregister_driver);
370
371 static void free_port(struct device *dev)
372 {
373 int d;
374 struct parport *port = to_parport_dev(dev);
375
376 spin_lock(&full_list_lock);
377 list_del(&port->full_list);
378 spin_unlock(&full_list_lock);
379 for (d = 0; d < 5; d++) {
380 kfree(port->probe_info[d].class_name);
381 kfree(port->probe_info[d].mfr);
382 kfree(port->probe_info[d].model);
383 kfree(port->probe_info[d].cmdset);
384 kfree(port->probe_info[d].description);
385 }
386
387 kfree(port->name);
388 kfree(port);
389 }
390
391
392
393
394
395
396
397
398
399 struct parport *parport_get_port(struct parport *port)
400 {
401 struct device *dev = get_device(&port->bus_dev);
402
403 return to_parport_dev(dev);
404 }
405 EXPORT_SYMBOL(parport_get_port);
406
/**
 * parport_del_port - unregister a port's bus device
 * @port: the port to delete
 *
 * Removes the device created by parport_register_port() from the
 * driver core; the final reference drop invokes free_port().
 */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
412
413
414
415
416
417
418
419
420
421
/**
 * parport_put_port - decrement a port's reference count
 * @port: the port
 *
 * Call once for each parport_get_port() when the reference is no
 * longer needed; the last put releases the structure via free_port().
 */
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457 struct parport *parport_register_port(unsigned long base, int irq, int dma,
458 struct parport_operations *ops)
459 {
460 struct list_head *l;
461 struct parport *tmp;
462 int num;
463 int device;
464 char *name;
465 int ret;
466
467 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
468 if (!tmp)
469 return NULL;
470
471
472 tmp->base = base;
473 tmp->irq = irq;
474 tmp->dma = dma;
475 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
476 tmp->modes = 0;
477 INIT_LIST_HEAD(&tmp->list);
478 tmp->devices = tmp->cad = NULL;
479 tmp->flags = 0;
480 tmp->ops = ops;
481 tmp->physport = tmp;
482 memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
483 rwlock_init(&tmp->cad_lock);
484 spin_lock_init(&tmp->waitlist_lock);
485 spin_lock_init(&tmp->pardevice_lock);
486 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
487 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
488 sema_init(&tmp->ieee1284.irq, 0);
489 tmp->spintime = parport_default_spintime;
490 atomic_set(&tmp->ref_count, 1);
491 INIT_LIST_HEAD(&tmp->full_list);
492
493 name = kmalloc(15, GFP_KERNEL);
494 if (!name) {
495 kfree(tmp);
496 return NULL;
497 }
498
499
500 spin_lock(&full_list_lock);
501 for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
502 struct parport *p = list_entry(l, struct parport, full_list);
503 if (p->number != num)
504 break;
505 }
506 tmp->portnum = tmp->number = num;
507 list_add_tail(&tmp->full_list, l);
508 spin_unlock(&full_list_lock);
509
510
511
512
513 sprintf(name, "parport%d", tmp->portnum = tmp->number);
514 tmp->name = name;
515 tmp->bus_dev.bus = &parport_bus_type;
516 tmp->bus_dev.release = free_port;
517 dev_set_name(&tmp->bus_dev, name);
518 tmp->bus_dev.type = &parport_device_type;
519
520 for (device = 0; device < 5; device++)
521
522 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
523
524 tmp->waithead = tmp->waittail = NULL;
525
526 ret = device_register(&tmp->bus_dev);
527 if (ret) {
528 put_device(&tmp->bus_dev);
529 return NULL;
530 }
531
532 return tmp;
533 }
534 EXPORT_SYMBOL(parport_register_port);
535
536
537
538
539
540
541
542
543
544
545
546
547
548 void parport_announce_port(struct parport *port)
549 {
550 int i;
551
552 #ifdef CONFIG_PARPORT_1284
553
554 parport_daisy_init(port);
555 #endif
556
557 if (!port->dev)
558 printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
559 port->name);
560
561 parport_proc_register(port);
562 mutex_lock(®istration_lock);
563 spin_lock_irq(&parportlist_lock);
564 list_add_tail(&port->list, &portlist);
565 for (i = 1; i < 3; i++) {
566 struct parport *slave = port->slaves[i-1];
567 if (slave)
568 list_add_tail(&slave->list, &portlist);
569 }
570 spin_unlock_irq(&parportlist_lock);
571
572
573 attach_driver_chain(port);
574 for (i = 1; i < 3; i++) {
575 struct parport *slave = port->slaves[i-1];
576 if (slave)
577 attach_driver_chain(slave);
578 }
579 mutex_unlock(®istration_lock);
580 }
581 EXPORT_SYMBOL(parport_announce_port);
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602 void parport_remove_port(struct parport *port)
603 {
604 int i;
605
606 mutex_lock(®istration_lock);
607
608
609 detach_driver_chain(port);
610
611 #ifdef CONFIG_PARPORT_1284
612
613 parport_daisy_fini(port);
614 for (i = 1; i < 3; i++) {
615 struct parport *slave = port->slaves[i-1];
616 if (!slave)
617 continue;
618 detach_driver_chain(slave);
619 parport_daisy_fini(slave);
620 }
621 #endif
622
623 port->ops = &dead_ops;
624 spin_lock(&parportlist_lock);
625 list_del_init(&port->list);
626 for (i = 1; i < 3; i++) {
627 struct parport *slave = port->slaves[i-1];
628 if (slave)
629 list_del_init(&slave->list);
630 }
631 spin_unlock(&parportlist_lock);
632
633 mutex_unlock(®istration_lock);
634
635 parport_proc_unregister(port);
636
637 for (i = 1; i < 3; i++) {
638 struct parport *slave = port->slaves[i-1];
639 if (slave)
640 parport_put_port(slave);
641 }
642 }
643 EXPORT_SYMBOL(parport_remove_port);
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
/**
 * parport_register_device - register a device on a parallel port
 * @port: port to which the device is attached
 * @name: a name to refer to the device
 * @pf: preemption callback (may be NULL unless PARPORT_DEV_LURK)
 * @kf: kick/wakeup callback (may be NULL unless PARPORT_DEV_LURK)
 * @irq_func: interrupt handler for this device (may be NULL)
 * @flags: PARPORT_DEV_* registration flags
 * @handle: opaque pointer passed to the callbacks
 *
 * Legacy (non-devmodel) registration: allocates a struct pardevice and
 * its state, and links it onto the physical port's device list.
 * Returns the new device, or NULL on failure.  Callers must undo this
 * with parport_unregister_device().
 */
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is already registered. */
		printk(KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	if (flags & PARPORT_DEV_LURK) {
		/* Lurking devices must be preemptable and wakeable. */
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * Take a module reference on the port driver and a reference on
	 * the port itself, so neither goes away while we may sleep in
	 * kmalloc below.
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (!tmp)
		goto out;

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (!tmp->state)
		goto out_free_pardevice;

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;	/* default timeout: 5 seconds */
	tmp->devmodel = false;

	/* Chain this onto the list */
	tmp->prev = NULL;

	/* NOTE(review): plain spin_lock — presumably this never runs
	 * from irq context; confirm before calling from a handler. */
	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		/* Re-check under the lock: a device may have been
		 * registered since the unlocked test above. */
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			printk(KERN_DEBUG
				"%s: cannot grant exclusive access for device %s\n",
				port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb();	/* publish tmp->next before linking tmp into the list */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * init_state must run last since it may need the other
	 * pardevice fields set above.
	 */
	port->ops->init_state(tmp, tmp->state);
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_device);
837
838 static void free_pardevice(struct device *dev)
839 {
840 struct pardevice *par_dev = to_pardevice(dev);
841
842 kfree(par_dev->name);
843 kfree(par_dev);
844 }
845
/**
 * parport_register_dev_model - register a device on a parallel port
 * @port: port to which the device is attached
 * @name: a name to refer to the device
 * @par_dev_cb: callbacks and flags for the device
 * @id: device number, used to form the "name.id" bus device name
 *
 * Device-model counterpart of parport_register_device(): allocates a
 * struct pardevice, registers it on the parport bus (parented to the
 * port's bus device) and links it onto the physical port's device
 * list.  Returns the new device, or NULL on failure.
 */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	int ret;
	char *devname;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is already registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		/* Lurking devices must be preemptable and wakeable. */
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/* Pin the port driver module and the port across allocation. */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;	/* default timeout: 5 seconds */

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		/* put_device() runs free_pardevice(), which frees
		 * devname and par_dev; only state needs freeing here. */
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;

	/* NOTE(review): plain spin_lock — presumably this never runs
	 * from irq context; confirm before calling from a handler. */
	spin_lock(&port->physport->pardevice_lock);

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		/* Re-check under the lock: a device may have been
		 * registered since the unlocked test above. */
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/* publish par_dev->next before linking into the list */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * init_state must run last since it may need the other
	 * pardevice fields set above.
	 */
	port->ops->init_state(par_dev, par_dev->state);
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}

	return par_dev;

err_free_devname:
	kfree(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	/* devmodel is still false here: device_register has not run,
	 * so par_dev must be freed by hand, not via the release fn. */
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
986
987
988
989
990
991
992
993
/**
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: pointer to structure representing the device
 *
 * Undoes parport_register_device()/parport_register_dev_model():
 * removes any proc registration, releases the port if @dev still owns
 * it, unlinks @dev from the device and wait lists, frees its state,
 * and drops the module and port references taken at registration.
 */
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	port = dev->port->physport;

	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	if (port->cad == dev) {
		/* The driver forgot to call parport_release(); do it. */
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	spin_lock(&port->pardevice_lock);
	/* Unlink dev from the doubly-linked device list. */
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	/* devmodel devices are freed via free_pardevice() on the last
	 * put; legacy devices are freed directly. */
	if (dev->devmodel)
		device_unregister(&dev->dev);
	else
		kfree(dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072 struct parport *parport_find_number(int number)
1073 {
1074 struct parport *port, *result = NULL;
1075
1076 if (list_empty(&portlist))
1077 get_lowlevel_driver();
1078
1079 spin_lock(&parportlist_lock);
1080 list_for_each_entry(port, &portlist, list) {
1081 if (port->number == number) {
1082 result = parport_get_port(port);
1083 break;
1084 }
1085 }
1086 spin_unlock(&parportlist_lock);
1087 return result;
1088 }
1089 EXPORT_SYMBOL(parport_find_number);
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103 struct parport *parport_find_base(unsigned long base)
1104 {
1105 struct parport *port, *result = NULL;
1106
1107 if (list_empty(&portlist))
1108 get_lowlevel_driver();
1109
1110 spin_lock(&parportlist_lock);
1111 list_for_each_entry(port, &portlist, list) {
1112 if (port->base == base) {
1113 result = parport_get_port(port);
1114 break;
1115 }
1116 }
1117 spin_unlock(&parportlist_lock);
1118 return result;
1119 }
1120 EXPORT_SYMBOL(parport_find_base);
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
/**
 * parport_claim - claim access to a parallel port device
 * @dev: device wanting ownership of the port
 *
 * Tries to make @dev the current owner of its (physical) port,
 * preempting the present owner via its preempt callback if possible.
 * Returns 0 on success.  If the port cannot be claimed, returns
 * -EAGAIN, and — provided @dev is sleeping in parport_claim_or_block()
 * or has a wakeup callback — queues @dev on the port's wait list so it
 * is woken when the port becomes free.
 */
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device. */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * The preempt callback released the port itself;
			 * warn, and only proceed if nobody else grabbed it.
			 */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Claim succeeds from here on: leave the wait list if queued. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Unlink dev from the wait list. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices. */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select the right line. */
	if (dev->port->muxport >= 0) {
		/* FIXME: actual mux selection hardware access missing */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy-chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif

	/* Restore control registers for the new owner. */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * Claim failed.  Register an interest in the port, but only for
	 * devices sleeping in parport_claim_or_block() (waiting & 2) or
	 * those with a wakeup callback; others have no way to be told
	 * when the port frees up.  cad_lock is still held for writing.
	 */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First time here: append to the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
/**
 * parport_claim_or_block - claim access to a parallel port device
 * @dev: device wanting ownership of the port
 *
 * Like parport_claim(), but sleeps (interruptibly) until the port can
 * be claimed.  Returns 0 if claimed without sleeping, 1 if it had to
 * sleep, or -EINTR if interrupted by a signal.
 */
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.
	 */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep. */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
		/*
		 * FIXME: dev->waiting is read without its lock here;
		 * this should use proper locking and plain
		 * wait_event_interruptible() semantics throughout.
		 * See also parport_release().
		 */

		/*
		 * If dev->waiting is already clear, parport_release()
		 * has handed us the port between the claim above and
		 * now — sleeping would deadlock.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending(current))
				return -EINTR;
			r = 1;
		} else {
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name:"nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
EXPORT_SYMBOL(parport_claim_or_block);
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
/**
 * parport_release - give up access to a parallel port device
 * @dev: device relinquishing the port
 *
 * Gives up ownership of the port held by @dev, saves its state, and
 * hands the port to the longest-waiting device (or wakes interested
 * devices).  Must only be called by the current owner; a non-owner
 * call is logged and ignored.
 */
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current owner. */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
		       port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME: actual mux deselection hardware access missing */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers for the departing owner. */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up.
	 * FIXME: the wait list is walked without waitlist_lock here.
	 */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) {
			/* Sleeping in parport_claim_or_block(): claim on
			 * its behalf and wake it. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			/* If the wakeup claimed the port, we're done
			 * (racy check, but a miss is harmless). */
			if (dev->port->cad)
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/*
	 * Nobody was explicitly waiting; give every other device with a
	 * wakeup callback a chance, stopping once someone claims.
	 */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);
1380
1381 irqreturn_t parport_irq_handler(int irq, void *dev_id)
1382 {
1383 struct parport *port = dev_id;
1384
1385 parport_generic_irq(port);
1386
1387 return IRQ_HANDLED;
1388 }
1389 EXPORT_SYMBOL(parport_irq_handler);
1390
1391 MODULE_LICENSE("GPL");