This source file includes the following definitions:
- status_reg
- read_reg
- write_status
- write_reg
- DRIVE
- pd_print_error
- pd_reset
- pd_wait_for
- pd_send_command
- pd_ide_command
- schedule_fsm
- ps_tq_int
- set_next_request
- run_fsm
- do_pd_io_start
- pd_special
- pd_next_buf
- do_pd_read_start
- do_pd_write_start
- pd_ready
- do_pd_read_drq
- do_pd_write_done
- pd_init_dev_parms
- pd_door_lock
- pd_door_unlock
- pd_eject
- pd_media_check
- pd_standby_off
- pd_identify
- pd_queue_rq
- pd_special_command
- pd_open
- pd_getgeo
- pd_ioctl
- pd_release
- pd_check_events
- pd_revalidate
- pd_probe_drive
- pd_detect
- pd_init
- pd_exit
#define PD_VERSION	"1.05"
#define PD_MAJOR	45
#define PD_NAME		"pd"
#define PD_UNITS	4

#include <linux/types.h>

static int verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
static int nice = 0;
static int disable = 0;

static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };

static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};

enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};

#include <linux/init.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);

module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);

#include "paride.h"

#define PD_BITS		4

#define PD_LOG_HEADS	64
#define PD_LOG_SECTS	32

#define PD_ID_OFF	54
#define PD_ID_LEN	14

#define PD_MAX_RETRIES	5
#define PD_TMO		800
#define PD_SPIN_DEL	50

#define PD_SPIN		(1000000*PD_TMO)/(HZ*PD_SPIN_DEL)

#define STAT_ERR	0x00001
#define STAT_INDEX	0x00002
#define STAT_ECC	0x00004
#define STAT_DRQ	0x00008
#define STAT_SEEK	0x00010
#define STAT_WRERR	0x00020
#define STAT_READY	0x00040
#define STAT_BUSY	0x00080

#define ERR_AMNF	0x00100
#define ERR_TK0NF	0x00200
#define ERR_ABRT	0x00400
#define ERR_MCR		0x00800
#define ERR_IDNF	0x01000
#define ERR_MC		0x02000
#define ERR_UNC		0x04000
#define ERR_TMO		0x10000

#define IDE_READ		0x20
#define IDE_WRITE		0x30
#define IDE_READ_VRFY		0x40
#define IDE_INIT_DEV_PARMS	0x91
#define IDE_STANDBY		0x96
#define IDE_ACKCHANGE		0xdb
#define IDE_DOORLOCK		0xde
#define IDE_DOORUNLOCK		0xdf
#define IDE_IDENTIFY		0xec
#define IDE_EJECT		0xed

#define PD_NAMELEN	8

struct pd_unit {
	struct pi_adapter pia;
	struct pi_adapter *pi;
	int access;
	int capacity;
	int heads;
	int sectors;
	int cylinders;
	int can_lba;
	int drive;
	int changed;
	int removable;
	int standby;
	int alt_geom;
	char name[PD_NAMELEN];
	struct gendisk *gd;
	struct blk_mq_tag_set tag_set;
	struct list_head rq_list;
};

static struct pd_unit pd[PD_UNITS];

struct pd_req {
	enum action (*func)(struct pd_unit *disk);
};

static char pd_scratch[512];

static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
			     "READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
			     "IDNF", "MC", "UNC", "???", "TMO"
};

static void *par_drv;

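/* low-level IDE register access through the parallel-port adapter */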
static inline int status_reg(struct pd_unit *disk)
{
	return pi_read_regr(disk->pi, 1, 6);
}

static inline int read_reg(struct pd_unit *disk, int reg)
{
	return pi_read_regr(disk->pi, 0, reg);
}

static inline void write_status(struct pd_unit *disk, int val)
{
	pi_write_regr(disk->pi, 1, 6, val);
}

static inline void write_reg(struct pd_unit *disk, int reg, int val)
{
	pi_write_regr(disk->pi, 0, reg, val);
}

static inline u8 DRIVE(struct pd_unit *disk)
{
	return 0xa0 + 0x10 * disk->drive;
}

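/* error reporting, drive reset and ATA command helpers */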
static void pd_print_error(struct pd_unit *disk, char *msg, int status)
{
	int i;

	printk("%s: %s: status = 0x%x =", disk->name, msg, status);
	for (i = 0; i < ARRAY_SIZE(pd_errs); i++)
		if (status & (1 << i))
			printk(" %s", pd_errs[i]);
	printk("\n");
}

static void pd_reset(struct pd_unit *disk)
{
	write_status(disk, 4);
	udelay(50);
	write_status(disk, 0);
	udelay(250);
}

#define DBMSG(msg) ((verbose > 1) ? (msg) : NULL)

static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
{
	int k, r, e;

	k = 0;
	while (k < PD_SPIN) {
		r = status_reg(disk);
		k++;
		if (((r & w) == w) && !(r & STAT_BUSY))
			break;
		udelay(PD_SPIN_DEL);
	}
	e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
	if (k >= PD_SPIN)
		e |= ERR_TMO;
	if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
		pd_print_error(disk, msg, e);
	return e;
}

static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
{
	write_reg(disk, 6, DRIVE(disk) + h);
	write_reg(disk, 1, 0);
	write_reg(disk, 2, n);
	write_reg(disk, 3, s);
	write_reg(disk, 4, c0);
	write_reg(disk, 5, c1);
	write_reg(disk, 7, func);

	udelay(1);
}

static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
{
	int c1, c0, h, s;

	if (disk->can_lba) {
		s = block & 255;
		c0 = (block >>= 8) & 255;
		c1 = (block >>= 8) & 255;
		h = ((block >>= 8) & 15) + 0x40;
	} else {
		s = (block % disk->sectors) + 1;
		h = (block /= disk->sectors) % disk->heads;
		c0 = (block /= disk->heads) % 256;
		c1 = (block >>= 8);
	}
	pd_send_command(disk, count, s, h, c0, c1, func);
}

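/* the request-handling state machine, driven from a delayed workqueue */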
enum action {Fail = 0, Ok = 1, Hold, Wait};

static struct request *pd_req;
static enum action (*phase)(void);

static void run_fsm(void);

static void ps_tq_int(struct work_struct *work);

static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);

static void schedule_fsm(void)
{
	if (!nice)
		schedule_delayed_work(&fsm_tq, 0);
	else
		schedule_delayed_work(&fsm_tq, nice - 1);
}

static void ps_tq_int(struct work_struct *work)
{
	run_fsm();
}

static enum action do_pd_io_start(void);
static enum action pd_special(void);
static enum action do_pd_read_start(void);
static enum action do_pd_write_start(void);
static enum action do_pd_read_drq(void);
static enum action do_pd_write_done(void);

static int pd_queue;
static int pd_claimed;

static struct pd_unit *pd_current;
static PIA *pi_current;

static int set_next_request(void)
{
	struct gendisk *disk;
	struct request_queue *q;
	int old_pos = pd_queue;

	do {
		disk = pd[pd_queue].gd;
		q = disk ? disk->queue : NULL;
		if (++pd_queue == PD_UNITS)
			pd_queue = 0;
		if (q) {
			struct pd_unit *disk = q->queuedata;

			if (list_empty(&disk->rq_list))
				continue;

			pd_req = list_first_entry(&disk->rq_list,
						  struct request,
						  queuelist);
			list_del_init(&pd_req->queuelist);
			blk_mq_start_request(pd_req);
			break;
		}
	} while (pd_queue != old_pos);

	return pd_req != NULL;
}

static void run_fsm(void)
{
	while (1) {
		enum action res;
		int stop = 0;

		if (!phase) {
			pd_current = pd_req->rq_disk->private_data;
			pi_current = pd_current->pi;
			phase = do_pd_io_start;
		}

		switch (pd_claimed) {
		case 0:
			pd_claimed = 1;
			if (!pi_schedule_claimed(pi_current, run_fsm))
				return;
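			/* fall through */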
		case 1:
			pd_claimed = 2;
			pi_current->proto->connect(pi_current);
		}

		switch (res = phase()) {
		case Ok:
		case Fail: {
			blk_status_t err;

			err = res == Ok ? 0 : BLK_STS_IOERR;
			pi_disconnect(pi_current);
			pd_claimed = 0;
			phase = NULL;
			spin_lock_irq(&pd_lock);
			if (!blk_update_request(pd_req, err,
						blk_rq_cur_bytes(pd_req))) {
				__blk_mq_end_request(pd_req, err);
				pd_req = NULL;
				stop = !set_next_request();
			}
			spin_unlock_irq(&pd_lock);
			if (stop)
				return;
		}
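			/* fall through */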
		case Hold:
			schedule_fsm();
			return;
		case Wait:
			pi_disconnect(pi_current);
			pd_claimed = 0;
		}
	}
}

static int pd_retries = 0;
static int pd_block;
static int pd_count;
static int pd_run;
static char *pd_buf;

static enum action do_pd_io_start(void)
{
	switch (req_op(pd_req)) {
	case REQ_OP_DRV_IN:
		phase = pd_special;
		return pd_special();
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		pd_block = blk_rq_pos(pd_req);
		pd_count = blk_rq_cur_sectors(pd_req);
		if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
			return Fail;
		pd_run = blk_rq_sectors(pd_req);
		pd_buf = bio_data(pd_req->bio);
		pd_retries = 0;
		if (req_op(pd_req) == REQ_OP_READ)
			return do_pd_read_start();
		else
			return do_pd_write_start();
	}
	return Fail;
}

static enum action pd_special(void)
{
	struct pd_req *req = blk_mq_rq_to_pdu(pd_req);

	return req->func(pd_current);
}

static int pd_next_buf(void)
{
	unsigned long saved_flags;

	pd_count--;
	pd_run--;
	pd_buf += 512;
	pd_block++;
	if (!pd_run)
		return 1;
	if (pd_count)
		return 0;
	spin_lock_irqsave(&pd_lock, saved_flags);
	if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
		__blk_mq_end_request(pd_req, 0);
		pd_req = NULL;
		pd_count = 0;
		pd_buf = NULL;
	} else {
		pd_count = blk_rq_cur_sectors(pd_req);
		pd_buf = bio_data(pd_req->bio);
	}
	spin_unlock_irqrestore(&pd_lock, saved_flags);
	return !pd_count;
}

static unsigned long pd_timeout;

static enum action do_pd_read_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
	phase = do_pd_read_drq;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

static enum action do_pd_write_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				return Wait;
			}
			return Fail;
		}
		pi_write_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	phase = do_pd_write_done;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

static inline int pd_ready(void)
{
	return !(status_reg(pd_current) & STAT_BUSY);
}

static enum action do_pd_read_drq(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				phase = do_pd_read_start;
				return Wait;
			}
			return Fail;
		}
		pi_read_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	return Ok;
}

static enum action do_pd_write_done(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			phase = do_pd_write_start;
			return Wait;
		}
		return Fail;
	}
	return Ok;
}

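/* ATA housekeeping commands: geometry init, door lock/unlock, eject, media check, standby, identify */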
static void pd_init_dev_parms(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
	pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
			IDE_INIT_DEV_PARMS);
	udelay(300);
	pd_wait_for(disk, 0, "Initialise device parameters");
}

static enum action pd_door_lock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}

static enum action pd_door_unlock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}

static enum action pd_eject(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
	pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
	pd_wait_for(disk, 0, DBMSG("before eject"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
	pd_wait_for(disk, 0, DBMSG("after eject"));
	return Ok;
}

static enum action pd_media_check(struct pd_unit *disk)
{
	int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));

	if (!(r & STAT_ERR)) {
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
	} else
		disk->changed = 1;
	if (r & ERR_MC) {
		disk->changed = 1;
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
		pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
	}
	return Ok;
}

static void pd_standby_off(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before STANDBY"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
	pd_wait_for(disk, 0, DBMSG("after STANDBY"));
}

static enum action pd_identify(struct pd_unit *disk)
{
	int j;
	char id[PD_ID_LEN + 1];

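	/* the soft reset issued for drive 0 below also resets the slave on the same channel */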
	if (disk->drive == 0)
		pd_reset(disk);

	write_reg(disk, 6, DRIVE(disk));
	pd_wait_for(disk, 0, DBMSG("before IDENT"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);

	if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
		return Fail;
	pi_read_block(disk->pi, pd_scratch, 512);
	disk->can_lba = pd_scratch[99] & 2;
	disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12));
	disk->heads = le16_to_cpu(*(__le16 *) (pd_scratch + 6));
	disk->cylinders = le16_to_cpu(*(__le16 *) (pd_scratch + 2));
	if (disk->can_lba)
		disk->capacity = le32_to_cpu(*(__le32 *) (pd_scratch + 120));
	else
		disk->capacity = disk->sectors * disk->heads * disk->cylinders;

	for (j = 0; j < PD_ID_LEN; j++)
		id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
	j = PD_ID_LEN - 1;
	while ((j >= 0) && (id[j] <= 0x20))
		j--;
	j++;
	id[j] = 0;

	disk->removable = pd_scratch[0] & 0x80;

	printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
	       disk->name, id,
	       disk->drive ? "slave" : "master",
	       disk->capacity, disk->capacity / 2048,
	       disk->cylinders, disk->heads, disk->sectors,
	       disk->removable ? "removable" : "fixed");

	if (disk->capacity)
		pd_init_dev_parms(disk);
	if (!disk->standby)
		pd_standby_off(disk);

	return Ok;
}

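/* blk-mq request submission and synchronous special-command execution */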
static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct pd_unit *disk = hctx->queue->queuedata;

	spin_lock_irq(&pd_lock);
	if (!pd_req) {
		pd_req = bd->rq;
		blk_mq_start_request(pd_req);
	} else
		list_add_tail(&bd->rq->queuelist, &disk->rq_list);
	spin_unlock_irq(&pd_lock);

	run_fsm();
	return BLK_STS_OK;
}

static int pd_special_command(struct pd_unit *disk,
			      enum action (*func)(struct pd_unit *disk))
{
	struct request *rq;
	struct pd_req *req;

	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	req = blk_mq_rq_to_pdu(rq);

	req->func = func;
	blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
	blk_put_request(rq);
	return 0;
}

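/* block device operations */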
static int pd_open(struct block_device *bdev, fmode_t mode)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	mutex_lock(&pd_mutex);
	disk->access++;

	if (disk->removable) {
		pd_special_command(disk, pd_media_check);
		pd_special_command(disk, pd_door_lock);
	}
	mutex_unlock(&pd_mutex);
	return 0;
}

static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	if (disk->alt_geom) {
		geo->heads = PD_LOG_HEADS;
		geo->sectors = PD_LOG_SECTS;
		geo->cylinders = disk->capacity / (geo->heads * geo->sectors);
	} else {
		geo->heads = disk->heads;
		geo->sectors = disk->sectors;
		geo->cylinders = disk->cylinders;
	}

	return 0;
}

static int pd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	switch (cmd) {
	case CDROMEJECT:
		mutex_lock(&pd_mutex);
		if (disk->access == 1)
			pd_special_command(disk, pd_eject);
		mutex_unlock(&pd_mutex);
		return 0;
	default:
		return -EINVAL;
	}
}

static void pd_release(struct gendisk *p, fmode_t mode)
{
	struct pd_unit *disk = p->private_data;

	mutex_lock(&pd_mutex);
	if (!--disk->access && disk->removable)
		pd_special_command(disk, pd_door_unlock);
	mutex_unlock(&pd_mutex);
}

static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
{
	struct pd_unit *disk = p->private_data;
	int r;

	if (!disk->removable)
		return 0;
	pd_special_command(disk, pd_media_check);
	r = disk->changed;
	disk->changed = 0;
	return r ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static int pd_revalidate(struct gendisk *p)
{
	struct pd_unit *disk = p->private_data;

	if (pd_special_command(disk, pd_identify) == 0)
		set_capacity(p, disk->capacity);
	else
		set_capacity(p, 0);
	return 0;
}

static const struct block_device_operations pd_fops = {
	.owner		= THIS_MODULE,
	.open		= pd_open,
	.release	= pd_release,
	.ioctl		= pd_ioctl,
	.getgeo		= pd_getgeo,
	.check_events	= pd_check_events,
	.revalidate_disk = pd_revalidate
};

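/* drive probing, detection and module init/exit */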
static const struct blk_mq_ops pd_mq_ops = {
	.queue_rq	= pd_queue_rq,
};

static void pd_probe_drive(struct pd_unit *disk)
{
	struct gendisk *p;

	p = alloc_disk(1 << PD_BITS);
	if (!p)
		return;

	strcpy(p->disk_name, disk->name);
	p->fops = &pd_fops;
	p->major = major;
	p->first_minor = (disk - pd) << PD_BITS;
	p->events = DISK_EVENT_MEDIA_CHANGE;
	disk->gd = p;
	p->private_data = disk;

	memset(&disk->tag_set, 0, sizeof(disk->tag_set));
	disk->tag_set.ops = &pd_mq_ops;
	disk->tag_set.cmd_size = sizeof(struct pd_req);
	disk->tag_set.nr_hw_queues = 1;
	disk->tag_set.nr_maps = 1;
	disk->tag_set.queue_depth = 2;
	disk->tag_set.numa_node = NUMA_NO_NODE;
	disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;

	if (blk_mq_alloc_tag_set(&disk->tag_set))
		return;

	p->queue = blk_mq_init_queue(&disk->tag_set);
	if (IS_ERR(p->queue)) {
		blk_mq_free_tag_set(&disk->tag_set);
		p->queue = NULL;
		return;
	}

	p->queue->queuedata = disk;
	blk_queue_max_hw_sectors(p->queue, cluster);
	blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);

	if (disk->drive == -1) {
		for (disk->drive = 0; disk->drive <= 1; disk->drive++)
			if (pd_special_command(disk, pd_identify) == 0)
				return;
	} else if (pd_special_command(disk, pd_identify) == 0)
		return;
	disk->gd = NULL;
	put_disk(p);
}

static int pd_detect(void)
{
	int found = 0, unit, pd_drive_count = 0;
	struct pd_unit *disk;

	for (unit = 0; unit < PD_UNITS; unit++) {
		int *parm = *drives[unit];
		struct pd_unit *disk = pd + unit;

		disk->pi = &disk->pia;
		disk->access = 0;
		disk->changed = 1;
		disk->capacity = 0;
		disk->drive = parm[D_SLV];
		snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a' + unit);
		disk->alt_geom = parm[D_GEO];
		disk->standby = parm[D_SBY];
		if (parm[D_PRT])
			pd_drive_count++;
		INIT_LIST_HEAD(&disk->rq_list);
	}

	par_drv = pi_register_driver(name);
	if (!par_drv) {
		pr_err("failed to register %s driver\n", name);
		return -1;
	}

	if (pd_drive_count == 0) {
		disk = pd;
		if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
			    PI_PD, verbose, disk->name)) {
			pd_probe_drive(disk);
			if (!disk->gd)
				pi_release(disk->pi);
		}

	} else {
		for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
			int *parm = *drives[unit];

			if (!parm[D_PRT])
				continue;
			if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
				    parm[D_UNI], parm[D_PRO], parm[D_DLY],
				    pd_scratch, PI_PD, verbose, disk->name)) {
				pd_probe_drive(disk);
				if (!disk->gd)
					pi_release(disk->pi);
			}
		}
	}
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		if (disk->gd) {
			set_capacity(disk->gd, disk->capacity);
			add_disk(disk->gd);
			found = 1;
		}
	}
	if (!found) {
		printk("%s: no valid drive found\n", name);
		pi_unregister_driver(par_drv);
	}
	return found;
}

static int __init pd_init(void)
{
	if (disable)
		goto out1;

	if (register_blkdev(major, name))
		goto out1;

	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
	       name, name, PD_VERSION, major, cluster, nice);
	if (!pd_detect())
		goto out2;

	return 0;

out2:
	unregister_blkdev(major, name);
out1:
	return -ENODEV;
}

static void __exit pd_exit(void)
{
	struct pd_unit *disk;
	int unit;

	unregister_blkdev(major, name);
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		struct gendisk *p = disk->gd;

		if (p) {
			disk->gd = NULL;
			del_gendisk(p);
			blk_cleanup_queue(p->queue);
			blk_mq_free_tag_set(&disk->tag_set);
			put_disk(p);
			pi_release(disk->pi);
		}
	}
}

MODULE_LICENSE("GPL");
module_init(pd_init)
module_exit(pd_exit)