1 #ifndef _IDE_H
2 #define _IDE_H
3 /*
4  *  linux/include/linux/ide.h
5  *
6  *  Copyright (C) 1994-2002  Linus Torvalds & authors
7  */
8 
9 #include <linux/init.h>
10 #include <linux/ioport.h>
11 #include <linux/ata.h>
12 #include <linux/blkdev.h>
13 #include <linux/proc_fs.h>
14 #include <linux/interrupt.h>
15 #include <linux/bitops.h>
16 #include <linux/bio.h>
17 #include <linux/pci.h>
18 #include <linux/completion.h>
19 #include <linux/pm.h>
20 #include <linux/mutex.h>
21 /* for request_sense */
22 #include <linux/cdrom.h>
23 #include <asm/byteorder.h>
24 #include <asm/io.h>
25 
26 #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
27 # define SUPPORT_VLB_SYNC 0
28 #else
29 # define SUPPORT_VLB_SYNC 1
30 #endif
31 
32 /*
33  * Probably not wise to fiddle with these
34  */
35 #define IDE_DEFAULT_MAX_FAILURES	1
36 #define ERROR_MAX	8	/* Max read/write errors per sector */
37 #define ERROR_RESET	3	/* Reset controller every 4th retry */
38 #define ERROR_RECAL	1	/* Recalibrate every 2nd retry */
39 
40 struct device;
41 
42 /* IDE-specific values for req->cmd_type */
43 enum ata_cmd_type_bits {
44 	REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1,
45 	REQ_TYPE_ATA_PC,
46 	REQ_TYPE_ATA_SENSE,	/* sense request */
47 	REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */
48 	REQ_TYPE_ATA_PM_RESUME,	/* resume request */
49 };
50 
51 #define ata_pm_request(rq)	\
52 	((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
53 	 (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
54 
55 /* Error codes returned in rq->errors to the higher part of the driver. */
56 enum {
57 	IDE_DRV_ERROR_GENERAL	= 101,
58 	IDE_DRV_ERROR_FILEMARK	= 102,
59 	IDE_DRV_ERROR_EOD	= 103,
60 };
61 
62 /*
63  * Definitions for accessing IDE controller registers
64  */
65 #define IDE_NR_PORTS		(10)
66 
67 struct ide_io_ports {
68 	unsigned long	data_addr;
69 
70 	union {
71 		unsigned long error_addr;	/*   read:  error */
72 		unsigned long feature_addr;	/*  write: feature */
73 	};
74 
75 	unsigned long	nsect_addr;
76 	unsigned long	lbal_addr;
77 	unsigned long	lbam_addr;
78 	unsigned long	lbah_addr;
79 
80 	unsigned long	device_addr;
81 
82 	union {
83 		unsigned long status_addr;	/*  read: status  */
84 		unsigned long command_addr;	/* write: command */
85 	};
86 
87 	unsigned long	ctl_addr;
88 
89 	unsigned long	irq_addr;
90 };
91 
92 #define OK_STAT(stat,good,bad)	(((stat)&((good)|(bad)))==(good))
93 
94 #define BAD_R_STAT	(ATA_BUSY | ATA_ERR)
95 #define BAD_W_STAT	(BAD_R_STAT | ATA_DF)
96 #define BAD_STAT	(BAD_R_STAT | ATA_DRQ)
97 #define DRIVE_READY	(ATA_DRDY | ATA_DSC)
98 
99 #define BAD_CRC		(ATA_ABORTED | ATA_ICRC)
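
/*
 * Illustrative sketch (not part of the interface): OK_STAT() succeeds when
 * every "good" bit is set and no "bad" bit is.  A PIO data-in handler would
 * typically gate the DRQ phase roughly like this, with error handling
 * reduced to the ide_error() call:
 *
 *	u8 stat = hwif->tp_ops->read_status(hwif);
 *
 *	if (!OK_STAT(stat, ATA_DRQ, BAD_R_STAT))
 *		return ide_error(drive, "read phase", stat);
 */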
100 
101 #define SATA_NR_PORTS		(3)	/* 16 possible ?? */
102 
103 #define SATA_STATUS_OFFSET	(0)
104 #define SATA_ERROR_OFFSET	(1)
105 #define SATA_CONTROL_OFFSET	(2)
106 
107 /*
108  * Our Physical Region Descriptor (PRD) table should be large enough
109  * to handle the biggest I/O request we are likely to see.  Since requests
110  * can have no more than 256 sectors, and since the typical blocksize is
111  * two or more sectors, we could get by with a limit of 128 entries here for
112  * the usual worst case.  Most requests seem to include some contiguous blocks,
113  * further reducing the number of table entries required.
114  *
115  * The driver reverts to PIO mode for individual requests that exceed
116  * this limit (possible with 512-byte blocksizes, e.g. an MS-DOS filesystem),
117  * so handling 100% of all crazy scenarios here is not necessary.
118  *
119  * As it turns out though, we must allocate a full 4KB page for this,
120  * so the two PRD tables (ide0 & ide1) will each get half of that,
121  * allowing each to have about 256 entries (8 bytes each) from this.
122  */
123 #define PRD_BYTES       8
124 #define PRD_ENTRIES	256
125 
126 /*
127  * Some more useful definitions
128  */
129 #define PARTN_BITS	6	/* number of minor dev bits for partitions */
130 #define MAX_DRIVES	2	/* per interface; 2 assumed by lots of code */
131 #define SECTOR_SIZE	512
132 
133 /*
134  * Timeouts for various operations:
135  */
136 enum {
137 	/* spec allows up to 20ms, but CF cards and SSD drives need more */
138 	WAIT_DRQ	= 1 * HZ,	/* 1s */
139 	/* some laptops are very slow */
140 	WAIT_READY	= 5 * HZ,	/* 5s */
141 	/* should be less than 3ms (?), if all ATAPI CD drive trays are closed at boot */
142 	WAIT_PIDENTIFY	= 10 * HZ,	/* 10s */
143 	/* worst case when spinning up */
144 	WAIT_WORSTCASE	= 30 * HZ,	/* 30s */
145 	/* maximum wait for an IRQ to happen */
146 	WAIT_CMD	= 10 * HZ,	/* 10s */
147 	/* Some drives require a longer IRQ timeout. */
148 	WAIT_FLOPPY_CMD	= 50 * HZ,	/* 50s */
149 	/*
150 	 * Some drives (for example, Seagate STT3401A Travan) require a very
151 	 * long timeout, because they don't return an interrupt or clear their
152 	 * BSY bit until after the command completes (even retension commands).
153 	 */
154 	WAIT_TAPE_CMD	= 900 * HZ,	/* 900s */
155 	/* minimum sleep time */
156 	WAIT_MIN_SLEEP	= HZ / 50,	/* 20ms */
157 };
158 
159 /*
160  * Op codes for special requests to be handled by ide_special_rq().
161  * Values should be in the range of 0x20 to 0x3f.
162  */
163 #define REQ_DRIVE_RESET		0x20
164 #define REQ_DEVSET_EXEC		0x21
165 #define REQ_PARK_HEADS		0x22
166 #define REQ_UNPARK_HEADS	0x23
167 
168 /*
169  * hwif_chipset_t is used to keep track of the specific hardware
170  * chipset used by each IDE interface, if known.
171  */
172 enum {		ide_unknown,	ide_generic,	ide_pci,
173 		ide_cmd640,	ide_dtc2278,	ide_ali14xx,
174 		ide_qd65xx,	ide_umc8672,	ide_ht6560b,
175 		ide_4drives,	ide_pmac,	ide_acorn,
176 		ide_au1xxx,	ide_palm3710
177 };
178 
179 typedef u8 hwif_chipset_t;
180 
181 /*
182  * Structure to hold all information about the location of this port
183  */
184 struct ide_hw {
185 	union {
186 		struct ide_io_ports	io_ports;
187 		unsigned long		io_ports_array[IDE_NR_PORTS];
188 	};
189 
190 	int		irq;			/* our irq number */
191 	struct device	*dev, *parent;
192 	unsigned long	config;
193 };
194 
195 static inline void ide_std_init_ports(struct ide_hw *hw,
196 				      unsigned long io_addr,
197 				      unsigned long ctl_addr)
198 {
199 	unsigned int i;
200 
201 	for (i = 0; i <= 7; i++)
202 		hw->io_ports_array[i] = io_addr++;
203 
204 	hw->io_ports.ctl_addr = ctl_addr;
205 }
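
/*
 * Illustrative sketch: a legacy host driver usually describes the standard
 * primary channel like this before passing it to ide_host_add() (declared
 * further below).  The port/IRQ values are the conventional ISA ones and
 * "example_port_info" is a hypothetical struct ide_port_info:
 *
 *	struct ide_hw hw, *hws[] = { &hw };
 *
 *	memset(&hw, 0, sizeof(hw));
 *	ide_std_init_ports(&hw, 0x1f0, 0x3f6);
 *	hw.irq = 14;
 *
 *	ide_host_add(&example_port_info, hws, 1, NULL);
 */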
206 
207 #define MAX_HWIFS	10
208 
209 /*
210  * Now for the data we need to maintain per-drive:  ide_drive_t
211  */
212 
213 #define ide_scsi	0x21
214 #define ide_disk	0x20
215 #define ide_optical	0x7
216 #define ide_cdrom	0x5
217 #define ide_tape	0x1
218 #define ide_floppy	0x0
219 
220 /*
221  * Special Driver Flags
222  */
223 enum {
224 	IDE_SFLAG_SET_GEOMETRY		= (1 << 0),
225 	IDE_SFLAG_RECALIBRATE		= (1 << 1),
226 	IDE_SFLAG_SET_MULTMODE		= (1 << 2),
227 };
228 
229 /*
230  * Status returned from various ide_ functions
231  */
232 typedef enum {
233 	ide_stopped,	/* no drive operation was started */
234 	ide_started,	/* a drive operation was started, handler was set */
235 } ide_startstop_t;
236 
237 enum {
238 	IDE_VALID_ERROR 		= (1 << 1),
239 	IDE_VALID_FEATURE		= IDE_VALID_ERROR,
240 	IDE_VALID_NSECT 		= (1 << 2),
241 	IDE_VALID_LBAL			= (1 << 3),
242 	IDE_VALID_LBAM			= (1 << 4),
243 	IDE_VALID_LBAH			= (1 << 5),
244 	IDE_VALID_DEVICE		= (1 << 6),
245 	IDE_VALID_LBA			= IDE_VALID_LBAL |
246 					  IDE_VALID_LBAM |
247 					  IDE_VALID_LBAH,
248 	IDE_VALID_OUT_TF		= IDE_VALID_FEATURE |
249 					  IDE_VALID_NSECT |
250 					  IDE_VALID_LBA,
251 	IDE_VALID_IN_TF 		= IDE_VALID_NSECT |
252 					  IDE_VALID_LBA,
253 	IDE_VALID_OUT_HOB		= IDE_VALID_OUT_TF,
254 	IDE_VALID_IN_HOB		= IDE_VALID_ERROR |
255 					  IDE_VALID_NSECT |
256 					  IDE_VALID_LBA,
257 };
258 
259 enum {
260 	IDE_TFLAG_LBA48			= (1 << 0),
261 	IDE_TFLAG_WRITE			= (1 << 1),
262 	IDE_TFLAG_CUSTOM_HANDLER	= (1 << 2),
263 	IDE_TFLAG_DMA_PIO_FALLBACK	= (1 << 3),
264 	/* force 16-bit I/O operations */
265 	IDE_TFLAG_IO_16BIT		= (1 << 4),
266 	/* struct ide_cmd was allocated using kmalloc() */
267 	IDE_TFLAG_DYN			= (1 << 5),
268 	IDE_TFLAG_FS			= (1 << 6),
269 	IDE_TFLAG_MULTI_PIO		= (1 << 7),
270 	IDE_TFLAG_SET_XFER		= (1 << 8),
271 };
272 
273 enum {
274 	IDE_FTFLAG_FLAGGED		= (1 << 0),
275 	IDE_FTFLAG_SET_IN_FLAGS		= (1 << 1),
276 	IDE_FTFLAG_OUT_DATA		= (1 << 2),
277 	IDE_FTFLAG_IN_DATA		= (1 << 3),
278 };
279 
280 struct ide_taskfile {
281 	u8	data;		/* 0: data byte (for TASKFILE ioctl) */
282 	union {			/* 1: */
283 		u8 error;	/*  read: error */
284 		u8 feature;	/* write: feature */
285 	};
286 	u8	nsect;		/* 2: number of sectors */
287 	u8	lbal;		/* 3: LBA low */
288 	u8	lbam;		/* 4: LBA mid */
289 	u8	lbah;		/* 5: LBA high */
290 	u8	device;		/* 6: device select */
291 	union {			/* 7: */
292 		u8 status;	/*  read: status */
293 		u8 command;	/* write: command */
294 	};
295 };
296 
297 struct ide_cmd {
298 	struct ide_taskfile	tf;
299 	struct ide_taskfile	hob;
300 	struct {
301 		struct {
302 			u8		tf;
303 			u8		hob;
304 		} out, in;
305 	} valid;
306 
307 	u16			tf_flags;
308 	u8			ftf_flags;	/* for TASKFILE ioctl */
309 	int			protocol;
310 
311 	int			sg_nents;	  /* number of sg entries */
312 	int			orig_sg_nents;
313 	int			sg_dma_direction; /* DMA transfer direction */
314 
315 	unsigned int		nbytes;
316 	unsigned int		nleft;
317 	unsigned int		last_xfer_len;
318 
319 	struct scatterlist	*cursg;
320 	unsigned int		cursg_ofs;
321 
322 	struct request		*rq;		/* copy of request */
323 };
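
/*
 * Illustrative sketch of the usual pattern for a simple non-data command
 * (here a SET MULTIPLE MODE), issued through ide_no_data_taskfile() which
 * is declared further below:
 *
 *	struct ide_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.tf.nsect	 = drive->mult_req;
 *	cmd.tf.command	 = ATA_CMD_SET_MULTI;
 *	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
 *	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
 *
 *	ide_no_data_taskfile(drive, &cmd);
 */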
324 
325 /* ATAPI packet command flags */
326 enum {
327 	/* set when an error is considered normal - no retry (ide-tape) */
328 	PC_FLAG_ABORT			= (1 << 0),
329 	PC_FLAG_SUPPRESS_ERROR		= (1 << 1),
330 	PC_FLAG_WAIT_FOR_DSC		= (1 << 2),
331 	PC_FLAG_DMA_OK			= (1 << 3),
332 	PC_FLAG_DMA_IN_PROGRESS		= (1 << 4),
333 	PC_FLAG_DMA_ERROR		= (1 << 5),
334 	PC_FLAG_WRITING			= (1 << 6),
335 };
336 
337 #define ATAPI_WAIT_PC		(60 * HZ)
338 
339 struct ide_atapi_pc {
340 	/* actual packet bytes */
341 	u8 c[12];
342 	/* incremented on each retry */
343 	int retries;
344 	int error;
345 
346 	/* bytes to transfer */
347 	int req_xfer;
348 
349 	/* the corresponding request */
350 	struct request *rq;
351 
352 	unsigned long flags;
353 
354 	/*
355 	 * those are more or less driver-specific and some of them are subject
356 	 * to change/removal later.
357 	 */
358 	unsigned long timeout;
359 };
360 
361 struct ide_devset;
362 struct ide_driver;
363 
364 #ifdef CONFIG_BLK_DEV_IDEACPI
365 struct ide_acpi_drive_link;
366 struct ide_acpi_hwif_link;
367 #endif
368 
369 struct ide_drive_s;
370 
371 struct ide_disk_ops {
372 	int		(*check)(struct ide_drive_s *, const char *);
373 	int		(*get_capacity)(struct ide_drive_s *);
374 	void		(*unlock_native_capacity)(struct ide_drive_s *);
375 	void		(*setup)(struct ide_drive_s *);
376 	void		(*flush)(struct ide_drive_s *);
377 	int		(*init_media)(struct ide_drive_s *, struct gendisk *);
378 	int		(*set_doorlock)(struct ide_drive_s *, struct gendisk *,
379 					int);
380 	ide_startstop_t	(*do_request)(struct ide_drive_s *, struct request *,
381 				      sector_t);
382 	int		(*ioctl)(struct ide_drive_s *, struct block_device *,
383 				 fmode_t, unsigned int, unsigned long);
384 };
385 
386 /* ATAPI device flags */
387 enum {
388 	IDE_AFLAG_DRQ_INTERRUPT		= (1 << 0),
389 
390 	/* ide-cd */
391 	/* Drive cannot eject the disc. */
392 	IDE_AFLAG_NO_EJECT		= (1 << 1),
393 	/* Drive is a pre-ATAPI 1.2 drive. */
394 	IDE_AFLAG_PRE_ATAPI12		= (1 << 2),
395 	/* TOC addresses are in BCD. */
396 	IDE_AFLAG_TOCADDR_AS_BCD	= (1 << 3),
397 	/* TOC track numbers are in BCD. */
398 	IDE_AFLAG_TOCTRACKS_AS_BCD	= (1 << 4),
399 	/* Saved TOC information is current. */
400 	IDE_AFLAG_TOC_VALID		= (1 << 6),
401 	/* We think that the drive door is locked. */
402 	IDE_AFLAG_DOOR_LOCKED		= (1 << 7),
403 	/* SET_CD_SPEED command is unsupported. */
404 	IDE_AFLAG_NO_SPEED_SELECT	= (1 << 8),
405 	IDE_AFLAG_VERTOS_300_SSD	= (1 << 9),
406 	IDE_AFLAG_VERTOS_600_ESD	= (1 << 10),
407 	IDE_AFLAG_SANYO_3CD		= (1 << 11),
408 	IDE_AFLAG_FULL_CAPS_PAGE	= (1 << 12),
409 	IDE_AFLAG_PLAY_AUDIO_OK		= (1 << 13),
410 	IDE_AFLAG_LE_SPEED_FIELDS	= (1 << 14),
411 
412 	/* ide-floppy */
413 	/* Avoid commands not supported in Clik drive */
414 	IDE_AFLAG_CLIK_DRIVE		= (1 << 15),
415 	/* Requires BH algorithm for packets */
416 	IDE_AFLAG_ZIP_DRIVE		= (1 << 16),
417 	/* Supports format progress report */
418 	IDE_AFLAG_SRFP			= (1 << 17),
419 
420 	/* ide-tape */
421 	IDE_AFLAG_IGNORE_DSC		= (1 << 18),
422 	/* 0 When the tape position is unknown */
423 	IDE_AFLAG_ADDRESS_VALID		= (1 <<	19),
424 	/* Device already opened */
425 	IDE_AFLAG_BUSY			= (1 << 20),
426 	/* Attempt to auto-detect the current user block size */
427 	IDE_AFLAG_DETECT_BS		= (1 << 21),
428 	/* Currently on a filemark */
429 	IDE_AFLAG_FILEMARK		= (1 << 22),
430 	/* 0 = no tape is loaded, so we don't rewind after ejecting */
431 	IDE_AFLAG_MEDIUM_PRESENT	= (1 << 23),
432 
433 	IDE_AFLAG_NO_AUTOCLOSE		= (1 << 24),
434 };
435 
436 /* device flags */
437 enum {
438 	/* restore settings after device reset */
439 	IDE_DFLAG_KEEP_SETTINGS		= (1 << 0),
440 	/* device is using DMA for read/write */
441 	IDE_DFLAG_USING_DMA		= (1 << 1),
442 	/* okay to unmask other IRQs */
443 	IDE_DFLAG_UNMASK		= (1 << 2),
444 	/* don't attempt flushes */
445 	IDE_DFLAG_NOFLUSH		= (1 << 3),
446 	/* DSC overlap */
447 	IDE_DFLAG_DSC_OVERLAP		= (1 << 4),
448 	/* give potential excess bandwidth */
449 	IDE_DFLAG_NICE1			= (1 << 5),
450 	/* device is physically present */
451 	IDE_DFLAG_PRESENT		= (1 << 6),
452 	/* disable Host Protected Area */
453 	IDE_DFLAG_NOHPA			= (1 << 7),
454 	/* id read from device (synthetic if not set) */
455 	IDE_DFLAG_ID_READ		= (1 << 8),
456 	IDE_DFLAG_NOPROBE		= (1 << 9),
457 	/* need to do check_media_change() */
458 	IDE_DFLAG_REMOVABLE		= (1 << 10),
459 	/* needed for removable devices */
460 	IDE_DFLAG_ATTACH		= (1 << 11),
461 	IDE_DFLAG_FORCED_GEOM		= (1 << 12),
462 	/* disallow setting unmask bit */
463 	IDE_DFLAG_NO_UNMASK		= (1 << 13),
464 	/* disallow enabling 32-bit I/O */
465 	IDE_DFLAG_NO_IO_32BIT		= (1 << 14),
466 	/* for removable only: door lock/unlock works */
467 	IDE_DFLAG_DOORLOCKING		= (1 << 15),
468 	/* disallow DMA */
469 	IDE_DFLAG_NODMA			= (1 << 16),
470 	/* power management told us not to do anything, so sleep nicely */
471 	IDE_DFLAG_BLOCKED		= (1 << 17),
472 	/* sleeping & sleep field valid */
473 	IDE_DFLAG_SLEEPING		= (1 << 18),
474 	IDE_DFLAG_POST_RESET		= (1 << 19),
475 	IDE_DFLAG_UDMA33_WARNED		= (1 << 20),
476 	IDE_DFLAG_LBA48			= (1 << 21),
477 	/* status of write cache */
478 	IDE_DFLAG_WCACHE		= (1 << 22),
479 	/* used for ignoring ATA_DF */
480 	IDE_DFLAG_NOWERR		= (1 << 23),
481 	/* retrying in PIO */
482 	IDE_DFLAG_DMA_PIO_RETRY		= (1 << 24),
483 	IDE_DFLAG_LBA			= (1 << 25),
484 	/* don't unload heads */
485 	IDE_DFLAG_NO_UNLOAD		= (1 << 26),
486 	/* heads unloaded, please don't reset port */
487 	IDE_DFLAG_PARKED		= (1 << 27),
488 	IDE_DFLAG_MEDIA_CHANGED		= (1 << 28),
489 	/* write protect */
490 	IDE_DFLAG_WP			= (1 << 29),
491 	IDE_DFLAG_FORMAT_IN_PROGRESS	= (1 << 30),
492 	IDE_DFLAG_NIEN_QUIRK		= (1 << 31),
493 };
494 
495 struct ide_drive_s {
496 	char		name[4];	/* drive name, such as "hda" */
497         char            driver_req[10];	/* requests specific driver */
498 
499 	struct request_queue	*queue;	/* request queue */
500 
501 	struct request		*rq;	/* current request */
502 	void		*driver_data;	/* extra driver data */
503 	u16			*id;	/* identification info */
504 #ifdef CONFIG_IDE_PROC_FS
505 	struct proc_dir_entry *proc;	/* /proc/ide/ directory entry */
506 	const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
507 #endif
508 	struct hwif_s		*hwif;	/* actually (ide_hwif_t *) */
509 
510 	const struct ide_disk_ops *disk_ops;
511 
512 	unsigned long dev_flags;
513 
514 	unsigned long sleep;		/* sleep until this time */
515 	unsigned long timeout;		/* max time to wait for irq */
516 
517 	u8	special_flags;		/* special action flags */
518 
519 	u8	select;			/* basic drive/head select reg value */
520 	u8	retry_pio;		/* retrying dma capable host in pio */
521 	u8	waiting_for_dma;	/* dma currently in progress */
522 	u8	dma;			/* atapi dma flag */
523 
524         u8	init_speed;	/* transfer rate set at boot */
525         u8	current_speed;	/* current transfer rate set */
526 	u8	desired_speed;	/* desired transfer rate set */
527 	u8	pio_mode;	/* for ->set_pio_mode _only_ */
528 	u8	dma_mode;	/* for ->set_dma_mode _only_ */
529 	u8	dn;		/* device number; now in widespread use */
530 	u8	acoustic;	/* acoustic management */
531 	u8	media;		/* disk, cdrom, tape, floppy, ... */
532 	u8	ready_stat;	/* min status value for drive ready */
533 	u8	mult_count;	/* current multiple sector setting */
534 	u8	mult_req;	/* requested multiple sector setting */
535 	u8	io_32bit;	/* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
536 	u8	bad_wstat;	/* used for ignoring ATA_DF */
537 	u8	head;		/* "real" number of heads */
538 	u8	sect;		/* "real" sectors per track */
539 	u8	bios_head;	/* BIOS/fdisk/LILO number of heads */
540 	u8	bios_sect;	/* BIOS/fdisk/LILO sectors per track */
541 
542 	/* delay this long before sending packet command */
543 	u8 pc_delay;
544 
545 	unsigned int	bios_cyl;	/* BIOS/fdisk/LILO number of cyls */
546 	unsigned int	cyl;		/* "real" number of cyls */
547 	void		*drive_data;	/* used by set_pio_mode/dev_select() */
548 	unsigned int	failures;	/* current failure count */
549 	unsigned int	max_failures;	/* maximum allowed failure count */
550 	u64		probed_capacity;/* initial/native media capacity */
551 	u64		capacity64;	/* total number of sectors */
552 
553 	int		lun;		/* logical unit */
554 	int		crc_count;	/* crc counter to reduce drive speed */
555 
556 	unsigned long	debug_mask;	/* debugging levels switch */
557 
558 #ifdef CONFIG_BLK_DEV_IDEACPI
559 	struct ide_acpi_drive_link *acpidata;
560 #endif
561 	struct list_head list;
562 	struct device	gendev;
563 	struct completion gendev_rel_comp;	/* to deal with device release() */
564 
565 	/* current packet command */
566 	struct ide_atapi_pc *pc;
567 
568 	/* last failed packet command */
569 	struct ide_atapi_pc *failed_pc;
570 
571 	/* callback for packet commands */
572 	int  (*pc_callback)(struct ide_drive_s *, int);
573 
574 	ide_startstop_t (*irq_handler)(struct ide_drive_s *);
575 
576 	unsigned long atapi_flags;
577 
578 	struct ide_atapi_pc request_sense_pc;
579 
580 	/* current sense rq and buffer */
581 	bool sense_rq_armed;
582 	struct request sense_rq;
583 	struct request_sense sense_data;
584 };
585 
586 typedef struct ide_drive_s ide_drive_t;
587 
588 #define to_ide_device(dev)		container_of(dev, ide_drive_t, gendev)
589 
590 #define to_ide_drv(obj, cont_type)	\
591 	container_of(obj, struct cont_type, dev)
592 
593 #define ide_drv_g(disk, cont_type)	\
594 	container_of((disk)->private_data, struct cont_type, driver)
595 
596 struct ide_port_info;
597 
598 struct ide_tp_ops {
599 	void	(*exec_command)(struct hwif_s *, u8);
600 	u8	(*read_status)(struct hwif_s *);
601 	u8	(*read_altstatus)(struct hwif_s *);
602 	void	(*write_devctl)(struct hwif_s *, u8);
603 
604 	void	(*dev_select)(ide_drive_t *);
605 	void	(*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
606 	void	(*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
607 
608 	void	(*input_data)(ide_drive_t *, struct ide_cmd *,
609 			      void *, unsigned int);
610 	void	(*output_data)(ide_drive_t *, struct ide_cmd *,
611 			       void *, unsigned int);
612 };
613 
614 extern const struct ide_tp_ops default_tp_ops;
615 
616 /**
617  * struct ide_port_ops - IDE port operations
618  *
619  * @init_dev:		host specific initialization of a device
620  * @set_pio_mode:	routine to program host for PIO mode
621  * @set_dma_mode:	routine to program host for DMA mode
622  * @reset_poll:		chipset polling based on hba specifics
623  * @pre_reset:		chipset specific changes to default for device-hba resets
624  * @resetproc:		routine to reset controller after a disk reset
625  * @maskproc:		special host masking for drive selection
626  * @quirkproc:		check host's drive quirk list
627  * @clear_irq:		clear IRQ
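 * @test_irq:		check whether the port is the source of the IRQ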
628  *
629  * @mdma_filter:	filter MDMA modes
630  * @udma_filter:	filter UDMA modes
631  *
632  * @cable_detect:	detect cable type
633  */
634 struct ide_port_ops {
635 	void	(*init_dev)(ide_drive_t *);
636 	void	(*set_pio_mode)(struct hwif_s *, ide_drive_t *);
637 	void	(*set_dma_mode)(struct hwif_s *, ide_drive_t *);
638 	int	(*reset_poll)(ide_drive_t *);
639 	void	(*pre_reset)(ide_drive_t *);
640 	void	(*resetproc)(ide_drive_t *);
641 	void	(*maskproc)(ide_drive_t *, int);
642 	void	(*quirkproc)(ide_drive_t *);
643 	void	(*clear_irq)(ide_drive_t *);
644 	int	(*test_irq)(struct hwif_s *);
645 
646 	u8	(*mdma_filter)(ide_drive_t *);
647 	u8	(*udma_filter)(ide_drive_t *);
648 
649 	u8	(*cable_detect)(struct hwif_s *);
650 };
651 
652 struct ide_dma_ops {
653 	void	(*dma_host_set)(struct ide_drive_s *, int);
654 	int	(*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
655 	void	(*dma_start)(struct ide_drive_s *);
656 	int	(*dma_end)(struct ide_drive_s *);
657 	int	(*dma_test_irq)(struct ide_drive_s *);
658 	void	(*dma_lost_irq)(struct ide_drive_s *);
659 	/* below ones are optional */
660 	int	(*dma_check)(struct ide_drive_s *, struct ide_cmd *);
661 	int	(*dma_timer_expiry)(struct ide_drive_s *);
662 	void	(*dma_clear)(struct ide_drive_s *);
663 	/*
664 	 * The following method is optional and only required to be
665 	 * implemented for the SFF-8038i compatible controllers.
666 	 */
667 	u8	(*dma_sff_read_status)(struct hwif_s *);
668 };
669 
670 enum {
671 	IDE_PFLAG_PROBING		= (1 << 0),
672 };
673 
674 struct ide_host;
675 
676 typedef struct hwif_s {
677 	struct hwif_s *mate;		/* other hwif from same PCI chip */
678 	struct proc_dir_entry *proc;	/* /proc/ide/ directory entry */
679 
680 	struct ide_host *host;
681 
682 	char name[6];			/* name of interface, eg. "ide0" */
683 
684 	struct ide_io_ports	io_ports;
685 
686 	unsigned long	sata_scr[SATA_NR_PORTS];
687 
688 	ide_drive_t	*devices[MAX_DRIVES + 1];
689 
690 	unsigned long	port_flags;
691 
692 	u8 major;	/* our major number */
693 	u8 index;	/* 0 for ide0; 1 for ide1; ... */
694 	u8 channel;	/* for dual-port chips: 0=primary, 1=secondary */
695 
696 	u32 host_flags;
697 
698 	u8 pio_mask;
699 
700 	u8 ultra_mask;
701 	u8 mwdma_mask;
702 	u8 swdma_mask;
703 
704 	u8 cbl;		/* cable type */
705 
706 	hwif_chipset_t chipset;	/* sub-module for tuning.. */
707 
708 	struct device *dev;
709 
710 	void (*rw_disk)(ide_drive_t *, struct request *);
711 
712 	const struct ide_tp_ops		*tp_ops;
713 	const struct ide_port_ops	*port_ops;
714 	const struct ide_dma_ops	*dma_ops;
715 
716 	/* dma physical region descriptor table (cpu view) */
717 	unsigned int	*dmatable_cpu;
718 	/* dma physical region descriptor table (dma view) */
719 	dma_addr_t	dmatable_dma;
720 
721 	/* maximum number of PRD table entries */
722 	int prd_max_nents;
723 	/* PRD entry size in bytes */
724 	int prd_ent_size;
725 
726 	/* Scatter-gather list used to build the above */
727 	struct scatterlist *sg_table;
728 	int sg_max_nents;		/* Maximum number of entries in it */
729 
730 	struct ide_cmd cmd;		/* current command */
731 
732 	int		rqsize;		/* max sectors per request */
733 	int		irq;		/* our irq number */
734 
735 	unsigned long	dma_base;	/* base addr for dma ports */
736 
737 	unsigned long	config_data;	/* for use by chipset-specific code */
738 	unsigned long	select_data;	/* for use by chipset-specific code */
739 
740 	unsigned long	extra_base;	/* extra addr for dma ports */
741 	unsigned	extra_ports;	/* number of extra dma ports */
742 
743 	unsigned	present    : 1;	/* this interface exists */
744 	unsigned	busy	   : 1; /* serializes devices on a port */
745 
746 	struct device		gendev;
747 	struct device		*portdev;
748 
749 	struct completion gendev_rel_comp; /* To deal with device release() */
750 
751 	void		*hwif_data;	/* extra hwif data */
752 
753 #ifdef CONFIG_BLK_DEV_IDEACPI
754 	struct ide_acpi_hwif_link *acpidata;
755 #endif
756 
757 	/* IRQ handler, if active */
758 	ide_startstop_t	(*handler)(ide_drive_t *);
759 
760 	/* BOOL: polling active & poll_timeout field valid */
761 	unsigned int polling : 1;
762 
763 	/* current drive */
764 	ide_drive_t *cur_dev;
765 
766 	/* current request */
767 	struct request *rq;
768 
769 	/* failsafe timer */
770 	struct timer_list timer;
771 	/* timeout value during long polls */
772 	unsigned long poll_timeout;
773 	/* queried upon timeouts */
774 	int (*expiry)(ide_drive_t *);
775 
776 	int req_gen;
777 	int req_gen_timer;
778 
779 	spinlock_t lock;
780 } ____cacheline_internodealigned_in_smp ide_hwif_t;
781 
782 #define MAX_HOST_PORTS 4
783 
784 struct ide_host {
785 	ide_hwif_t	*ports[MAX_HOST_PORTS + 1];
786 	unsigned int	n_ports;
787 	struct device	*dev[2];
788 
789 	int		(*init_chipset)(struct pci_dev *);
790 
791 	void		(*get_lock)(irq_handler_t, void *);
792 	void		(*release_lock)(void);
793 
794 	irq_handler_t	irq_handler;
795 
796 	unsigned long	host_flags;
797 
798 	int		irq_flags;
799 
800 	void		*host_priv;
801 	ide_hwif_t	*cur_port;	/* for hosts requiring serialization */
802 
803 	/* used for hosts requiring serialization */
804 	volatile unsigned long	host_busy;
805 };
806 
807 #define IDE_HOST_BUSY 0
808 
809 /*
810  *  internal ide interrupt handler type
811  */
812 typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
813 typedef int (ide_expiry_t)(ide_drive_t *);
814 
815 /* used by ide-cd, ide-floppy, etc. */
816 typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
817 
818 extern struct mutex ide_setting_mtx;
819 
820 /*
821  * configurable drive settings
822  */
823 
824 #define DS_SYNC	(1 << 0)
825 
826 struct ide_devset {
827 	int		(*get)(ide_drive_t *);
828 	int		(*set)(ide_drive_t *, int);
829 	unsigned int	flags;
830 };
831 
832 #define __DEVSET(_flags, _get, _set) { \
833 	.flags	= _flags, \
834 	.get	= _get,	\
835 	.set	= _set,	\
836 }
837 
838 #define ide_devset_get(name, field) \
839 static int get_##name(ide_drive_t *drive) \
840 { \
841 	return drive->field; \
842 }
843 
844 #define ide_devset_set(name, field) \
845 static int set_##name(ide_drive_t *drive, int arg) \
846 { \
847 	drive->field = arg; \
848 	return 0; \
849 }
850 
851 #define ide_devset_get_flag(name, flag) \
852 static int get_##name(ide_drive_t *drive) \
853 { \
854 	return !!(drive->dev_flags & flag); \
855 }
856 
857 #define ide_devset_set_flag(name, flag) \
858 static int set_##name(ide_drive_t *drive, int arg) \
859 { \
860 	if (arg) \
861 		drive->dev_flags |= flag; \
862 	else \
863 		drive->dev_flags &= ~flag; \
864 	return 0; \
865 }
866 
867 #define __IDE_DEVSET(_name, _flags, _get, _set) \
868 const struct ide_devset ide_devset_##_name = \
869 	__DEVSET(_flags, _get, _set)
870 
871 #define IDE_DEVSET(_name, _flags, _get, _set) \
872 static __IDE_DEVSET(_name, _flags, _get, _set)
873 
874 #define ide_devset_rw(_name, _func) \
875 IDE_DEVSET(_name, 0, get_##_func, set_##_func)
876 
877 #define ide_devset_w(_name, _func) \
878 IDE_DEVSET(_name, 0, NULL, set_##_func)
879 
880 #define ide_ext_devset_rw(_name, _func) \
881 __IDE_DEVSET(_name, 0, get_##_func, set_##_func)
882 
883 #define ide_ext_devset_rw_sync(_name, _func) \
884 __IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
885 
886 #define ide_decl_devset(_name) \
887 extern const struct ide_devset ide_devset_##_name
888 
889 ide_decl_devset(io_32bit);
890 ide_decl_devset(keepsettings);
891 ide_decl_devset(pio_mode);
892 ide_decl_devset(unmaskirq);
893 ide_decl_devset(using_dma);
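
/*
 * Illustrative sketch: a driver builds a setting from the helper macros
 * above.  The "acoustic" field does exist in ide_drive_t, but the devset
 * shown here is only an example of the pattern:
 *
 *	ide_devset_get(acoustic, acoustic);
 *	ide_devset_set(acoustic, acoustic);
 *	IDE_DEVSET(acoustic, DS_SYNC, get_acoustic, set_acoustic);
 *
 * This expands to get_acoustic()/set_acoustic() accessors plus a static
 * const struct ide_devset ide_devset_acoustic tying them together.
 */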
894 
895 #ifdef CONFIG_IDE_PROC_FS
896 /*
897  * /proc/ide interface
898  */
899 
900 #define ide_devset_rw_field(_name, _field) \
901 ide_devset_get(_name, _field); \
902 ide_devset_set(_name, _field); \
903 IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
904 
905 #define ide_devset_rw_flag(_name, _field) \
906 ide_devset_get_flag(_name, _field); \
907 ide_devset_set_flag(_name, _field); \
908 IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
909 
910 struct ide_proc_devset {
911 	const char		*name;
912 	const struct ide_devset	*setting;
913 	int			min, max;
914 	int			(*mulf)(ide_drive_t *);
915 	int			(*divf)(ide_drive_t *);
916 };
917 
918 #define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
919 	.name = __stringify(_name), \
920 	.setting = &ide_devset_##_name, \
921 	.min = _min, \
922 	.max = _max, \
923 	.mulf = _mulf, \
924 	.divf = _divf, \
925 }
926 
927 #define IDE_PROC_DEVSET(_name, _min, _max) \
928 __IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
929 
930 typedef struct {
931 	const char	*name;
932 	umode_t		mode;
933 	const struct file_operations *proc_fops;
934 } ide_proc_entry_t;
935 
936 void proc_ide_create(void);
937 void proc_ide_destroy(void);
938 void ide_proc_register_port(ide_hwif_t *);
939 void ide_proc_port_register_devices(ide_hwif_t *);
940 void ide_proc_unregister_device(ide_drive_t *);
941 void ide_proc_unregister_port(ide_hwif_t *);
942 void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
943 void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
944 
945 extern const struct file_operations ide_capacity_proc_fops;
946 extern const struct file_operations ide_geometry_proc_fops;
947 #else
948 static inline void proc_ide_create(void) { ; }
949 static inline void proc_ide_destroy(void) { ; }
950 static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
951 static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
952 static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
953 static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
954 static inline void ide_proc_register_driver(ide_drive_t *drive,
955 					    struct ide_driver *driver) { ; }
956 static inline void ide_proc_unregister_driver(ide_drive_t *drive,
957 					      struct ide_driver *driver) { ; }
958 #endif
959 
960 enum {
961 	/* enter/exit functions */
962 	IDE_DBG_FUNC =			(1 << 0),
963 	/* sense key/asc handling */
964 	IDE_DBG_SENSE =			(1 << 1),
965 	/* packet commands handling */
966 	IDE_DBG_PC =			(1 << 2),
967 	/* request handling */
968 	IDE_DBG_RQ =			(1 << 3),
969 	/* driver probing/setup */
970 	IDE_DBG_PROBE =			(1 << 4),
971 };
972 
973 /* DRV_NAME has to be defined in the driver before using the macro below */
974 #define __ide_debug_log(lvl, fmt, args...)				\
975 {									\
976 	if (unlikely(drive->debug_mask & lvl))				\
977 		printk(KERN_INFO DRV_NAME ": %s: " fmt "\n",		\
978 					  __func__, ## args);		\
979 }
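
/*
 * Illustrative sketch: besides defining DRV_NAME, drivers usually wrap the
 * helper in a shorter macro of their own (the wrapper name below is only an
 * example).  Note that __ide_debug_log() references a local variable named
 * "drive", which therefore has to be in scope at every call site:
 *
 *	#define DRV_NAME "ide-example"
 *
 *	#define ide_debug_log(lvl, fmt, args...) \
 *		__ide_debug_log(lvl, fmt, ## args)
 *
 *	ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x", rq->cmd[0]);
 */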
980 
981 /*
982  * Power Management state machine (rq->pm->pm_step).
983  *
984  * For each step, the core calls ide_start_power_step() first.
985  * This can return:
986  *	- ide_stopped :	In this case, the core calls us back again unless
987  *			the step has been set to IDE_PM_COMPLETED.
988  *	- ide_started :	In this case, the channel is left busy until an
989  *			async event (interrupt) occurs.
990  * Typically, ide_start_power_step() will issue a taskfile request with
991  * do_rw_taskfile().
992  *
993  * Upon reception of the interrupt, the core will call ide_complete_power_step()
994  * with the error code if any. This routine should update the step value
995  * and return. It should not start a new request. The core will call
996  * ide_start_power_step() for the new step value, unless the step has been
997  * set to IDE_PM_COMPLETED.
998  */
999 enum {
1000 	IDE_PM_START_SUSPEND,
1001 	IDE_PM_FLUSH_CACHE	= IDE_PM_START_SUSPEND,
1002 	IDE_PM_STANDBY,
1003 
1004 	IDE_PM_START_RESUME,
1005 	IDE_PM_RESTORE_PIO	= IDE_PM_START_RESUME,
1006 	IDE_PM_IDLE,
1007 	IDE_PM_RESTORE_DMA,
1008 
1009 	IDE_PM_COMPLETED,
1010 };
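
/*
 * For illustration only, a heavily simplified sketch of the suspend half of
 * the walk, where "pm" points at the struct ide_pm_state attached to the
 * request (defined near the end of this header).  The authoritative logic
 * is ide_start_power_step() in the core, which also handles the resume
 * steps and the various "not supported" cases:
 *
 *	struct ide_cmd cmd = { };
 *
 *	switch (pm->pm_step) {
 *	case IDE_PM_FLUSH_CACHE:	/* suspend step 1 (disks only) */
 *		cmd.tf.command = ATA_CMD_FLUSH;
 *		break;
 *	case IDE_PM_STANDBY:		/* suspend step 2 */
 *		cmd.tf.command = ATA_CMD_STANDBYNOW1;
 *		break;
 *	}
 *	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
 *	return do_rw_taskfile(drive, &cmd);
 */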
1011 
1012 int generic_ide_suspend(struct device *, pm_message_t);
1013 int generic_ide_resume(struct device *);
1014 
1015 void ide_complete_power_step(ide_drive_t *, struct request *);
1016 ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
1017 void ide_complete_pm_rq(ide_drive_t *, struct request *);
1018 void ide_check_pm_state(ide_drive_t *, struct request *);
1019 
1020 /*
1021  * Subdrivers support.
1022  *
1023  * The gendriver.owner field should be set to the module owner of this driver.
1024  * The gendriver.name field should be set to the name of this driver
1025  */
1026 struct ide_driver {
1027 	const char			*version;
1028 	ide_startstop_t	(*do_request)(ide_drive_t *, struct request *, sector_t);
1029 	struct device_driver	gen_driver;
1030 	int		(*probe)(ide_drive_t *);
1031 	void		(*remove)(ide_drive_t *);
1032 	void		(*resume)(ide_drive_t *);
1033 	void		(*shutdown)(ide_drive_t *);
1034 #ifdef CONFIG_IDE_PROC_FS
1035 	ide_proc_entry_t *		(*proc_entries)(ide_drive_t *);
1036 	const struct ide_proc_devset *	(*proc_devsets)(ide_drive_t *);
1037 #endif
1038 };
1039 
1040 #define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
1041 
1042 int ide_device_get(ide_drive_t *);
1043 void ide_device_put(ide_drive_t *);
1044 
1045 struct ide_ioctl_devset {
1046 	unsigned int	get_ioctl;
1047 	unsigned int	set_ioctl;
1048 	const struct ide_devset *setting;
1049 };
1050 
1051 int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
1052 		      unsigned long, const struct ide_ioctl_devset *);
1053 
1054 int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
1055 
1056 extern int ide_vlb_clk;
1057 extern int ide_pci_clk;
1058 
1059 int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
1060 void ide_kill_rq(ide_drive_t *, struct request *);
1061 
1062 void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
1063 void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
1064 
1065 void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
1066 			 unsigned int);
1067 
1068 void ide_pad_transfer(ide_drive_t *, int, int);
1069 
1070 ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
1071 
1072 void ide_fix_driveid(u16 *);
1073 
1074 extern void ide_fixstring(u8 *, const int, const int);
1075 
1076 int ide_busy_sleep(ide_drive_t *, unsigned long, int);
1077 
1078 int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
1079 int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
1080 
1081 ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
1082 ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
1083 
1084 extern ide_startstop_t ide_do_reset (ide_drive_t *);
1085 
1086 extern int ide_devset_execute(ide_drive_t *drive,
1087 			      const struct ide_devset *setting, int arg);
1088 
1089 void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
1090 int ide_complete_rq(ide_drive_t *, int, unsigned int);
1091 
1092 void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
1093 void ide_tf_dump(const char *, struct ide_cmd *);
1094 
1095 void ide_exec_command(ide_hwif_t *, u8);
1096 u8 ide_read_status(ide_hwif_t *);
1097 u8 ide_read_altstatus(ide_hwif_t *);
1098 void ide_write_devctl(ide_hwif_t *, u8);
1099 
1100 void ide_dev_select(ide_drive_t *);
1101 void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
1102 void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
1103 
1104 void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
1105 void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
1106 
1107 void SELECT_MASK(ide_drive_t *, int);
1108 
1109 u8 ide_read_error(ide_drive_t *);
1110 void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
1111 
1112 int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
1113 
1114 int ide_check_atapi_device(ide_drive_t *, const char *);
1115 
1116 void ide_init_pc(struct ide_atapi_pc *);
1117 
1118 /* Disk head parking */
1119 extern wait_queue_head_t ide_park_wq;
1120 ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
1121 		      char *buf);
1122 ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
1123 		       const char *buf, size_t len);
1124 
1125 /*
1126  * Special requests for ide-tape block device strategy routine.
1127  *
1128  * In order to service a character device command, we add special requests to
1129  * the tail of our block device request queue and wait for their completion.
1130  */
1131 enum {
1132 	REQ_IDETAPE_PC1		= (1 << 0), /* packet command (first stage) */
1133 	REQ_IDETAPE_PC2		= (1 << 1), /* packet command (second stage) */
1134 	REQ_IDETAPE_READ	= (1 << 2),
1135 	REQ_IDETAPE_WRITE	= (1 << 3),
1136 };
1137 
1138 int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
1139 		      void *, unsigned int);
1140 
1141 int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
1142 int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
1143 int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
1144 void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
1145 void ide_retry_pc(ide_drive_t *drive);
1146 
1147 void ide_prep_sense(ide_drive_t *drive, struct request *rq);
1148 int ide_queue_sense_rq(ide_drive_t *drive, void *special);
1149 
1150 int ide_cd_expiry(ide_drive_t *);
1151 
1152 int ide_cd_get_xferlen(struct request *);
1153 
1154 ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
1155 
1156 ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
1157 
1158 void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
1159 
1160 void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
1161 
1162 int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
1163 int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
1164 
1165 int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
1166 
1167 int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
1168 
1169 extern int ide_driveid_update(ide_drive_t *);
1170 extern int ide_config_drive_speed(ide_drive_t *, u8);
1171 extern u8 eighty_ninty_three (ide_drive_t *);
1172 extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
1173 
1174 extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
1175 
1176 extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
1177 
1178 extern void ide_timer_expiry(unsigned long);
1179 extern irqreturn_t ide_intr(int irq, void *dev_id);
1180 extern void do_ide_request(struct request_queue *);
1181 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
1182 
1183 void ide_init_disk(struct gendisk *, ide_drive_t *);
1184 
1185 #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
1186 extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
1187 #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
1188 #else
1189 #define ide_pci_register_driver(d) pci_register_driver(d)
1190 #endif
1191 
1192 static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
1193 {
1194 	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
1195 		return 1;
1196 	return 0;
1197 }
1198 
1199 void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
1200 			 struct ide_hw *, struct ide_hw **);
1201 void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
1202 
1203 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
1204 int ide_pci_set_master(struct pci_dev *, const char *);
1205 unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
1206 int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
1207 int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
1208 #else
1209 static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
1210 				     const struct ide_port_info *d)
1211 {
1212 	return -EINVAL;
1213 }
1214 #endif
1215 
1216 struct ide_pci_enablebit {
1217 	u8	reg;	/* byte pci reg holding the enable-bit */
1218 	u8	mask;	/* mask to isolate the enable-bit */
1219 	u8	val;	/* value of masked reg when "enabled" */
1220 };
1221 
1222 enum {
1223 	/* Uses ISA control ports not PCI ones. */
1224 	IDE_HFLAG_ISA_PORTS		= (1 << 0),
1225 	/* single port device */
1226 	IDE_HFLAG_SINGLE		= (1 << 1),
1227 	/* don't use legacy PIO blacklist */
1228 	IDE_HFLAG_PIO_NO_BLACKLIST	= (1 << 2),
1229 	/* set for the second port of QD65xx */
1230 	IDE_HFLAG_QD_2ND_PORT		= (1 << 3),
1231 	/* use PIO8/9 for prefetch off/on */
1232 	IDE_HFLAG_ABUSE_PREFETCH	= (1 << 4),
1233 	/* use PIO6/7 for fast-devsel off/on */
1234 	IDE_HFLAG_ABUSE_FAST_DEVSEL	= (1 << 5),
1235 	/* use 100-102 and 200-202 PIO values to set DMA modes */
1236 	IDE_HFLAG_ABUSE_DMA_MODES	= (1 << 6),
1237 	/*
1238 	 * keep DMA setting when programming PIO mode, may be used only
1239 	 * for hosts which have separate PIO and DMA timings (ie. PMAC)
1240 	 */
1241 	IDE_HFLAG_SET_PIO_MODE_KEEP_DMA	= (1 << 7),
1242 	/* program host for the transfer mode after programming device */
1243 	IDE_HFLAG_POST_SET_MODE		= (1 << 8),
1244 	/* don't program host/device for the transfer mode ("smart" hosts) */
1245 	IDE_HFLAG_NO_SET_MODE		= (1 << 9),
1246 	/* trust BIOS for programming chipset/device for DMA */
1247 	IDE_HFLAG_TRUST_BIOS_FOR_DMA	= (1 << 10),
1248 	/* host is CS5510/CS5520 */
1249 	IDE_HFLAG_CS5520		= (1 << 11),
1250 	/* ATAPI DMA is unsupported */
1251 	IDE_HFLAG_NO_ATAPI_DMA		= (1 << 12),
1252 	/* set if host is a "non-bootable" controller */
1253 	IDE_HFLAG_NON_BOOTABLE		= (1 << 13),
1254 	/* host doesn't support DMA */
1255 	IDE_HFLAG_NO_DMA		= (1 << 14),
1256 	/* check if host is PCI IDE device before allowing DMA */
1257 	IDE_HFLAG_NO_AUTODMA		= (1 << 15),
1258 	/* host uses MMIO */
1259 	IDE_HFLAG_MMIO			= (1 << 16),
1260 	/* no LBA48 */
1261 	IDE_HFLAG_NO_LBA48		= (1 << 17),
1262 	/* no LBA48 DMA */
1263 	IDE_HFLAG_NO_LBA48_DMA		= (1 << 18),
1264 	/* data FIFO is cleared by an error */
1265 	IDE_HFLAG_ERROR_STOPS_FIFO	= (1 << 19),
1266 	/* serialize ports */
1267 	IDE_HFLAG_SERIALIZE		= (1 << 20),
1268 	/* host is DTC2278 */
1269 	IDE_HFLAG_DTC2278		= (1 << 21),
1270 	/* 4 devices on a single set of I/O ports */
1271 	IDE_HFLAG_4DRIVES		= (1 << 22),
1272 	/* host is TRM290 */
1273 	IDE_HFLAG_TRM290		= (1 << 23),
1274 	/* use 32-bit I/O ops */
1275 	IDE_HFLAG_IO_32BIT		= (1 << 24),
1276 	/* unmask IRQs */
1277 	IDE_HFLAG_UNMASK_IRQS		= (1 << 25),
1278 	IDE_HFLAG_BROKEN_ALTSTATUS	= (1 << 26),
1279 	/* serialize ports if DMA is possible (for sl82c105) */
1280 	IDE_HFLAG_SERIALIZE_DMA		= (1 << 27),
1281 	/* force host out of "simplex" mode */
1282 	IDE_HFLAG_CLEAR_SIMPLEX		= (1 << 28),
1283 	/* DSC overlap is unsupported */
1284 	IDE_HFLAG_NO_DSC		= (1 << 29),
1285 	/* never use 32-bit I/O ops */
1286 	IDE_HFLAG_NO_IO_32BIT		= (1 << 30),
1287 	/* never unmask IRQs */
1288 	IDE_HFLAG_NO_UNMASK_IRQS	= (1 << 31),
1289 };
1290 
1291 #ifdef CONFIG_BLK_DEV_OFFBOARD
1292 # define IDE_HFLAG_OFF_BOARD	0
1293 #else
1294 # define IDE_HFLAG_OFF_BOARD	IDE_HFLAG_NON_BOOTABLE
1295 #endif
1296 
1297 struct ide_port_info {
1298 	char			*name;
1299 
1300 	int			(*init_chipset)(struct pci_dev *);
1301 
1302 	void			(*get_lock)(irq_handler_t, void *);
1303 	void			(*release_lock)(void);
1304 
1305 	void			(*init_iops)(ide_hwif_t *);
1306 	void                    (*init_hwif)(ide_hwif_t *);
1307 	int			(*init_dma)(ide_hwif_t *,
1308 					    const struct ide_port_info *);
1309 
1310 	const struct ide_tp_ops		*tp_ops;
1311 	const struct ide_port_ops	*port_ops;
1312 	const struct ide_dma_ops	*dma_ops;
1313 
1314 	struct ide_pci_enablebit	enablebits[2];
1315 
1316 	hwif_chipset_t		chipset;
1317 
1318 	u16			max_sectors;	/* if < than the default one */
1319 
1320 	u32			host_flags;
1321 
1322 	int			irq_flags;
1323 
1324 	u8			pio_mask;
1325 	u8			swdma_mask;
1326 	u8			mwdma_mask;
1327 	u8			udma_mask;
1328 };
1329 
1330 /*
1331  * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
1332  * requests.
1333  */
1334 struct ide_pm_state {
1335 	/* PM state machine step value, currently driver specific */
1336 	int	pm_step;
1337 	/* requested PM state value (S1, S2, S3, S4, ...) */
1338 	u32	pm_state;
1339 	void*	data;		/* for driver use */
1340 };
1341 
1342 
1343 int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
1344 int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
1345 		     const struct ide_port_info *, void *);
1346 void ide_pci_remove(struct pci_dev *);
1347 
1348 #ifdef CONFIG_PM
1349 int ide_pci_suspend(struct pci_dev *, pm_message_t);
1350 int ide_pci_resume(struct pci_dev *);
1351 #else
1352 #define ide_pci_suspend NULL
1353 #define ide_pci_resume NULL
1354 #endif
1355 
1356 void ide_map_sg(ide_drive_t *, struct ide_cmd *);
1357 void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
1358 
1359 #define BAD_DMA_DRIVE		0
1360 #define GOOD_DMA_DRIVE		1
1361 
1362 struct drive_list_entry {
1363 	const char *id_model;
1364 	const char *id_firmware;
1365 };
1366 
1367 int ide_in_drive_list(u16 *, const struct drive_list_entry *);
1368 
1369 #ifdef CONFIG_BLK_DEV_IDEDMA
1370 int ide_dma_good_drive(ide_drive_t *);
1371 int __ide_dma_bad_drive(ide_drive_t *);
1372 
1373 u8 ide_find_dma_mode(ide_drive_t *, u8);
1374 
1375 static inline u8 ide_max_dma_mode(ide_drive_t *drive)
1376 {
1377 	return ide_find_dma_mode(drive, XFER_UDMA_6);
1378 }
1379 
1380 void ide_dma_off_quietly(ide_drive_t *);
1381 void ide_dma_off(ide_drive_t *);
1382 void ide_dma_on(ide_drive_t *);
1383 int ide_set_dma(ide_drive_t *);
1384 void ide_check_dma_crc(ide_drive_t *);
1385 ide_startstop_t ide_dma_intr(ide_drive_t *);
1386 
1387 int ide_allocate_dma_engine(ide_hwif_t *);
1388 void ide_release_dma_engine(ide_hwif_t *);
1389 
1390 int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
1391 void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
1392 
1393 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
1394 int config_drive_for_dma(ide_drive_t *);
1395 int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
1396 void ide_dma_host_set(ide_drive_t *, int);
1397 int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
1398 extern void ide_dma_start(ide_drive_t *);
1399 int ide_dma_end(ide_drive_t *);
1400 int ide_dma_test_irq(ide_drive_t *);
1401 int ide_dma_sff_timer_expiry(ide_drive_t *);
1402 u8 ide_dma_sff_read_status(ide_hwif_t *);
1403 extern const struct ide_dma_ops sff_dma_ops;
1404 #else
1405 static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
1406 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
1407 
1408 void ide_dma_lost_irq(ide_drive_t *);
1409 ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
1410 
1411 #else
1412 static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
1413 static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
1414 static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
1415 static inline void ide_dma_off(ide_drive_t *drive) { ; }
1416 static inline void ide_dma_on(ide_drive_t *drive) { ; }
1417 static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
1418 static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
1419 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
1420 static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
1421 static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
1422 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
1423 static inline int ide_dma_prepare(ide_drive_t *drive,
1424 				  struct ide_cmd *cmd) { return 1; }
1425 static inline void ide_dma_unmap_sg(ide_drive_t *drive,
1426 				    struct ide_cmd *cmd) { ; }
1427 #endif /* CONFIG_BLK_DEV_IDEDMA */
1428 
1429 #ifdef CONFIG_BLK_DEV_IDEACPI
1430 int ide_acpi_init(void);
1431 bool ide_port_acpi(ide_hwif_t *hwif);
1432 extern int ide_acpi_exec_tfs(ide_drive_t *drive);
1433 extern void ide_acpi_get_timing(ide_hwif_t *hwif);
1434 extern void ide_acpi_push_timing(ide_hwif_t *hwif);
1435 void ide_acpi_init_port(ide_hwif_t *);
1436 void ide_acpi_port_init_devices(ide_hwif_t *);
1437 extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
1438 #else
1439 static inline int ide_acpi_init(void) { return 0; }
1440 static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
1441 static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
1442 static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
1443 static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
1444 static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
1445 static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
1446 static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
1447 #endif
1448 
1449 void ide_register_region(struct gendisk *);
1450 void ide_unregister_region(struct gendisk *);
1451 
1452 void ide_check_nien_quirk_list(ide_drive_t *);
1453 void ide_undecoded_slave(ide_drive_t *);
1454 
1455 void ide_port_apply_params(ide_hwif_t *);
1456 int ide_sysfs_register_port(ide_hwif_t *);
1457 
1458 struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
1459 				unsigned int);
1460 void ide_host_free(struct ide_host *);
1461 int ide_host_register(struct ide_host *, const struct ide_port_info *,
1462 		      struct ide_hw **);
1463 int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
1464 		 struct ide_host **);
1465 void ide_host_remove(struct ide_host *);
1466 int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
1467 void ide_port_unregister_devices(ide_hwif_t *);
1468 void ide_port_scan(ide_hwif_t *);
1469 
1470 static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
1471 {
1472 	return hwif->hwif_data;
1473 }
1474 
1475 static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
1476 {
1477 	hwif->hwif_data = data;
1478 }
1479 
1480 extern void ide_toggle_bounce(ide_drive_t *drive, int on);
1481 
1482 u64 ide_get_lba_addr(struct ide_cmd *, int);
1483 u8 ide_dump_status(ide_drive_t *, const char *, u8);
1484 
1485 struct ide_timing {
1486 	u8  mode;
1487 	u8  setup;	/* t1 */
1488 	u16 act8b;	/* t2 for 8-bit io */
1489 	u16 rec8b;	/* t2i for 8-bit io */
1490 	u16 cyc8b;	/* t0 for 8-bit io */
1491 	u16 active;	/* t2 or tD */
1492 	u16 recover;	/* t2i or tK */
1493 	u16 cycle;	/* t0 */
1494 	u16 udma;	/* t2CYCTYP/2 */
1495 };
1496 
1497 enum {
1498 	IDE_TIMING_SETUP	= (1 << 0),
1499 	IDE_TIMING_ACT8B	= (1 << 1),
1500 	IDE_TIMING_REC8B	= (1 << 2),
1501 	IDE_TIMING_CYC8B	= (1 << 3),
1502 	IDE_TIMING_8BIT		= IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
1503 				  IDE_TIMING_CYC8B,
1504 	IDE_TIMING_ACTIVE	= (1 << 4),
1505 	IDE_TIMING_RECOVER	= (1 << 5),
1506 	IDE_TIMING_CYCLE	= (1 << 6),
1507 	IDE_TIMING_UDMA		= (1 << 7),
1508 	IDE_TIMING_ALL		= IDE_TIMING_SETUP | IDE_TIMING_8BIT |
1509 				  IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
1510 				  IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
1511 };
1512 
1513 struct ide_timing *ide_timing_find_mode(u8);
1514 u16 ide_pio_cycle_time(ide_drive_t *, u8);
1515 void ide_timing_merge(struct ide_timing *, struct ide_timing *,
1516 		      struct ide_timing *, unsigned int);
1517 int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
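
/*
 * Illustrative sketch: host drivers with programmable timings typically
 * compute the timing for the new transfer mode and, when both devices on
 * the cable share timing registers, merge it with the peer's.  Here "T" and
 * "UT" stand for the host's command and UDMA clock periods in nanoseconds,
 * and "peer" would come from ide_get_pair_dev() (defined further below):
 *
 *	struct ide_timing t, p;
 *
 *	ide_timing_compute(drive, speed, &t, T, UT);
 *
 *	if (peer) {
 *		ide_timing_compute(peer, peer->current_speed, &p, T, UT);
 *		ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
 *	}
 */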
1518 
1519 #ifdef CONFIG_IDE_XFER_MODE
1520 int ide_scan_pio_blacklist(char *);
1521 const char *ide_xfer_verbose(u8);
1522 int ide_pio_need_iordy(ide_drive_t *, const u8);
1523 int ide_set_pio_mode(ide_drive_t *, u8);
1524 int ide_set_dma_mode(ide_drive_t *, u8);
1525 void ide_set_pio(ide_drive_t *, u8);
1526 int ide_set_xfer_rate(ide_drive_t *, u8);
1527 #else
1528 static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
1529 static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
1530 #endif
1531 
1532 static inline void ide_set_max_pio(ide_drive_t *drive)
1533 {
1534 	ide_set_pio(drive, 255);
1535 }
1536 
1537 char *ide_media_string(ide_drive_t *);
1538 
1539 extern const struct attribute_group *ide_dev_groups[];
1540 extern struct bus_type ide_bus_type;
1541 extern struct class *ide_port_class;
1542 
1543 static inline void ide_dump_identify(u8 *id)
1544 {
1545 	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
1546 }
1547 
1548 static inline int hwif_to_node(ide_hwif_t *hwif)
1549 {
1550 	return hwif->dev ? dev_to_node(hwif->dev) : -1;
1551 }
1552 
1553 static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
1554 {
1555 	ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
1556 
1557 	return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
1558 }
1559 
1560 static inline void *ide_get_drivedata(ide_drive_t *drive)
1561 {
1562 	return drive->drive_data;
1563 }
1564 
1565 static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
1566 {
1567 	drive->drive_data = data;
1568 }
1569 
1570 #define ide_port_for_each_dev(i, dev, port) \
1571 	for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
1572 
1573 #define ide_port_for_each_present_dev(i, dev, port) \
1574 	for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
1575 		if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
1576 
1577 #define ide_host_for_each_port(i, port, host) \
1578 	for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
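
/*
 * Illustrative sketch: walking every probed device on a port, e.g. from a
 * host driver's init_hwif method:
 *
 *	int i;
 *	ide_drive_t *drive;
 *
 *	ide_port_for_each_present_dev(i, drive, hwif)
 *		printk(KERN_INFO "%s: %s present\n", hwif->name, drive->name);
 */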
1579 
1580 
1581 #endif /* _IDE_H */
1582