/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/debugfs.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include "visorbus.h"
#include "iochannel.h"

/* The Send and Receive Buffers of the IO Queue may both be full */

#define IOS_ERROR_THRESHOLD	1000
/* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
 *         = 4800 bytes, rounded up to 2^13 = 8192 bytes
 */
#define MAX_BUF			8192
#define MAX_PENDING_REQUESTS	(MIN_NUMSIGNALS * 2)
#define VISORHBA_ERROR_COUNT	30
#define VISORHBA_OPEN_MAX	1
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
					    (struct scsi_cmnd *));
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
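/* Note: when the SCSI midlayer provides DEF_SCSI_QCMD, it generates
 * visorhba_queue_command() as a wrapper that invokes
 * visorhba_queue_command_lck() (taking the Scsi_Host lock on kernels of
 * this era); otherwise the _lck variant is used directly.
 */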
static int visorhba_probe(struct visor_device *dev);
static void visorhba_remove(struct visor_device *dev);
static int visorhba_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func);
static int visorhba_resume(struct visor_device *dev,
			   visorbus_state_complete_func complete_func);

static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset);
static struct dentry *visorhba_debugfs_dir;
static const struct file_operations debugfs_info_fops = {
	.read = info_debugfs_read,
};

/* GUIDs for the HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the SPAR_VHBA channel.
	 */
	{ SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
	{ NULL_UUID_LE, NULL }
};

/* This is used to tell the visor bus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	.channel_interrupt = NULL,
};
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);

struct visor_thread_info {
	struct task_struct *task;
	struct completion has_stopped;
	int id;
};

struct visordisk_info {
	u32 valid;
	u32 channel, id, lun;	/* Disk Path */
	atomic_t ios_threshold;
	atomic_t error_count;
	struct visordisk_info *next;
};

struct scsipending {
	struct uiscmdrsp cmdrsp;
	void *sent;		/* The Data being tracked */
	char cmdtype;		/* Type of pointer that is being stored */
};
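
/* One scsipending slot is claimed per request outstanding at the IOVM; the
 * slot index is carried in the cmdrsp handle field so the response path can
 * locate and free the entry when the IO Service Partition answers (see
 * add_scsipending_entry() and del_scsipending_ent() below).
 */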

/* Work Data for dar_work_queue */
struct diskaddremove {
	u8 add;			/* 0-remove, 1-add */
	struct Scsi_Host *shost; /* Scsi Host for this visorhba instance */
	u32 channel, id, lun;	/* Disk Path */
	struct diskaddremove *next;
};

/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	spinlock_t privlock; /* lock to protect data in devdata */
	bool serverdown;
	bool serverchangingstate;
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	struct visordisk_info head;
	unsigned int max_buff_len;
	int devnum;
	struct visor_thread_info threadinfo;
	int thread_wait_ms;
};

struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};

static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];

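/* Iterate over the visordisk_info list hanging off @list (a visorhba_devdata)
 * and execute the following statement/block for every entry whose
 * channel/id/lun match @match.  Illustrative use, mirroring the error
 * handlers below:
 *
 *	for_each_vdisk_match(vdisk, devdata, scsidev)
 *		atomic_inc(&vdisk->error_count);
 */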
#define for_each_vdisk_match(iter, list, match)			  \
	for (iter = &list->head; iter->next; iter = iter->next) \
		if ((iter->channel == match->channel) &&		  \
		    (iter->id == match->id) &&			  \
		    (iter->lun == match->lun))
/**
 *	visor_thread_start - starts a thread for the device
 *	@thrinfo: The thread to start
 *	@threadfn: Function the thread starts
 *	@thrcontext: Context to pass to the thread, i.e. devdata
 *	@name: string describing name of thread
 *
 *	Starts a thread for the device.
 *
 *	Returns 0 on success, or the error from kthread_run on failure.
 */
static int visor_thread_start(struct visor_thread_info *thrinfo,
			      int (*threadfn)(void *),
			      void *thrcontext, char *name)
{
	/* used to stop the thread */
	init_completion(&thrinfo->has_stopped);
	thrinfo->task = kthread_run(threadfn, thrcontext, name);
	if (IS_ERR(thrinfo->task)) {
		thrinfo->id = 0;
		return PTR_ERR(thrinfo->task);
	}
	thrinfo->id = thrinfo->task->pid;
	return 0;
}

/**
 *	add_scsipending_entry - save off io command that is pending in
 *				Service Partition
 *	@devdata: Pointer to devdata
 *	@cmdtype: Specifies the type of command pending
 *	@new:	The command to be saved
 *
 *	Saves off the io command that is being handled by the Service
 *	Partition so that it can be handled when it completes. If new is
 *	NULL it is assumed the entry refers only to the cmdrsp.
 *	Returns the insert_location where the entry was added, or -1 if
 *	the pending array is full.
 */
static int add_scsipending_entry(struct visorhba_devdata *devdata,
				 char cmdtype, void *new)
{
	unsigned long flags;
	struct scsipending *entry;
	int insert_location;

	spin_lock_irqsave(&devdata->privlock, flags);
	insert_location = devdata->nextinsert;
	while (devdata->pending[insert_location].sent) {
		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
		if (insert_location == (int)devdata->nextinsert) {
			spin_unlock_irqrestore(&devdata->privlock, flags);
			return -1;
		}
	}

	entry = &devdata->pending[insert_location];
	memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
	entry->cmdtype = cmdtype;
	if (new)
		entry->sent = new;
	else /* wants to send cmdrsp */
		entry->sent = &entry->cmdrsp;
	devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
	spin_unlock_irqrestore(&devdata->privlock, flags);

	return insert_location;
}

/**
 *	del_scsipending_ent - removes an entry from the pending array
 *	@devdata: Device holding the pending array
 *	@del: Entry to remove
 *
 *	Removes the entry at index del and clears it.
 *	Returns the "sent" pointer that was stored in the entry, or NULL
 *	if del is out of range.
 */
static void *del_scsipending_ent(struct visorhba_devdata *devdata,
				 int del)
{
	unsigned long flags;
	void *sent = NULL;

	if (del < MAX_PENDING_REQUESTS) {
		spin_lock_irqsave(&devdata->privlock, flags);
		sent = devdata->pending[del].sent;

		devdata->pending[del].cmdtype = 0;
		devdata->pending[del].sent = NULL;
		spin_unlock_irqrestore(&devdata->privlock, flags);
	}

	return sent;
}

/**
 *	get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
 *	@ddata: Device holding the pending array
 *	@ent: Entry that stores the cmdrsp
 *
 *	Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
 *	if the "sent" field is not NULL.
 *	Returns a pointer to the cmdrsp.
 */
static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
						int ent)
{
	if (ddata->pending[ent].sent)
		return &ddata->pending[ent].cmdrsp;

	return NULL;
}

/**
 *	forward_taskmgmt_command - send taskmgmt command to the Service
 *				   Partition
 *	@tasktype: Type of taskmgmt command
 *	@scsicmd: The scsi command whose device the taskmgmt command targets
 *
 *	Create a cmdrsp packet and send it to the Service Partition
 *	that will service this request.
 *	Returns SUCCESS if the command was queued and completed, FAILED
 *	otherwise.
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_cmnd *scsicmd)
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	int notifyresult = 0xffff;
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* specify the event that has to be triggered when this */
	/* cmd is complete */
	cmdrsp->scsitaskmgmt.notify_handle = (u64)&notifyevent;
	cmdrsp->scsitaskmgmt.notifyresult_handle = (u64)&notifyresult;

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART,
				       cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	if (tasktype == TASK_MGMT_ABORT_TASK)
		scsicmd->result = (DID_ABORT << 16);
	else
		scsicmd->result = (DID_RESET << 16);

	scsicmd->scsi_done(scsicmd);

	return SUCCESS;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, scsicmd_id);
	return FAILED;
}

/**
 *	visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
 *	@scsicmd: The scsicmd that needs to be aborted
 *
 *	Returns SUCCESS if the abort was forwarded, FAILED otherwise
 */
static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_ABORT_TASK */
	struct scsi_device *scsidev;
	struct visordisk_info *vdisk;
	struct visorhba_devdata *devdata;

	scsidev = scsicmd->device;
	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
			atomic_inc(&vdisk->error_count);
		else
			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
	}
	return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
}

/**
 *	visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
 *	@scsicmd: The scsicmd that identifies the device to reset
 *
 *	Returns SUCCESS if the reset was forwarded, FAILED otherwise
 */
static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_LUN_RESET */
	struct scsi_device *scsidev;
	struct visordisk_info *vdisk;
	struct visorhba_devdata *devdata;

	scsidev = scsicmd->device;
	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
			atomic_inc(&vdisk->error_count);
		else
			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
	}
	return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
}

/**
 *	visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
 *				     target on the bus
 *	@scsicmd: The scsicmd that identifies the bus to reset
 *
 *	Returns SUCCESS if the reset was forwarded, FAILED otherwise
 */
static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
{
	struct scsi_device *scsidev;
	struct visordisk_info *vdisk;
	struct visorhba_devdata *devdata;

	scsidev = scsicmd->device;
	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
			atomic_inc(&vdisk->error_count);
		else
			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
	}
	return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
}

/**
 *	visorhba_host_reset_handler - Not supported
 *	@scsicmd: The scsicmd that identifies the host to reset
 *
 *	Not supported; always returns SUCCESS.
 */
static int
visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	return SUCCESS;
}

/**
 *	visorhba_get_info
 *	@shp: Scsi host that is requesting information
 *
 *	Returns string with info
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}

/**
 *	visorhba_queue_command_lck -- queues command to the Service Partition
 *	@scsicmd: Command to be queued
 *	@visorhba_cmnd_done: Done command to call when scsicmd is returned
 *
 *	Queues the scsicmd to the Service Partition after converting it to a
 *	uiscmdrsp structure.
 *
 *	Returns success if queued to the Service Partition, otherwise
 *	failure.
 */
static int
visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
			   void (*visorhba_cmnd_done)(struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char op;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;
	int err = 0;

	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);

	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);

	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save the done function to call when the cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);

	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
		err = SCSI_MLQUEUE_DEVICE_BUSY;
		goto err_del_scsipending_ent;
	}

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	op = cdb[0];
	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART,
				       cmdrsp)) {
		/* queue must be full and we aren't going to wait */
		err = SCSI_MLQUEUE_DEVICE_BUSY;
		goto err_del_scsipending_ent;
	}
	return 0;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, insert_location);
	return err;
}

/**
 *	visorhba_slave_alloc - called when new disk is discovered
 *	@scsidev: New disk
 *
 *	Create a new visordisk_info structure and add it to our
 *	list of vdisks.
 *
 *	Returns success when created, otherwise error.
 */
static int visorhba_slave_alloc(struct scsi_device *scsidev)
{
	/* this is called by the midlayer before scan for new devices --
	 * LLD can alloc any struct & do init if needed.
	 */
	struct visordisk_info *vdisk;
	struct visordisk_info *tmpvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	if (!devdata)
		return 0; /* even though we errored, treat as success */

	for_each_vdisk_match(vdisk, devdata, scsidev)
		return 0; /* already allocated; return success */

	tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
	if (!tmpvdisk)
		return -ENOMEM;

	tmpvdisk->channel = scsidev->channel;
	tmpvdisk->id = scsidev->id;
	tmpvdisk->lun = scsidev->lun;
	vdisk->next = tmpvdisk;
	return 0;
}

/**
 *	visorhba_slave_destroy - disk is going away
 *	@scsidev: scsi device going away
 *
 *	Disk is going away, clean up resources.
 *	Returns void.
 */
static void visorhba_slave_destroy(struct scsi_device *scsidev)
{
	/* midlevel calls this after device has been quiesced and
	 * before it is to be deleted.
	 */
	struct visordisk_info *vdisk, *delvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		delvdisk = vdisk->next;
		vdisk->next = delvdisk->next;
		kfree(delvdisk);
		return;
	}
}

static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};

/**
 *	info_debugfs_read - debugfs interface to dump visorhba states
 *	@file: Debug file
 *	@buf: buffer to send back to user
 *	@len: len that can be written to buf
 *	@offset: offset into buf
 *
 *	Dumps information about the visorhba driver and devices
 *	TODO: Make this per vhba
 *	Returns bytes_read
 */
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset)
{
	ssize_t bytes_read = 0;
	int str_pos = 0;
	u64 phys_flags_addr;
	int i;
	struct visorhba_devdata *devdata;
	char *vbuf;

	if (len > MAX_BUF)
		len = MAX_BUF;
	vbuf = kzalloc(len, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
		if (!visorhbas_open[i].devdata)
			continue;

		devdata = visorhbas_open[i].devdata;

		str_pos += scnprintf(vbuf + str_pos,
				len - str_pos, "max_buff_len:%u\n",
				devdata->max_buff_len);

		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				"\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
				devdata->interrupts_rcvd,
				devdata->interrupts_disabled);
		str_pos += scnprintf(vbuf + str_pos,
				len - str_pos, "\ninterrupts_notme = %llu,\n",
				devdata->interrupts_notme);
		phys_flags_addr = virt_to_phys((__force  void *)
					       devdata->flags_addr);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				"flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
				devdata->flags_addr, phys_flags_addr,
				(__le64)readq(devdata->flags_addr));
		str_pos += scnprintf(vbuf + str_pos,
			len - str_pos, "acquire_failed_cnt:%llu\n",
			devdata->acquire_failed_cnt);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
	}

	bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
	kfree(vbuf);
	return bytes_read;
}

/**
 *	visorhba_serverdown_complete - Called when we are done cleaning up
 *				       from serverdown
 *	@devdata: visorhba instance being cleaned up
 *
 *	Called when we are done cleaning up from serverdown: stop processing
 *	the response queue and fail the pending IOs.
 *	Returns void when finished cleaning up
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	kthread_stop(devdata->threadinfo.task);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			cmdrsp = pendingdel->sent;
			cmdrsp->scsitaskmgmt.notifyresult_handle
							= TASK_MGMT_FAILED;
			wake_up_all((wait_queue_head_t *)
				    cmdrsp->scsitaskmgmt.notify_handle);
			break;
		case CMD_VDISKMGMT_TYPE:
			cmdrsp = pendingdel->sent;
			cmdrsp->vdiskmgmt.notifyresult_handle
							= VDISK_MGMT_FAILED;
			wake_up_all((wait_queue_head_t *)
				    cmdrsp->vdiskmgmt.notify_handle);
			break;
		default:
			break;
		}
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}

/**
 *	visorhba_serverdown - Got notified that the IOVM is down
 *	@devdata: visorhba that is being serviced by downed IOVM.
 *
 *	Something happened to the IOVM; mark the state as changing and
 *	clean up this device.
 *	Returns 0 on success, -EINVAL if a state change is already in
 *	progress.
 */
static int visorhba_serverdown(struct visorhba_devdata *devdata)
{
	if (!devdata->serverdown && !devdata->serverchangingstate) {
		devdata->serverchangingstate = true;
		visorhba_serverdown_complete(devdata);
	} else if (devdata->serverchangingstate) {
		return -EINVAL;
	}
	return 0;
}

/**
 *	do_scsi_linuxstat - scsi command returned linuxstat
 *	@cmdrsp: response from IOVM
 *	@scsicmd: Command issued.
 *
 *	Don't log errors for disk-not-present inquiries.
 *	Returns void
 */
static void
do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	struct visorhba_devdata *devdata;
	struct visordisk_info *vdisk;
	struct scsi_device *scsidev;
	struct sense_data *sd;

	scsidev = scsicmd->device;
	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
	sd = (struct sense_data *)scsicmd->sense_buffer;

	/* Do not log errors for disk-not-present inquiries */
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
	    (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
		return;
	/* Update the error count for the matching vdisk */
	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
			atomic_inc(&vdisk->error_count);
			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
		}
	}
}

/**
 *	do_scsi_nolinuxstat - scsi command didn't have linuxstat
 *	@cmdrsp: response from IOVM
 *	@scsicmd: Command issued.
 *
 *	Handle response when no linuxstat was returned
 *	Returns void
 */
static void
do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	struct scsi_device *scsidev;
	unsigned char buf[36];
	struct scatterlist *sg;
	unsigned int i;
	char *this_page;
	char *this_page_orig;
	int bufind = 0;
	struct visordisk_info *vdisk;
	struct visorhba_devdata *devdata;

	scsidev = scsicmd->device;
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
		if (cmdrsp->scsi.no_disk_result == 0)
			return;

		/* Linux scsi code wants a device at Lun 0
		 * to issue report luns, but we don't want
		 * a disk there so we'll present a processor
		 * there.
		 */
		SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
					   scsidev->lun,
					   DEV_DISK_CAPABLE_NOT_PRESENT,
					   DEV_NOT_CAPABLE);

		if (scsi_sg_count(scsicmd) == 0) {
			memcpy(scsi_sglist(scsicmd), buf,
			       cmdrsp->scsi.bufflen);
			return;
		}

		sg = scsi_sglist(scsicmd);
		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
			this_page_orig = kmap_atomic(sg_page(sg + i));
			this_page = (void *)((unsigned long)this_page_orig |
					     sg[i].offset);
			memcpy(this_page, buf + bufind, sg[i].length);
			kunmap_atomic(this_page_orig);
		}
	} else {
		devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
		for_each_vdisk_match(vdisk, devdata, scsidev) {
			if (atomic_read(&vdisk->ios_threshold) > 0) {
				atomic_dec(&vdisk->ios_threshold);
				if (atomic_read(&vdisk->ios_threshold) == 0)
					atomic_set(&vdisk->error_count, 0);
			}
		}
	}
}

/**
 *	complete_scsi_command - complete a scsi command
 *	@cmdrsp: Response from Service Partition
 *	@scsicmd: The scsi command
 *
 *	Response returned by the Service Partition, finish it and send
 *	completion to the scsi midlayer.
 *	Returns void.
 */
static void
complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	/* take what we need out of cmdrsp and complete the scsicmd */
	scsicmd->result = cmdrsp->scsi.linuxstat;
	if (cmdrsp->scsi.linuxstat)
		do_scsi_linuxstat(cmdrsp, scsicmd);
	else
		do_scsi_nolinuxstat(cmdrsp, scsicmd);

	scsicmd->scsi_done(scsicmd);
}

/* DELETE VDISK TASK MGMT COMMANDS */
static inline void complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
{
	/* copy the result of the vdiskmgmt command and
	 * wake up the error handler that is waiting for this
	 */
	cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
	wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify_handle);
}

/**
 *	complete_taskmgmt_command - complete task management
 *	@cmdrsp: Response from the IOVM
 *
 *	Service Partition returned the result of the task management
 *	command. Wake up anyone waiting for it.
 *	Returns void
 */
static inline void complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
{
	/* copy the result of the taskmgmt and
	 * wake up the error handler that is waiting for this
	 */
	cmdrsp->scsitaskmgmt.notifyresult_handle = cmdrsp->scsitaskmgmt.result;
	wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify_handle);
}

static struct work_struct dar_work_queue;
static struct diskaddremove *dar_work_queue_head;
static spinlock_t dar_work_queue_lock; /* Lock to protect dar_work_queue_head */
static unsigned short dar_work_queue_sched;

/**
 *	queue_disk_add_remove - IOSP has sent us an add/remove request
 *	@dar: disk add/remove request
 *
 *	Queue the work needed to add/remove a disk.
 *	Returns void
 */
static inline void queue_disk_add_remove(struct diskaddremove *dar)
{
	unsigned long flags;

	spin_lock_irqsave(&dar_work_queue_lock, flags);
	if (!dar_work_queue_head) {
		dar_work_queue_head = dar;
		dar->next = NULL;
	} else {
		dar->next = dar_work_queue_head;
		dar_work_queue_head = dar;
	}
	if (!dar_work_queue_sched) {
		schedule_work(&dar_work_queue);
		dar_work_queue_sched = 1;
	}
	spin_unlock_irqrestore(&dar_work_queue_lock, flags);
}

/**
 *	process_disk_notify - IOSP has sent a process disk notify event
 *	@shost: Scsi host
 *	@cmdrsp: Response from the IOSP
 *
 *	Queue it to the work queue.
 *	Returns void.
 */
static void process_disk_notify(struct Scsi_Host *shost,
				struct uiscmdrsp *cmdrsp)
{
	struct diskaddremove *dar;

	dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
	if (dar) {
		dar->add = cmdrsp->disknotify.add;
		dar->shost = shost;
		dar->channel = cmdrsp->disknotify.channel;
		dar->id = cmdrsp->disknotify.id;
		dar->lun = cmdrsp->disknotify.lun;
		queue_disk_add_remove(dar);
	}
}

/**
 *	drain_queue - pull responses out of iochannel
 *	@cmdrsp: Response from the IOSP
 *	@devdata: device that owns this iochannel
 *
 *	Pulls responses out of the iochannel and processes them.
 *	Returns void
 */
static void
drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
{
	struct scsi_cmnd *scsicmd;
	struct Scsi_Host *shost = devdata->scsihost;

	while (1) {
		if (!visorchannel_signalremove(devdata->dev->visorchannel,
					       IOCHAN_FROM_IOPART,
					       cmdrsp))
			break; /* queue empty */

		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
			/* scsicmd location is returned by the
			 * deletion
			 */
			scsicmd = del_scsipending_ent(devdata,
						      cmdrsp->scsi.handle);
			if (!scsicmd)
				break;
			/* complete the orig cmd */
			complete_scsi_command(cmdrsp, scsicmd);
		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
			if (!del_scsipending_ent(devdata,
						 cmdrsp->scsitaskmgmt.handle))
				break;
			complete_taskmgmt_command(cmdrsp);
		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
			/* The vHba pointer has no meaning in a
			 * guest partition. Let's be safe and set it
			 * to NULL now. Do not use it here!
			 */
			cmdrsp->disknotify.v_hba = NULL;
			process_disk_notify(shost, cmdrsp);
		} else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
			if (!del_scsipending_ent(devdata,
						 cmdrsp->vdiskmgmt.handle))
				break;
			complete_vdiskmgmt_command(cmdrsp);
		}
		/* cmdrsp is now available for reuse */
	}
}

/**
 *	process_incoming_rsps - Process responses from IOSP
 *	@v: void pointer to visorhba_devdata
 *
 *	Main function for the thread that processes the responses
 *	from the IO Service Partition. When the queue is empty, wait
 *	before checking it again.
 */
static int process_incoming_rsps(void *v)
{
	struct visorhba_devdata *devdata = v;
	struct uiscmdrsp *cmdrsp = NULL;
	const int size = sizeof(*cmdrsp);

	cmdrsp = kmalloc(size, GFP_ATOMIC);
	if (!cmdrsp)
		return -ENOMEM;

	while (1) {
		if (kthread_should_stop())
			break;
		wait_event_interruptible_timeout(
			devdata->rsp_queue, (atomic_read(
					     &devdata->interrupt_rcvd) == 1),
				msecs_to_jiffies(devdata->thread_wait_ms));
		/* drain queue */
		drain_queue(cmdrsp, devdata);
	}
	kfree(cmdrsp);
	return 0;
}

/**
 *	visorhba_pause - function to handle visorbus pause messages
 *	@dev: device that is pausing.
 *	@complete_func: function to call when finished
 *
 *	Something has happened to the IO Service Partition that is
 *	handling this device. Quiet this device and reset commands
 *	so that the Service Partition can be corrected.
 *	Returns 0
 */
static int visorhba_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func)
{
	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);

	visorhba_serverdown(devdata);
	complete_func(dev, 0);
	return 0;
}

/**
 *	visorhba_resume - function called when the IO Service Partition is back
 *	@dev: device that is resuming.
 *	@complete_func: function to call when finished
 *
 *	The IO Service Partition is back and the channel has been wiped,
 *	so re-establish the connection and start processing responses.
 *	Returns 0 on success, error on failure.
 */
static int visorhba_resume(struct visor_device *dev,
			   visorbus_state_complete_func complete_func)
{
	struct visorhba_devdata *devdata;

	devdata = dev_get_drvdata(&dev->device);
	if (!devdata)
		return -EINVAL;

	if (devdata->serverdown && !devdata->serverchangingstate)
		devdata->serverchangingstate = true;

	visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
			   devdata, "vhba_incming");

	devdata->serverdown = false;
	devdata->serverchangingstate = false;

	return 0;
}

/**
 *	visorhba_probe - device has been discovered, do acquire
 *	@dev: visor_device that was discovered
 *
 *	A new HBA was discovered, do the initial connections of it.
 *	Return 0 on success, otherwise error.
 */
static int visorhba_probe(struct visor_device *dev)
{
	struct Scsi_Host *scsihost;
	struct vhba_config_max max;
	struct visorhba_devdata *devdata = NULL;
	int i, err, channel_offset;
	u64 features;

	scsihost = scsi_host_alloc(&visorhba_driver_template,
				   sizeof(*devdata));
	if (!scsihost)
		return -ENODEV;

	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vhba.max);
	err = visorbus_read_channel(dev, channel_offset, &max,
				    sizeof(struct vhba_config_max));
	if (err < 0)
		goto err_scsi_host_put;

	scsihost->max_id = (unsigned)max.max_id;
	scsihost->max_lun = (unsigned)max.max_lun;
	scsihost->cmd_per_lun = (unsigned)max.cmd_per_lun;
	scsihost->max_sectors =
	    (unsigned short)(max.max_io_size >> 9);
	scsihost->sg_tablesize =
	    (unsigned short)(max.max_io_size / PAGE_SIZE);
	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
		scsihost->sg_tablesize = MAX_PHYS_INFO;
	err = scsi_add_host(scsihost, &dev->device);
	if (err < 0)
		goto err_scsi_host_put;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
		if (!visorhbas_open[i].devdata) {
			visorhbas_open[i].devdata = devdata;
			break;
		}
	}

	devdata->dev = dev;
	dev_set_drvdata(&dev->device, devdata);

	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->privlock);
	devdata->serverdown = false;
	devdata->serverchangingstate = false;
	devdata->scsihost = scsihost;

	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_scsi_remove_host;
	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_scsi_remove_host;

	devdata->thread_wait_ms = 2;
	visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
			   devdata, "vhba_incoming");

	scsi_scan_host(scsihost);

	return 0;

err_scsi_remove_host:
	scsi_remove_host(scsihost);

err_scsi_host_put:
	scsi_host_put(scsihost);
	return err;
}

/**
 *	visorhba_remove - remove a visorhba device
 *	@dev: Device to remove
 *
 *	Removes the visorhba device.
 *	Returns void.
 */
static void visorhba_remove(struct visor_device *dev)
{
	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
	struct Scsi_Host *scsihost = NULL;

	if (!devdata)
		return;

	scsihost = devdata->scsihost;
	kthread_stop(devdata->threadinfo.task);
	scsi_remove_host(scsihost);
	scsi_host_put(scsihost);

	dev_set_drvdata(&dev->device, NULL);
}

/**
 *	visorhba_init		- driver init routine
 *
 *	Initialize the visorhba driver and register it with visorbus
 *	to handle s-Par virtual host bus adapters.
 */
static int visorhba_init(void)
{
	struct dentry *ret;
	int rc = -ENOMEM;

	visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
	if (!visorhba_debugfs_dir)
		return -ENOMEM;

	ret = debugfs_create_file("info", S_IRUSR, visorhba_debugfs_dir, NULL,
				  &debugfs_info_fops);

	if (!ret) {
		rc = -EIO;
		goto cleanup_debugfs;
	}

	rc = visorbus_register_visor_driver(&visorhba_driver);
	if (rc)
		goto cleanup_debugfs;

	return rc;

cleanup_debugfs:
	debugfs_remove_recursive(visorhba_debugfs_dir);

	return rc;
}

/**
 *	visorhba_exit	- driver exit routine
 *
 *	Unregister driver from the bus and free up memory.
 */
static void visorhba_exit(void)
{
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}

module_init(visorhba_init);
module_exit(visorhba_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par hba driver");