/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET	0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
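
/*
 * Each register holds four ASCII characters in little-endian byte order
 * (0x73696e55 -> "Unis", 0x70537379 -> "ysSp", 0x34367261 -> "ar64"), so a
 * guest can detect s-Par with a check along these lines (illustrative
 * sketch only, not code from this driver):
 *
 *	cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
 *	if (ebx == UNISYS_SPAR_ID_EBX && ecx == UNISYS_SPAR_ID_ECX &&
 *	    edx == UNISYS_SPAR_ID_EDX)
 *		... we are running under s-Par ...
 */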

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;	/* when we got our last
							 * controlvm message */
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
	unsigned long allocbytes;	/* total bytes allocated for this ctx */
	unsigned long param_bytes;	/* size of the buffered payload */
	u8 *curr;			/* cursor set by parser_param_start() */
	unsigned long bytes_remaining;	/* bytes left from <curr> */
	bool byte_stream;		/* true once payload has been copied */
	char data[0];			/* the payload itself */
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 *ptr;		/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm message
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set with req_list_lock held, and if you set
	 * it <0, it is your responsibility to also free up all of the other
	 * objects in this struct (like input_buffer_list and
	 * active_buf.parser_ctx) before releasing the lock
	 */
	int completion_status;
};

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Notifier callbacks registered by the visorbus driver via
 * visorchipset_register_busdev(), used to signal bus/device events.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visor_device *p, int response);
static void bus_destroy_response(struct visor_device *p, int response);
static void device_create_response(struct visor_device *p, int response);
static void device_destroy_response(struct visor_device *p, int response);
static void device_resume_response(struct visor_device *p, int response);

static void visorchipset_device_pause_response(struct visor_device *p,
					       int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
	.dev.release = visorchipset_dev_release,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

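/*
 * Copy the controlvm message payload at guest-physical address <addr>
 * (<bytes> long) into a freshly allocated parser_context.  <local>
 * indicates that the payload lies within our own OS-controlled memory
 * rather than in a region that must be mapped first.  When the allocation
 * fails, or when buffering the payload would push
 * controlvm_payload_bytes_buffered past MAX_CONTROLVM_PAYLOAD_BYTES,
 * *<retry> is set so the caller knows to retry the same message later.
 * Returns the new context, or NULL on failure.
 */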
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * allocate an extra byte to ensure the payload is
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long)addr);
		memcpy(ctx->data, p, bytes);
	} else {
		void *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}

		mapping = memremap(addr, bytes, MEMREMAP_WB);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy(ctx->data, mapping, bytes);
		release_mem_region(addr, bytes);
		memunmap(mapping);
	}

	ctx->byte_stream = true;
	rc = ctx;
cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		if (ctx) {
			parser_done(ctx);
			ctx = NULL;
		}
	}
	return rc;
}

static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}

/* Identifies which string field within the controlvm parameters payload a
 * parser_param_start() call should select.
 */
enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		goto Away;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}

Away:
	return;
}

static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}

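/*
 * Return a kmalloc()ed, '\0'-terminated copy of the string most recently
 * selected with parser_param_start(), or NULL if there is nothing to copy
 * or the allocation fails.  The caller must kfree() the result.
 */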
static void *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *)value)[value_length] = '\0';
	return value;
}

static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 tool_action),
		&tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 efi_spar_ind), &efi_spar_indication,
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_error),
		&error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_text_id),
		&text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_remaining_steps),
		&remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
	struct visor_device *vdev = to_visor_device(dev);
	struct visor_busdev *id = data;
	u32 bus_no = id->bus_no;
	u32 dev_no = id->dev_no;

	if ((vdev->chipset_bus_no == bus_no) &&
	    (vdev->chipset_dev_no == dev_no))
		return 1;

	return 0;
}
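
/*
 * visorbus_get_device_by_id() - find the visor_device with chipset bus
 * number <bus_no> and device number <dev_no>, resuming the search after
 * <from> when it is non-NULL (which lets callers iterate over matches).
 * Returns NULL when no matching device exists.
 */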
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
			.bus_no = bus_no,
			.dev_no = dev_no
		};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);

static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;

	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;

	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

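/*
 * visorchipset_register_busdev() - called by the visorbus driver to
 * register its notifier callbacks (or to unregister them by passing a
 * NULL <notifiers>), to receive the responder functions it must invoke
 * once each notification has been handled, and, when <driver_info> is
 * supplied, to have it filled in with this chipset driver's identity.
 */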
void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

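/*
 * Handle CONTROLVM_CHIPSET_INIT.  Initialization may only happen once;
 * a repeat attempt is answered with CONTROLVM_RESP_ERROR_ALREADY_DONE.
 * The response advertises the features we support: parahotplug (if
 * Command offered it) plus the "reply" feature bit.
 */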
static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	if (outmsg.hdr.flags.test_message == 1)
		return;

	visorchannel_signalinsert(controlvm_channel,
				  CONTROLVM_QUEUE_REQUEST, &outmsg);
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	visorchannel_signalinsert(controlvm_channel,
				  CONTROLVM_QUEUE_REQUEST, &outmsg);
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	visorchannel_signalinsert(controlvm_channel,
				  CONTROLVM_QUEUE_REQUEST, &outmsg);
}

enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

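/*
 * Send the controlvm response for a completed bus operation, but only when
 * the original message expected a response and its id matches <cmd_id>.
 */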
static void
bus_responder(enum controlvm_id cmd_id,
	      struct controlvm_message_header *pending_msg_hdr,
	      int response)
{
	if (!pending_msg_hdr)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visor_device *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no = p->chipset_bus_no;
	u32 dev_no = p->chipset_dev_no;

	if (!p->pending_msg_hdr)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr->id != cmd_id)
		return;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	visorchannel_signalinsert(controlvm_channel,
				  CONTROLVM_QUEUE_REQUEST, &outmsg);
}

static void
device_responder(enum controlvm_id cmd_id,
		 struct controlvm_message_header *pending_msg_hdr,
		 int response)
{
	if (!pending_msg_hdr)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}

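/*
 * Finish handling a bus-related controlvm message: stash a copy of the
 * message header if a response is expected, then on success hand the bus
 * to the registered visorbus notifier (which is responsible for sending
 * the response through the busdev_responders callbacks); on any failure,
 * or when no notifier is registered, respond here directly.
 */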
static void
bus_epilog(struct visor_device *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/* relying on a valid passed-in response code;
		 * be lazy and re-use msg_hdr for this failure - is this ok??
		 */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		bus_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}

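/*
 * Device-side counterpart of bus_epilog(): stash the message header when a
 * response is expected, then dispatch the create/destroy/changestate event
 * to the registered notifier, or respond directly on failure.  For
 * CONTROLVM_DEVICE_CHANGESTATE, the segment <state> selects between the
 * device_resume (running) and device_pause (standby) notifiers.
 */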
static void
device_epilog(struct visor_device *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	notifiers = &busdev_notifiers;

	if (!dev_info) {
		/* relying on a valid passed-in response code;
		 * be lazy and re-use msg_hdr for this failure - is this ok??
		 */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = dev_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (dev_info);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (dev_info);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (dev_info);
					notified = true;
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (dev_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		device_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}

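/*
 * Handle CONTROLVM_BUS_CREATE: allocate a visor_device for the new bus,
 * attach a visorchannel for the channel described in the message, and let
 * bus_epilog() notify visorbus and send any expected response.
 */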
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;
		goto cleanup;
	}
	bus_info->visorchannel = visorchannel;

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);

	/* bus_info is freed as part of the busdevice_release function */
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		visorchannel_set_clientpartition(bus_info->visorchannel,
				cmd->configure_bus.guest_handle);
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure if this is the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	visorchannel =
	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
					     cmd->create_device.channel_bytes,
					     GFP_KERNEL,
					     cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;
		goto cleanup;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS, or a
 * negative CONTROLVM_RESP_ERROR code on failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			memunmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		memunmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

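/*
 * Read the request-payload offset and size out of the controlvm channel
 * header and hand them to initialize_controlvm_payload_info().
 */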
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}

/*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}

/*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}

/*
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes to
 * /proc/visorchipset/parahotplug, which causes
 * parahotplug_request_complete to be called, at which point the
 * appropriate CONTROLVM message is retrieved from the list and
 * responded to.
 */
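
/*
 * Illustrative sketch of the user-space side (not code in this driver): a
 * udev rule matching the SPAR_PARAHOTPLUG=1 event would run a script that
 * enables or disables the PCI device named by SPAR_PARAHOTPLUG_BUS /
 * _DEVICE / _FUNCTION, then reports completion by writing the
 * SPAR_PARAHOTPLUG_ID it was given back through the interface mentioned
 * above, which ends up in parahotplug_request_complete().
 */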

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate a unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

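	/* dev_no packs the PCI device number in its upper bits and the
	 * function number in the low three bits, hence the >> 3 and & 0x7
	 * below
	 */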
	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}

/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called from the /proc handler, which means the user script has
 * finished the enable/disable.  Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}

/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
			CONTROLVM_RESP_SUCCESS,
			inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
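
/*
 * End-to-end sketch of a "disable" request, as implemented above and in the
 * sysfs handlers later in this file:
 *   1. a CONTROLVM_DEVICE_CHANGESTATE message with flags.phys_device set
 *      and state.active == 0 arrives;
 *   2. parahotplug_process_message() queues a parahotplug_request and
 *      fires the SPAR_PARAHOTPLUG_* uevent;
 *   3. the guest's support script offlines the device, then writes the
 *      request id back via the parahotplug/devicedisabled attribute;
 *   4. devicedisabled_store() -> parahotplug_request_complete() sends
 *      CONTROLVM_RESP_SUCCESS; if the script never answers,
 *      parahotplug_process_list() eventually responds with
 *      CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT instead.
 */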

/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now; just respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}

static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
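
/*
 * Note on the pattern above: "params" is a stack variable whose physical
 * address is handed to the hypervisor via ISSUE_IO_VMCALL.  This relies on
 * the kernel stack living in the directly-mapped region so that
 * virt_to_phys() is valid -- an assumption the original driver makes, noted
 * here for clarity rather than something the vmcall interface enforces.
 */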

static u64 controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}

static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	poll_count++;
	if (poll_count < 250)
		goto cleanup;	/* do nothing for the first 250 polls */

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	/* drain the response queue; we do not act on these responses */
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel))) {
			got_command = read_controlvm_event(&inmsg);
		} else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

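	/*
	 * Sketch of the crash-recovery area inside the controlvm channel,
	 * as read below (two saved messages, replayed in order):
	 *   saved_crash_message_count  -- must equal CONTROLVM_CRASHMSG_MAX
	 *   saved_crash_message_offset -- points at:
	 *       [0] a CONTROLVM_BUS_CREATE message for the storage bus
	 *       [1] a CONTROLVM_DEVICE_CREATE message for the storage device
	 */
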
	/* make sure visorbus is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create bus message for the storage bus */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for the storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:

	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

static void
bus_create_response(struct visor_device *bus_info, int response)
{
	if (response >= 0)
		bus_info->state.created = 1;

	bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

static void
bus_destroy_response(struct visor_device *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

static void
device_create_response(struct visor_device *dev_info, int response)
{
	if (response >= 0)
		dev_info->state.created = 1;

	device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_destroy_response(struct visor_device *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
visorchipset_device_pause_response(struct visor_device *dev_info,
				   int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_resume_response(struct visor_device *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char msgtype[64];

	if (sscanf(buf, "%63s", msgtype) != 1)
		return -EINVAL;

	if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
		chipset_events[0] = 1;
		return count;
	} else if (!strcmp(msgtype, "MODULES_LOADED")) {
		chipset_events[1] = 1;
		return count;
	}
	return -EINVAL;
}
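
/*
 * Example (illustrative; the sysfs path assumes the platform device is
 * named "visorchipset" and the attribute sits in a "guest" group, both
 * wired up elsewhere in this file):
 *   echo MODULES_LOADED > /sys/devices/platform/visorchipset/guest/chipsetready
 */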

/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 0);
	return count;
}

/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}
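
/*
 * Example acknowledgment from the support script (illustrative; the
 * "parahotplug" group path is assumed): echo back the id it was handed in
 * the SPAR_PARAHOTPLUG_ID uevent variable once the device is down or up:
 *   echo "$SPAR_PARAHOTPLUG_ID" > \
 *       /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *   echo "$SPAR_PARAHOTPLUG_ID" > \
 *       /sys/devices/platform/visorchipset/parahotplug/deviceenabled
 */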

static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		visorchannel_read(*file_controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 gp_control_channel),
			&addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
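
/*
 * Illustrative userspace mapping of the GP control channel (the device node
 * path is hypothetical and depends on how /dev is populated; the only
 * offset supported above is VISORCHIPSET_MMAP_CONTROLCHANOFFSET, i.e. 0):
 *
 *   int fd = open("/dev/visorchipset", O_RDWR);
 *   void *chan = mmap(NULL, chan_bytes, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 */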

static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	s64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}

static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}

static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	s64 adjustment;
	s64 vrtc_offset;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the guest's virtual rtc time offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset))) {
			return -EFAULT;
		}
		return 0;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment))) {
			return -EFAULT;
		}
		return issue_vmcall_update_physical_time(adjustment);
	default:
		return -ENOTTY;	/* unknown ioctl command */
	}
}
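
/*
 * Illustrative userspace use of the two ioctls above (device path
 * hypothetical; note the command numbers are the raw VMCALL_* values,
 * not _IOR()/_IOW() encodings):
 *
 *   int64_t off, adj = 0;
 *   ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, &off);
 *   ioctl(fd, VMCALL_UPDATE_PHYSICAL_TIME, &adj);
 */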

static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};

static int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		/* dynamic major device number registration required */
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}

static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int rc = 0;
	u64 addr;
	int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		return -ENODEV;

	memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
							  GFP_KERNEL, uuid);
	if (!controlvm_channel)
		return -ENODEV;
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
		    visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		visorchannel_destroy(controlvm_channel);
		controlvm_channel = NULL;
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);
	periodic_controlvm_workqueue =
	    create_singlethread_workqueue("visorchipset_controlvm");

	if (!periodic_controlvm_workqueue) {
		POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
				 DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}
	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	/* queue_delayed_work() returns a bool, not a negative errno */
	if (!queue_delayed_work(periodic_controlvm_workqueue,
				&periodic_controlvm_work, poll_jiffies)) {
		POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
				 DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -ENODEV;
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	rc = visorbus_init();
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}

static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}

static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work(&periodic_controlvm_work);
	flush_workqueue(periodic_controlvm_workqueue);
	destroy_workqueue(periodic_controlvm_workqueue);
	periodic_controlvm_workqueue = NULL;
	destroy_controlvm_payload_info(&controlvm_payload_info);

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}

static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
		},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);

static __init uint32_t visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (cpu_has_hypervisor) {
		/* check the ID */
		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
		return  (ebx == UNISYS_SPAR_ID_EBX) &&
			(ecx == UNISYS_SPAR_ID_ECX) &&
			(edx == UNISYS_SPAR_ID_EDX);
	} else {
		return 0;
	}
}

static int init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;

	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;

	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}

module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(major,
		 "major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorbusregwait,
		 "1 to have the module wait for the visor bus to register");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(holdchipsetready,
		 "1 to hold response to CHIPSET_READY");
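
/*
 * Example load (illustrative; the module name depends on how the Makefile
 * builds this file):
 *   modprobe visorchipset major=0 visorbusregwait=1 holdchipsetready=0
 */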

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);