/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include "globals.h"
#include "visorchipset.h"
#include "procobjecttree.h"
#include "visorchannel.h"
#include "periodic_work.h"
#include "file.h"
#include "parser.h"
#include "uisutils.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"

#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
#define TEST_VNIC_PHYSITF "eth0"	/* physical network itf for
					 * vnic loopback test */
#define TEST_VNIC_SWITCHNO 1
#define TEST_VNIC_BUSNO 9

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static ulong most_recent_message_jiffies;	/* when we got our last
						 * controlvm message */
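
/* Return s unchanged, or "" when s is NULL (safe for "%s" formatting). */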
static inline char *
NONULLSTR(char *s)
{
	if (s)
		return s;
	return "";
}

static int serverregistered;
static int clientregistered;

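/* Event flags that gate a held-off CHIPSET_READY response; checked as a set
 * by check_chipset_events(), and presumably latched via the "chipsetready"
 * attribute defined below.
 */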
#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct controlvm_message_header g_diag_msg_hdr;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_header g_del_dump_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static ulong g_diagpool_bus_no = 0xffffff;
static ulong g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

/* Only VNIC and VHBA channels are sent to visorclientbus (aka
 * "visorhackbus")
 */
#define FOR_VISORHACKBUS(channel_type_guid) \
	(((uuid_le_cmp(channel_type_guid, \
		       spar_vnic_channel_protocol_uuid) == 0) || \
	  (uuid_le_cmp(channel_type_guid, \
		       spar_vhba_channel_protocol_uuid) == 0)))
#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid, \
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
static struct controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
} controlvm_payload_info;

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
static struct livedump_info {
	struct controlvm_message_header dumpcapture_header;
	struct controlvm_message_header gettextdump_header;
	struct controlvm_message_header dumpcomplete_header;
	BOOL gettextdump_outstanding;
	u32 crc32;
	ulong length;
	atomic_t buffers_in_use;
	ulong destination;
} livedump_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static BOOL controlvm_pending_msg_valid = FALSE;

/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
 * TRANSMIT_FILE PutFile payloads.
 */
static struct kmem_cache *putfile_buffer_list_pool;
static const char putfile_buffer_list_pool_name[] =
	"controlvm_putfile_buffer_list_pool";

/* This identifies a data buffer that has been received via a controlvm message
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};

static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_server_notifiers;
static struct visorchipset_busdev_notifiers busdev_client_notifiers;

static void bus_create_response(ulong bus_no, int response);
static void bus_destroy_response(ulong bus_no, int response);
static void device_create_response(ulong bus_no, ulong dev_no, int response);
static void device_destroy_response(ulong bus_no, ulong dev_no, int response);
static void device_resume_response(ulong bus_no, ulong dev_no, int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1;	/**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};
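
/* Given the group names above, these attributes surface in sysfs as, e.g.,
 * /sys/devices/platform/visorchipset/install/error and
 * /sys/devices/platform/visorchipset/parahotplug/deviceenabled.
 */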

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 tool_action),
		&tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val) != 0)
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 efi_spar_ind), &(efi_spar_indication),
		sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_error),
		&error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_text_id),
		&text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_remaining_steps),
		&remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *)(v);

	kfree(p->name);
	p->name = NULL;

	kfree(p->description);
	p->description = NULL;

	p->state.created = 0;
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *)(v);

	p->state.created = 0;
	memset(p, 0, sizeof(struct visorchipset_device_info));
}

static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;

	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;

	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

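/* Register (or, with NULL notifiers, deregister) the visorbus server's
 * callbacks for bus/device events.  Hands back our responder functions in
 * *responders so the bus driver can acknowledge each notification, and
 * describes this driver in *driver_info.
 */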
void
visorchipset_register_busdev_server(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_server_notifiers, 0,
		       sizeof(busdev_server_notifiers));
		serverregistered = 0;	/* clear flag */
	} else {
		busdev_server_notifiers = *notifiers;
		serverregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);

void
visorchipset_register_busdev_client(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_client_notifiers, 0,
		       sizeof(busdev_client_notifiers));
		clientregistered = 0;	/* clear flag */
	} else {
		busdev_client_notifiers = *notifiers;
		clientregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset(bolts)",
				     "visorchipset", VERSION, NULL);
	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}

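/* Handle CONTROLVM_CHIPSET_INIT.  Only the first such message succeeds; the
 * response advertises which optional features (e.g. parahotplug) this driver
 * supports.
 */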
static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features =
	    inmsg->cmd.init_chipset.
	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

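/* Build a response message: copy the request header, clear the payload
 * fields, and, when response is a negative CONTROLVM_RESP_ERROR_* value,
 * mark the message failed and record the status.
 */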
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet.
	 */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 crash_msg_offset;
	u16 crash_msg_count;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);

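/* Send the deferred controlvm response for a bus create/destroy once the
 * bus driver has reported back, updating (or tearing down) our bus
 * bookkeeping to match the outcome.
 */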
static void
bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
{
	struct visorchipset_bus_info *p = NULL;
	BOOL need_clear = FALSE;

	p = findbus(&bus_info_list, bus_no);
	if (!p)
		return;

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			delbusdevices(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = TRUE;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		delbusdevices(&dev_info_list, bus_no);
	}
}

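/* Respond to a pending DEVICE_CHANGESTATE request.  Unlike the generic
 * responders, the reply must echo the bus/device numbers and the resulting
 * segment state back to Command.
 */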
static void
device_changestate_responder(enum controlvm_id cmd_id,
			     ulong bus_no, ulong dev_no, int response,
			     struct spar_segment_state response_state)
{
	struct visorchipset_device_info *p = NULL;
	struct controlvm_message outmsg;

	p = finddevice(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmd_id)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
		 int response)
{
	struct visorchipset_device_info *p = NULL;
	BOOL need_clear = FALSE;

	p = finddevice(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (response >= 0) {
		if (cmd_id == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_DEVICE_DESTROY)
			need_clear = TRUE;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}

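/* Common tail for the BUS_CREATE/BUS_DESTROY handlers: remember the request
 * header when a response is expected, then notify whichever registered bus
 * drivers care.  A notified driver responds later through busdev_responders;
 * otherwise we respond immediately via bus_responder().
 */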
static void
bus_epilog(u32 bus_no,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, BOOL need_response)
{
	BOOL notified = FALSE;

	struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
							 bus_no);

	if (!bus_info)
		return;

	if (need_response) {
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			/* We can't tell from the bus_create
			 * information which of our 2 bus flavors the
			 * devices on this bus will ultimately end up.
			 * FORTUNATELY, it turns out it is harmless to
			 * send the bus_create to both of them.  We can
			 * narrow things down a little bit, though,
			 * because we know:
			 * - BusDev_Server can handle either server or
			 *   client devices
			 * - BusDev_Client can handle ONLY client devices
			 */
			if (busdev_server_notifiers.bus_create) {
				(*busdev_server_notifiers.bus_create) (bus_no);
				notified = TRUE;
			}
			if ((!bus_info->flags.server) /*client */ &&
			    busdev_client_notifiers.bus_create) {
				(*busdev_client_notifiers.bus_create) (bus_no);
				notified = TRUE;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_server_notifiers.bus_destroy) {
				(*busdev_server_notifiers.bus_destroy) (bus_no);
				notified = TRUE;
			}
			if ((!bus_info->flags.server) /*client */ &&
			    busdev_client_notifiers.bus_destroy) {
				(*busdev_client_notifiers.bus_destroy) (bus_no);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_no, response);
	up(&notifier_lock);
}

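/* Common tail for the device handlers; analogous to bus_epilog(), but it
 * also maps a DEVICE_CHANGESTATE's segment state onto resume/pause
 * notifications, with a uevent special case for a "lite pause" of the
 * diagpool device.
 */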
static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      BOOL need_response, BOOL for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers = NULL;
	BOOL notified = FALSE;

	struct visorchipset_device_info *dev_info =
		finddevice(&dev_info_list, bus_no, dev_no);
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!dev_info)
		return;

	if (for_visorbus)
		notifiers = &busdev_server_notifiers;
	else
		notifiers = &busdev_client_notifiers;
	if (need_response) {
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (bus_no, dev_no);
				notified = TRUE;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
			    segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (bus_no,
								     dev_no);
					notified = TRUE;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (bus_no,
								    dev_no);
					notified = TRUE;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug
					 */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (bus_no, dev_no);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, bus_no, dev_no, response);
	up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info = NULL;

	bus_info = findbus(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;
	bus_info->dev_no = cmd->create_bus.dev_count;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	bus_info->flags.server = inmsg->hdr.flags.server;
	bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
	bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	bus_info->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = findbus(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->configure_bus.bus_no;
	struct visorchipset_bus_info *bus_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = findbus(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		bus_info->partition_handle = cmd->configure_bus.guest_handle;
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->create_device.bus_no;
	ulong dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info = NULL;
	struct visorchipset_bus_info *bus_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = findbus(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	dev_info->chan_info.intr = cmd->create_device.intr;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(bus_no, dev_no, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1,
		      FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->device_change_state.bus_no;
	ulong dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					dev_info->chan_info.channel_type_uuid));
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->destroy_device.bus_no;
	ulong dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					dev_info->chan_info.channel_type_uuid));
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS for
 * success or a negative CONTROLVM_RESP_ERROR_* code for failure.
 */
static int
initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
				  struct controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
	HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}

/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);

int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);

/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);

static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static BOOL
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return FALSE;
		return TRUE;
	}
	return FALSE;
}

/*
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes to
 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
 * to get called, at which point the appropriate CONTROLVM message is
 * retrieved from the list and responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
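/* Note: dev_no packs the PCI device number in its upper bits and the PCI
 * function in its low 3 bits, hence the ">> 3" and "& 0x7" below.
 */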
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}

/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called from the /proc handler, which means the user script has
 * finished the enable/disable.  Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}

/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
				CONTROLVM_RESP_SUCCESS,
				inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
1640
/* Process a controlvm message.
 * Return result:
 *    FALSE - this function will return FALSE only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    TRUE  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static BOOL
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr = 0;
	u32 parm_bytes = 0;
	struct parser_context *parser_ctx = NULL;
	bool local_addr = false;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return TRUE;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr != 0 && parm_bytes != 0) {
		BOOL retry = FALSE;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return FALSE;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
			g_diag_msg_hdr = inmsg.hdr;
			g_devicechangestate_packet = inmsg.cmd;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a response that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return TRUE;
}

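/*
 * A note on the flow above: non-test messages are acknowledged on
 * CONTROLVM_QUEUE_ACK *before* being dispatched.  The ACK only tells the
 * sender that the message was consumed from the queue; any
 * command-specific status travels separately, via the controlvm_respond*()
 * calls made by the individual handlers when response_expected is set.
 */
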
static HOSTADDRESS controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}

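/*
 * The physical address (and size, unused here) of the controlvm channel
 * comes from the hypervisor via the vmcall above; a zero return means no
 * channel was supplied, in which case visorchipset_init() below bails out
 * with -ENODEV.
 */
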
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	BOOL got_command = FALSE;
	BOOL handle_command_failed = FALSE;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;
	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)
		goto cleanup;

	poll_count++;
	if (poll_count < 250)
		goto cleanup;	/* don't start processing until we have
				 * been polled a number of times
				 */

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	/* drain (and discard) any messages on the response queue */
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = FALSE;
			got_command = TRUE;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = FALSE;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = TRUE;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = TRUE;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

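/*
 * Two notes on the polling logic above.  First, time_after() is used
 * rather than a plain comparison so the idle test stays correct when
 * jiffies wraps around.  Second, the rate switching is deliberately
 * one-sided: dropping to the slow rate requires MIN_IDLE_SECONDS of
 * silence, while a single new message (which updates
 * most_recent_message_jiffies) snaps the very next reschedule back to
 * the fast rate.
 */
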
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;

	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create bus message for the storage bus */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for the storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:

	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

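/*
 * Layout of the saved crash material read above, as this code expects it
 * (offsets relative to the start of the controlvm channel):
 *
 *   saved_crash_message_count    u16; must equal CONTROLVM_CRASHMSG_MAX
 *   saved_crash_message_offset   u32; points at two back-to-back
 *                                struct controlvm_message entries:
 *       +0                               CONTROLVM_BUS_CREATE for the
 *                                        storage bus
 *       +sizeof(struct controlvm_message)
 *                                        CONTROLVM_DEVICE_CREATE for the
 *                                        storage device
 *
 * Both messages must carry a non-zero channel_addr, or the crash-kernel
 * bring-up is abandoned with an error postcode.
 */
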
static void
bus_create_response(ulong bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
}

static void
bus_destroy_response(ulong bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
}

static void
device_create_response(ulong bus_no, ulong dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
}

static void
device_destroy_response(ulong bus_no, ulong dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
}

void
visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);

static void
device_resume_response(ulong bus_no, ulong dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_running);
}

BOOL
visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
{
	void *p = findbus(&bus_info_list, bus_no);

	if (!p)
		return FALSE;
	memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
	return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);

BOOL
visorchipset_set_bus_context(ulong bus_no, void *context)
{
	struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);

	if (!p)
		return FALSE;
	p->bus_driver_context = context;
	return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);

BOOL
visorchipset_get_device_info(ulong bus_no, ulong dev_no,
			     struct visorchipset_device_info *dev_info)
{
	void *p = finddevice(&dev_info_list, bus_no, dev_no);

	if (!p)
		return FALSE;
	memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
	return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_get_device_info);

BOOL
visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
{
	struct visorchipset_device_info *p =
	    finddevice(&dev_info_list, bus_no, dev_no);

	if (!p)
		return FALSE;
	p->bus_driver_context = context;
	return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_set_device_context);

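/*
 * Illustrative use of the accessors above from a bus driver ("my_ctx" and
 * the surrounding driver are hypothetical):
 *
 *	struct visorchipset_device_info dev_info;
 *
 *	if (!visorchipset_get_device_info(bus_no, dev_no, &dev_info))
 *		return -ENODEV;
 *	visorchipset_set_device_context(bus_no, dev_no, my_ctx);
 *
 * Note that the "get" calls return a *copy* of the tracked structure, so
 * a context stored later via the "set" calls will not appear in a
 * previously fetched dev_info/bus_info.
 */
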
/* Generic wrapper function for allocating memory from a kmem_cache pool.
 */
void *
visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
			 char *fn, int ln)
{
	gfp_t gfp;
	void *p;

	if (ok_to_block)
		gfp = GFP_KERNEL;
	else
		gfp = GFP_ATOMIC;
	/* __GFP_NORETRY means "ok to fail", meaning
	 * kmem_cache_alloc() can return NULL, implying the caller CAN
	 * cope with failure.  If you do NOT specify __GFP_NORETRY,
	 * Linux will go to extreme measures to get memory for you
	 * (e.g., invoke the OOM killer), which will probably cripple
	 * the system.
	 */
	gfp |= __GFP_NORETRY;
	p = kmem_cache_alloc(pool, gfp);
	if (!p)
		return NULL;

	atomic_inc(&visorchipset_cache_buffers_in_use);
	return p;
}

/* Generic wrapper function for freeing memory from a kmem_cache pool.
 */
void
visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
{
	if (!p)
		return;

	atomic_dec(&visorchipset_cache_buffers_in_use);
	kmem_cache_free(pool, p);
}

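/*
 * Illustrative alloc/free pairing (the pool is whatever kmem_cache the
 * caller owns; fn/ln identify the calling file and line but are unused
 * in the wrappers above):
 *
 *	buf = visorchipset_cache_alloc(pool, FALSE, __FILE__, __LINE__);
 *	if (!buf)
 *		return;		(caller must tolerate failure)
 *	...
 *	visorchipset_cache_free(pool, buf, __FILE__, __LINE__);
 *
 * Passing FALSE for ok_to_block selects GFP_ATOMIC, making the call safe
 * in contexts that cannot sleep; __GFP_NORETRY is always added, so a
 * NULL return is an expected, recoverable outcome.
 */
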
static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char msgtype[64];

	if (sscanf(buf, "%63s", msgtype) != 1)
		return -EINVAL;

	if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
		chipset_events[0] = 1;
		return count;
	} else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
		chipset_events[1] = 1;
		return count;
	}
	return -EINVAL;
}

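/*
 * Example: the support scripts report the two CHIPSET_READY
 * prerequisites by writing the strings above to this attribute (the path
 * below assumes the "visorchipset" platform device registered in
 * visorchipset_init()):
 *
 *   echo MODULES_LOADED > /sys/devices/platform/visorchipset/chipsetready
 *   echo CALLHOMEDISK_MOUNTED > \
 *        /sys/devices/platform/visorchipset/chipsetready
 *
 * Once both events have been recorded, the check in
 * check_chipset_events() (defined elsewhere) passes and
 * controlvm_periodic_work() sends the held CHIPSET_READY response
 * (relevant only when holdchipsetready=1).
 */
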
/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down.  The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	uint id;

	if (kstrtouint(buf, 10, &id) != 0)
		return -EINVAL;

	parahotplug_request_complete(id, 0);
	return count;
}

/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered.  The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	uint id;

	if (kstrtouint(buf, 10, &id) != 0)
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}

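/*
 * Example: when the script finishes, it echoes the request id back in
 * decimal through one of these attributes (paths are illustrative and
 * assume the "visorchipset" platform device plus a "parahotplug"
 * attribute group):
 *
 *   echo $ID > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *   echo $ID > /sys/devices/platform/visorchipset/parahotplug/deviceenabled
 */
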
static int __init
visorchipset_init(void)
{
	int rc = 0, x = 0;
	HOSTADDRESS addr;

	if (!unisys_spar_platform)
		return -ENODEV;

	memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
	memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
	memset(&livedump_info, 0, sizeof(livedump_info));
	atomic_set(&livedump_info.buffers_in_use, 0);

	if (visorchipset_testvnic) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
		rc = x ? x : -ENODEV;	/* test vnic is not supported here */
		goto cleanup;
	}

	addr = controlvm_get_channel_address();
	if (addr != 0) {
		controlvm_channel =
		    visorchannel_create_with_lock
		    (addr,
		     sizeof(struct spar_controlvm_channel_protocol),
		     spar_controlvm_channel_protocol_uuid);
		if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
				visorchannel_get_header(controlvm_channel))) {
			initialize_controlvm_payload();
		} else {
			visorchannel_destroy(controlvm_channel);
			controlvm_channel = NULL;
			return -ENODEV;
		}
	} else {
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));

	putfile_buffer_list_pool =
	    kmem_cache_create(putfile_buffer_list_pool_name,
			      sizeof(struct putfile_buffer_entry),
			      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!putfile_buffer_list_pool) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}
	if (!visorchipset_disable_controlvm) {
		/* if booting in a crash kernel */
		if (visorchipset_crash_kernel)
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  setup_crash_devices_work_queue);
		else
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  controlvm_periodic_work);
		periodic_controlvm_workqueue =
		    create_singlethread_workqueue("visorchipset_controlvm");

		if (!periodic_controlvm_workqueue) {
			POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
					 DIAG_SEVERITY_ERR);
			rc = -ENOMEM;
			goto cleanup;
		}
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
		/* queue_delayed_work() returns false only if the work was
		 * already queued, which would be unexpected here
		 */
		if (!queue_delayed_work(periodic_controlvm_workqueue,
					&periodic_controlvm_work,
					poll_jiffies)) {
			POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
					 DIAG_SEVERITY_ERR);
			rc = -EINVAL;
			goto cleanup;
		}
	}

	visorchipset_platform_device.dev.devt = major_dev;
	rc = platform_device_register(&visorchipset_platform_device);
	if (rc < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
	rc = 0;
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}

static void
visorchipset_exit(void)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	if (!visorchipset_disable_controlvm) {
		cancel_delayed_work(&periodic_controlvm_work);
		flush_workqueue(periodic_controlvm_workqueue);
		destroy_workqueue(periodic_controlvm_workqueue);
		periodic_controlvm_workqueue = NULL;
		destroy_controlvm_payload_info(&controlvm_payload_info);
	}
	if (putfile_buffer_list_pool) {
		kmem_cache_destroy(putfile_buffer_list_pool);
		putfile_buffer_list_pool = NULL;
	}

	cleanup_controlvm_structures();

	memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
}

module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
int visorchipset_testvnic = 0;

module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
int visorchipset_testvnicclient = 0;

module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testmsg,
		 "1 to manufacture the chipset, bus, and switch messages");
int visorchipset_testmsg = 0;

module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");
int visorchipset_major = 0;

module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_serverregwait,
		 "1 to have the module wait for the visor bus to register");
int visorchipset_serverregwait = 0;	/* default is off */

module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
int visorchipset_clientregwait = 1;	/* default is on */

module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testteardown,
		 "1 to test teardown of the chipset, bus, and switch");
int visorchipset_testteardown = 0;	/* default is off */

module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
		   S_IRUGO);
MODULE_PARM_DESC(visorchipset_disable_controlvm,
		 "1 to disable polling of controlVm channel");
int visorchipset_disable_controlvm = 0;	/* default is off */

module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_crash_kernel,
		 "1 means we are running in crash kernel");
int visorchipset_crash_kernel = 0;	/* default is running in non-crash kernel */

module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_holdchipsetready,
		 "1 to hold response to CHIPSET_READY");
int visorchipset_holdchipsetready = 0;	/* default is to send CHIPSET_READY
					 * response immediately
					 */
module_init(visorchipset_init);
module_exit(visorchipset_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);