staging: unisys: refactor parser_init_byteStream
[firefly-linux-kernel-4.4.55.git] drivers/staging/unisys/visorchipset/visorchipset_main.c
1 /* visorchipset_main.c
2  *
3  * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4  * All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14  * NON INFRINGEMENT.  See the GNU General Public License for more
15  * details.
16  */
17
18 #include "globals.h"
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
23 #include "file.h"
24 #include "parser.h"
25 #include "uisutils.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
28
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
33
34 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35 #define TEST_VNIC_PHYSITF "eth0"        /* physical network itf for
36                                          * vnic loopback test */
37 #define TEST_VNIC_SWITCHNO 1
38 #define TEST_VNIC_BUSNO 9
39
40 #define MAX_NAME_SIZE 128
41 #define MAX_IP_SIZE   50
42 #define MAXOUTSTANDINGCHANNELCOMMAND 256
43 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
44 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
45
46 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47  * we switch to slow polling mode.  As soon as we get a controlvm
48  * message, we switch back to fast polling mode.
49  */
50 #define MIN_IDLE_SECONDS 10
51 static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
52 static ulong Most_recent_message_jiffies;       /* when we got our last
53                                                  * controlvm message */
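/* A minimal sketch (illustrative only; nothing in this file calls it) of the
 * fast/slow polling switch described above, expressed with the globals and
 * macros already declared here.  The real transition logic lives in
 * controlvm_periodic_work(); this assumes the usual jiffies/time_after
 * helpers are available via the includes above.
 */
static inline void
poll_rate_update_sketch(BOOL got_controlvm_message)
{
        if (got_controlvm_message) {
                /* saw a message: remember when, and keep polling fast */
                Most_recent_message_jiffies = jiffies;
                poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
        } else if (time_after(jiffies, Most_recent_message_jiffies +
                                       MIN_IDLE_SECONDS * HZ)) {
                /* idle for at least MIN_IDLE_SECONDS: back off to slow polling */
                poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
        }
}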
54 static inline char *
55 NONULLSTR(char *s)
56 {
57         if (s)
58                 return s;
59         return "";
60 }
61
62 static int serverregistered;
63 static int clientregistered;
64
65 #define MAX_CHIPSET_EVENTS 2
66 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
67
68 static struct delayed_work Periodic_controlvm_work;
69 static struct workqueue_struct *Periodic_controlvm_workqueue;
70 static DEFINE_SEMAPHORE(NotifierLock);
71
72 static struct controlvm_message_header g_DiagMsgHdr;
73 static struct controlvm_message_header g_ChipSetMsgHdr;
74 static struct controlvm_message_header g_DelDumpMsgHdr;
75 static const uuid_le UltraDiagPoolChannelProtocolGuid =
76         SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
77 /* 0xffffff is an invalid Bus/Device number */
78 static ulong g_diagpoolBusNo = 0xffffff;
79 static ulong g_diagpoolDevNo = 0xffffff;
80 static struct controlvm_message_packet g_DeviceChangeStatePacket;
81
82 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
83  * "visorhackbus")
84  */
85 #define FOR_VISORHACKBUS(channel_type_guid) \
86         (((uuid_le_cmp(channel_type_guid,\
87                        spar_vnic_channel_protocol_uuid) == 0)\
88         || (uuid_le_cmp(channel_type_guid,\
89                         spar_vhba_channel_protocol_uuid) == 0)))
90 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
91
92 #define is_diagpool_channel(channel_type_guid) \
93          (uuid_le_cmp(channel_type_guid, UltraDiagPoolChannelProtocolGuid) == 0)
94
95 static LIST_HEAD(BusInfoList);
96 static LIST_HEAD(DevInfoList);
97
98 static struct visorchannel *ControlVm_channel;
99
100 struct controlvm_payload_info {
101         u8 __iomem *ptr;        /* pointer to base address of payload pool */
102         u64 offset;             /* offset from beginning of controlvm
103                                  * channel to beginning of payload pool */
104         u32 bytes;              /* number of bytes in payload pool */
105 };
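/* Note that the payload pool lives inside the controlvm channel itself:
 * initialize_controlvm_payload_info() below fills this struct by mapping
 * (channel physical address + offset) for 'bytes' bytes via ioremap_cache().
 */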
106
107 /* Manages the request payload in the controlvm channel */
108 static struct controlvm_payload_info ControlVm_payload_info;
109
110 static struct channel_header *Test_Vnic_channel;
111
112 struct livedump_info {
113         struct controlvm_message_header Dumpcapture_header;
114         struct controlvm_message_header Gettextdump_header;
115         struct controlvm_message_header Dumpcomplete_header;
116         BOOL Gettextdump_outstanding;
117         u32 crc32;
118         ulong length;
119         atomic_t buffers_in_use;
120         ulong destination;
121 };
122 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
123  * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
124  */
125 static struct livedump_info LiveDump_info;
126
127 /* The following globals are used to handle the scenario where we are unable to
128  * offload the payload from a controlvm message due to memory requirements.  In
129  * this scenario, we simply stash the controlvm message, then attempt to
130  * process it again the next time controlvm_periodic_work() runs.
131  */
132 static struct controlvm_message ControlVm_Pending_Msg;
133 static BOOL ControlVm_Pending_Msg_Valid = FALSE;
134
135 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
136  * TRANSMIT_FILE PutFile payloads.
137  */
138 static struct kmem_cache *Putfile_buffer_list_pool;
139 static const char Putfile_buffer_list_pool_name[] =
140         "controlvm_putfile_buffer_list_pool";
141
142 /* This identifies a data buffer that has been received via a controlvm message
143  * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
144  */
145 struct putfile_buffer_entry {
146         struct list_head next;  /* putfile_buffer_entry list */
147         struct parser_context *parser_ctx; /* points to input data buffer */
148 };
149
150 /* List of struct putfile_request *, via next_putfile_request member.
151  * Each entry in this list identifies an outstanding TRANSMIT_FILE
152  * conversation.
153  */
154 static LIST_HEAD(Putfile_request_list);
155
156 /* This describes a buffer and its current state of transfer (e.g., how many
157  * bytes have already been supplied as putfile data, and how many bytes are
158  * remaining) for a putfile_request.
159  */
160 struct putfile_active_buffer {
161         /* a payload from a controlvm message, containing a file data buffer */
162         struct parser_context *parser_ctx;
163         /* points within data area of parser_ctx to next byte of data */
164         u8 *pnext;
165         /* # bytes left from <pnext> to the end of this data buffer */
166         size_t bytes_remaining;
167 };
168
169 #define PUTFILE_REQUEST_SIG 0x0906101302281211
170 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
171  * conversation.  Structs of this type are dynamically linked into
172  * <Putfile_request_list>.
173  */
174 struct putfile_request {
175         u64 sig;                /* PUTFILE_REQUEST_SIG */
176
177         /* header from original TransmitFile request */
178         struct controlvm_message_header controlvm_header;
179         u64 file_request_number;        /* from original TransmitFile request */
180
181         /* link to next struct putfile_request */
182         struct list_head next_putfile_request;
183
184         /* most-recent sequence number supplied via a controlvm message */
185         u64 data_sequence_number;
186
187         /* head of putfile_buffer_entry list, which describes the data to be
188          * supplied as putfile data;
189          * - this list is added to when controlvm messages come in that supply
190          * file data
191          * - this list is removed from via the hotplug program that is actually
192          * consuming these buffers to write as file data */
193         struct list_head input_buffer_list;
194         spinlock_t req_list_lock;       /* lock for input_buffer_list */
195
196         /* waiters for input_buffer_list to go non-empty */
197         wait_queue_head_t input_buffer_wq;
198
199         /* data not yet read within current putfile_buffer_entry */
200         struct putfile_active_buffer active_buf;
201
202         /* <0 = failed, 0 = in-progress, >0 = successful; note that this
203          * must be set while holding req_list_lock, and if you set it to a
204          * value <0, it is your responsibility to also free up all of the
205          * other objects in this struct (like input_buffer_list,
206          * active_buf.parser_ctx) before releasing the lock */
207         int completion_status;
208 };
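/* Illustrative sketch only (hypothetical helper, not part of this driver):
 * how completion_status is meant to be updated under req_list_lock, per the
 * comment in the struct above.
 */
static inline void
putfile_request_mark_failed_sketch(struct putfile_request *req, int err)
{
        spin_lock(&req->req_list_lock);
        req->completion_status = err;   /* err < 0 means "failed" */
        /* a real failure path must also release input_buffer_list entries and
         * active_buf.parser_ctx here, before dropping the lock */
        spin_unlock(&req->req_list_lock);
}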
209
210 static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
211
212 struct parahotplug_request {
213         struct list_head list;
214         int id;
215         unsigned long expiration;
216         struct controlvm_message msg;
217 };
218
219 static LIST_HEAD(Parahotplug_request_list);
220 static DEFINE_SPINLOCK(Parahotplug_request_list_lock);  /* lock for above */
221 static void parahotplug_process_list(void);
222
223 /* Notifier sets registered by the server and client bus drivers via
224  * visorchipset_register_busdev_server() / _client() below.
225  */
226 static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
227 static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
228
229 static void bus_create_response(ulong busNo, int response);
230 static void bus_destroy_response(ulong busNo, int response);
231 static void device_create_response(ulong busNo, ulong devNo, int response);
232 static void device_destroy_response(ulong busNo, ulong devNo, int response);
233 static void device_resume_response(ulong busNo, ulong devNo, int response);
234
235 static struct visorchipset_busdev_responders BusDev_Responders = {
236         .bus_create = bus_create_response,
237         .bus_destroy = bus_destroy_response,
238         .device_create = device_create_response,
239         .device_destroy = device_destroy_response,
240         .device_pause = visorchipset_device_pause_response,
241         .device_resume = device_resume_response,
242 };
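/* Response flow for the table above: when a notifier fires and the bus driver
 * finishes the requested operation, it calls back through the matching
 * BusDev_Responders entry; those responders land in bus_responder() /
 * device_responder() below, which emit the deferred controlvm response.
 */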
243
244 /* info for /dev/visorchipset */
245 static dev_t MajorDev = -1; /**< indicates major num for device */
246
247 /* prototypes for attributes */
248 static ssize_t toolaction_show(struct device *dev,
249         struct device_attribute *attr, char *buf);
250 static ssize_t toolaction_store(struct device *dev,
251         struct device_attribute *attr, const char *buf, size_t count);
252 static DEVICE_ATTR_RW(toolaction);
253
254 static ssize_t boottotool_show(struct device *dev,
255         struct device_attribute *attr, char *buf);
256 static ssize_t boottotool_store(struct device *dev,
257         struct device_attribute *attr, const char *buf, size_t count);
258 static DEVICE_ATTR_RW(boottotool);
259
260 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
261         char *buf);
262 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
263         const char *buf, size_t count);
264 static DEVICE_ATTR_RW(error);
265
266 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
267         char *buf);
268 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
269         const char *buf, size_t count);
270 static DEVICE_ATTR_RW(textid);
271
272 static ssize_t remaining_steps_show(struct device *dev,
273         struct device_attribute *attr, char *buf);
274 static ssize_t remaining_steps_store(struct device *dev,
275         struct device_attribute *attr, const char *buf, size_t count);
276 static DEVICE_ATTR_RW(remaining_steps);
277
278 static ssize_t chipsetready_store(struct device *dev,
279                 struct device_attribute *attr, const char *buf, size_t count);
280 static DEVICE_ATTR_WO(chipsetready);
281
282 static ssize_t devicedisabled_store(struct device *dev,
283                 struct device_attribute *attr, const char *buf, size_t count);
284 static DEVICE_ATTR_WO(devicedisabled);
285
286 static ssize_t deviceenabled_store(struct device *dev,
287                 struct device_attribute *attr, const char *buf, size_t count);
288 static DEVICE_ATTR_WO(deviceenabled);
289
290 static struct attribute *visorchipset_install_attrs[] = {
291         &dev_attr_toolaction.attr,
292         &dev_attr_boottotool.attr,
293         &dev_attr_error.attr,
294         &dev_attr_textid.attr,
295         &dev_attr_remaining_steps.attr,
296         NULL
297 };
298
299 static struct attribute_group visorchipset_install_group = {
300         .name = "install",
301         .attrs = visorchipset_install_attrs
302 };
303
304 static struct attribute *visorchipset_guest_attrs[] = {
305         &dev_attr_chipsetready.attr,
306         NULL
307 };
308
309 static struct attribute_group visorchipset_guest_group = {
310         .name = "guest",
311         .attrs = visorchipset_guest_attrs
312 };
313
314 static struct attribute *visorchipset_parahotplug_attrs[] = {
315         &dev_attr_devicedisabled.attr,
316         &dev_attr_deviceenabled.attr,
317         NULL
318 };
319
320 static struct attribute_group visorchipset_parahotplug_group = {
321         .name = "parahotplug",
322         .attrs = visorchipset_parahotplug_attrs
323 };
324
325 static const struct attribute_group *visorchipset_dev_groups[] = {
326         &visorchipset_install_group,
327         &visorchipset_guest_group,
328         &visorchipset_parahotplug_group,
329         NULL
330 };
331
332 /* /sys/devices/platform/visorchipset */
333 static struct platform_device Visorchipset_platform_device = {
334         .name = "visorchipset",
335         .id = -1,
336         .dev.groups = visorchipset_dev_groups,
337 };
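/* With the named attribute groups above, the resulting sysfs layout is:
 *   /sys/devices/platform/visorchipset/install/{toolaction,boottotool,error,
 *                                               textid,remaining_steps}
 *   /sys/devices/platform/visorchipset/guest/chipsetready
 *   /sys/devices/platform/visorchipset/parahotplug/{devicedisabled,
 *                                                   deviceenabled}
 * so, for example, a write to install/error is handled by error_store().
 */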
338
339 /* Function prototypes */
340 static void controlvm_respond(struct controlvm_message_header *msgHdr,
341                               int response);
342 static void controlvm_respond_chipset_init(
343                 struct controlvm_message_header *msgHdr, int response,
344                 enum ultra_chipset_feature features);
345 static void controlvm_respond_physdev_changestate(
346                 struct controlvm_message_header *msgHdr, int response,
347                 struct spar_segment_state state);
348
349 static ssize_t toolaction_show(struct device *dev,
350                                struct device_attribute *attr,
351                                char *buf)
352 {
353         u8 toolAction;
354
355         visorchannel_read(ControlVm_channel,
356                 offsetof(struct spar_controlvm_channel_protocol,
357                            tool_action), &toolAction, sizeof(u8));
358         return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
359 }
360
361 static ssize_t toolaction_store(struct device *dev,
362                                 struct device_attribute *attr,
363                                 const char *buf, size_t count)
364 {
365         u8 toolAction;
366         int ret;
367
368         if (kstrtou8(buf, 10, &toolAction) != 0)
369                 return -EINVAL;
370
371         ret = visorchannel_write(ControlVm_channel,
372                 offsetof(struct spar_controlvm_channel_protocol, tool_action),
373                 &toolAction, sizeof(u8));
374
375         if (ret)
376                 return ret;
377         return count;
378 }
379
380 static ssize_t boottotool_show(struct device *dev,
381                                struct device_attribute *attr,
382                                char *buf)
383 {
384         struct efi_spar_indication efiSparIndication;
385
386         visorchannel_read(ControlVm_channel,
387                 offsetof(struct spar_controlvm_channel_protocol,
388                         efi_spar_ind), &efiSparIndication,
389                 sizeof(struct efi_spar_indication));
390         return scnprintf(buf, PAGE_SIZE, "%u\n",
391                         efiSparIndication.boot_to_tool);
392 }
393
394 static ssize_t boottotool_store(struct device *dev,
395                                 struct device_attribute *attr,
396                                 const char *buf, size_t count)
397 {
398         int val, ret;
399         struct efi_spar_indication efiSparIndication;
400
401         if (kstrtoint(buf, 10, &val) != 0)
402                 return -EINVAL;
403
404         efiSparIndication.boot_to_tool = val;
405         ret = visorchannel_write(ControlVm_channel,
406                         offsetof(struct spar_controlvm_channel_protocol,
407                                 efi_spar_ind),
408                         &(efiSparIndication),
409                 sizeof(struct efi_spar_indication));
410
411         if (ret)
412                 return ret;
413         return count;
414 }
415
416 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
417                 char *buf)
418 {
419         u32 error;
420
421         visorchannel_read(ControlVm_channel, offsetof(
422                 struct spar_controlvm_channel_protocol, installation_error),
423                 &error, sizeof(u32));
424         return scnprintf(buf, PAGE_SIZE, "%u\n", error);
425 }
426
427 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
428                 const char *buf, size_t count)
429 {
430         u32 error;
431         int ret;
432
433         if (kstrtou32(buf, 10, &error) != 0)
434                 return -EINVAL;
435
436         ret = visorchannel_write(ControlVm_channel,
437                         offsetof(struct spar_controlvm_channel_protocol,
438                                 installation_error),
439                         &error, sizeof(u32));
440         if (ret)
441                 return ret;
442         return count;
443 }
444
445 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
446                 char *buf)
447 {
448         u32 textId;
449
450         visorchannel_read(ControlVm_channel, offsetof(
451                 struct spar_controlvm_channel_protocol, installation_text_id),
452                 &textId, sizeof(u32));
453         return scnprintf(buf, PAGE_SIZE, "%u\n", textId);
454 }
455
456 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
457                 const char *buf, size_t count)
458 {
459         u32 textId;
460         int ret;
461
462         if (kstrtou32(buf, 10, &textId) != 0)
463                 return -EINVAL;
464
465         ret = visorchannel_write(ControlVm_channel,
466                         offsetof(struct spar_controlvm_channel_protocol,
467                                 installation_text_id),
468                         &textId, sizeof(u32));
469         if (ret)
470                 return ret;
471         return count;
472 }
473
474
475 static ssize_t remaining_steps_show(struct device *dev,
476         struct device_attribute *attr, char *buf)
477 {
478         u16 remainingSteps;
479
480         visorchannel_read(ControlVm_channel,
481                 offsetof(struct spar_controlvm_channel_protocol,
482                         installation_remaining_steps),
483                 &remainingSteps,
484                 sizeof(u16));
485         return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
486 }
487
488 static ssize_t remaining_steps_store(struct device *dev,
489         struct device_attribute *attr, const char *buf, size_t count)
490 {
491         u16 remainingSteps;
492         int ret;
493
494         if (kstrtou16(buf, 10, &remainingSteps) != 0)
495                 return -EINVAL;
496
497         ret = visorchannel_write(ControlVm_channel,
498                         offsetof(struct spar_controlvm_channel_protocol,
499                                 installation_remaining_steps),
500                         &remainingSteps, sizeof(u16));
501         if (ret)
502                 return ret;
503         return count;
504 }
505
506 #if 0
507 static void
508 testUnicode(void)
509 {
510         wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
511         char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
512         wchar_t unicode2[99];
        int chrs;
513
514         /* NOTE: Either due to a bug or a feature I don't understand, the
515          *       kernel utf8_mbstowcs() and utf8_wcstombs() do NOT copy the
516          *       trailing NUL byte!!   REALLY!!!!!    Arrrrgggghhhhh
517          */
518
519         LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
520         LOGINF("utf8_wcstombs=%d",
521                chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
522         if (chrs >= 0)
523                 s[chrs] = '\0'; /* GRRRRRRRR */
524         LOGINF("s='%s'", s);
525         LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
526         if (chrs >= 0)
527                 unicode2[chrs] = 0;     /* GRRRRRRRR */
528         if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
529                 LOGINF("strings match... good");
530         else
531                 LOGINF("strings did not match!!");
532 }
533 #endif
534
535 static void
536 busInfo_clear(void *v)
537 {
538         struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
539
540         if (p->proc_object) {
541                 visor_proc_DestroyObject(p->proc_object);
542                 p->proc_object = NULL;
543         }
544         kfree(p->name);
545         p->name = NULL;
546
547         kfree(p->description);
548         p->description = NULL;
549
550         p->state.created = 0;
551         memset(p, 0, sizeof(struct visorchipset_bus_info));
552 }
553
554 static void
555 devInfo_clear(void *v)
556 {
557         struct visorchipset_device_info *p =
558                         (struct visorchipset_device_info *)(v);
559
560         p->state.created = 0;
561         memset(p, 0, sizeof(struct visorchipset_device_info));
562 }
563
564 static u8
565 check_chipset_events(void)
566 {
567         int i;
568         u8 send_msg = 1;
569         /* Check events to determine if response should be sent */
570         for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
571                 send_msg &= chipset_events[i];
572         return send_msg;
573 }
574
575 static void
576 clear_chipset_events(void)
577 {
578         int i;
579         /* Clear chipset_events */
580         for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
581                 chipset_events[i] = 0;
582 }
583
584 void
585 visorchipset_register_busdev_server(
586                         struct visorchipset_busdev_notifiers *notifiers,
587                         struct visorchipset_busdev_responders *responders,
588                         struct ultra_vbus_deviceinfo *driver_info)
589 {
590         down(&NotifierLock);
591         if (notifiers == NULL) {
592                 memset(&BusDev_Server_Notifiers, 0,
593                        sizeof(BusDev_Server_Notifiers));
594                 serverregistered = 0;   /* clear flag */
595         } else {
596                 BusDev_Server_Notifiers = *notifiers;
597                 serverregistered = 1;   /* set flag */
598         }
599         if (responders)
600                 *responders = BusDev_Responders;
601         if (driver_info)
602                 bus_device_info_init(driver_info, "chipset", "visorchipset",
603                                    VERSION, NULL);
604
605         up(&NotifierLock);
606 }
607 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
608
609 void
610 visorchipset_register_busdev_client(
611                         struct visorchipset_busdev_notifiers *notifiers,
612                         struct visorchipset_busdev_responders *responders,
613                         struct ultra_vbus_deviceinfo *driver_info)
614 {
615         down(&NotifierLock);
616         if (notifiers == NULL) {
617                 memset(&BusDev_Client_Notifiers, 0,
618                        sizeof(BusDev_Client_Notifiers));
619                 clientregistered = 0;   /* clear flag */
620         } else {
621                 BusDev_Client_Notifiers = *notifiers;
622                 clientregistered = 1;   /* set flag */
623         }
624         if (responders)
625                 *responders = BusDev_Responders;
626         if (driver_info)
627                 bus_device_info_init(driver_info, "chipset(bolts)",
628                                      "visorchipset", VERSION, NULL);
629         up(&NotifierLock);
630 }
631 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
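/* Typical use from a bus driver (hypothetical caller, shown for context only):
 *
 *      static struct visorchipset_busdev_responders resp;
 *
 *      visorchipset_register_busdev_client(&my_notifiers, &resp, &dev_info);
 *      ...later, once the work for a device_create notification is complete:
 *      resp.device_create(bus_no, dev_no, response);
 *
 * Passing notifiers == NULL de-registers, i.e. clears the stored notifier set.
 */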
632
633 static void
634 cleanup_controlvm_structures(void)
635 {
636         struct visorchipset_bus_info *bi, *tmp_bi;
637         struct visorchipset_device_info *di, *tmp_di;
638
639         list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
640                 busInfo_clear(bi);
641                 list_del(&bi->entry);
642                 kfree(bi);
643         }
644
645         list_for_each_entry_safe(di, tmp_di, &DevInfoList, entry) {
646                 devInfo_clear(di);
647                 list_del(&di->entry);
648                 kfree(di);
649         }
650 }
651
652 static void
653 chipset_init(struct controlvm_message *inmsg)
654 {
655         static int chipset_inited;
656         enum ultra_chipset_feature features = 0;
657         int rc = CONTROLVM_RESP_SUCCESS;
658
659         POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
660         if (chipset_inited) {
661                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
662                 goto Away;
663         }
664         chipset_inited = 1;
665         POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
666
667         /* Set features to indicate we support parahotplug (if Command
668          * also supports it). */
669         features =
670             inmsg->cmd.init_chipset.features &
671             ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
672
673         /* Set the "reply" bit so Command knows this is a
674          * features-aware driver. */
675         features |= ULTRA_CHIPSET_FEATURE_REPLY;
676
677 Away:
678         if (rc < 0)
679                 cleanup_controlvm_structures();
680         if (inmsg->hdr.flags.response_expected)
681                 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
682 }
683
684 static void
685 controlvm_init_response(struct controlvm_message *msg,
686                         struct controlvm_message_header *msgHdr, int response)
687 {
688         memset(msg, 0, sizeof(struct controlvm_message));
689         memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
690         msg->hdr.payload_bytes = 0;
691         msg->hdr.payload_vm_offset = 0;
692         msg->hdr.payload_max_bytes = 0;
693         if (response < 0) {
694                 msg->hdr.flags.failed = 1;
695                 msg->hdr.completion_status = (u32) (-response);
696         }
697 }
698
699 static void
700 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
701 {
702         struct controlvm_message outmsg;
703
704         controlvm_init_response(&outmsg, msgHdr, response);
705         /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
706          * back the deviceChangeState structure in the packet. */
707         if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE
708             && g_DeviceChangeStatePacket.device_change_state.bus_no ==
709             g_diagpoolBusNo
710             && g_DeviceChangeStatePacket.device_change_state.dev_no ==
711             g_diagpoolDevNo)
712                 outmsg.cmd = g_DeviceChangeStatePacket;
713         if (outmsg.hdr.flags.test_message == 1)
714                 return;
715
716         if (!visorchannel_signalinsert(ControlVm_channel,
717                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
718                 return;
719         }
720 }
721
722 static void
723 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
724                                int response,
725                                enum ultra_chipset_feature features)
726 {
727         struct controlvm_message outmsg;
728
729         controlvm_init_response(&outmsg, msgHdr, response);
730         outmsg.cmd.init_chipset.features = features;
731         if (!visorchannel_signalinsert(ControlVm_channel,
732                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
733                 return;
734         }
735 }
736
737 static void controlvm_respond_physdev_changestate(
738                 struct controlvm_message_header *msgHdr, int response,
739                 struct spar_segment_state state)
740 {
741         struct controlvm_message outmsg;
742
743         controlvm_init_response(&outmsg, msgHdr, response);
744         outmsg.cmd.device_change_state.state = state;
745         outmsg.cmd.device_change_state.flags.phys_device = 1;
746         if (!visorchannel_signalinsert(ControlVm_channel,
747                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
748                 return;
749         }
750 }
751
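/* The controlvm channel publishes a "saved crash message" area through
 * saved_crash_message_offset / saved_crash_message_count.  As used below, the
 * CRASH_BUS message is written at the start of that area and the other
 * (device) crash message immediately after it, at
 * offset + sizeof(struct controlvm_message).
 */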
752 void
753 visorchipset_save_message(struct controlvm_message *msg,
754                           enum crash_obj_type type)
755 {
756         u32 localSavedCrashMsgOffset;
757         u16 localSavedCrashMsgCount;
758
759         /* get saved message count */
760         if (visorchannel_read(ControlVm_channel,
761                               offsetof(struct spar_controlvm_channel_protocol,
762                                        saved_crash_message_count),
763                               &localSavedCrashMsgCount, sizeof(u16)) < 0) {
764                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
765                                  POSTCODE_SEVERITY_ERR);
766                 return;
767         }
768
769         if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
770                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
771                                  localSavedCrashMsgCount,
772                                  POSTCODE_SEVERITY_ERR);
773                 return;
774         }
775
776         /* get saved crash message offset */
777         if (visorchannel_read(ControlVm_channel,
778                               offsetof(struct spar_controlvm_channel_protocol,
779                                        saved_crash_message_offset),
780                               &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
781                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
782                                  POSTCODE_SEVERITY_ERR);
783                 return;
784         }
785
786         if (type == CRASH_BUS) {
787                 if (visorchannel_write(ControlVm_channel,
788                                        localSavedCrashMsgOffset,
789                                        msg,
790                                        sizeof(struct controlvm_message)) < 0) {
791                         POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
792                                          POSTCODE_SEVERITY_ERR);
793                         return;
794                 }
795         } else {
796                 if (visorchannel_write(ControlVm_channel,
797                                        localSavedCrashMsgOffset +
798                                        sizeof(struct controlvm_message), msg,
799                                        sizeof(struct controlvm_message)) < 0) {
800                         POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
801                                          POSTCODE_SEVERITY_ERR);
802                         return;
803                 }
804         }
805 }
806 EXPORT_SYMBOL_GPL(visorchipset_save_message);
807
808 static void
809 bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
810 {
811         struct visorchipset_bus_info *p = NULL;
812         BOOL need_clear = FALSE;
813
814         p = findbus(&BusInfoList, busNo);
815         if (!p)
816                 return;
817
818         if (response < 0) {
819                 if ((cmdId == CONTROLVM_BUS_CREATE) &&
820                     (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
821                         /* undo the row we just created... */
822                         delbusdevices(&DevInfoList, busNo);
823         } else {
824                 if (cmdId == CONTROLVM_BUS_CREATE)
825                         p->state.created = 1;
826                 if (cmdId == CONTROLVM_BUS_DESTROY)
827                         need_clear = TRUE;
828         }
829
830         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
831                 return;         /* no controlvm response needed */
832         if (p->pending_msg_hdr.id != (u32) cmdId)
833                 return;
834         controlvm_respond(&p->pending_msg_hdr, response);
835         p->pending_msg_hdr.id = CONTROLVM_INVALID;
836         if (need_clear) {
837                 busInfo_clear(p);
838                 delbusdevices(&DevInfoList, busNo);
839         }
840 }
841
842 static void
843 device_changestate_responder(enum controlvm_id cmdId,
844                              ulong busNo, ulong devNo, int response,
845                              struct spar_segment_state responseState)
846 {
847         struct visorchipset_device_info *p = NULL;
848         struct controlvm_message outmsg;
849
850         p = finddevice(&DevInfoList, busNo, devNo);
851         if (!p)
852                 return;
853         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
854                 return;         /* no controlvm response needed */
855         if (p->pending_msg_hdr.id != cmdId)
856                 return;
857
858         controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
859
860         outmsg.cmd.device_change_state.bus_no = busNo;
861         outmsg.cmd.device_change_state.dev_no = devNo;
862         outmsg.cmd.device_change_state.state = responseState;
863
864         if (!visorchannel_signalinsert(ControlVm_channel,
865                                        CONTROLVM_QUEUE_REQUEST, &outmsg))
866                 return;
867
868         p->pending_msg_hdr.id = CONTROLVM_INVALID;
869 }
870
871 static void
872 device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
873                  int response)
874 {
875         struct visorchipset_device_info *p = NULL;
876         BOOL need_clear = FALSE;
877
878         p = finddevice(&DevInfoList, busNo, devNo);
879         if (!p)
880                 return;
881         if (response >= 0) {
882                 if (cmdId == CONTROLVM_DEVICE_CREATE)
883                         p->state.created = 1;
884                 if (cmdId == CONTROLVM_DEVICE_DESTROY)
885                         need_clear = TRUE;
886         }
887
888         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
889                 return;         /* no controlvm response needed */
890
891         if (p->pending_msg_hdr.id != (u32) cmdId)
892                 return;
893
894         controlvm_respond(&p->pending_msg_hdr, response);
895         p->pending_msg_hdr.id = CONTROLVM_INVALID;
896         if (need_clear)
897                 devInfo_clear(p);
898 }
899
900 static void
901 bus_epilog(u32 busNo,
902            u32 cmd, struct controlvm_message_header *msgHdr,
903            int response, BOOL needResponse)
904 {
905         BOOL notified = FALSE;
906
907         struct visorchipset_bus_info *pBusInfo = findbus(&BusInfoList, busNo);
908
909         if (!pBusInfo)
910                 return;
911
912         if (needResponse) {
913                 memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
914                        sizeof(struct controlvm_message_header));
915         } else
916                 pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
917
918         down(&NotifierLock);
919         if (response == CONTROLVM_RESP_SUCCESS) {
920                 switch (cmd) {
921                 case CONTROLVM_BUS_CREATE:
922                         /* We can't tell from the bus_create
923                          * information which of our 2 bus flavors the
924                          * devices on this bus will ultimately end up
925                          * on.  FORTUNATELY, it turns out it is
926                          * harmless to send the bus_create to both of
927                          * them.  We can narrow things down a little
928                          * bit, though, because we know:
929                          * - BusDev_Server can handle either server
930                          *   or client devices
931                          * - BusDev_Client can handle ONLY client devices */
932                         if (BusDev_Server_Notifiers.bus_create) {
933                                 (*BusDev_Server_Notifiers.bus_create) (busNo);
934                                 notified = TRUE;
935                         }
936                         if ((!pBusInfo->flags.server) /*client */ &&
937                             BusDev_Client_Notifiers.bus_create) {
938                                 (*BusDev_Client_Notifiers.bus_create) (busNo);
939                                 notified = TRUE;
940                         }
941                         break;
942                 case CONTROLVM_BUS_DESTROY:
943                         if (BusDev_Server_Notifiers.bus_destroy) {
944                                 (*BusDev_Server_Notifiers.bus_destroy) (busNo);
945                                 notified = TRUE;
946                         }
947                         if ((!pBusInfo->flags.server) /*client */ &&
948                             BusDev_Client_Notifiers.bus_destroy) {
949                                 (*BusDev_Client_Notifiers.bus_destroy) (busNo);
950                                 notified = TRUE;
951                         }
952                         break;
953                 }
954         }
955         if (notified)
956                 /* The callback function just called above is responsible
957                  * for calling the appropriate visorchipset_busdev_responders
958                  * function, which will call bus_responder()
959                  */
960                 ;
961         else
962                 bus_responder(cmd, busNo, response);
963         up(&NotifierLock);
964 }
965
966 static void
967 device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
968               struct controlvm_message_header *msgHdr, int response,
969               BOOL needResponse, BOOL for_visorbus)
970 {
971         struct visorchipset_busdev_notifiers *notifiers = NULL;
972         BOOL notified = FALSE;
973
974         struct visorchipset_device_info *pDevInfo =
975                 finddevice(&DevInfoList, busNo, devNo);
976         char *envp[] = {
977                 "SPARSP_DIAGPOOL_PAUSED_STATE=1",
978                 NULL
979         };
980
981         if (!pDevInfo)
982                 return;
983
984         if (for_visorbus)
985                 notifiers = &BusDev_Server_Notifiers;
986         else
987                 notifiers = &BusDev_Client_Notifiers;
988         if (needResponse) {
989                 memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
990                        sizeof(struct controlvm_message_header));
991         } else
992                 pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
993
994         down(&NotifierLock);
995         if (response >= 0) {
996                 switch (cmd) {
997                 case CONTROLVM_DEVICE_CREATE:
998                         if (notifiers->device_create) {
999                                 (*notifiers->device_create) (busNo, devNo);
1000                                 notified = TRUE;
1001                         }
1002                         break;
1003                 case CONTROLVM_DEVICE_CHANGESTATE:
1004                         /* ServerReady / ServerRunning / SegmentStateRunning */
1005                         if (state.alive == segment_state_running.alive &&
1006                             state.operating ==
1007                                 segment_state_running.operating) {
1008                                 if (notifiers->device_resume) {
1009                                         (*notifiers->device_resume) (busNo,
1010                                                                      devNo);
1011                                         notified = TRUE;
1012                                 }
1013                         }
1014                         /* ServerNotReady / ServerLost / SegmentStateStandby */
1015                         else if (state.alive == segment_state_standby.alive &&
1016                                  state.operating ==
1017                                  segment_state_standby.operating) {
1018                                 /* technically this is standby case
1019                                  * where server is lost
1020                                  */
1021                                 if (notifiers->device_pause) {
1022                                         (*notifiers->device_pause) (busNo,
1023                                                                     devNo);
1024                                         notified = TRUE;
1025                                 }
1026                         } else if (state.alive == segment_state_paused.alive &&
1027                                    state.operating ==
1028                                    segment_state_paused.operating) {
1029                                 /* this is lite pause where channel is
1030                                  * still valid just 'pause' of it
1031                                  */
1032                                 if (busNo == g_diagpoolBusNo
1033                                     && devNo == g_diagpoolDevNo) {
1034                                         /* this will trigger the
1035                                          * diag_shutdown.sh script in
1036                                          * the visorchipset hotplug */
1037                                         kobject_uevent_env
1038                                             (&Visorchipset_platform_device.dev.
1039                                              kobj, KOBJ_ONLINE, envp);
1040                                 }
1041                         }
1042                         break;
1043                 case CONTROLVM_DEVICE_DESTROY:
1044                         if (notifiers->device_destroy) {
1045                                 (*notifiers->device_destroy) (busNo, devNo);
1046                                 notified = TRUE;
1047                         }
1048                         break;
1049                 }
1050         }
1051         if (notified)
1052                 /* The callback function just called above is responsible
1053                  * for calling the appropriate visorchipset_busdev_responders
1054                  * function, which will call device_responder()
1055                  */
1056                 ;
1057         else
1058                 device_responder(cmd, busNo, devNo, response);
1059         up(&NotifierLock);
1060 }
1061
1062 static void
1063 bus_create(struct controlvm_message *inmsg)
1064 {
1065         struct controlvm_message_packet *cmd = &inmsg->cmd;
1066         ulong busNo = cmd->create_bus.bus_no;
1067         int rc = CONTROLVM_RESP_SUCCESS;
1068         struct visorchipset_bus_info *pBusInfo = NULL;
1069
1070
1071         pBusInfo = findbus(&BusInfoList, busNo);
1072         if (pBusInfo && (pBusInfo->state.created == 1)) {
1073                 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1074                                  POSTCODE_SEVERITY_ERR);
1075                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1076                 goto Away;
1077         }
1078         pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
1079         if (pBusInfo == NULL) {
1080                 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1081                                  POSTCODE_SEVERITY_ERR);
1082                 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1083                 goto Away;
1084         }
1085
1086         INIT_LIST_HEAD(&pBusInfo->entry);
1087         pBusInfo->bus_no = busNo;
1088         pBusInfo->dev_no = cmd->create_bus.dev_count;
1089
1090         POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1091
1092         if (inmsg->hdr.flags.test_message == 1)
1093                 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1094         else
1095                 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1096
1097         pBusInfo->flags.server = inmsg->hdr.flags.server;
1098         pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1099         pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1100         pBusInfo->chan_info.channel_type_uuid =
1101                         cmd->create_bus.bus_data_type_uuid;
1102         pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1103
1104         list_add(&pBusInfo->entry, &BusInfoList);
1105
1106         POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1107
1108 Away:
1109         bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1110                    rc, inmsg->hdr.flags.response_expected == 1);
1111 }
1112
1113 static void
1114 bus_destroy(struct controlvm_message *inmsg)
1115 {
1116         struct controlvm_message_packet *cmd = &inmsg->cmd;
1117         ulong busNo = cmd->destroy_bus.bus_no;
1118         struct visorchipset_bus_info *pBusInfo;
1119         int rc = CONTROLVM_RESP_SUCCESS;
1120
1121         pBusInfo = findbus(&BusInfoList, busNo);
1122         if (!pBusInfo) {
1123                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1124                 goto Away;
1125         }
1126         if (pBusInfo->state.created == 0) {
1127                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1128                 goto Away;
1129         }
1130
1131 Away:
1132         bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1133                    rc, inmsg->hdr.flags.response_expected == 1);
1134 }
1135
1136 static void
1137 bus_configure(struct controlvm_message *inmsg,
1138               struct parser_context *parser_ctx)
1139 {
1140         struct controlvm_message_packet *cmd = &inmsg->cmd;
1141         ulong busNo = cmd->configure_bus.bus_no;
1142         struct visorchipset_bus_info *pBusInfo = NULL;
1143         int rc = CONTROLVM_RESP_SUCCESS;
1144         char s[99];
1145
1146
1147         POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1148
1149         pBusInfo = findbus(&BusInfoList, busNo);
1150         if (!pBusInfo) {
1151                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1152                                  POSTCODE_SEVERITY_ERR);
1153                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1154                 goto Away;
1155         }
1156         if (pBusInfo->state.created == 0) {
1157                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1158                                  POSTCODE_SEVERITY_ERR);
1159                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1160                 goto Away;
1161         }
1162         /* TBD - add this check to other commands also... */
1163         if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1164                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1165                                  POSTCODE_SEVERITY_ERR);
1166                 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1167                 goto Away;
1168         }
1169
1170         pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1171         pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1172         parser_param_start(parser_ctx, PARSERSTRING_NAME);
1173         pBusInfo->name = parser_string_get(parser_ctx);
1174
1175         visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1176         POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1177 Away:
1178         bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1179                    rc, inmsg->hdr.flags.response_expected == 1);
1180 }
1181
1182 static void
1183 my_device_create(struct controlvm_message *inmsg)
1184 {
1185         struct controlvm_message_packet *cmd = &inmsg->cmd;
1186         ulong busNo = cmd->create_device.bus_no;
1187         ulong devNo = cmd->create_device.dev_no;
1188         struct visorchipset_device_info *pDevInfo = NULL;
1189         struct visorchipset_bus_info *pBusInfo = NULL;
1190         int rc = CONTROLVM_RESP_SUCCESS;
1191
1192         pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1193         if (pDevInfo && (pDevInfo->state.created == 1)) {
1194                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1195                                  POSTCODE_SEVERITY_ERR);
1196                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1197                 goto Away;
1198         }
1199         pBusInfo = findbus(&BusInfoList, busNo);
1200         if (!pBusInfo) {
1201                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1202                                  POSTCODE_SEVERITY_ERR);
1203                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1204                 goto Away;
1205         }
1206         if (pBusInfo->state.created == 0) {
1207                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1208                                  POSTCODE_SEVERITY_ERR);
1209                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1210                 goto Away;
1211         }
1212         pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
1213         if (pDevInfo == NULL) {
1214                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1215                                  POSTCODE_SEVERITY_ERR);
1216                 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1217                 goto Away;
1218         }
1219
1220         INIT_LIST_HEAD(&pDevInfo->entry);
1221         pDevInfo->bus_no = busNo;
1222         pDevInfo->dev_no = devNo;
1223         pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1224         POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1225                          POSTCODE_SEVERITY_INFO);
1226
1227         if (inmsg->hdr.flags.test_message == 1)
1228                 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1229         else
1230                 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1231         pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1232         pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1233         pDevInfo->chan_info.channel_type_uuid =
1234                         cmd->create_device.data_type_uuid;
1235         pDevInfo->chan_info.intr = cmd->create_device.intr;
1236         list_add(&pDevInfo->entry, &DevInfoList);
1237         POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1238                          POSTCODE_SEVERITY_INFO);
1239 Away:
1240         /* get the bus and devNo for DiagPool channel */
1241         if (pDevInfo &&
1242             is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1243                 g_diagpoolBusNo = busNo;
1244                 g_diagpoolDevNo = devNo;
1245         }
1246         device_epilog(busNo, devNo, segment_state_running,
1247                       CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1248                       inmsg->hdr.flags.response_expected == 1, pDevInfo ?
1249                       FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid) : FALSE);
1250 }
1251
1252 static void
1253 my_device_changestate(struct controlvm_message *inmsg)
1254 {
1255         struct controlvm_message_packet *cmd = &inmsg->cmd;
1256         ulong busNo = cmd->device_change_state.bus_no;
1257         ulong devNo = cmd->device_change_state.dev_no;
1258         struct spar_segment_state state = cmd->device_change_state.state;
1259         struct visorchipset_device_info *pDevInfo = NULL;
1260         int rc = CONTROLVM_RESP_SUCCESS;
1261
1262         pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1263         if (!pDevInfo) {
1264                 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1265                                  POSTCODE_SEVERITY_ERR);
1266                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1267                 goto Away;
1268         }
1269         if (pDevInfo->state.created == 0) {
1270                 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1271                                  POSTCODE_SEVERITY_ERR);
1272                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1273         }
1274 Away:
1275         if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1276                 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1277                               &inmsg->hdr, rc,
1278                               inmsg->hdr.flags.response_expected == 1,
1279                               FOR_VISORBUS(
1280                                         pDevInfo->chan_info.channel_type_uuid));
1281 }
1282
1283 static void
1284 my_device_destroy(struct controlvm_message *inmsg)
1285 {
1286         struct controlvm_message_packet *cmd = &inmsg->cmd;
1287         ulong busNo = cmd->destroy_device.bus_no;
1288         ulong devNo = cmd->destroy_device.dev_no;
1289         struct visorchipset_device_info *pDevInfo = NULL;
1290         int rc = CONTROLVM_RESP_SUCCESS;
1291
1292         pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1293         if (!pDevInfo) {
1294                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1295                 goto Away;
1296         }
1297         if (pDevInfo->state.created == 0) {
1298                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1299         }
1300
1301 Away:
1302         if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1303                 device_epilog(busNo, devNo, segment_state_running,
1304                               CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1305                               inmsg->hdr.flags.response_expected == 1,
1306                               FOR_VISORBUS(
1307                                         pDevInfo->chan_info.channel_type_uuid));
1308 }
1309
1310 /* When provided with the physical address of the controlvm channel
1311  * (phys_addr), the offset to the payload area we need to manage
1312  * (offset), and the size of this payload area (bytes), fills in the
1313  * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
1314  * success, or a negative CONTROLVM_RESP_ERROR_* code on failure.
1315  */
1316 static int
1317 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1318                                   struct controlvm_payload_info *info)
1319 {
1320         u8 __iomem *payload = NULL;
1321         int rc = CONTROLVM_RESP_SUCCESS;
1322
1323         if (info == NULL) {
1324                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1325                 goto Away;
1326         }
1327         memset(info, 0, sizeof(struct controlvm_payload_info));
1328         if ((offset == 0) || (bytes == 0)) {
1329                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1330                 goto Away;
1331         }
1332         payload = ioremap_cache(phys_addr + offset, bytes);
1333         if (payload == NULL) {
1334                 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1335                 goto Away;
1336         }
1337
1338         info->offset = offset;
1339         info->bytes = bytes;
1340         info->ptr = payload;
1341
1342 Away:
1343         if (rc < 0) {
1344                 if (payload != NULL) {
1345                         iounmap(payload);
1346                         payload = NULL;
1347                 }
1348         }
1349         return rc;
1350 }
1351
1352 static void
1353 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1354 {
1355         if (info->ptr != NULL) {
1356                 iounmap(info->ptr);
1357                 info->ptr = NULL;
1358         }
1359         memset(info, 0, sizeof(struct controlvm_payload_info));
1360 }
1361
1362 static void
1363 initialize_controlvm_payload(void)
1364 {
1365         HOSTADDRESS phys_addr = visorchannel_get_physaddr(ControlVm_channel);
1366         u64 payloadOffset = 0;
1367         u32 payloadBytes = 0;
1368
1369         if (visorchannel_read(ControlVm_channel,
1370                               offsetof(struct spar_controlvm_channel_protocol,
1371                                        request_payload_offset),
1372                               &payloadOffset, sizeof(payloadOffset)) < 0) {
1373                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1374                                  POSTCODE_SEVERITY_ERR);
1375                 return;
1376         }
1377         if (visorchannel_read(ControlVm_channel,
1378                               offsetof(struct spar_controlvm_channel_protocol,
1379                                        request_payload_bytes),
1380                               &payloadBytes, sizeof(payloadBytes)) < 0) {
1381                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1382                                  POSTCODE_SEVERITY_ERR);
1383                 return;
1384         }
1385         initialize_controlvm_payload_info(phys_addr,
1386                                           payloadOffset, payloadBytes,
1387                                           &ControlVm_payload_info);
1388 }
1389
1390 /*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1391  *  Returns CONTROLVM_RESP_xxx code.
1392  */
1393 int
1394 visorchipset_chipset_ready(void)
1395 {
1396         kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1397         return CONTROLVM_RESP_SUCCESS;
1398 }
1399 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1400
1401 int
1402 visorchipset_chipset_selftest(void)
1403 {
1404         char env_selftest[20];
1405         char *envp[] = { env_selftest, NULL };
1406
1407         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1408         kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1409                            envp);
1410         return CONTROLVM_RESP_SUCCESS;
1411 }
1412 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1413
1414 /*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1415  *  Returns CONTROLVM_RESP_xxx code.
1416  */
1417 int
1418 visorchipset_chipset_notready(void)
1419 {
1420         kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1421         return CONTROLVM_RESP_SUCCESS;
1422 }
1423 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1424
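/* Handle CHIPSET_READY: send the online uevent, then either respond
 * immediately or, if the holdchipsetready module parameter is set, save
 * the message header in g_ChipSetMsgHdr so the response can be sent
 * later, once all modules are loaded and disks are mounted.
 */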
1425 static void
1426 chipset_ready(struct controlvm_message_header *msgHdr)
1427 {
1428         int rc = visorchipset_chipset_ready();
1429
1430         if (rc != CONTROLVM_RESP_SUCCESS)
1431                 rc = -rc;
1432         if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1433                 controlvm_respond(msgHdr, rc);
1434         if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1435                 /* Send CHIPSET_READY response when all modules have been loaded
1436                  * and disks mounted for the partition
1437                  */
1438                 g_ChipSetMsgHdr = *msgHdr;
1439         }
1440 }
1441
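/* Handle CHIPSET_SELFTEST: kick off the SPARSP_SELFTEST uevent and
 * respond if a response was requested.
 */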
1442 static void
1443 chipset_selftest(struct controlvm_message_header *msgHdr)
1444 {
1445         int rc = visorchipset_chipset_selftest();
1446
1447         if (rc != CONTROLVM_RESP_SUCCESS)
1448                 rc = -rc;
1449         if (msgHdr->flags.response_expected)
1450                 controlvm_respond(msgHdr, rc);
1451 }
1452
1453 static void
1454 chipset_notready(struct controlvm_message_header *msgHdr)
1455 {
1456         int rc = visorchipset_chipset_notready();
1457
1458         if (rc != CONTROLVM_RESP_SUCCESS)
1459                 rc = -rc;
1460         if (msgHdr->flags.response_expected)
1461                 controlvm_respond(msgHdr, rc);
1462 }
1463
1464 /* Grab the next message from the CONTROLVM_QUEUE_EVENT queue in the
1465  * controlvm channel; returns TRUE only for non-test messages.
1466  */
1467 static BOOL
1468 read_controlvm_event(struct controlvm_message *msg)
1469 {
1470         if (visorchannel_signalremove(ControlVm_channel,
1471                                       CONTROLVM_QUEUE_EVENT, msg)) {
1472                 /* got a message */
1473                 if (msg->hdr.flags.test_message == 1)
1474                         return FALSE;
1475                 return TRUE;
1476         }
1477         return FALSE;
1478 }
1479
1480 /*
1481  * The general parahotplug flow works as follows.  The visorchipset
1482  * driver receives a DEVICE_CHANGESTATE message from Command
1483  * specifying a physical device to enable or disable.  The CONTROLVM
1484  * message handler calls parahotplug_process_message, which adds the
1485  * message to a global list and kicks off a udev event that causes a
1486  * user-level script to enable or disable the specified device.  The
1487  * script then writes to the parahotplug/deviceenabled (or
1488  * parahotplug/devicedisabled) sysfs attribute, which ends up in
1489  * parahotplug_request_complete, at which point the matching CONTROLVM
1490  * message is retrieved from the list and responded to.
1491  */
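/* Purely for illustration (the support script lives outside this
 * driver): the script is expected to eventually echo the
 * SPAR_PARAHOTPLUG_ID it was handed back through sysfs, roughly
 *
 *   echo $SPAR_PARAHOTPLUG_ID > \
 *     /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *
 * which is handled by devicedisabled_store()/deviceenabled_store()
 * further down in this file.
 */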
1492
1493 #define PARAHOTPLUG_TIMEOUT_MS 2000
1494
1495 /*
1496  * Generate a unique int used to match an outstanding CONTROLVM message
1497  * with its later response from the udev script (via sysfs).
1498  */
1499 static int
1500 parahotplug_next_id(void)
1501 {
1502         static atomic_t id = ATOMIC_INIT(0);
1503
1504         return atomic_inc_return(&id);
1505 }
1506
1507 /*
1508  * Returns the time (in jiffies) when a CONTROLVM message on the list
1509  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1510  */
1511 static unsigned long
1512 parahotplug_next_expiration(void)
1513 {
1514         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1515 }
1516
1517 /*
1518  * Create a parahotplug_request, a wrapper around a controlvm_message
1519  * that can be placed on the pending-request list.
1520  */
1521 static struct parahotplug_request *
1522 parahotplug_request_create(struct controlvm_message *msg)
1523 {
1524         struct parahotplug_request *req;
1525
1526         req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
1527         if (req == NULL)
1528                 return NULL;
1529
1530         req->id = parahotplug_next_id();
1531         req->expiration = parahotplug_next_expiration();
1532         req->msg = *msg;
1533
1534         return req;
1535 }
1536
1537 /*
1538  * Free a parahotplug_request.
1539  */
1540 static void
1541 parahotplug_request_destroy(struct parahotplug_request *req)
1542 {
1543         kfree(req);
1544 }
1545
1546 /*
1547  * Fire off a udev event that runs the user-level script to perform the
1548  * enable/disable requested by the CONTROLVM message wrapped in the
1549  * given parahotplug_request.
1550  */
1551 static void
1552 parahotplug_request_kickoff(struct parahotplug_request *req)
1553 {
1554         struct controlvm_message_packet *cmd = &req->msg.cmd;
1555         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1556             env_func[40];
1557         char *envp[] = {
1558                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1559         };
1560
1561         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1562         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1563         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1564                 cmd->device_change_state.state.active);
1565         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1566                 cmd->device_change_state.bus_no);
1567         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1568                 cmd->device_change_state.dev_no >> 3);
1569         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1570                 cmd->device_change_state.dev_no & 0x7);
1571
1572         kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1573                            envp);
1574 }
1575
1576 /*
1577  * Remove any request from the list that's been on there too long and
1578  * respond with an error.
1579  */
1580 static void
1581 parahotplug_process_list(void)
1582 {
1583         struct list_head *pos = NULL;
1584         struct list_head *tmp = NULL;
1585
1586         spin_lock(&Parahotplug_request_list_lock);
1587
1588         list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1589                 struct parahotplug_request *req =
1590                     list_entry(pos, struct parahotplug_request, list);
1591                 if (time_after_eq(jiffies, req->expiration)) {
1592                         list_del(pos);
1593                         if (req->msg.hdr.flags.response_expected)
1594                                 controlvm_respond_physdev_changestate(
1595                                         &req->msg.hdr,
1596                                         CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1597                                         req->msg.cmd.device_change_state.state);
1598                         parahotplug_request_destroy(req);
1599                 }
1600         }
1601
1602         spin_unlock(&Parahotplug_request_list_lock);
1603 }
1604
1605 /*
1606  * Called from the parahotplug sysfs store handlers once the user script
1607  * has finished the enable/disable.  Find the matching identifier, and
1608  * respond to the CONTROLVM message with success.
1609  */
1610 static int
1611 parahotplug_request_complete(int id, u16 active)
1612 {
1613         struct list_head *pos = NULL;
1614         struct list_head *tmp = NULL;
1615
1616         spin_lock(&Parahotplug_request_list_lock);
1617
1618         /* Look for a request matching "id". */
1619         list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1620                 struct parahotplug_request *req =
1621                     list_entry(pos, struct parahotplug_request, list);
1622                 if (req->id == id) {
1623                         /* Found a match.  Remove it from the list and
1624                          * respond.
1625                          */
1626                         list_del(pos);
1627                         spin_unlock(&Parahotplug_request_list_lock);
1628                         req->msg.cmd.device_change_state.state.active = active;
1629                         if (req->msg.hdr.flags.response_expected)
1630                                 controlvm_respond_physdev_changestate(
1631                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1632                                         req->msg.cmd.device_change_state.state);
1633                         parahotplug_request_destroy(req);
1634                         return 0;
1635                 }
1636         }
1637
1638         spin_unlock(&Parahotplug_request_list_lock);
1639         return -1;
1640 }
1641
1642 /*
1643  * Enables or disables a PCI device by kicking off a udev script
1644  */
1645 static void
1646 parahotplug_process_message(struct controlvm_message *inmsg)
1647 {
1648         struct parahotplug_request *req;
1649
1650         req = parahotplug_request_create(inmsg);
1651
1652         if (req == NULL)
1653                 return;
1654
1655         if (inmsg->cmd.device_change_state.state.active) {
1656                 /* For enable messages, just respond with success
1657                 * right away.  This is a bit of a hack, but there are
1658                 * issues with the early enable messages we get (with
1659                 * either the udev script not detecting that the device
1660                 * is up, or not getting called at all).  Fortunately
1661                 * the messages that get lost don't matter anyway, as
1662                 * devices are automatically enabled at
1663                 * initialization.
1664                 */
1665                 parahotplug_request_kickoff(req);
1666                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1667                                 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1668                                 device_change_state.state);
1669                 parahotplug_request_destroy(req);
1670         } else {
1671                 /* For disable messages, add the request to the
1672                 * request list before kicking off the udev script.  It
1673                 * won't get responded to until the script has
1674                 * indicated it's done.
1675                 */
1676                 spin_lock(&Parahotplug_request_list_lock);
1677                 list_add_tail(&(req->list), &Parahotplug_request_list);
1678                 spin_unlock(&Parahotplug_request_list_lock);
1679
1680                 parahotplug_request_kickoff(req);
1681         }
1682 }
1683
1684 /* Process a controlvm message.
1685  * Return result:
1686  *    FALSE - this function will return FALSE only in the case where the
1687  *            controlvm message was NOT processed, but processing must be
1688  *            retried before reading the next controlvm message; a
1689  *            scenario where this can occur is when we need to throttle
1690  *            the allocation of memory in which to copy out controlvm
1691  *            payload data
1692  *    TRUE  - processing of the controlvm message completed,
1693  *            either successfully or with an error.
1694  */
1695 static BOOL
1696 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1697 {
1698         struct controlvm_message_packet *cmd = &inmsg.cmd;
1699         u64 parametersAddr = 0;
1700         u32 parametersBytes = 0;
1701         struct parser_context *parser_ctx = NULL;
1702         BOOL isLocalAddr = FALSE;
1703         struct controlvm_message ackmsg;
1704
1705         /* create parsing context if necessary */
1706         isLocalAddr = (inmsg.hdr.flags.test_message == 1);
1707         if (channel_addr == 0)
1708                 return TRUE;
1709         parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1710         parametersBytes = inmsg.hdr.payload_bytes;
1711
1712         /* Parameter and channel addresses within test messages actually lie
1713          * within our OS-controlled memory.  We need to know that, because it
1714          * makes a difference in how we compute the virtual address.
1715          */
1716         if (parametersAddr != 0 && parametersBytes != 0) {
1717                 BOOL retry = FALSE;
1718
1719                 parser_ctx =
1720                     parser_init_byte_stream(parametersAddr, parametersBytes,
1721                                            isLocalAddr, &retry);
1722                 if (!parser_ctx && retry)
1723                         return FALSE;
1724         }
1725
1726         if (!isLocalAddr) {
1727                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1728                                         CONTROLVM_RESP_SUCCESS);
1729                 if (ControlVm_channel)
1730                         visorchannel_signalinsert(ControlVm_channel,
1731                                                   CONTROLVM_QUEUE_ACK,
1732                                                   &ackmsg);
1733         }
1734         switch (inmsg.hdr.id) {
1735         case CONTROLVM_CHIPSET_INIT:
1736                 chipset_init(&inmsg);
1737                 break;
1738         case CONTROLVM_BUS_CREATE:
1739                 bus_create(&inmsg);
1740                 break;
1741         case CONTROLVM_BUS_DESTROY:
1742                 bus_destroy(&inmsg);
1743                 break;
1744         case CONTROLVM_BUS_CONFIGURE:
1745                 bus_configure(&inmsg, parser_ctx);
1746                 break;
1747         case CONTROLVM_DEVICE_CREATE:
1748                 my_device_create(&inmsg);
1749                 break;
1750         case CONTROLVM_DEVICE_CHANGESTATE:
1751                 if (cmd->device_change_state.flags.phys_device) {
1752                         parahotplug_process_message(&inmsg);
1753                 } else {
1754                         /* save the hdr and cmd structures for later use
1755                          * when sending back the response to Command
1756                          */
1757                         my_device_changestate(&inmsg);
1758                         g_DiagMsgHdr = inmsg.hdr;
1759                         g_DeviceChangeStatePacket = inmsg.cmd;
1760                 }
1761                 break;
1762         case CONTROLVM_DEVICE_DESTROY:
1763                 my_device_destroy(&inmsg);
1764                 break;
1765         case CONTROLVM_DEVICE_CONFIGURE:
1766                 /* no op for now; just send a response saying we passed */
1767                 if (inmsg.hdr.flags.response_expected)
1768                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1769                 break;
1770         case CONTROLVM_CHIPSET_READY:
1771                 chipset_ready(&inmsg.hdr);
1772                 break;
1773         case CONTROLVM_CHIPSET_SELFTEST:
1774                 chipset_selftest(&inmsg.hdr);
1775                 break;
1776         case CONTROLVM_CHIPSET_STOP:
1777                 chipset_notready(&inmsg.hdr);
1778                 break;
1779         default:
1780                 if (inmsg.hdr.flags.response_expected)
1781                         controlvm_respond(&inmsg.hdr,
1782                                           -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1783                 break;
1784         }
1785
1786         if (parser_ctx != NULL) {
1787                 parser_done(parser_ctx);
1788                 parser_ctx = NULL;
1789         }
1790         return TRUE;
1791 }
1792
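/* Ask the hypervisor, via a vmcall, for the guest-physical address of
 * the controlvm channel; returns 0 if the vmcall fails.
 */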
1793 static HOSTADDRESS controlvm_get_channel_address(void)
1794 {
1795         u64 addr = 0;
1796         u32 size = 0;
1797
1798         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1799                 return 0;
1800
1801         return addr;
1802 }
1803
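/* Main polling loop, run from the periodic workqueue: drain the
 * response queue, feed event messages through handle_command()
 * (stashing the message in ControlVm_Pending_Msg when processing must
 * be throttled and retried), expire stale parahotplug requests, and
 * re-queue this work at either the fast or slow polling interval.
 */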
1804 static void
1805 controlvm_periodic_work(struct work_struct *work)
1806 {
1807         struct controlvm_message inmsg;
1808         BOOL gotACommand = FALSE;
1809         BOOL handle_command_failed = FALSE;
1810         static u64 Poll_Count;
1811
1812         /* make sure visorbus server is registered for controlvm callbacks */
1813         if (visorchipset_serverregwait && !serverregistered)
1814                 goto Away;
1815         /* make sure visorclientbus server is registered for controlvm
1816          * callbacks
1817          */
1818         if (visorchipset_clientregwait && !clientregistered)
1819                 goto Away;
1820
1821         Poll_Count++;
1822         if (Poll_Count < 250)
1823                 goto Away;      /* nothing to do for the first 249 polls */
1826
1827         /* Check events to determine if response to CHIPSET_READY
1828          * should be sent
1829          */
1830         if (visorchipset_holdchipsetready
1831             && (g_ChipSetMsgHdr.id != CONTROLVM_INVALID)) {
1832                 if (check_chipset_events() == 1) {
1833                         controlvm_respond(&g_ChipSetMsgHdr, 0);
1834                         clear_chipset_events();
1835                         memset(&g_ChipSetMsgHdr, 0,
1836                                sizeof(struct controlvm_message_header));
1837                 }
1838         }
1839
1840         while (visorchannel_signalremove(ControlVm_channel,
1841                                          CONTROLVM_QUEUE_RESPONSE,
1842                                          &inmsg))
1843                 ;       /* drain and discard any queued responses */
1844         if (!gotACommand) {
1845                 if (ControlVm_Pending_Msg_Valid) {
1846                         /* we throttled processing of a prior
1847                         * msg, so try to process it again
1848                         * rather than reading a new one
1849                         */
1850                         inmsg = ControlVm_Pending_Msg;
1851                         ControlVm_Pending_Msg_Valid = FALSE;
1852                         gotACommand = TRUE;
1853                 } else
1854                         gotACommand = read_controlvm_event(&inmsg);
1855         }
1856
1857         handle_command_failed = FALSE;
1858         while (gotACommand && (!handle_command_failed)) {
1859                 Most_recent_message_jiffies = jiffies;
1860                 if (handle_command(inmsg,
1861                                    visorchannel_get_physaddr
1862                                    (ControlVm_channel)))
1863                         gotACommand = read_controlvm_event(&inmsg);
1864                 else {
1865                         /* this is a scenario where throttling
1866                         * is required, but probably NOT an
1867                         * error...; we stash the current
1868                         * controlvm msg so we will attempt to
1869                         * reprocess it on our next loop
1870                         */
1871                         handle_command_failed = TRUE;
1872                         ControlVm_Pending_Msg = inmsg;
1873                         ControlVm_Pending_Msg_Valid = TRUE;
1874                 }
1875         }
1876
1877         /* parahotplug_worker */
1878         parahotplug_process_list();
1879
1880 Away:
1881
1882         if (time_after(jiffies,
1883                        Most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1884                 /* it's been longer than MIN_IDLE_SECONDS since we
1885                 * processed our last controlvm message; slow down the
1886                 * polling
1887                 */
1888                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1889                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1890         } else {
1891                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1892                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1893         }
1894
1895         queue_delayed_work(Periodic_controlvm_workqueue,
1896                            &Periodic_controlvm_work, poll_jiffies);
1897 }
1898
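/* When booting in a crash kernel, rebuild the storage bus and device
 * from the CONTROLVM messages that were saved in the controlvm channel,
 * instead of entering the normal polling loop.
 */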
1899 static void
1900 setup_crash_devices_work_queue(struct work_struct *work)
1901 {
1903         struct controlvm_message localCrashCreateBusMsg;
1904         struct controlvm_message localCrashCreateDevMsg;
1905         struct controlvm_message msg;
1906         u32 localSavedCrashMsgOffset;
1907         u16 localSavedCrashMsgCount;
1908
1909         /* make sure visorbus server is registered for controlvm callbacks */
1910         if (visorchipset_serverregwait && !serverregistered)
1911                 goto Away;
1912
1913         /* make sure visorclientbus server is registered for controlvm
1914          * callbacks
1915          */
1916         if (visorchipset_clientregwait && !clientregistered)
1917                 goto Away;
1918
1919         POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1920
1921         /* send init chipset msg */
1922         msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1923         msg.cmd.init_chipset.bus_count = 23;
1924         msg.cmd.init_chipset.switch_count = 0;
1925
1926         chipset_init(&msg);
1927
1928         /* get saved message count */
1929         if (visorchannel_read(ControlVm_channel,
1930                               offsetof(struct spar_controlvm_channel_protocol,
1931                                        saved_crash_message_count),
1932                               &localSavedCrashMsgCount, sizeof(u16)) < 0) {
1933                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1934                                  POSTCODE_SEVERITY_ERR);
1935                 return;
1936         }
1937
1938         if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
1939                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1940                                  localSavedCrashMsgCount,
1941                                  POSTCODE_SEVERITY_ERR);
1942                 return;
1943         }
1944
1945         /* get saved crash message offset */
1946         if (visorchannel_read(ControlVm_channel,
1947                               offsetof(struct spar_controlvm_channel_protocol,
1948                                        saved_crash_message_offset),
1949                               &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
1950                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1951                                  POSTCODE_SEVERITY_ERR);
1952                 return;
1953         }
1954
1955         /* read create device message for storage bus offset */
1956         if (visorchannel_read(ControlVm_channel,
1957                               localSavedCrashMsgOffset,
1958                               &localCrashCreateBusMsg,
1959                               sizeof(struct controlvm_message)) < 0) {
1960                 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1961                                  POSTCODE_SEVERITY_ERR);
1962                 return;
1963         }
1964
1965         /* read create device message for storage device */
1966         if (visorchannel_read(ControlVm_channel,
1967                               localSavedCrashMsgOffset +
1968                               sizeof(struct controlvm_message),
1969                               &localCrashCreateDevMsg,
1970                               sizeof(struct controlvm_message)) < 0) {
1971                 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1972                                  POSTCODE_SEVERITY_ERR);
1973                 return;
1974         }
1975
1976         /* reuse IOVM create bus message */
1977         if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0) {
1978                 bus_create(&localCrashCreateBusMsg);
1979         } else {
1980                 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1981                                  POSTCODE_SEVERITY_ERR);
1982                 return;
1983         }
1984
1985         /* reuse create device message for storage device */
1986         if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0) {
1987                 my_device_create(&localCrashCreateDevMsg);
1988         } else {
1989                 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1990                                  POSTCODE_SEVERITY_ERR);
1991                 return;
1992         }
1993         POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1994         return;
1995
1996 Away:
1997
1998         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1999
2000         queue_delayed_work(Periodic_controlvm_workqueue,
2001                            &Periodic_controlvm_work, poll_jiffies);
2002 }
2003
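/* The small *_response() helpers below just forward completion status
 * for the corresponding CONTROLVM message type to the generic bus and
 * device responders.
 */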
2004 static void
2005 bus_create_response(ulong busNo, int response)
2006 {
2007         bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
2008 }
2009
2010 static void
2011 bus_destroy_response(ulong busNo, int response)
2012 {
2013         bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
2014 }
2015
2016 static void
2017 device_create_response(ulong busNo, ulong devNo, int response)
2018 {
2019         device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
2020 }
2021
2022 static void
2023 device_destroy_response(ulong busNo, ulong devNo, int response)
2024 {
2025         device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
2026 }
2027
2028 void
2029 visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
2030 {
2031
2032         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2033                                      bus_no, dev_no, response,
2034                                      segment_state_standby);
2035 }
2036 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2037
2038 static void
2039 device_resume_response(ulong busNo, ulong devNo, int response)
2040 {
2041         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2042                                      busNo, devNo, response,
2043                                      segment_state_running);
2044 }
2045
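/* Exported accessors that copy or update the locally tracked bus and
 * device state (BusInfoList/DevInfoList); each returns FALSE when the
 * requested bus or device is not known.
 */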
2046 BOOL
2047 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2048 {
2049         void *p = findbus(&BusInfoList, bus_no);
2050
2051         if (!p)
2052                 return FALSE;
2053         memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2054         return TRUE;
2055 }
2056 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2057
2058 BOOL
2059 visorchipset_set_bus_context(ulong bus_no, void *context)
2060 {
2061         struct visorchipset_bus_info *p = findbus(&BusInfoList, bus_no);
2062
2063         if (!p)
2064                 return FALSE;
2065         p->bus_driver_context = context;
2066         return TRUE;
2067 }
2068 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2069
2070 BOOL
2071 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2072                              struct visorchipset_device_info *dev_info)
2073 {
2074         void *p = finddevice(&DevInfoList, bus_no, dev_no);
2075
2076         if (!p)
2077                 return FALSE;
2078         memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2079         return TRUE;
2080 }
2081 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2082
2083 BOOL
2084 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2085 {
2086         struct visorchipset_device_info *p =
2087                         finddevice(&DevInfoList, bus_no, dev_no);
2088
2089         if (!p)
2090                 return FALSE;
2091         p->bus_driver_context = context;
2092         return TRUE;
2093 }
2094 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2095
2096 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2097  */
2098 void *
2099 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2100                          char *fn, int ln)
2101 {
2102         gfp_t gfp;
2103         void *p;
2104
2105         if (ok_to_block)
2106                 gfp = GFP_KERNEL;
2107         else
2108                 gfp = GFP_ATOMIC;
2109         /* __GFP_NORETRY means "ok to fail", meaning
2110          * kmem_cache_alloc() can return NULL, implying the caller CAN
2111          * cope with failure.  If you do NOT specify __GFP_NORETRY,
2112          * Linux will go to extreme measures to get memory for you
2113          * (like, invoke oom killer), which will probably cripple the
2114          * system.
2115          */
2116         gfp |= __GFP_NORETRY;
2117         p = kmem_cache_alloc(pool, gfp);
2118         if (!p)
2119                 return NULL;
2120
2121         atomic_inc(&Visorchipset_cache_buffers_in_use);
2122         return p;
2123 }
2124
2125 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2126  */
2127 void
2128 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2129 {
2130         if (!p)
2131                 return;
2132
2133         atomic_dec(&Visorchipset_cache_buffers_in_use);
2134         kmem_cache_free(pool, p);
2135 }
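/* Illustrative use only (callers are outside this excerpt): the fn/ln
 * arguments look intended for __FILE__/__LINE__ bookkeeping, so a
 * caller might do something like
 *
 *   p = visorchipset_cache_alloc(Putfile_buffer_list_pool, TRUE,
 *                                __FILE__, __LINE__);
 *   if (p) {
 *           ...
 *           visorchipset_cache_free(Putfile_buffer_list_pool, p,
 *                                   __FILE__, __LINE__);
 *   }
 */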
2136
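/* sysfs store handler for chipsetready: user space writes either
 * "CALLHOMEDISK_MOUNTED" or "MODULES_LOADED", and the corresponding
 * entry in chipset_events[] is recorded so a held CHIPSET_READY
 * response can eventually be sent (see chipset_ready()).
 */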
2137 static ssize_t chipsetready_store(struct device *dev,
2138         struct device_attribute *attr, const char *buf, size_t count)
2139 {
2140         char msgtype[64];
2141
2142         if (sscanf(buf, "%63s", msgtype) != 1)
2143                 return -EINVAL;
2144
2145         if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2146                 chipset_events[0] = 1;
2147                 return count;
2148         } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2149                 chipset_events[1] = 1;
2150                 return count;
2151         }
2152         return -EINVAL;
2153 }
2154
2155 /* The parahotplug/devicedisabled interface gets called by our support script
2156  * when an SR-IOV device has been shut down. The ID is passed to the script
2157  * and then passed back when the device has been removed.
2158  */
2159 static ssize_t devicedisabled_store(struct device *dev,
2160         struct device_attribute *attr, const char *buf, size_t count)
2161 {
2162         uint id;
2163
2164         if (kstrtouint(buf, 10, &id) != 0)
2165                 return -EINVAL;
2166
2167         parahotplug_request_complete(id, 0);
2168         return count;
2169 }
2170
2171 /* The parahotplug/deviceenabled interface gets called by our support script
2172  * when an SR-IOV device has been recovered. The ID is passed to the script
2173  * and then passed back when the device has been brought back up.
2174  */
2175 static ssize_t deviceenabled_store(struct device *dev,
2176         struct device_attribute *attr, const char *buf, size_t count)
2177 {
2178         uint id;
2179
2180         if (kstrtouint(buf, 10, &id) != 0)
2181                 return -EINVAL;
2182
2183         parahotplug_request_complete(id, 1);
2184         return count;
2185 }
2186
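/* Module initialization: locate and validate the controlvm channel, map
 * its request payload area, set up the character-device interface, the
 * putfile slab cache, the periodic controlvm work (or its crash-kernel
 * variant), and register the visorchipset platform device.
 */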
2187 static int __init
2188 visorchipset_init(void)
2189 {
2190         int rc = 0, x = 0;
2191         HOSTADDRESS addr;
2192
2193         if (!unisys_spar_platform)
2194                 return -ENODEV;
2195
2196         memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2197         memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2198         memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
2199         memset(&LiveDump_info, 0, sizeof(LiveDump_info));
2200         atomic_set(&LiveDump_info.buffers_in_use, 0);
2201
2202         if (visorchipset_testvnic) {
2203                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2204                 rc = -ENODEV;   /* fail init rather than silently doing nothing */
2205                 goto Away;
2206         }
2207
2208         addr = controlvm_get_channel_address();
2209         if (addr != 0) {
2210                 ControlVm_channel =
2211                     visorchannel_create_with_lock
2212                     (addr,
2213                      sizeof(struct spar_controlvm_channel_protocol),
2214                      spar_controlvm_channel_protocol_uuid);
2215                 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2216                                 visorchannel_get_header(ControlVm_channel))) {
2217                         initialize_controlvm_payload();
2218                 } else {
2219                         visorchannel_destroy(ControlVm_channel);
2220                         ControlVm_channel = NULL;
2221                         return -ENODEV;
2222                 }
2223         } else {
2224                 return -ENODEV;
2225         }
2226
2227         MajorDev = MKDEV(visorchipset_major, 0);
2228         rc = visorchipset_file_init(MajorDev, &ControlVm_channel);
2229         if (rc < 0) {
2230                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2231                 goto Away;
2232         }
2233
2234         memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
2235
2236         memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
2237
2238         memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
2239
2240         Putfile_buffer_list_pool =
2241             kmem_cache_create(Putfile_buffer_list_pool_name,
2242                               sizeof(struct putfile_buffer_entry),
2243                               0, SLAB_HWCACHE_ALIGN, NULL);
2244         if (!Putfile_buffer_list_pool) {
2245                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2246                 rc = -1;
2247                 goto Away;
2248         }
2249         if (!visorchipset_disable_controlvm) {
2250                 /* if booting in a crash kernel */
2251                 if (visorchipset_crash_kernel)
2252                         INIT_DELAYED_WORK(&Periodic_controlvm_work,
2253                                           setup_crash_devices_work_queue);
2254                 else
2255                         INIT_DELAYED_WORK(&Periodic_controlvm_work,
2256                                           controlvm_periodic_work);
2257                 Periodic_controlvm_workqueue =
2258                     create_singlethread_workqueue("visorchipset_controlvm");
2259
2260                 if (Periodic_controlvm_workqueue == NULL) {
2261                         POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2262                                          DIAG_SEVERITY_ERR);
2263                         rc = -ENOMEM;
2264                         goto Away;
2265                 }
2266                 Most_recent_message_jiffies = jiffies;
2267                 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2268                 if (!queue_delayed_work(Periodic_controlvm_workqueue,
2269                                         &Periodic_controlvm_work,
2270                                         poll_jiffies)) {
2271                         /* queue_delayed_work() returns a bool, not an errno */
2272                         POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2273                                          DIAG_SEVERITY_ERR);
2274                         rc = -EINVAL;
2275                         goto Away;
2276                 }
2275
2276         }
2277
2278         Visorchipset_platform_device.dev.devt = MajorDev;
2279         if (platform_device_register(&Visorchipset_platform_device) < 0) {
2280                 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2281                 rc = -1;
2282                 goto Away;
2283         }
2284         POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2285         rc = 0;
2286 Away:
2287         if (rc) {
2288                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2289                                  POSTCODE_SEVERITY_ERR);
2290         }
2291         return rc;
2292 }
2293
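/* Module cleanup: stop the periodic controlvm work, unmap the payload
 * area, free the slab cache and controlvm bookkeeping, destroy the
 * controlvm channel, and tear down the character-device interface.
 */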
2294 static void
2295 visorchipset_exit(void)
2296 {
2297         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2298
2299         if (!visorchipset_disable_controlvm) {
2300                 cancel_delayed_work(&Periodic_controlvm_work);
2301                 flush_workqueue(Periodic_controlvm_workqueue);
2302                 destroy_workqueue(Periodic_controlvm_workqueue);
2303                 Periodic_controlvm_workqueue = NULL;
2304                 destroy_controlvm_payload_info(&ControlVm_payload_info);
2305         }
2308         Test_Vnic_channel = NULL;
2309         if (Putfile_buffer_list_pool) {
2310                 kmem_cache_destroy(Putfile_buffer_list_pool);
2311                 Putfile_buffer_list_pool = NULL;
2312         }
2313
2314         cleanup_controlvm_structures();
2315
2316         memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
2317
2318         memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
2319
2320         memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
2321
2322         visorchannel_destroy(ControlVm_channel);
2323
2324         visorchipset_file_cleanup();
2325         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2326 }
2327
2328 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2329 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2330 int visorchipset_testvnic = 0;
2331
2332 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2333 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2334 int visorchipset_testvnicclient = 0;
2335
2336 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2337 MODULE_PARM_DESC(visorchipset_testmsg,
2338                  "1 to manufacture the chipset, bus, and switch messages");
2339 int visorchipset_testmsg = 0;
2340
2341 module_param_named(major, visorchipset_major, int, S_IRUGO);
2342 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2343 int visorchipset_major = 0;
2344
2345 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2346 MODULE_PARM_DESC(visorchipset_serverregwait,
2347                  "1 to have the module wait for the visor bus to register");
2348 int visorchipset_serverregwait = 0;     /* default is off */
2349 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2350 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2351 int visorchipset_clientregwait = 1;     /* default is on */
2352 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2353 MODULE_PARM_DESC(visorchipset_testteardown,
2354                  "1 to test teardown of the chipset, bus, and switch");
2355 int visorchipset_testteardown = 0;      /* default is off */
2356 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2357                    S_IRUGO);
2358 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2359                  "1 to disable polling of controlVm channel");
2360 int visorchipset_disable_controlvm = 0; /* default is off */
2361 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2362 MODULE_PARM_DESC(visorchipset_crash_kernel,
2363                  "1 means we are running in crash kernel");
2364 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2365 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2366                    int, S_IRUGO);
2367 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2368                  "1 to hold response to CHIPSET_READY");
2369 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2370                                       * response immediately */
2371 module_init(visorchipset_init);
2372 module_exit(visorchipset_exit);
2373
2374 MODULE_AUTHOR("Unisys");
2375 MODULE_LICENSE("GPL");
2376 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2377                    VERSION);
2378 MODULE_VERSION(VERSION);