3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
20 /* if you want to turn on some debugging of write device data or read
21 * device data, define these two undefs. You will probably want to
22 * customize the code which is here since it was written assuming
23 * reading and writing a specific data file df.64M.txt which is a
24 * 64Megabyte file created by Art Nilson using a script I wrote called
25 * cr_test_data.pl. The data file consists of 256 byte lines of text
26 * which start with an 8 digit sequence number, a colon, and then
27 * letters after that */
31 #include <linux/kernel.h>
32 #ifdef CONFIG_MODVERSIONS
33 #include <config/modversions.h>
37 #include "diagnostics/appos_subsystems.h"
40 #include "uisthread.h"
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/pci.h>
45 #include <linux/spinlock.h>
46 #include <linux/device.h>
47 #include <linux/slab.h>
48 #include <scsi/scsi.h>
49 #include <scsi/scsi_host.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <asm/param.h>
53 #include <linux/debugfs.h>
54 #include <linux/types.h>
58 #include "visorchipset.h"
60 #include "guestlinuxdebug.h"
61 /* this is shorter than using __FILE__ (full path name) in
62 * debug/info/error messages
64 #define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c
65 #define __MYFILE__ "virthba.c"
67 /* NOTE: L1_CACHE_BYTES >=128 */
68 #define DEVICE_ATTRIBUTE struct device_attribute
70 /* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
71 * = 4800 bytes ~ 2^13 = 8192 bytes
75 /*****************************************************/
76 /* Forward declarations */
77 /*****************************************************/
78 static int virthba_probe(struct virtpci_dev *dev,
79 const struct pci_device_id *id);
80 static void virthba_remove(struct virtpci_dev *dev);
81 static int virthba_abort_handler(struct scsi_cmnd *scsicmd);
82 static int virthba_bus_reset_handler(struct scsi_cmnd *scsicmd);
83 static int virthba_device_reset_handler(struct scsi_cmnd *scsicmd);
84 static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd);
85 static const char *virthba_get_info(struct Scsi_Host *shp);
86 static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
87 static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
88 void (*virthba_cmnd_done)
89 (struct scsi_cmnd *));
91 static const struct x86_cpu_id unisys_spar_ids[] = {
92 { X86_VENDOR_INTEL, 6, 62, X86_FEATURE_ANY },
97 MODULE_DEVICE_TABLE(x86cpu, unisys_spar_ids);
100 static DEF_SCSI_QCMD(virthba_queue_command)
102 #define virthba_queue_command virthba_queue_command_lck
105 static int virthba_slave_alloc(struct scsi_device *scsidev);
106 static int virthba_slave_configure(struct scsi_device *scsidev);
107 static void virthba_slave_destroy(struct scsi_device *scsidev);
108 static int process_incoming_rsps(void *);
109 static int virthba_serverup(struct virtpci_dev *virtpcidev);
110 static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state);
111 static void do_disk_add_remove(struct work_struct *work);
112 static void virthba_serverdown_complete(struct work_struct *work);
113 static ssize_t info_debugfs_read(struct file *file, char __user *buf,
114 size_t len, loff_t *offset);
115 static ssize_t enable_ints_write(struct file *file,
116 const char __user *buffer, size_t count,
119 /*****************************************************/
121 /*****************************************************/
123 static int rsltq_wait_usecs = 4000; /* Default 4ms */
124 static unsigned int max_buff_len;
127 static char *virthba_options = "NONE";
129 static const struct pci_device_id virthba_id_table[] = {
130 {PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA)},
134 /* export virthba_id_table */
135 MODULE_DEVICE_TABLE(pci, virthba_id_table);
137 static struct workqueue_struct *virthba_serverdown_workqueue;
/* virtpci bus driver hooks for this HBA: probe/remove plus
 * resume/suspend, which are wired to the serverup/serverdown
 * handlers (IOVM partition up/down), not ACPI power management.
 */
139 static struct virtpci_driver virthba_driver = {
140 .name = "uisvirthba",
143 .id_table = virthba_id_table,
144 .probe = virthba_probe,
145 .remove = virthba_remove,
146 .resume = virthba_serverup,
147 .suspend = virthba_serverdown
150 /* The Send and Receive Buffers of the IO Queue may both be full */
151 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2)
152 #define INTERRUPT_VECTOR_MASK 0x3F
/* Fields of struct scsipending (declaration line not visible in this
 * excerpt): one slot in the pending[] ring used to match responses
 * from the IOVM back to the request that produced them.
 */
155 char cmdtype; /* Type of pointer that is being stored */
156 void *sent; /* The Data being tracked */
157 /* struct scsi_cmnd *type for virthba_queue_command */
158 /* struct uiscmdrsp *type for management commands */
161 #define VIRTHBA_ERROR_COUNT 30
162 #define IOS_ERROR_THRESHOLD 1000
/* Per-disk node on the singly linked list rooted at virthba_info.head;
 * tracks error-handler invocations (error_count) and the I/O
 * countdown (ios_threshold) used to age errors out.
 */
163 struct virtdisk_info {
165 u32 channel, id, lun; /* Disk Path */
166 atomic_t ios_threshold;
167 atomic_t error_count;
168 struct virtdisk_info *next;
171 /* Each Scsi_Host has a host_data area that contains this struct. */
172 struct virthba_info {
173 struct Scsi_Host *scsihost;
174 struct virtpci_dev *virtpcidev;
175 struct list_head dev_info_list;
/* chinfo holds the IOVM channel queueinfo, insertlock and the
 * response-thread threadinfo (see virthba_probe / process_incoming_rsps) */
176 struct chaninfo chinfo;
177 struct irq_info intr; /* use recvInterrupt info to receive
178 interrupts when IOs complete */
179 int interrupt_vector;
/* pending[] + nextinsert are guarded by privlock; see
 * add_scsipending_entry()/del_scsipending_entry() */
180 struct scsipending pending[MAX_PENDING_REQUESTS]; /* Tracks the requests
182 /* forwarded to the IOVM and haven't returned yet */
183 unsigned int nextinsert; /* Start search for next pending
187 bool serverchangingstate;
188 unsigned long long acquire_failed_cnt;
/* interrupt statistics incremented in virthba_isr() */
189 unsigned long long interrupts_rcvd;
190 unsigned long long interrupts_notme;
191 unsigned long long interrupts_disabled;
192 struct work_struct serverdown_completion;
/* points at the IOCHAN_FROM_IOPART signal-queue features word;
 * set in virthba_probe, twiddled via uisqueue_interlocked_and/or */
193 u64 __iomem *flags_addr;
194 atomic_t interrupt_rcvd;
195 wait_queue_head_t rsp_queue;
/* dummy list head; real disks hang off head.next (virthba_slave_alloc) */
196 struct virtdisk_info head;
199 /* Work Data for dar_work_queue */
/* One queued disk hot-add/hot-remove request; nodes are pushed onto
 * dar_work_queue_head (LIFO) and drained by do_disk_add_remove(). */
200 struct diskaddremove {
201 u8 add; /* 0-remove, 1-add */
202 struct Scsi_Host *shost; /* Scsi Host for this virthba instance */
203 u32 channel, id, lun; /* Disk Path */
204 struct diskaddremove *next;
207 #define virtpci_dev_to_virthba_virthba_get_info(d) \
208 container_of(d, struct virthba_info, virtpcidev)
210 static DEVICE_ATTRIBUTE *virthba_shost_attrs[];
/* SCSI midlayer host template: command submission via
 * virthba_queue_command and the four eh_* error-recovery callbacks.
 */
211 static struct scsi_host_template virthba_driver_template = {
212 .name = "Unisys Virtual HBA",
213 .info = virthba_get_info,
214 .ioctl = virthba_ioctl,
215 .queuecommand = virthba_queue_command,
216 .eh_abort_handler = virthba_abort_handler,
217 .eh_device_reset_handler = virthba_device_reset_handler,
218 .eh_bus_reset_handler = virthba_bus_reset_handler,
219 .eh_host_reset_handler = virthba_host_reset_handler,
220 .shost_attrs = virthba_shost_attrs,
222 #define VIRTHBA_MAX_CMNDS 128
223 .can_queue = VIRTHBA_MAX_CMNDS,
224 .sg_tablesize = 64, /* largest number of address/length pairs */
226 .slave_alloc = virthba_slave_alloc,
227 .slave_configure = virthba_slave_configure,
228 .slave_destroy = virthba_slave_destroy,
229 .use_clustering = ENABLE_CLUSTERING,
232 struct virthba_devices_open {
233 struct virthba_info *virthbainfo;
236 static const struct file_operations debugfs_info_fops = {
237 .read = info_debugfs_read,
240 static const struct file_operations debugfs_enable_ints_fops = {
241 .write = enable_ints_write,
244 /*****************************************************/
246 /*****************************************************/
248 #define VIRTHBASOPENMAX 1
249 /* array of open devices maintained by open() and close(); */
250 static struct virthba_devices_open virthbas_open[VIRTHBASOPENMAX];
251 static struct dentry *virthba_debugfs_dir;
253 /*****************************************************/
254 /* Local Functions */
255 /*****************************************************/
/* Record an in-flight command in vhbainfo->pending[] so the response
 * from the IOVM can later be matched to it.
 * cmdtype: CMD_* tag describing what 'new' points at
 * new:     tracked pointer (scsi_cmnd * or uiscmdrsp *, per scsipending)
 * Returns the slot index claimed (-1 on a full ring, per callers that
 * test for -1; the early-return path is elided in this excerpt).
 */
257 add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new)
/* privlock serializes pending[] and nextinsert against the response path */
262 spin_lock_irqsave(&vhbainfo->privlock, flags);
263 insert_location = vhbainfo->nextinsert;
/* linear-probe forward from nextinsert for a free (sent == NULL) slot */
264 while (vhbainfo->pending[insert_location].sent != NULL) {
265 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
/* wrapped all the way around: every slot is occupied */
266 if (insert_location == (int)vhbainfo->nextinsert) {
267 LOGERR("Queue should be full. insert_location<<%d>> Unable to find open slot for pending commands.\n",
269 spin_unlock_irqrestore(&vhbainfo->privlock, flags);
/* claim the slot and advance the search start for the next caller */
274 vhbainfo->pending[insert_location].cmdtype = cmdtype;
275 vhbainfo->pending[insert_location].sent = new;
276 vhbainfo->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
277 spin_unlock_irqrestore(&vhbainfo->privlock, flags);
279 return insert_location;
/* Blocking variant of add_scsipending_entry(): retries every ~10 ms
 * until a pending[] slot frees up. Must only be called from process
 * context (it sleeps via schedule_timeout).
 */
283 add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype,
286 int insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
288 while (insert_location == -1) {
289 LOGERR("Failed to find empty queue slot. Waiting to try again\n");
/* sleep ~10 ms before re-probing the ring */
290 set_current_state(TASK_INTERRUPTIBLE);
291 schedule_timeout(msecs_to_jiffies(10));
292 insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
295 return (unsigned int)insert_location;
/* Release pending[] slot 'del' and return the pointer that was stored
 * there (the visible fragment captures it into 'sent'; the return
 * statement is elided in this excerpt). Bounds-checks the index first.
 */
299 del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del)
304 if (del >= MAX_PENDING_REQUESTS) {
305 LOGERR("Invalid queue position <<%lu>> given to delete. MAX_PENDING_REQUESTS <<%d>>\n",
306 (unsigned long)del, MAX_PENDING_REQUESTS);
307 
308 spin_lock_irqsave(&vhbainfo->privlock, flags);
/* double-free of a slot is logged but otherwise tolerated */
310 if (vhbainfo->pending[del].sent == NULL)
311 LOGERR("Deleting already cleared queue entry at <<%lu>>.\n",
/* capture the tracked pointer, then clear the slot for reuse */
314 sent = vhbainfo->pending[del].sent;
316 vhbainfo->pending[del].cmdtype = 0;
317 vhbainfo->pending[del].sent = NULL;
318 spin_unlock_irqrestore(&vhbainfo->privlock, flags);
324 /* dar_work_queue (Disk Add/Remove) */
/* Shared LIFO of diskaddremove requests plus the work item that drains
 * it; all four globals are guarded by dar_work_queue_lock.
 * dar_work_queue_sched avoids re-scheduling the work while one run is
 * already queued (cleared again in do_disk_add_remove()).
 */
325 static struct work_struct dar_work_queue;
326 static struct diskaddremove *dar_work_queue_head;
327 static spinlock_t dar_work_queue_lock;
328 static unsigned short dar_work_queue_sched;
/* Push 'dar' onto the list and kick the work item if it is not already
 * scheduled. NOTE: expects a local 'unsigned long flags' in the caller.
 * (No comments are inserted inside the macro body: every line must end
 * in a backslash continuation.)
 */
329 #define QUEUE_DISKADDREMOVE(dar) { \
330 spin_lock_irqsave(&dar_work_queue_lock, flags); \
331 if (!dar_work_queue_head) { \
332 dar_work_queue_head = dar; \
336 dar->next = dar_work_queue_head; \
337 dar_work_queue_head = dar; \
339 if (!dar_work_queue_sched) { \
340 schedule_work(&dar_work_queue); \
341 dar_work_queue_sched = 1; \
343 spin_unlock_irqrestore(&dar_work_queue_lock, flags); \
/* Apply one queued hot-plug request to the SCSI midlayer: remove the
 * device if it can be looked up, otherwise add it when dar->add is set.
 * Failures are logged only — there is no retry.
 */
347 send_disk_add_remove(struct diskaddremove *dar)
349 struct scsi_device *sdev;
/* NOTE(review): scsi_device_lookup() takes a reference on success —
 * the matching scsi_device_put() is presumably on an elided line; confirm. */
352 sdev = scsi_device_lookup(dar->shost, dar->channel, dar->id, dar->lun);
355 scsi_remove_device(sdev);
356 } else if (dar->add) {
358 scsi_add_device(dar->shost, dar->channel, dar->id,
361 LOGERR("Failed scsi_add_device: host_no=%d[chan=%d:id=%d:lun=%d]\n",
362 dar->shost->host_no, dar->channel, dar->id,
365 LOGERR("Failed scsi_device_lookup:[chan=%d:id=%d:lun=%d]\n",
366 dar->channel, dar->id, dar->lun);
370 /*****************************************************/
371 /* dar_work_queue Handler Thread */
372 /*****************************************************/
/* Work handler for dar_work_queue: atomically detach the whole pending
 * list (and clear the 'scheduled' flag so new arrivals re-schedule),
 * then process each entry outside the lock via send_disk_add_remove().
 */
374 do_disk_add_remove(struct work_struct *work)
376 struct diskaddremove *dar;
377 struct diskaddremove *tmphead;
/* snapshot-and-reset under the lock; walk the snapshot lock-free */
381 spin_lock_irqsave(&dar_work_queue_lock, flags);
382 tmphead = dar_work_queue_head;
383 dar_work_queue_head = NULL;
384 dar_work_queue_sched = 0;
385 spin_unlock_irqrestore(&dar_work_queue_lock, flags);
389 send_disk_add_remove(dar);
394 /*****************************************************/
395 /* Routine to add entry to dar_work_queue */
396 /*****************************************************/
/* Translate an IOVM disknotify message into a diskaddremove node and
 * queue it for the workqueue. GFP_ATOMIC because this runs in the
 * response path; allocation failure is logged and the event dropped.
 */
398 process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
400 struct diskaddremove *dar;
403 dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
405 dar->add = cmdrsp->disknotify.add;
407 dar->channel = cmdrsp->disknotify.channel;
408 dar->id = cmdrsp->disknotify.id;
409 dar->lun = cmdrsp->disknotify.lun;
/* macro expects a local 'flags' variable (declared on an elided line) */
410 QUEUE_DISKADDREMOVE(dar);
412 LOGERR("kmalloc failed for dar. host_no=%d[chan=%d:id=%d:lun=%d]\n",
413 shost->host_no, cmdrsp->disknotify.channel,
414 cmdrsp->disknotify.id, cmdrsp->disknotify.lun);
418 /*****************************************************/
419 /* Probe Remove Functions */
420 /*****************************************************/
/* Shared-IRQ handler. Counts the interrupt, optionally masks further
 * channel interrupts when the IOVM allows driver-side disabling, and
 * wakes the response thread (process_incoming_rsps) via rsp_queue if
 * the FROM_IOPART signal queue is non-empty.
 */
422 virthba_isr(int irq, void *dev_id)
424 struct virthba_info *virthbainfo = (struct virthba_info *)dev_id;
425 struct channel_header __iomem *channel_header;
426 struct signal_queue_header __iomem *pqhdr;
428 unsigned long long rc1;
430 if (virthbainfo == NULL)
432 virthbainfo->interrupts_rcvd++;
433 channel_header = virthbainfo->chinfo.queueinfo->chan;
/* if both sides agree the driver may disable ints, mask them here and
 * let the polling thread drain the queue */
434 if (((readq(&channel_header->features)
435 & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0) &&
436 ((readq(&channel_header->features) &
437 ULTRA_IO_DRIVER_DISABLES_INTS) !=
439 virthbainfo->interrupts_disabled++;
440 mask = ~ULTRA_CHANNEL_ENABLE_INTS;
441 rc1 = uisqueue_interlocked_and(virthbainfo->flags_addr, mask);
/* nothing pending for us — likely another device on the shared IRQ */
443 if (spar_signalqueue_empty(channel_header, IOCHAN_FROM_IOPART)) {
444 virthbainfo->interrupts_notme++;
/* bump the channel's num_irq_received counter, then wake the thread */
447 pqhdr = (struct signal_queue_header __iomem *)
448 ((char __iomem *)channel_header +
449 readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART;
450 writeq(readq(&pqhdr->num_irq_received) + 1,
451 &pqhdr->num_irq_received);
452 atomic_set(&virthbainfo->interrupt_rcvd, 1);
453 wake_up_interruptible(&virthbainfo->rsp_queue);
/* virtpci probe callback: allocate and register a Scsi_Host for the
 * new virtual HBA, initialize the per-host virthba_info (locks,
 * pending ring, serverdown work), start the response thread, try to
 * hook the recv interrupt (falling back to polling on failure), and
 * finally scan the host for disks.
 */
458 virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
461 struct Scsi_Host *scsihost;
462 struct virthba_info *virthbainfo;
465 irq_handler_t handler = virthba_isr;
466 struct channel_header __iomem *channel_header;
467 struct signal_queue_header __iomem *pqhdr;
470 LOGVER("entering virthba_probe...\n");
471 LOGVER("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
472 virtpcidev->device_no);
474 LOGINF("entering virthba_probe...\n");
475 LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
476 virtpcidev->device_no);
477 POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
478 /* call scsi_host_alloc to register a scsi host adapter
479 * instance - this virthba that has just been created is an
480 * instance of a scsi host adapter. This scsi_host_alloc
481 * function allocates a new Scsi_Host struct & performs basic
482 * initialization. The host is not published to the scsi
483 * midlayer until scsi_add_host is called.
485 DBGINF("calling scsi_host_alloc.\n");
487 /* arg 2 passed in length of extra space we want allocated
488 * with scsi_host struct for our own use scsi_host_alloc
491 scsihost = scsi_host_alloc(&virthba_driver_template,
492 sizeof(struct virthba_info));
493 if (scsihost == NULL)
496 DBGINF("scsihost: 0x%p, scsihost->this_id: %d, host_no: %d.\n",
497 scsihost, scsihost->this_id, scsihost->host_no);
499 scsihost->this_id = UIS_MAGIC_VHBA;
500 /* linux treats max-channel differently than max-id & max-lun.
501 * In the latter cases, those two values result in 0 to max-1
502 * (inclusive) being scanned. But in the case of channels, the
503 * scan is 0 to max (inclusive); so we will subtract one from
504 * the max-channel value.
506 LOGINF("virtpcidev->scsi.max.max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_io_size=%u\n",
507 (unsigned)virtpcidev->scsi.max.max_channel - 1,
508 (unsigned)virtpcidev->scsi.max.max_id,
509 (unsigned)virtpcidev->scsi.max.max_lun,
510 (unsigned)virtpcidev->scsi.max.cmd_per_lun,
511 (unsigned)virtpcidev->scsi.max.max_io_size);
/* copy the IOVM-advertised limits into the Scsi_Host */
512 scsihost->max_channel = (unsigned)virtpcidev->scsi.max.max_channel;
513 scsihost->max_id = (unsigned)virtpcidev->scsi.max.max_id;
514 scsihost->max_lun = (unsigned)virtpcidev->scsi.max.max_lun;
515 scsihost->cmd_per_lun = (unsigned)virtpcidev->scsi.max.cmd_per_lun;
/* >> 9: bytes to 512-byte sectors */
516 scsihost->max_sectors =
517 (unsigned short)(virtpcidev->scsi.max.max_io_size >> 9);
518 scsihost->sg_tablesize =
519 (unsigned short)(virtpcidev->scsi.max.max_io_size / PAGE_SIZE);
/* clamp to what fits in cmdrsp->scsi.gpi_list */
520 if (scsihost->sg_tablesize > MAX_PHYS_INFO)
521 scsihost->sg_tablesize = MAX_PHYS_INFO;
522 LOGINF("scsihost->max_channel=%u, max_id=%u, max_lun=%llu, cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
523 scsihost->max_channel, scsihost->max_id, scsihost->max_lun,
524 scsihost->cmd_per_lun, scsihost->max_sectors,
525 scsihost->sg_tablesize);
526 LOGINF("scsihost->can_queue=%u, scsihost->cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
527 scsihost->can_queue, scsihost->cmd_per_lun, scsihost->max_sectors,
528 scsihost->sg_tablesize);
530 DBGINF("calling scsi_add_host\n");
532 /* this creates "host%d" in sysfs. If 2nd argument is NULL,
533 * then this generic /sys/devices/platform/host? device is
534 * created and /sys/scsi_host/host? ->
535 * /sys/devices/platform/host? If 2nd argument is not NULL,
536 * then this generic /sys/devices/<path>/host? is created and
537 * host? points to that device instead.
539 error = scsi_add_host(scsihost, &virtpcidev->generic_dev);
541 LOGERR("scsi_add_host ****FAILED 0x%x TBD - RECOVER\n", error);
542 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
543 /* decr refcount on scsihost which was incremented by
544 * scsi_add_host so the scsi_host gets deleted
546 scsi_host_put(scsihost);
/* hostdata is the extra space requested from scsi_host_alloc above */
550 virthbainfo = (struct virthba_info *)scsihost->hostdata;
551 memset(virthbainfo, 0, sizeof(struct virthba_info));
/* register this instance in the module-wide open-devices table */
552 for (i = 0; i < VIRTHBASOPENMAX; i++) {
553 if (virthbas_open[i].virthbainfo == NULL) {
554 virthbas_open[i].virthbainfo = virthbainfo;
/* -1 = "no IRQ hooked yet"; checked in virthba_remove */
558 virthbainfo->interrupt_vector = -1;
559 virthbainfo->chinfo.queueinfo = &virtpcidev->queueinfo;
560 virthbainfo->virtpcidev = virtpcidev;
561 spin_lock_init(&virthbainfo->chinfo.insertlock);
563 DBGINF("generic_dev: 0x%p, queueinfo: 0x%p.\n",
564 &virtpcidev->generic_dev, &virtpcidev->queueinfo);
566 init_waitqueue_head(&virthbainfo->rsp_queue);
567 spin_lock_init(&virthbainfo->privlock);
568 memset(&virthbainfo->pending, 0, sizeof(virthbainfo->pending));
569 virthbainfo->serverdown = false;
570 virthbainfo->serverchangingstate = false;
572 virthbainfo->intr = virtpcidev->intr;
573 /* save of host within virthba_info */
574 virthbainfo->scsihost = scsihost;
576 /* save of host within virtpci_dev */
577 virtpcidev->scsi.scsihost = scsihost;
579 /* Setup workqueue for serverdown messages */
580 INIT_WORK(&virthbainfo->serverdown_completion,
581 virthba_serverdown_complete);
/* advertise polling mode first; switched to interrupts below if
 * request_irq succeeds */
583 writeq(readq(&virthbainfo->chinfo.queueinfo->chan->features) |
584 ULTRA_IO_CHANNEL_IS_POLLING,
585 &virthbainfo->chinfo.queueinfo->chan->features);
586 /* start thread that will receive scsicmnd responses */
587 DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n",
588 virthbainfo->chinfo.queueinfo, &virthbainfo->chinfo.threadinfo);
/* locate the FROM_IOPART signal-queue header; its features word is
 * what the ISR masks/unmasks via flags_addr */
590 channel_header = virthbainfo->chinfo.queueinfo->chan;
591 pqhdr = (struct signal_queue_header __iomem *)
592 ((char __iomem *)channel_header +
593 readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART;
594 virthbainfo->flags_addr = &pqhdr->features;
596 if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
597 process_incoming_rsps,
598 virthbainfo, "vhba_incoming")) {
599 LOGERR("uisthread_start rsp ****FAILED\n");
600 /* decr refcount on scsihost which was incremented by
601 * scsi_add_host so the scsi_host gets deleted
603 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
604 scsi_host_put(scsihost);
607 LOGINF("sendInterruptHandle=0x%16llX",
608 virthbainfo->intr.send_irq_handle);
609 LOGINF("recvInterruptHandle=0x%16llX",
610 virthbainfo->intr.recv_irq_handle);
611 LOGINF("recvInterruptVector=0x%8X",
612 virthbainfo->intr.recv_irq_vector);
613 LOGINF("recvInterruptShared=0x%2X",
614 virthbainfo->intr.recv_irq_shared);
615 LOGINF("scsihost.hostt->name=%s", scsihost->hostt->name);
/* low 6 bits of the recv handle select the IRQ line */
616 virthbainfo->interrupt_vector =
617 virthbainfo->intr.recv_irq_handle & INTERRUPT_VECTOR_MASK;
618 rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED,
619 scsihost->hostt->name, virthbainfo);
/* IRQ hookup failed: stay in polling mode (not fatal) */
621 LOGERR("request_irq(%d) uislib_virthba_ISR request failed with rsp=%d\n",
622 virthbainfo->interrupt_vector, rsp);
623 virthbainfo->interrupt_vector = -1;
624 POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
/* IRQ hookup succeeded: clear polling, announce interrupt mode, and
 * stretch the response thread's wait to 4 s since IRQs now wake it */
626 u64 __iomem *Features_addr =
627 &virthbainfo->chinfo.queueinfo->chan->features;
628 LOGERR("request_irq(%d) uislib_virthba_ISR request succeeded\n",
629 virthbainfo->interrupt_vector);
630 mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
631 ULTRA_IO_DRIVER_DISABLES_INTS);
632 uisqueue_interlocked_and(Features_addr, mask);
633 mask = ULTRA_IO_DRIVER_ENABLES_INTS;
634 uisqueue_interlocked_or(Features_addr, mask);
635 rsltq_wait_usecs = 4000000;
638 DBGINF("calling scsi_scan_host.\n");
639 scsi_scan_host(scsihost);
640 DBGINF("return from scsi_scan_host.\n");
642 LOGINF("virthba added scsihost:0x%p\n", scsihost);
643 POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* virtpci remove callback: tear down in reverse of probe — free the
 * IRQ (if one was hooked), unregister the host from the midlayer,
 * stop the response thread, and drop the final Scsi_Host reference.
 */
648 virthba_remove(struct virtpci_dev *virtpcidev)
650 struct virthba_info *virthbainfo;
651 struct Scsi_Host *scsihost =
652 (struct Scsi_Host *)virtpcidev->scsi.scsihost;
654 LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
655 virtpcidev->device_no);
656 virthbainfo = (struct virthba_info *)scsihost->hostdata;
/* -1 means probe never got (or lost) the IRQ — nothing to free */
657 if (virthbainfo->interrupt_vector != -1)
658 free_irq(virthbainfo->interrupt_vector, virthbainfo);
659 LOGINF("Removing virtpcidev: 0x%p, virthbainfo: 0x%p\n", virtpcidev,
662 DBGINF("removing scsihost: 0x%p, scsihost->this_id: %d\n", scsihost,
664 scsi_remove_host(scsihost);
666 DBGINF("stopping thread.\n");
667 uisthread_stop(&virthbainfo->chinfo.threadinfo);
669 DBGINF("calling scsi_host_put\n");
671 /* decr refcount on scsihost which was incremented by
672 * scsi_add_host so the scsi_host gets deleted
674 scsi_host_put(scsihost);
675 LOGINF("virthba removed scsi_host.\n");
/* Send a synchronous vdisk-management command to the IOVM and block
 * until the response thread flips notifyresult away from the 0xffff
 * sentinel. Returns FAILED when the server is down or allocation
 * fails (per the visible paths).
 * NOTE(review): several lines below contain "¬ify..." — this looks
 * like an HTML-entity mangling of "&notify..." introduced by whatever
 * extracted this excerpt; restore '&' when re-integrating.
 */
679 forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype,
680 struct Scsi_Host *scsihost,
681 struct uisscsi_dest *vdest)
683 struct uiscmdrsp *cmdrsp;
684 struct virthba_info *virthbainfo =
685 (struct virthba_info *)scsihost->hostdata;
/* 0xffff sentinel: response thread writes the real result here */
686 int notifyresult = 0xffff;
687 wait_queue_head_t notifyevent;
689 LOGINF("vDiskMgmt:%d %d:%d:%d\n", vdiskcmdtype,
690 vdest->channel, vdest->id, vdest->lun);
692 if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
693 DBGINF("Server is down/changing state. Returning Failure.\n");
697 cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
698 if (cmdrsp == NULL) {
699 LOGERR("kmalloc of cmdrsp failed.\n");
700 return FAILED; /* reject */
703 init_waitqueue_head(¬ifyevent);
705 /* issue VDISK_MGMT_CMD
706 * set type to command - as opposed to task mgmt
708 cmdrsp->cmdtype = CMD_VDISKMGMT_TYPE;
709 /* specify the event that has to be triggered when this cmd is
712 cmdrsp->vdiskmgmt.notify = (void *)¬ifyevent;
713 cmdrsp->vdiskmgmt.notifyresult = (void *)¬ifyresult;
715 /* save destination */
716 cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype;
717 cmdrsp->vdiskmgmt.vdest.channel = vdest->channel;
718 cmdrsp->vdiskmgmt.vdest.id = vdest->id;
719 cmdrsp->vdiskmgmt.vdest.lun = vdest->lun;
/* stash the pending-ring slot index so the response can be matched */
720 cmdrsp->vdiskmgmt.scsicmd =
722 add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE,
725 uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
726 cmdrsp, IOCHAN_TO_IOPART,
727 &virthbainfo->chinfo.insertlock,
728 DONT_ISSUE_INTERRUPT, (u64)NULL,
730 LOGINF("VdiskMgmt waiting on event notifyevent=0x%p\n",
731 cmdrsp->scsitaskmgmt.notify);
/* block until the response thread posts a real result */
732 wait_event(notifyevent, notifyresult != 0xffff);
733 LOGINF("VdiskMgmt complete; result:%d\n", cmdrsp->vdiskmgmt.result);
738 /*****************************************************/
739 /* Scsi Host support functions */
740 /*****************************************************/
/* Send a synchronous SCSI task-management request (abort / LUN reset /
 * bus reset, per 'tasktype') to the IOVM and wait for completion.
 * Mirrors forward_vdiskmgmt_command: a 0xffff sentinel in notifyresult
 * is overwritten by the response thread, releasing wait_event().
 * NOTE(review): "¬ify..." below is an HTML-entity mangling of
 * "&notify..." from the excerpting tool; restore '&' on re-integration.
 */
743 forward_taskmgmt_command(enum task_mgmt_types tasktype,
744 struct scsi_device *scsidev)
746 struct uiscmdrsp *cmdrsp;
747 struct virthba_info *virthbainfo =
748 (struct virthba_info *)scsidev->host->hostdata;
749 int notifyresult = 0xffff;
750 wait_queue_head_t notifyevent;
752 LOGINF("TaskMgmt:%d %d:%d:%llu\n", tasktype,
753 scsidev->channel, scsidev->id, scsidev->lun);
755 if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
756 DBGINF("Server is down/changing state. Returning Failure.\n");
760 cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
761 if (cmdrsp == NULL) {
762 LOGERR("kmalloc of cmdrsp failed.\n");
763 return FAILED; /* reject */
766 init_waitqueue_head(¬ifyevent);
768 /* issue TASK_MGMT_ABORT_TASK */
769 /* set type to command - as opposed to task mgmt */
770 cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
771 /* specify the event that has to be triggered when this */
772 /* cmd is complete */
773 cmdrsp->scsitaskmgmt.notify = (void *)¬ifyevent;
774 cmdrsp->scsitaskmgmt.notifyresult = (void *)¬ifyresult;
776 /* save destination */
777 cmdrsp->scsitaskmgmt.tasktype = tasktype;
778 cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
779 cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
780 cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
/* pending-ring slot index doubles as the correlation handle */
781 cmdrsp->scsitaskmgmt.scsicmd =
783 add_scsipending_entry_with_wait(virthbainfo,
784 CMD_SCSITASKMGMT_TYPE,
787 uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
788 cmdrsp, IOCHAN_TO_IOPART,
789 &virthbainfo->chinfo.insertlock,
790 DONT_ISSUE_INTERRUPT, (u64)NULL,
792 LOGINF("TaskMgmt waiting on event notifyevent=0x%p\n",
793 cmdrsp->scsitaskmgmt.notify);
794 wait_event(notifyevent, notifyresult != 0xffff);
795 LOGINF("TaskMgmt complete; result:%d\n", cmdrsp->scsitaskmgmt.result);
800 /* The abort handler returns SUCCESS if it has succeeded to make LLDD
801 * and all related hardware forget about the scmd.
/* SCSI EH abort callback: find the virtdisk_info matching the failed
 * command's path, bump its error bookkeeping, then forward an
 * ABORT_TASK to the IOVM.
 */
804 virthba_abort_handler(struct scsi_cmnd *scsicmd)
806 /* issue TASK_MGMT_ABORT_TASK */
807 struct scsi_device *scsidev;
808 struct virtdisk_info *vdisk;
810 scsidev = scsicmd->device;
/* walk the per-host disk list for the matching channel/id/lun */
811 for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
812 vdisk->next; vdisk = vdisk->next) {
813 if ((scsidev->channel == vdisk->channel) &&
814 (scsidev->id == vdisk->id) &&
815 (scsidev->lun == vdisk->lun)) {
/* count up to VIRTHBA_ERROR_COUNT, then arm the I/O-based decay */
816 if (atomic_read(&vdisk->error_count) <
817 VIRTHBA_ERROR_COUNT) {
818 atomic_inc(&vdisk->error_count);
819 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
820 POSTCODE_SEVERITY_INFO);
822 atomic_set(&vdisk->ios_threshold,
823 IOS_ERROR_THRESHOLD);
826 return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device);
/* SCSI EH bus-reset callback: identical error bookkeeping to the abort
 * handler, but forwards TASK_MGMT_BUS_RESET to the IOVM.
 */
830 virthba_bus_reset_handler(struct scsi_cmnd *scsicmd)
832 /* issue TASK_MGMT_TARGET_RESET for each target on the bus */
833 struct scsi_device *scsidev;
834 struct virtdisk_info *vdisk;
836 scsidev = scsicmd->device;
/* locate the disk node for this command's path */
837 for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
838 vdisk->next; vdisk = vdisk->next) {
839 if ((scsidev->channel == vdisk->channel) &&
840 (scsidev->id == vdisk->id) &&
841 (scsidev->lun == vdisk->lun)) {
842 if (atomic_read(&vdisk->error_count) <
843 VIRTHBA_ERROR_COUNT) {
844 atomic_inc(&vdisk->error_count);
845 POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
846 POSTCODE_SEVERITY_INFO);
848 atomic_set(&vdisk->ios_threshold,
849 IOS_ERROR_THRESHOLD);
852 return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device);
/* SCSI EH device-reset callback: same bookkeeping pattern as the abort
 * and bus-reset handlers, forwarding TASK_MGMT_LUN_RESET.
 */
856 virthba_device_reset_handler(struct scsi_cmnd *scsicmd)
858 /* issue TASK_MGMT_LUN_RESET */
859 struct scsi_device *scsidev;
860 struct virtdisk_info *vdisk;
862 scsidev = scsicmd->device;
864 for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
wait
/* SCSI EH host-reset callback — stubbed out; only logs that it is
 * unimplemented (return value is on an elided line).
 */
882 virthba_host_reset_handler(struct scsi_cmnd *scsicmd)
884 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
885 LOGERR("virthba_host_reset_handler Not yet implemented\n");
/* Static buffer for the .info string — shared across calls, so the
 * returned pointer is only stable until the next invocation.
 */
889 static char virthba_get_info_str[256];
892 virthba_get_info(struct Scsi_Host *shp)
894 /* Return version string */
895 sprintf(virthba_get_info_str, "virthba, version %s\n", VIRTHBA_VERSION);
896 return virthba_get_info_str;
/* .ioctl hook — visible portion only logs the command code; any
 * dispatch/return logic is on elided lines.
 */
900 virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
902 DBGINF("In virthba_ioctl: ioctl: cmd=0x%x\n", cmd);
906 /* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart
/* Queue one SCSI command to the IOVM (wrapped by DEF_SCSI_QCMD above):
 * reserve a pending[] slot, copy CDB/destination/scatterlist physical
 * addresses into a freshly allocated uiscmdrsp, and push it onto the
 * TO_IOPART channel. Returns 0 on success, SCSI_MLQUEUE_DEVICE_BUSY
 * when the server is down or a queue is full, 1 to hard-reject.
 */
910 virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
911 void (*virthba_cmnd_done)(struct scsi_cmnd *))
913 struct scsi_device *scsidev = scsicmd->device;
916 unsigned char *cdb = scsicmd->cmnd;
917 struct Scsi_Host *scsihost = scsidev->host;
918 struct uiscmdrsp *cmdrsp;
920 struct virthba_info *virthbainfo =
921 (struct virthba_info *)scsihost->hostdata;
922 struct scatterlist *sg = NULL;
923 struct scatterlist *sgl = NULL;
/* busy the midlayer while the IOVM partition is down or transitioning */
926 if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
927 DBGINF("Server is down/changing state. Returning SCSI_MLQUEUE_DEVICE_BUSY.\n");
928 return SCSI_MLQUEUE_DEVICE_BUSY;
/* GFP_ATOMIC: queuecommand may run in atomic context */
931 cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
932 if (cmdrsp == NULL) {
933 LOGERR("kmalloc of cmdrsp failed.\n");
934 return 1; /* reject the command */
937 /* now saving everything we need from scsi_cmd into cmdrsp
938 * before we queue cmdrsp set type to command - as opposed to
941 cmdrsp->cmdtype = CMD_SCSI_TYPE;
942 /* save the pending insertion location. Deletion from pending
943 * will return the scsicmd pointer for completion
946 add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *)scsicmd);
947 if (insert_location != -1) {
/* the slot index (not a pointer) travels to the IOVM as the handle */
948 cmdrsp->scsi.scsicmd = (void *)(uintptr_t)insert_location;
950 LOGERR("Queue is full. Returning busy.\n");
952 return SCSI_MLQUEUE_DEVICE_BUSY;
954 /* save done function that we have call when cmd is complete */
955 scsicmd->scsi_done = virthba_cmnd_done;
956 /* save destination */
957 cmdrsp->scsi.vdest.channel = scsidev->channel;
958 cmdrsp->scsi.vdest.id = scsidev->id;
959 cmdrsp->scsi.vdest.lun = scsidev->lun;
961 cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
962 memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
964 cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
966 /* keep track of the max buffer length so far. */
967 if (cmdrsp->scsi.bufflen > max_buff_len)
968 max_buff_len = cmdrsp->scsi.bufflen;
/* gpi_list has only MAX_PHYS_INFO entries — reject oversized sg lists
 * (should not happen: probe clamps sg_tablesize to MAX_PHYS_INFO) */
970 if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
971 LOGERR("scsicmd use_sg:%d greater than MAX:%d\n",
972 scsi_sg_count(scsicmd), MAX_PHYS_INFO);
973 del_scsipending_entry(virthbainfo, (uintptr_t)insert_location);
975 return 1; /* reject the command */
978 /* This is what we USED to do when we assumed we were running */
979 /* uissd & virthba on the same Linux system. */
980 /* cmdrsp->scsi.buffer = scsicmd->request_buffer; */
981 /* The following code does NOT make that assumption. */
982 /* convert buffer to phys information */
983 if (scsi_sg_count(scsicmd) == 0) {
984 if (scsi_bufflen(scsicmd) > 0) {
985 LOGERR("**** FAILED No scatter list for bufflen > 0\n");
986 BUG_ON(scsi_sg_count(scsicmd) == 0);
988 DBGINF("No sg; buffer:0x%p bufflen:%d\n",
989 scsi_sglist(scsicmd), scsi_bufflen(scsicmd));
991 /* buffer is scatterlist - copy it out */
992 sgl = scsi_sglist(scsicmd);
/* translate each sg entry to a guest-physical address/length pair */
994 for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
995 cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
996 cmdrsp->scsi.gpi_list[i].length = sg->length;
997 if ((i != 0) && (sg->offset != 0))
998 LOGINF("Offset on a sg_entry other than zero =<<%d>>.\n",
1003 LOGERR("Start sg_list dump (entries %d, bufflen %d)...\n",
1004 scsi_sg_count(scsicmd), cmdrsp->scsi.bufflen);
1005 for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
1006 LOGERR(" Entry(%d): page->[0x%p], phys->[0x%Lx], off(%d), len(%d)\n",
1008 (unsigned long long)sg_phys(sg),
1009 sg->offset, sg->length);
1011 LOGERR("Done sg_list dump.\n");
1012 /* BUG(); ***** For now, let it fail in uissd
1013 * if it is a problem, as it might just
1018 cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
/* hand off to the channel; DONT_WAIT means a full queue fails fast */
1022 i = uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
1023 cmdrsp, IOCHAN_TO_IOPART,
1024 &virthbainfo->chinfo.
1026 DONT_ISSUE_INTERRUPT,
1027 (u64)NULL, DONT_WAIT, "vhba");
1029 /* queue must be full - and we said don't wait - return busy */
1030 LOGERR("uisqueue_put_cmdrsp_with_lock ****FAILED\n");
/* undo the pending reservation so the slot can be reused */
1032 del_scsipending_entry(virthbainfo, (uintptr_t)insert_location);
1033 return SCSI_MLQUEUE_DEVICE_BUSY;
1036 /* we're done with cmdrsp space - data from it has been copied
1037 * into channel - free it now.
1040 return 0; /* non-zero implies host/device is busy */
/*
 * virthba_slave_alloc() - SCSI mid-layer callback, invoked before scanning
 * for new devices.  Records each discovered <channel:id:lun> in the driver's
 * singly linked virtdisk_info list (head embedded in virthba_info) so that a
 * device already present is not added twice.
 * Returns 0 on every visible path — errors are deliberately reported as
 * success so the mid-layer scan continues.
 * NOTE(review): this listing elides lines (braces, conditions); comments
 * describe only what is visible.
 */
1044 virthba_slave_alloc(struct scsi_device *scsidev)
1046 /* this called by the midlayer before scan for new devices -
1047 * LLD can alloc any struct & do init if needed.
1049 struct virtdisk_info *vdisk;
1050 struct virtdisk_info *tmpvdisk;
1051 struct virthba_info *virthbainfo;
1052 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
1054 virthbainfo = (struct virthba_info *)scsihost->hostdata;
1056 LOGERR("Could not find virthba_info for scsihost\n");
1057 return 0; /* even though we errored, treat as success */
/* walk the list; if this <channel:id:lun> is already tracked, do not re-add */
1059 for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
1060 if (vdisk->next->valid &&
1061 (vdisk->next->channel == scsidev->channel) &&
1062 (vdisk->next->id == scsidev->id) &&
1063 (vdisk->next->lun == scsidev->lun))
/* GFP_ATOMIC: presumably because this can run in a non-sleepable context —
 * TODO confirm against the mid-layer's slave_alloc calling context */
1066 tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
1067 if (!tmpvdisk) { /* error allocating */
1068 LOGERR("Could not allocate memory for disk\n");
1072 tmpvdisk->channel = scsidev->channel;
1073 tmpvdisk->id = scsidev->id;
1074 tmpvdisk->lun = scsidev->lun;
1075 tmpvdisk->valid = 1;
/* append to the tail of the list (loop above left vdisk at the last node) */
1076 vdisk->next = tmpvdisk;
1077 return 0; /* success */
/*
 * virthba_slave_configure() - SCSI mid-layer callback after slave_alloc.
 * No per-device configuration is needed for this virtual HBA; always
 * reports success.
 */
1081 virthba_slave_configure(struct scsi_device *scsidev)
1083 return 0; /* success */
/*
 * virthba_slave_destroy() - SCSI mid-layer callback; counterpart of
 * virthba_slave_alloc().  Unlinks (and presumably frees — the kfree line is
 * elided in this listing; TODO confirm) the virtdisk_info entry matching
 * this device's <channel:id:lun>.
 */
1087 virthba_slave_destroy(struct scsi_device *scsidev)
1089 /* midlevel calls this after device has been quiesced and
1090 * before it is to be deleted.
1092 struct virtdisk_info *vdisk, *delvdisk;
1093 struct virthba_info *virthbainfo;
1094 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
1096 virthbainfo = (struct virthba_info *)scsihost->hostdata;
1098 LOGERR("Could not find virthba_info for scsihost\n");
/* find the node whose successor matches this device, then splice it out */
1099 for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
1100 if (vdisk->next->valid &&
1101 (vdisk->next->channel == scsidev->channel) &&
1102 (vdisk->next->id == scsidev->id) &&
1103 (vdisk->next->lun == scsidev->lun)) {
1104 delvdisk = vdisk->next;
1105 vdisk->next = vdisk->next->next;
1112 /*****************************************************/
1113 /* Scsi Cmnd support thread */
1114 /*****************************************************/
/*
 * do_scsi_linuxstat() - handle a response that carries a Linux-level error
 * status.  Copies the sense buffer back into the scsi_cmnd, suppresses
 * logging for disk-not-present INQUIRY responses, and rate-limits error
 * logging per virtual disk via vdisk->error_count / ios_threshold.
 */
1117 do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1119 struct virtdisk_info *vdisk;
1120 struct scsi_device *scsidev;
1121 struct sense_data *sd;
1123 scsidev = scsicmd->device;
/* propagate sense data from the IOVM response to the mid-layer's buffer */
1124 memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
1125 sd = (struct sense_data *)scsicmd->sense_buffer;
1127 /* Do not log errors for disk-not-present inquiries */
1128 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
1129 (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
1130 (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
1133 /* Okay see what our error_count is here.... */
1134 for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
1135 vdisk->next; vdisk = vdisk->next) {
/* skip entries that are not this device */
1136 if ((scsidev->channel != vdisk->channel) ||
1137 (scsidev->id != vdisk->id) ||
1138 (scsidev->lun != vdisk->lun))
/* log only the first VIRTHBA_ERROR_COUNT errors for this disk */
1141 if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) {
1142 atomic_inc(&vdisk->error_count);
1143 LOGERR("SCSICMD ****FAILED scsicmd:0x%p op:0x%x <%d:%d:%d:%llu> 0x%x-0x%x-0x%x-0x%x-0x%x.\n",
1144 scsicmd, cmdrsp->scsi.cmnd[0],
1145 scsidev->host->host_no, scsidev->id,
1146 scsidev->channel, scsidev->lun,
1147 cmdrsp->scsi.linuxstat, sd->valid, sd->sense_key,
1148 sd->additional_sense_code,
1149 sd->additional_sense_code_qualifier);
1150 if (atomic_read(&vdisk->error_count) ==
1151 VIRTHBA_ERROR_COUNT) {
/* NOTE(review): "Throtling" in the log string is a typo for
 * "Throttling" — runtime string left untouched in this doc-only pass */
1152 LOGERR("Throtling SCSICMD errors disk <%d:%d:%d:%llu>\n",
1153 scsidev->host->host_no, scsidev->id,
1154 scsidev->channel, scsidev->lun);
/* arm the success counter that will later reset error_count to zero */
1156 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
/*
 * do_scsi_nolinuxstat() - handle a successful response (no Linux-level
 * error).  For INQUIRY responses flagged no_disk_result, substitutes a
 * synthesized "processor, no disk" inquiry payload (built in buf[36]) and
 * copies it into the command's scatterlist.  Also decrements the per-disk
 * ios_threshold so a healthy disk eventually has its error_count reset.
 * NOTE(review): listing elides lines (declarations of i/bufind, braces);
 * comments describe only what is visible.
 */
1162 do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1164 struct scsi_device *scsidev;
1165 unsigned char buf[36];
1166 struct scatterlist *sg;
1169 char *thispage_orig;
1171 struct virtdisk_info *vdisk;
1173 scsidev = scsicmd->device;
1174 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
1175 (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
1176 if (cmdrsp->scsi.no_disk_result == 0)
1179 /* Linux scsi code is weird; it wants
1180 * a device at Lun 0 to issue report
1181 * luns, but we don't want a disk
1182 * there so we'll present a processor
/* build the faked inquiry data into the local buf */
1184 SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
1186 DEV_DISK_CAPABLE_NOT_PRESENT,
1189 if (scsi_sg_count(scsicmd) == 0) {
1190 if (scsi_bufflen(scsicmd) > 0) {
1191 LOGERR("**** FAILED No scatter list for bufflen > 0\n");
1192 BUG_ON(scsi_sg_count(scsicmd) ==
1195 memcpy(scsi_sglist(scsicmd), buf,
1196 cmdrsp->scsi.bufflen);
/* scatter-gather case: copy the faked inquiry page by page */
1200 sg = scsi_sglist(scsicmd);
1201 for (i = 0; i < scsi_sg_count(scsicmd); i++) {
1202 DBGVER("copying OUT OF buf into 0x%p %d\n",
1203 sg_page(sg + i), sg[i].length);
1204 thispage_orig = kmap_atomic(sg_page(sg + i));
1205 thispage = (void *)((unsigned long)thispage_orig |
1207 memcpy(thispage, buf + bufind, sg[i].length);
1208 kunmap_atomic(thispage_orig);
1209 bufind += sg[i].length;
/* success path for this disk: count down toward resetting error_count */
1212 vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
1213 for ( ; vdisk->next; vdisk = vdisk->next) {
1214 if ((scsidev->channel != vdisk->channel) ||
1215 (scsidev->id != vdisk->id) ||
1216 (scsidev->lun != vdisk->lun))
1219 if (atomic_read(&vdisk->ios_threshold) > 0) {
1220 atomic_dec(&vdisk->ios_threshold);
1221 if (atomic_read(&vdisk->ios_threshold) == 0) {
1222 LOGERR("Resetting error count for disk\n");
1223 atomic_set(&vdisk->error_count, 0);
/*
 * complete_scsi_command() - finish a SCSI command using the IOVM response.
 * Copies the Linux status out of cmdrsp, dispatches to the error or
 * success handler, then invokes the mid-layer completion callback.
 */
1231 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1233 DBGINF("cmdrsp: 0x%p, scsistat:0x%x.\n", cmdrsp, cmdrsp->scsi.scsistat);
1235 /* take what we need out of cmdrsp and complete the scsicmd */
1236 scsicmd->result = cmdrsp->scsi.linuxstat;
/* nonzero linuxstat means an error path; zero means success path */
1237 if (cmdrsp->scsi.linuxstat)
1238 do_scsi_linuxstat(cmdrsp, scsicmd);
1240 do_scsi_nolinuxstat(cmdrsp, scsicmd);
1242 if (scsicmd->scsi_done) {
1243 DBGVER("Scsi_DONE\n");
1244 scsicmd->scsi_done(scsicmd);
/*
 * complete_vdiskmgmt_command() - publish a virtual-disk-management result
 * and wake the waiter.  notifyresult points at the waiter's int result
 * slot; notify is its wait_queue_head_t.
 */
1249 complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
1251 /* copy the result of the taskmgmt and */
1252 /* wake up the error handler that is waiting for this */
1253 *(int *)cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
1254 wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify);
1255 LOGINF("set notify result to %d\n", cmdrsp->vdiskmgmt.result);
/*
 * complete_taskmgmt_command() - publish a SCSI task-management result and
 * wake the waiter.  Mirrors complete_vdiskmgmt_command() but for the
 * scsitaskmgmt member of the response union.
 */
1259 complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
1261 /* copy the result of the taskmgmt and */
1262 /* wake up the error handler that is waiting for this */
1263 *(int *)cmdrsp->scsitaskmgmt.notifyresult =
1264 cmdrsp->scsitaskmgmt.result;
1265 wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify);
1266 LOGINF("set notify result to %d\n", cmdrsp->scsitaskmgmt.result);
/*
 * drain_queue() - pull responses from the IOVM channel and dispatch each
 * by cmdtype: SCSI command completion, task-management completion, disk
 * add/remove notification, or vdisk-management completion.  The channel
 * is acquired/released around the dequeue under insertlock; cmdrsp is a
 * caller-provided scratch buffer reused for every response.
 * NOTE(review): the surrounding loop header and some braces are elided
 * in this listing.
 */
1270 drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
1271 struct uiscmdrsp *cmdrsp)
1273 unsigned long flags;
1275 struct scsi_cmnd *scsicmd;
1276 struct Scsi_Host *shost = virthbainfo->scsihost;
1279 spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
/* if the channel cannot be acquired, count the failure and bail out */
1280 if (!spar_channel_client_acquire_os(dc->queueinfo->chan,
1282 spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
1284 virthbainfo->acquire_failed_cnt++;
1287 qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp,
1288 IOCHAN_FROM_IOPART);
1289 spar_channel_client_release_os(dc->queueinfo->chan, "vhba");
1290 spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags);
1293 if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
1294 /* scsicmd location is returned by the
/* look up and remove the pending entry keyed by the handle we sent */
1297 scsicmd = del_scsipending_entry(virthbainfo,
1299 cmdrsp->scsi.scsicmd);
1302 /* complete the orig cmd */
1303 complete_scsi_command(cmdrsp, scsicmd);
1304 } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
1305 if (!del_scsipending_entry(virthbainfo,
1306 (uintptr_t)cmdrsp->scsitaskmgmt.scsicmd))
1308 complete_taskmgmt_command(cmdrsp);
1309 } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
1310 /* The vHba pointer has no meaning in
1311 * a Client/Guest Partition. Let's be
1312 * safe and set it to NULL now. Do
1313 * not use it here! */
1314 cmdrsp->disknotify.v_hba = NULL;
1315 process_disk_notify(shost, cmdrsp);
1316 } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
1317 if (!del_scsipending_entry(virthbainfo,
1319 cmdrsp->vdiskmgmt.scsicmd))
1321 complete_vdiskmgmt_command(cmdrsp);
1323 LOGERR("Invalid cmdtype %d\n", cmdrsp->cmdtype);
1324 /* cmdrsp is now available for reuse */
1328 /* main function for the thread that waits for scsi commands to arrive
1329 * in a specified queue
/*
 * process_incoming_rsps() - kernel-thread entry point that services the
 * response queue.  Allocates one reusable uiscmdrsp buffer, then (in a
 * loop whose header is elided in this listing) waits — interruptibly,
 * with a timeout of rsltq_wait_usecs — for an interrupt or poll period,
 * drains the queue, and re-enables channel interrupts via the interlocked
 * OR on flags_addr.  Exits via complete_and_exit() when asked to stop or
 * when the initial allocation fails.
 */
1332 process_incoming_rsps(void *v)
1334 struct virthba_info *virthbainfo = v;
1335 struct chaninfo *dc = &virthbainfo->chinfo;
1336 struct uiscmdrsp *cmdrsp = NULL;
1337 const int SZ = sizeof(struct uiscmdrsp);
1339 unsigned long long rc1;
1341 UIS_DAEMONIZE("vhba_incoming");
1342 /* alloc once and reuse */
1343 cmdrsp = kmalloc(SZ, GFP_ATOMIC);
1344 if (cmdrsp == NULL) {
1345 LOGERR("process_incoming_rsps ****FAILED to malloc - thread exiting\n");
1346 complete_and_exit(&dc->threadinfo.has_stopped, 0);
1349 mask = ULTRA_CHANNEL_ENABLE_INTS;
/* wake on interrupt_rcvd flag or after the poll timeout, whichever first */
1351 wait_event_interruptible_timeout(virthbainfo->rsp_queue,
1352 (atomic_read(&virthbainfo->interrupt_rcvd) == 1),
1353 usecs_to_jiffies(rsltq_wait_usecs));
1354 atomic_set(&virthbainfo->interrupt_rcvd, 0);
1356 drain_queue(virthbainfo, dc, cmdrsp);
/* re-enable interrupts in the channel feature flags after draining */
1357 rc1 = uisqueue_interlocked_or(virthbainfo->flags_addr, mask);
1358 if (dc->threadinfo.should_stop)
1364 DBGINF("exiting processing incoming rsps.\n");
1365 complete_and_exit(&dc->threadinfo.has_stopped, 0);
1368 /*****************************************************/
1369 /* Debugfs filesystem functions */
1370 /*****************************************************/
/*
 * info_debugfs_read() - debugfs "info" file read handler.  Formats one
 * stats section per open vHBA (max buffer length, poll wait, interrupt
 * counters, channel flags address and feature flags, acquire failures)
 * into a kzalloc'd buffer of the caller's requested length, then copies
 * it out with simple_read_from_buffer().
 * NOTE(review): declarations of str_pos/vbuf and the kfree/return tail
 * are elided in this listing.
 */
static ssize_t info_debugfs_read(struct file *file,
char __user *buf, size_t len, loff_t *offset)
ssize_t bytes_read = 0;
u64 phys_flags_addr;
struct virthba_info *virthbainfo;
/* scratch buffer sized by the caller's read length */
vbuf = kzalloc(len, GFP_KERNEL);
for (i = 0; i < VIRTHBASOPENMAX; i++) {
if (virthbas_open[i].virthbainfo == NULL)
virthbainfo = virthbas_open[i].virthbainfo;
str_pos += scnprintf(vbuf + str_pos,
len - str_pos, "max_buff_len:%u\n",
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"\nvirthba result queue poll wait:%d usecs.\n",
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
virthbainfo->interrupts_rcvd,
virthbainfo->interrupts_disabled);
str_pos += scnprintf(vbuf + str_pos,
len - str_pos, "\ninterrupts_notme = %llu,\n",
virthbainfo->interrupts_notme);
/* report both the virtual and physical address of the channel flags */
phys_flags_addr = virt_to_phys((__force void *)
virthbainfo->flags_addr);
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
virthbainfo->flags_addr, phys_flags_addr,
(__le64)readq(virthbainfo->flags_addr));
str_pos += scnprintf(vbuf + str_pos,
len - str_pos, "acquire_failed_cnt:%llu\n",
virthbainfo->acquire_failed_cnt);
str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
/*
 * enable_ints_write() - debugfs "enable_ints" write handler.  Parses an
 * integer from userspace; value 1 switches every open vHBA channel to
 * interrupt-driven mode (and slows the poll fallback to 4 s), any other
 * value switches back to polling mode (4 ms poll period).  Mode changes
 * are made by atomically clearing/setting feature bits in each channel's
 * features word.
 * NOTE(review): declarations of buf/new_value/mask and some braces are
 * elided in this listing.
 */
static ssize_t enable_ints_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
struct virthba_info *virthbainfo;
u64 __iomem *features_addr;
/* reject input that would not fit (leaves room for NUL terminator) */
if (count >= ARRAY_SIZE(buf))
if (copy_from_user(buf, buffer, count)) {
LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
(int)count, buf, count);
i = kstrtoint(buf, 10, &new_value);
LOGERR("Failed to scan value for enable_ints, buf<<%.*s>>",
/* set all counts to new_value usually 0 */
for (i = 0; i < VIRTHBASOPENMAX; i++) {
if (virthbas_open[i].virthbainfo != NULL) {
virthbainfo = virthbas_open[i].virthbainfo;
&virthbainfo->chinfo.queueinfo->chan->features;
if (new_value == 1) {
/* interrupt mode: clear polling bits, set driver-enables-ints */
mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
ULTRA_IO_DRIVER_DISABLES_INTS);
uisqueue_interlocked_and(features_addr, mask);
mask = ULTRA_IO_DRIVER_ENABLES_INTS;
uisqueue_interlocked_or(features_addr, mask);
rsltq_wait_usecs = 4000000;
/* polling mode: clear interrupt bits, set channel-is-polling */
mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS |
ULTRA_IO_DRIVER_DISABLES_INTS);
uisqueue_interlocked_and(features_addr, mask);
mask = ULTRA_IO_CHANNEL_IS_POLLING;
uisqueue_interlocked_or(features_addr, mask);
rsltq_wait_usecs = 4000;
1479 /* As per VirtpciFunc returns 1 for success and 0 for failure */
/*
 * virthba_serverup() - virtpci callback when the IOVM server partition
 * comes (back) up.  Guards against redundant/overlapping state changes,
 * transitions the channel to ATTACHED, restarts the response-processing
 * thread, then clears serverdown/serverchangingstate.
 * Per the convention noted above: returns 1 for success, 0 for failure
 * (return lines themselves are elided in this listing).
 */
1481 virthba_serverup(struct virtpci_dev *virtpcidev)
1483 struct virthba_info *virthbainfo =
1484 (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi.
1485 scsihost)->hostdata;
1487 DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
1488 virtpcidev->device_no);
/* ignore a server-up message when we never saw the server go down */
1490 if (!virthbainfo->serverdown) {
1491 DBGINF("Server up message received while server is already up.\n");
1494 if (virthbainfo->serverchangingstate) {
1495 LOGERR("Server already processing change state message\n");
1499 virthbainfo->serverchangingstate = true;
1500 /* Must transition channel to ATTACHED state BEFORE we
1501 * can start using the device again
1503 SPAR_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
1504 dev_name(&virtpcidev->generic_dev),
1505 CHANNELCLI_ATTACHED, NULL);
1507 /* Start Processing the IOVM Response Queue Again */
1508 if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
1509 process_incoming_rsps,
1510 virthbainfo, "vhba_incoming")) {
1511 LOGERR("uisthread_start rsp ****FAILED\n");
1514 virthbainfo->serverdown = false;
1515 virthbainfo->serverchangingstate = false;
/*
 * virthba_serverdown_complete() - workqueue handler that finishes a
 * server-down transition.  Stops the response thread, then walks the
 * pending-request table under privlock and fails every outstanding
 * request by type: SCSI commands complete with DID_RESET; task-mgmt and
 * vdisk-mgmt requests have their waiters' result slots written and are
 * woken.  Finally marks the device serverdown and acks the pause to
 * visorchipset.
 * NOTE(review): the CMD_SCSI_TYPE case label, break statements, and the
 * result values written (original lines 1555/1562) are elided in this
 * listing.
 */
1521 virthba_serverdown_complete(struct work_struct *work)
1523 struct virthba_info *virthbainfo;
1524 struct virtpci_dev *virtpcidev;
1526 struct scsipending *pendingdel = NULL;
1527 struct scsi_cmnd *scsicmd = NULL;
1528 struct uiscmdrsp *cmdrsp;
1529 unsigned long flags;
1531 virthbainfo = container_of(work, struct virthba_info,
1532 serverdown_completion);
1534 /* Stop Using the IOVM Response Queue (queue should be drained
1537 uisthread_stop(&virthbainfo->chinfo.threadinfo);
1539 /* Fail Commands that weren't completed */
1540 spin_lock_irqsave(&virthbainfo->privlock, flags);
1541 for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
1542 pendingdel = &virthbainfo->pending[i];
1543 switch (pendingdel->cmdtype) {
/* (CMD_SCSI_TYPE case) fail the command back to the mid-layer as a reset */
1545 scsicmd = (struct scsi_cmnd *)pendingdel->sent;
1546 scsicmd->result = (DID_RESET << 16);
1547 if (scsicmd->scsi_done)
1548 scsicmd->scsi_done(scsicmd);
1550 case CMD_SCSITASKMGMT_TYPE:
1551 cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
1552 DBGINF("cmdrsp=0x%x, notify=0x%x\n", cmdrsp,
1553 cmdrsp->scsitaskmgmt.notify);
1554 *(int *)cmdrsp->scsitaskmgmt.notifyresult =
1556 wake_up_all((wait_queue_head_t *)
1557 cmdrsp->scsitaskmgmt.notify);
1559 case CMD_VDISKMGMT_TYPE:
1560 cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
1561 *(int *)cmdrsp->vdiskmgmt.notifyresult =
1563 wake_up_all((wait_queue_head_t *)
1564 cmdrsp->vdiskmgmt.notify);
/* (default case) unknown type: log only if the slot was actually in use */
1567 if (pendingdel->sent != NULL)
1568 LOGERR("Unknown command type: 0x%x. Only freeing list structure.\n",
1569 pendingdel->cmdtype);
/* clear the slot regardless of type */
1571 pendingdel->cmdtype = 0;
1572 pendingdel->sent = NULL;
1574 spin_unlock_irqrestore(&virthbainfo->privlock, flags);
1576 virtpcidev = virthbainfo->virtpcidev;
1578 DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
1579 virtpcidev->device_no);
1580 virthbainfo->serverdown = true;
1581 virthbainfo->serverchangingstate = false;
1582 /* Return the ServerDown response to Command */
1583 visorchipset_device_pause_response(virtpcidev->bus_no,
1584 virtpcidev->device_no, 0);
1587 /* As per VirtpciFunc returns 1 for success and 0 for failure */
/*
 * virthba_serverdown() - virtpci callback when the IOVM server partition
 * goes down.  If no transition is already in flight, marks the state as
 * changing and defers the heavy teardown to virthba_serverdown_complete()
 * on the serverdown workqueue; otherwise logs why the message is ignored.
 * Per the convention noted above: returns 1 for success, 0 for failure
 * (return lines themselves are elided in this listing).
 */
1589 virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
1593 struct virthba_info *virthbainfo =
1594 (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi.
1595 scsihost)->hostdata;
1597 DBGINF("virthba_serverdown");
1598 DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
1599 virtpcidev->device_no);
1601 if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) {
1602 virthbainfo->serverchangingstate = true;
/* actual teardown happens asynchronously in the workqueue handler */
1603 queue_work(virthba_serverdown_workqueue,
1604 &virthbainfo->serverdown_completion);
1605 } else if (virthbainfo->serverchangingstate) {
1606 LOGERR("Server already processing change state message\n");
1609 LOGERR("Server already down, but another server down message received.");
1615 /*****************************************************/
1616 /* Module Init & Exit functions */
1617 /*****************************************************/
/*
 * virthba_parse_line() - parse one whitespace-delimited module option.
 * Only a debug trace is visible here; no options are currently handled
 * (callers treat a false return as "unknown option").
 */
1620 virthba_parse_line(char *str)
1622 DBGINF("In virthba_parse_line %s\n", str);
/*
 * virthba_parse_options() - split the module option string on spaces and
 * feed each token to virthba_parse_line(), logging tokens it rejects.
 * Posts entry/exit POSTCODEs for diagnostics.
 * NOTE(review): the declaration of 'next' and its initialization are
 * elided in this listing.
 */
1627 virthba_parse_options(char *line)
1631 POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* nothing to do for a NULL or empty option string */
1632 if (line == NULL || !*line)
1634 while ((line = next) != NULL) {
1635 next = strchr(line, ' ');
1638 if (!virthba_parse_line(line))
1639 DBGINF("Unknown option '%s'\n", line);
1642 POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
/*
 * virthba_mod_init() - module init.  Bails out unless running on the
 * Unisys s-Par platform; parses module options; registers the virtpci
 * driver; creates the debugfs directory and its info/rqwait_usecs/
 * enable_ints entries; initializes the disk add/remove work item and
 * lock; clears the open-HBA table; and creates the single-threaded
 * serverdown workqueue.  POSTCODEs mark entry, exit and failures.
 * NOTE(review): error-path returns and some braces are elided in this
 * listing.
 */
1646 virthba_mod_init(void)
/* refuse to load on non-s-Par hardware */
1651 if (!unisys_spar_platform)
1654 LOGINF("Entering virthba_mod_init...\n");
1656 POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1657 virthba_parse_options(virthba_options);
1659 error = virtpci_register_driver(&virthba_driver);
1661 LOGERR("register ****FAILED 0x%x\n", error);
1662 POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error,
1663 POSTCODE_SEVERITY_ERR);
1665 /* create the debugfs directories and entries */
1666 virthba_debugfs_dir = debugfs_create_dir("virthba", NULL);
1667 debugfs_create_file("info", S_IRUSR, virthba_debugfs_dir,
1668 NULL, &debugfs_info_fops);
1669 debugfs_create_u32("rqwait_usecs", S_IRUSR | S_IWUSR,
1670 virthba_debugfs_dir, &rsltq_wait_usecs);
1671 debugfs_create_file("enable_ints", S_IWUSR,
1672 virthba_debugfs_dir, NULL,
1673 &debugfs_enable_ints_fops);
1674 /* Initialize dar_work_queue */
1675 INIT_WORK(&dar_work_queue, do_disk_add_remove);
1676 spin_lock_init(&dar_work_queue_lock);
1678 /* clear out array */
1679 for (i = 0; i < VIRTHBASOPENMAX; i++)
1680 virthbas_open[i].virthbainfo = NULL;
1681 /* Initialize the serverdown workqueue */
1682 virthba_serverdown_workqueue =
1683 create_singlethread_workqueue("virthba_serverdown");
1684 if (virthba_serverdown_workqueue == NULL) {
1685 LOGERR("**** FAILED virthba_serverdown_workqueue creation\n");
1686 POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC,
1687 POSTCODE_SEVERITY_ERR);
1692 POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
1693 LOGINF("Leaving virthba_mod_init\n");
/*
 * virthba_acquire_lun() - sysfs store handler for the "acquire_lun"
 * attribute.  Parses "channel-id-lun" from the written string and
 * forwards a VDISK_MGMT_ACQUIRE request for that destination.
 * NOTE(review): the sscanf result check (between lines 1705 and 1709)
 * is elided in this listing — presumably it rejects i != 3.
 */
1698 virthba_acquire_lun(struct device *cdev, struct device_attribute *attr,
1699 const char *buf, size_t count)
1701 struct uisscsi_dest vdest;
1702 struct Scsi_Host *shost = class_to_shost(cdev);
1705 i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
1709 return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE, shost, &vdest);
/*
 * virthba_release_lun() - sysfs store handler for the "release_lun"
 * attribute; mirror image of virthba_acquire_lun() but forwards
 * VDISK_MGMT_RELEASE for the parsed "channel-id-lun" destination.
 */
1713 virthba_release_lun(struct device *cdev, struct device_attribute *attr,
1714 const char *buf, size_t count)
1716 struct uisscsi_dest vdest;
1717 struct Scsi_Host *shost = class_to_shost(cdev);
1720 i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
1724 return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE, shost, &vdest);
/* Convenience wrapper mirroring the old class_device_attr naming on top
 * of the standard __ATTR() device-attribute initializer. */
#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute class_device_attr_##_name = \
__ATTR(_name, _mode, _show, _store)

/* Write-only (root) attributes: echo "chan-id-lun" to acquire/release a LUN */
static CLASS_DEVICE_ATTR(acquire_lun, S_IWUSR, NULL, virthba_acquire_lun);
static CLASS_DEVICE_ATTR(release_lun, S_IWUSR, NULL, virthba_release_lun);

/* NULL-terminated (terminator elided in this listing — confirm) table of
 * host attributes exposed via the scsi_host_template */
static DEVICE_ATTRIBUTE *virthba_shost_attrs[] = {
&class_device_attr_acquire_lun,
&class_device_attr_release_lun,
/*
 * virthba_mod_exit() - module teardown, reverse of virthba_mod_init():
 * unregisters the virtpci driver (which triggers per-device removal),
 * destroys the serverdown workqueue, and removes the debugfs tree.
 */
1741 virthba_mod_exit(void)
1743 LOGINF("entering virthba_mod_exit...\n");
1745 virtpci_unregister_driver(&virthba_driver);
1746 /* unregister is going to call virthba_remove */
1747 /* destroy serverdown completion workqueue */
1748 if (virthba_serverdown_workqueue) {
1749 destroy_workqueue(virthba_serverdown_workqueue);
1750 virthba_serverdown_workqueue = NULL;
1753 debugfs_remove_recursive(virthba_debugfs_dir);
1754 LOGINF("Leaving virthba_mod_exit\n");
1757 /* specify function to be run at module insertion time */
1758 module_init(virthba_mod_init);
1760 /* specify function to be run when module is removed */
1761 module_exit(virthba_mod_exit);
1763 MODULE_LICENSE("GPL");
1764 MODULE_AUTHOR("Usha Srinivasan");
1765 MODULE_ALIAS("uisvirthba");
1766 /* this is extracted during depmod and kept in modules.dep */
1767 /* module parameter */
1768 module_param(virthba_options, charp, S_IRUGO);