hpsa: fix physical target reset
drivers/scsi/hpsa.c
/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to storagedev@pmcs.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.10-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20    /* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10    /* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000      /* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000       /* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
                "Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
        "Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
        {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
        {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
        {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
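
/*
 * A minimal sketch (illustration only; hpsa_board_id_key() is not part of
 * the driver) of how the board_id keys in products[] below are formed:
 * the PCI subsystem device ID occupies the high 16 bits and the subsystem
 * vendor ID the low 16 bits, so the Smart Array P212 (vendor 0x103C,
 * device 0x3241) yields 0x3241103C.
 */
static inline u32 hpsa_board_id_key(u16 subsys_device_id, u16 subsys_vendor_id)
{
        return ((u32)subsys_device_id << 16) | subsys_vendor_id;
}
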
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324A103C, "Smart Array P712m", &SA5_access},
        {0x324B103C, "Smart Array P711m", &SA5_access},
        {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
        {0x3350103C, "Smart Array P222", &SA5_access},
        {0x3351103C, "Smart Array P420", &SA5_access},
        {0x3352103C, "Smart Array P421", &SA5_access},
        {0x3353103C, "Smart Array P822", &SA5_access},
        {0x3354103C, "Smart Array P420i", &SA5_access},
        {0x3355103C, "Smart Array P220i", &SA5_access},
        {0x3356103C, "Smart Array P721m", &SA5_access},
        {0x1921103C, "Smart Array P830i", &SA5_access},
        {0x1922103C, "Smart Array P430", &SA5_access},
        {0x1923103C, "Smart Array P431", &SA5_access},
        {0x1924103C, "Smart Array P830", &SA5_access},
        {0x1926103C, "Smart Array P731m", &SA5_access},
        {0x1928103C, "Smart Array P230i", &SA5_access},
        {0x1929103C, "Smart Array P530", &SA5_access},
        {0x21BD103C, "Smart Array P244br", &SA5_access},
        {0x21BE103C, "Smart Array P741m", &SA5_access},
        {0x21BF103C, "Smart HBA H240ar", &SA5_access},
        {0x21C0103C, "Smart Array P440ar", &SA5_access},
        {0x21C1103C, "Smart Array P840ar", &SA5_access},
        {0x21C2103C, "Smart Array P440", &SA5_access},
        {0x21C3103C, "Smart Array P441", &SA5_access},
        {0x21C4103C, "Smart Array", &SA5_access},
        {0x21C5103C, "Smart Array P841", &SA5_access},
        {0x21C6103C, "Smart HBA H244br", &SA5_access},
        {0x21C7103C, "Smart HBA H240", &SA5_access},
        {0x21C8103C, "Smart HBA H241", &SA5_access},
        {0x21C9103C, "Smart Array", &SA5_access},
        {0x21CA103C, "Smart Array P246br", &SA5_access},
        {0x21CB103C, "Smart Array P840", &SA5_access},
        {0x21CC103C, "Smart Array", &SA5_access},
        {0x21CD103C, "Smart Array", &SA5_access},
        {0x21CE103C, "Smart HBA", &SA5_access},
        {0x05809005, "SmartHBA-SA", &SA5_access},
        {0x05819005, "SmartHBA-SA 8i", &SA5_access},
        {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
        {0x05839005, "SmartHBA-SA 8e", &SA5_access},
        {0x05849005, "SmartHBA-SA 16i", &SA5_access},
        {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
        {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
        {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

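/*
 * hpsa_cmd_busy and hpsa_cmd_idle are never issued as real commands; their
 * addresses serve as sentinel values for CommandList->scsi_cmd, letting the
 * driver tell an idle slot from one claimed for driver-internal use (see
 * hpsa_is_cmd_idle() below).
 */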
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
        void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
                                            struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
        int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
                               u32 *cfg_base_addr, u64 *cfg_base_addr_index,
                               u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
                                    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
                                     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
        return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
        return c->abort_pending || c->reset_pending;
}

/* extract sense key, asc, and ascq from sense data.  -1 (0xff) means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
                        u8 *sense_key, u8 *asc, u8 *ascq)
{
        struct scsi_sense_hdr sshdr;
        bool rc;

        *sense_key = -1;
        *asc = -1;
        *ascq = -1;

        if (sense_data_len < 1)
                return;

        rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
        if (rc) {
                *sense_key = sshdr.sense_key;
                *asc = sshdr.asc;
                *ascq = sshdr.ascq;
        }
}

static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        u8 sense_key, asc, ascq;
        int sense_len;

        if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
                sense_len = sizeof(c->err_info->SenseInfo);
        else
                sense_len = c->err_info->SenseLen;

        decode_sense_data(c->err_info->SenseInfo, sense_len,
                                &sense_key, &asc, &ascq);
        if (sense_key != UNIT_ATTENTION || asc == 0xff)
                return 0;

        switch (asc) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev,
                        "%s: a state change detected, command retried\n",
                        h->devname);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev,
                        "%s: LUN failure detected\n", h->devname);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev,
                        "%s: report LUN data changed\n", h->devname);
                /*
                 * Note: this REPORT_LUNS_CHANGED condition only occurs on
                 * the external target (array) devices.
                 */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev,
                        "%s: a power on or device reset detected\n",
                        h->devname);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev,
                        "%s: unit attention cleared by another initiator\n",
                        h->devname);
                break;
        default:
                dev_warn(&h->pdev->dev,
                        "%s: unknown unit attention detected\n",
                        h->devname);
                break;
        }
        return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
        if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
                (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
                 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
                return 0;
        dev_warn(&h->pdev->dev, HPSA ": device busy\n");
        return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ld;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        ld = lockup_detected(h);

        return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int status, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &status) != 1)
                return -EINVAL;
        h = shost_to_hba(shost);
        h->acciopath_status = !!status;
        dev_warn(&h->pdev->dev,
                "hpsa: HP SSD Smart Path %s via sysfs update.\n",
                h->acciopath_status ? "enabled" : "disabled");
        return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int debug_level, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &debug_level) != 1)
                return -EINVAL;
        if (debug_level < 0)
                debug_level = 0;
        h = shost_to_hba(shost);
        h->raid_offload_debug = debug_level;
        dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
                h->raid_offload_debug);
        return count;
}

static ssize_t host_store_rescan(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        h = shost_to_hba(shost);
        hpsa_scan_start(h->scsi_host);
        return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        unsigned char *fwrev;

        h = shost_to_hba(shost);
        if (!h->hba_inquiry_data)
                return 0;
        fwrev = &h->hba_inquiry_data[32];
        return snprintf(buf, 20, "%c%c%c%c\n",
                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ctlr_info *h = shost_to_hba(shost);

        return snprintf(buf, 20, "%d\n",
                        atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%s\n",
                h->transMethod & CFGTBL_Trans_Performant ?
                        "performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 30, "HP SSD Smart Path %s\n",
                (h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
        0x3223103C, /* Smart Array P800 */
        0x3234103C, /* Smart Array P400 */
        0x3235103C, /* Smart Array P400i */
        0x3211103C, /* Smart Array E200i */
        0x3212103C, /* Smart Array E200 */
        0x3213103C, /* Smart Array E200i */
        0x3214103C, /* Smart Array E200i */
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
        0x40800E11, /* Smart Array 5i */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
        0x40800E11, /* Smart Array 5i */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
         * it.  If we reset the one controlling the cache, the other will
         * likely not be happy.  Just forbid resetting this conjoined mess.
         * The 640x isn't really supported by hpsa anyway.
         */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
        0x323D103C, /* Smart Array P700m */
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
        int i;

        for (i = 0; i < nelems; i++)
                if (a[i] == board_id)
                        return 1;
        return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
        return !board_id_in_array(unresettable_controller,
                        ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
        return !board_id_in_array(soft_unresettable_controller,
                        ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
        return ctlr_is_hard_resettable(board_id) ||
                ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
        return board_id_in_array(needs_abort_tags_swizzled,
                        ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

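/*
 * In the 8-byte CISS LUN address, the top two bits of byte 3 encode the
 * addressing mode; 0x40 (mode 01b) marks logical-volume addressing, which
 * is what is_logical_dev_addr_mode() tests below.
 */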
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
        "1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0     0
#define HPSA_RAID_4     1
#define HPSA_RAID_1     2       /* also used for RAID 10 */
#define HPSA_RAID_5     3       /* also used for RAID 50 */
#define HPSA_RAID_51    4
#define HPSA_RAID_6     5       /* also used for RAID 60 */
#define HPSA_RAID_ADM   6       /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                        "%02X%02X%02X%02X%02X%02X%02X%02X"
                        "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                        sn[0], sn[1], sn[2], sn[3],
                        sn[4], sn[5], sn[6], sn[7],
                        sn[8], sn[9], sn[10], sn[11],
                        sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        int offload_enabled;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        offload_enabled = hdev->offload_enabled;
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "%d\n", offload_enabled);
}

#define MAX_PATHS 8
#define PATH_STRING_LEN 50

static ssize_t path_info_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        int i;
        int output_len = 0;
        u8 box;
        u8 bay;
        u8 path_map_index = 0;
        char *active;
        unsigned char phys_connector[2];
        unsigned char path[MAX_PATHS][PATH_STRING_LEN];

        memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->devlock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->devlock, flags);
                return -ENODEV;
        }

        bay = hdev->bay;
        for (i = 0; i < MAX_PATHS; i++) {
                path_map_index = 1<<i;
                if (i == hdev->active_path_index)
                        active = "Active";
                else if (hdev->path_map & path_map_index)
                        active = "Inactive";
                else
                        continue;

                /* use scnprintf so output_len stays within PATH_STRING_LEN */
                output_len = scnprintf(path[i],
                                PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
                                h->scsi_host->host_no,
                                hdev->bus, hdev->target, hdev->lun,
                                scsi_device_type(hdev->devtype));

                if (is_ext_target(h, hdev) ||
                        (hdev->devtype == TYPE_RAID) ||
                        is_logical_dev_addr_mode(hdev->scsi3addr)) {
                        output_len += scnprintf(path[i] + output_len,
                                                PATH_STRING_LEN - output_len,
                                                "%s\n", active);
                        continue;
                }

                box = hdev->box[i];
                memcpy(&phys_connector, &hdev->phys_connector[i],
                        sizeof(phys_connector));
                if (phys_connector[0] < '0')
                        phys_connector[0] = '0';
                if (phys_connector[1] < '0')
                        phys_connector[1] = '0';
                if (hdev->phys_connector[i] > 0)
                        output_len += scnprintf(path[i] + output_len,
                                PATH_STRING_LEN - output_len,
                                "PORT: %.2s ",
                                phys_connector);
                if (hdev->devtype == TYPE_DISK &&
                        hdev->expose_state != HPSA_DO_NOT_EXPOSE) {
                        if (box == 0 || box == 0xFF) {
                                output_len += scnprintf(path[i] + output_len,
                                        PATH_STRING_LEN - output_len,
                                        "BAY: %hhu %s\n",
                                        bay, active);
                        } else {
                                output_len += scnprintf(path[i] + output_len,
                                        PATH_STRING_LEN - output_len,
                                        "BOX: %hhu BAY: %hhu %s\n",
                                        box, bay, active);
                        }
                } else if (box != 0 && box != 0xFF) {
                        output_len += scnprintf(path[i] + output_len,
                                PATH_STRING_LEN - output_len,
                                "BOX: %hhu %s\n", box, active);
                } else
                        output_len += scnprintf(path[i] + output_len,
                                PATH_STRING_LEN - output_len,
                                "%s\n", active);
        }

        spin_unlock_irqrestore(&h->devlock, flags);
        return snprintf(buf, PAGE_SIZE, "%s%s%s%s%s%s%s%s",
                path[0], path[1], path[2], path[3],
                path[4], path[5], path[6], path[7]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
                        host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
                host_show_hp_ssd_smart_path_status,
                host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
                        host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
        host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
        host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
        host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
        host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
        host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        &dev_attr_hp_ssd_smart_path_enabled,
        &dev_attr_path_info,
        &dev_attr_lockup_detected,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_firmware_revision,
        &dev_attr_commands_outstanding,
        &dev_attr_transport_mode,
        &dev_attr_resettable,
        &dev_attr_hp_ssd_smart_path_status,
        &dev_attr_raid_offload_debug,
        NULL,
};

#define HPSA_NRESERVED_CMDS     (HPSA_CMDS_RESERVED_FOR_ABORTS + \
                HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = HPSA,
        .proc_name              = HPSA,
        .queuecommand           = hpsa_scsi_queue_command,
        .scan_start             = hpsa_scan_start,
        .scan_finished          = hpsa_scan_finished,
        .change_queue_depth     = hpsa_change_queue_depth,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_abort_handler       = hpsa_eh_abort_handler,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl                  = hpsa_ioctl,
        .slave_alloc            = hpsa_slave_alloc,
        .slave_configure        = hpsa_slave_configure,
        .slave_destroy          = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = hpsa_compat_ioctl,
#endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
        .max_sectors = 8192,
        .no_write_same = 1,
};

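/*
 * The performant-mode reply queue is a ring buffer: the controller flips the
 * low-order bit of the tags it posts on each pass through the ring, so an
 * entry whose low bit matches rq->wraparound is newly posted, and anything
 * else means the queue is currently empty (FIFO_EMPTY).
 */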
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
        u32 a;
        struct reply_queue_buffer *rq = &h->reply_queue[q];

        if (h->transMethod & CFGTBL_Trans_io_accel1)
                return h->access.command_completed(h, q);

        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h, q);

        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                a = rq->head[rq->current_entry];
                rq->current_entry++;
                atomic_dec(&h->commands_outstanding);
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (rq->current_entry == h->max_commands) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;
        }
        return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
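
/*
 * A minimal sketch of the normal performant-mode tag layout described above
 * (illustration only; hpsa_example_performant_tag() is not part of the
 * driver): bit 0 selects performant mode and bits 1-3 hold the block fetch
 * table entry.  set_performant_mode() below builds the real tag from
 * h->blockFetchTable indexed by the command's scatter-gather count.
 */
static inline u32 hpsa_example_performant_tag(u32 busaddr, u32 fetch_entry)
{
        return busaddr | 1 | (fetch_entry << 1);
}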

/*
 * set_performant_mode: modify the tag for cciss performant mode.
 * Set bit 0 for the pull model and bits 3-1 to the block fetch
 * register number.
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
                                        int reply_queue)
{
        if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (unlikely(!h->msix_vector))
                        return;
                if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                        c->Header.ReplyQueue =
                                raw_smp_processor_id() % h->nreply_queues;
                else
                        c->Header.ReplyQueue = reply_queue % h->nreply_queues;
        }
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

        /*
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
        else
                cp->ReplyQueue = reply_queue % h->nreply_queues;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit (bit 0)
         *  - pull count (bits 1-3)
         *  - command type (bits 4-6)
         */
        c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
                                        IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
                &h->ioaccel2_cmd_pool[c->cmdindex];

        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->reply_queue = smp_processor_id() % h->nreply_queues;
        else
                cp->reply_queue = reply_queue % h->nreply_queues;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
         *  - command type isn't needed for ioaccel2
         */
        c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

        /*
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->reply_queue = smp_processor_id() % h->nreply_queues;
        else
                cp->reply_queue = reply_queue % h->nreply_queues;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
         *  - command type isn't needed for ioaccel2
         */
        c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
        return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
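/* i.e. the lockup detector samples every 240 s during a flash instead of
 * the usual 30 s.
 */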
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
                struct CommandList *c)
{
        if (!is_firmware_flash_cmd(c->Request.CDB))
                return;
        atomic_inc(&h->firmware_flash_in_progress);
        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
                struct CommandList *c)
{
        if (is_firmware_flash_cmd(c->Request.CDB) &&
                atomic_dec_and_test(&h->firmware_flash_in_progress))
                h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c, int reply_queue)
{
        dial_down_lockup_detection_during_fw_flash(h, c);
        atomic_inc(&h->commands_outstanding);
        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
                break;
        case CMD_IOACCEL2:
                set_ioaccel2_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
                break;
        case IOACCEL2_TMF:
                set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
                break;
        default:
                set_performant_mode(h, c, reply_queue);
                h->access.submit_command(h, c);
        }
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
        if (unlikely(hpsa_is_pending_event(c)))
                return finish_cmd(c);

        __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

        bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        __set_bit(h->dev[i]->target, lun_taken);
        }

        i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
        if (i < HPSA_MAX_DEVICES) {
                /* *bus = 1; */
                *target = i;
                *lun = 0;
                found = 1;
        }
        return !found;
}

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
        struct hpsa_scsi_dev_t *dev, char *description)
{
        if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
                return;

        dev_printk(level, &h->pdev->dev,
                        "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
                        h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
                        description,
                        scsi_device_type(dev->devtype),
                        dev->vendor,
                        dev->model,
                        dev->raid_level > RAID_UNKNOWN ?
                                "RAID-?" : raid_label[dev->raid_level],
                        dev->offload_config ? '+' : '-',
                        dev->offload_enabled ? '+' : '-',
                        dev->expose_state);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
                struct hpsa_scsi_dev_t *device,
                struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device,
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, except for bytes 4 and 5.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
        memcpy(addr1, device->scsi3addr, 8);
        addr1[4] = 0;
        addr1[5] = 0;
        for (i = 0; i < n; i++) {
                sd = h->dev[i];
                memcpy(addr2, sd->scsi3addr, 8);
                addr2[4] = 0;
                addr2[5] = 0;
                /* differ only in bytes 4 and 5? */
                if (memcmp(addr1, addr2, 8) == 0) {
                        device->bus = sd->bus;
                        device->target = sd->target;
                        device->lun = device->scsi3addr[4];
                        break;
                }
        }
        if (device->lun == -1) {
                dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
                        " suspect firmware bug or unsupported hardware "
                        "configuration.\n");
                return -1;
        }

lun_assigned:

        h->dev[n] = device;
        h->ndevices++;
        added[*nadded] = device;
        (*nadded)++;
        hpsa_show_dev_msg(KERN_INFO, h, device,
                device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
        device->offload_to_be_enabled = device->offload_enabled;
        device->offload_enabled = 0;
        return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
        int entry, struct hpsa_scsi_dev_t *new_entry)
{
        int offload_enabled;
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        /* Raid level changed. */
        h->dev[entry]->raid_level = new_entry->raid_level;

        /* Raid offload parameters changed.  Careful about the ordering. */
        if (new_entry->offload_config && new_entry->offload_enabled) {
                /*
                 * if drive is newly offload_enabled, we want to copy the
                 * raid map data first.  If previously offload_enabled and
                 * offload_config were set, raid map data had better be
                 * the same as it was before.  if raid map data is changed
                 * then it had better be the case that
                 * h->dev[entry]->offload_enabled is currently 0.
                 */
                h->dev[entry]->raid_map = new_entry->raid_map;
                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
        }
        if (new_entry->hba_ioaccel_enabled) {
                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
                wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
        }
        h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
        h->dev[entry]->offload_config = new_entry->offload_config;
        h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
        h->dev[entry]->queue_depth = new_entry->queue_depth;

        /*
         * We can turn off ioaccel offload now, but need to delay turning
         * it on until we can update h->dev[entry]->phys_disk[], but we
         * can't do that until all the devices are updated.
         */
        h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
        if (!new_entry->offload_enabled)
                h->dev[entry]->offload_enabled = 0;

        offload_enabled = h->dev[entry]->offload_enabled;
        h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
        hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
        h->dev[entry]->offload_enabled = offload_enabled;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
        int entry, struct hpsa_scsi_dev_t *new_entry,
        struct hpsa_scsi_dev_t *added[], int *nadded,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        /*
         * New physical devices won't have target/lun assigned yet
         * so we need to preserve the values in the slot we are replacing.
         */
        if (new_entry->target == -1) {
                new_entry->target = h->dev[entry]->target;
                new_entry->lun = h->dev[entry]->lun;
        }

        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
        hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
        new_entry->offload_to_be_enabled = new_entry->offload_enabled;
        new_entry->offload_enabled = 0;
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        int i;
        struct hpsa_scsi_dev_t *sd;

        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        for (i = entry; i < h->ndevices-1; i++)
                h->dev[i] = h->dev[i+1];
        h->ndevices--;
        hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
        (a)[7] == (b)[7] && \
        (a)[6] == (b)[6] && \
        (a)[5] == (b)[5] && \
        (a)[4] == (b)[4] && \
        (a)[3] == (b)[3] && \
        (a)[2] == (b)[2] && \
        (a)[1] == (b)[1] && \
        (a)[0] == (b)[0])
1339
1340 static void fixup_botched_add(struct ctlr_info *h,
1341         struct hpsa_scsi_dev_t *added)
1342 {
1343         /* called when scsi_add_device fails in order to re-adjust
1344          * h->dev[] to match the mid layer's view.
1345          */
1346         unsigned long flags;
1347         int i, j;
1348
1349         spin_lock_irqsave(&h->lock, flags);
1350         for (i = 0; i < h->ndevices; i++) {
1351                 if (h->dev[i] == added) {
1352                         for (j = i; j < h->ndevices-1; j++)
1353                                 h->dev[j] = h->dev[j+1];
1354                         h->ndevices--;
1355                         break;
1356                 }
1357         }
1358         spin_unlock_irqrestore(&h->lock, flags);
1359         kfree(added);
1360 }
1361
1362 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1363         struct hpsa_scsi_dev_t *dev2)
1364 {
1365         /* we compare everything except lun and target as these
1366          * are not yet assigned.  Compare parts likely
1367          * to differ first
1368          */
1369         if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1370                 sizeof(dev1->scsi3addr)) != 0)
1371                 return 0;
1372         if (memcmp(dev1->device_id, dev2->device_id,
1373                 sizeof(dev1->device_id)) != 0)
1374                 return 0;
1375         if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1376                 return 0;
1377         if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1378                 return 0;
1379         if (dev1->devtype != dev2->devtype)
1380                 return 0;
1381         if (dev1->bus != dev2->bus)
1382                 return 0;
1383         return 1;
1384 }
1385
1386 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1387         struct hpsa_scsi_dev_t *dev2)
1388 {
1389         /* Device attributes that can change, but don't mean
1390          * that the device is a different device, nor that the OS
1391          * needs to be told anything about the change.
1392          */
1393         if (dev1->raid_level != dev2->raid_level)
1394                 return 1;
1395         if (dev1->offload_config != dev2->offload_config)
1396                 return 1;
1397         if (dev1->offload_enabled != dev2->offload_enabled)
1398                 return 1;
1399         if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1400                 if (dev1->queue_depth != dev2->queue_depth)
1401                         return 1;
1402         return 0;
1403 }
1404
1405 /* Find needle in haystack.  If exact match found, return DEVICE_SAME,
1406  * and return needle location in *index.  If scsi3addr matches, but not
1407  * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1408  * location in *index.
1409  * In the case of a minor device attribute change, such as RAID level, just
1410  * return DEVICE_UPDATED, along with the updated device's location in index.
1411  * If needle not found, return DEVICE_NOT_FOUND.
1412  */
1413 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1414         struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1415         int *index)
1416 {
1417         int i;
1418 #define DEVICE_NOT_FOUND 0
1419 #define DEVICE_CHANGED 1
1420 #define DEVICE_SAME 2
1421 #define DEVICE_UPDATED 3
1422         if (needle == NULL)
1423                 return DEVICE_NOT_FOUND;
1424
1425         for (i = 0; i < haystack_size; i++) {
1426                 if (haystack[i] == NULL) /* previously removed. */
1427                         continue;
1428                 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1429                         *index = i;
1430                         if (device_is_the_same(needle, haystack[i])) {
1431                                 if (device_updated(needle, haystack[i]))
1432                                         return DEVICE_UPDATED;
1433                                 return DEVICE_SAME;
1434                         } else {
1435                                 /* Keep offline devices offline */
1436                                 if (needle->volume_offline)
1437                                         return DEVICE_NOT_FOUND;
1438                                 return DEVICE_CHANGED;
1439                         }
1440                 }
1441         }
1442         *index = -1;
1443         return DEVICE_NOT_FOUND;
1444 }
1445
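/*
 * Add a volume to the list of offline devices to be monitored for
 * coming back online, unless it is already on the list.
 */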
1446 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1447                                         unsigned char scsi3addr[])
1448 {
1449         struct offline_device_entry *device;
1450         unsigned long flags;
1451
1452         /* Check to see if device is already on the list */
1453         spin_lock_irqsave(&h->offline_device_lock, flags);
1454         list_for_each_entry(device, &h->offline_device_list, offline_list) {
1455                 if (memcmp(device->scsi3addr, scsi3addr,
1456                         sizeof(device->scsi3addr)) == 0) {
1457                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1458                         return;
1459                 }
1460         }
1461         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1462
1463         /* Device is not on the list, add it. */
1464         device = kmalloc(sizeof(*device), GFP_KERNEL);
1465         if (!device) {
1466                 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1467                 return;
1468         }
1469         memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1470         spin_lock_irqsave(&h->offline_device_lock, flags);
1471         list_add_tail(&device->offline_list, &h->offline_device_list);
1472         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1473 }
1474
1475 /* Print a message explaining various offline volume states */
1476 static void hpsa_show_volume_status(struct ctlr_info *h,
1477         struct hpsa_scsi_dev_t *sd)
1478 {
1479         if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1480                 dev_info(&h->pdev->dev,
1481                         "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1482                         h->scsi_host->host_no,
1483                         sd->bus, sd->target, sd->lun);
1484         switch (sd->volume_offline) {
1485         case HPSA_LV_OK:
1486                 break;
1487         case HPSA_LV_UNDERGOING_ERASE:
1488                 dev_info(&h->pdev->dev,
1489                         "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1490                         h->scsi_host->host_no,
1491                         sd->bus, sd->target, sd->lun);
1492                 break;
1493         case HPSA_LV_NOT_AVAILABLE:
1494                 dev_info(&h->pdev->dev,
1495                         "C%d:B%d:T%d:L%d Volume is waiting for a transforming volume.\n",
1496                         h->scsi_host->host_no,
1497                         sd->bus, sd->target, sd->lun);
1498                 break;
1499         case HPSA_LV_UNDERGOING_RPI:
1500                 dev_info(&h->pdev->dev,
1501                         "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1502                         h->scsi_host->host_no,
1503                         sd->bus, sd->target, sd->lun);
1504                 break;
1505         case HPSA_LV_PENDING_RPI:
1506                 dev_info(&h->pdev->dev,
1507                         "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1508                         h->scsi_host->host_no,
1509                         sd->bus, sd->target, sd->lun);
1510                 break;
1511         case HPSA_LV_ENCRYPTED_NO_KEY:
1512                 dev_info(&h->pdev->dev,
1513                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1514                         h->scsi_host->host_no,
1515                         sd->bus, sd->target, sd->lun);
1516                 break;
1517         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1518                 dev_info(&h->pdev->dev,
1519                         "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1520                         h->scsi_host->host_no,
1521                         sd->bus, sd->target, sd->lun);
1522                 break;
1523         case HPSA_LV_UNDERGOING_ENCRYPTION:
1524                 dev_info(&h->pdev->dev,
1525                         "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1526                         h->scsi_host->host_no,
1527                         sd->bus, sd->target, sd->lun);
1528                 break;
1529         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1530                 dev_info(&h->pdev->dev,
1531                         "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1532                         h->scsi_host->host_no,
1533                         sd->bus, sd->target, sd->lun);
1534                 break;
1535         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1536                 dev_info(&h->pdev->dev,
1537                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1538                         h->scsi_host->host_no,
1539                         sd->bus, sd->target, sd->lun);
1540                 break;
1541         case HPSA_LV_PENDING_ENCRYPTION:
1542                 dev_info(&h->pdev->dev,
1543                         "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1544                         h->scsi_host->host_no,
1545                         sd->bus, sd->target, sd->lun);
1546                 break;
1547         case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1548                 dev_info(&h->pdev->dev,
1549                         "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1550                         h->scsi_host->host_no,
1551                         sd->bus, sd->target, sd->lun);
1552                 break;
1553         }
1554 }
1555
1556 /*
1557  * Figure the list of physical drive pointers for a logical drive with
1558  * raid offload configured.
1559  */
1560 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1561                                 struct hpsa_scsi_dev_t *dev[], int ndevices,
1562                                 struct hpsa_scsi_dev_t *logical_drive)
1563 {
1564         struct raid_map_data *map = &logical_drive->raid_map;
1565         struct raid_map_disk_data *dd = &map->data[0];
1566         int i, j;
1567         int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1568                                 le16_to_cpu(map->metadata_disks_per_row);
1569         int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1570                                 le16_to_cpu(map->layout_map_count) *
1571                                 total_disks_per_row;
1572         int nphys_disk = le16_to_cpu(map->layout_map_count) *
1573                                 total_disks_per_row;
1574         int qdepth;
1575
1576         if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1577                 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1578
1579         logical_drive->nphysical_disks = nraid_map_entries;
1580
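        /*
         * Derive a queue depth for the logical drive by summing the queue
         * depths of its member physical disks, capped at the controller's
         * total command slots (h->nr_cmds).
         */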
1581         qdepth = 0;
1582         for (i = 0; i < nraid_map_entries; i++) {
1583                 logical_drive->phys_disk[i] = NULL;
1584                 if (!logical_drive->offload_config)
1585                         continue;
1586                 for (j = 0; j < ndevices; j++) {
1587                         if (dev[j] == NULL)
1588                                 continue;
1589                         if (dev[j]->devtype != TYPE_DISK)
1590                                 continue;
1591                         if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1592                                 continue;
1593                         if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1594                                 continue;
1595
1596                         logical_drive->phys_disk[i] = dev[j];
1597                         if (i < nphys_disk)
1598                                 qdepth = min(h->nr_cmds, qdepth +
1599                                     logical_drive->phys_disk[i]->queue_depth);
1600                         break;
1601                 }
1602
1603                 /*
1604                  * This can happen if a physical drive is removed and
1605                  * the logical drive is degraded.  In that case, the RAID
1606                  * map data will refer to a physical disk which isn't actually
1607                  * present.  And in that case offload_enabled should already
1608                  * be 0, but we'll turn it off here just in case.
1609                  */
1610                 if (!logical_drive->phys_disk[i]) {
1611                         logical_drive->offload_enabled = 0;
1612                         logical_drive->offload_to_be_enabled = 0;
1613                         logical_drive->queue_depth = 8;
1614                 }
1615         }
1616         if (nraid_map_entries)
1617                 /*
1618                  * This is correct for reads, too high for full stripe writes,
1619                  * way too high for partial stripe writes
1620                  */
1621                 logical_drive->queue_depth = qdepth;
1622         else
1623                 logical_drive->queue_depth = h->nr_cmds;
1624 }
1625
1626 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1627                                 struct hpsa_scsi_dev_t *dev[], int ndevices)
1628 {
1629         int i;
1630
1631         for (i = 0; i < ndevices; i++) {
1632                 if (dev[i] == NULL)
1633                         continue;
1634                 if (dev[i]->devtype != TYPE_DISK)
1635                         continue;
1636                 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1637                         continue;
1638
1639                 /*
1640                  * If offload is currently enabled, the RAID map and
1641                  * phys_disk[] assignment *better* not be changing
1642                  * and since it isn't changing, we do not need to
1643                  * update it.
1644                  */
1645                 if (dev[i]->offload_enabled)
1646                         continue;
1647
1648                 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1649         }
1650 }
1651
1652 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1653         struct hpsa_scsi_dev_t *sd[], int nsds)
1654 {
1655         /* sd contains scsi3 addresses and devtypes, and inquiry
1656          * data.  This function takes what's in sd to be the current
1657          * reality and updates h->dev[] to reflect that reality.
1658          */
1659         int i, entry, device_change, changes = 0;
1660         struct hpsa_scsi_dev_t *csd;
1661         unsigned long flags;
1662         struct hpsa_scsi_dev_t **added, **removed;
1663         int nadded, nremoved;
1664         struct Scsi_Host *sh = NULL;
1665
1666         /*
1667          * A reset can cause a device status to change
1668          * re-schedule the scan to see what happened.
1669          */
1670         if (h->reset_in_progress) {
1671                 h->drv_req_rescan = 1;
1672                 return;
1673         }
1674
1675         added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1676         removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1677
1678         if (!added || !removed) {
1679                 dev_warn(&h->pdev->dev,
1680                         "out of memory in adjust_hpsa_scsi_table\n");
1681                 goto free_and_out;
1682         }
1683
1684         spin_lock_irqsave(&h->devlock, flags);
1685
1686         /* find any devices in h->dev[] that are not in
1687          * sd[] and remove them from h->dev[], and for any
1688          * devices which have changed, remove the old device
1689          * info and add the new device info.
1690          * If minor device attributes change, just update
1691          * the existing device structure.
1692          */
1693         i = 0;
1694         nremoved = 0;
1695         nadded = 0;
1696         while (i < h->ndevices) {
1697                 csd = h->dev[i];
1698                 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1699                 if (device_change == DEVICE_NOT_FOUND) {
1700                         changes++;
1701                         hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1702                         continue; /* remove ^^^, hence i not incremented */
1703                 } else if (device_change == DEVICE_CHANGED) {
1704                         changes++;
1705                         hpsa_scsi_replace_entry(h, i, sd[entry],
1706                                 added, &nadded, removed, &nremoved);
1707                         /* Set it to NULL to prevent it from being freed
1708                          * at the bottom of hpsa_update_scsi_devices()
1709                          */
1710                         sd[entry] = NULL;
1711                 } else if (device_change == DEVICE_UPDATED) {
1712                         hpsa_scsi_update_entry(h, i, sd[entry]);
1713                 }
1714                 i++;
1715         }
1716
1717         /* Now, make sure every device listed in sd[] is also
1718          * listed in h->dev[], adding them if they aren't found
1719          */
1720
1721         for (i = 0; i < nsds; i++) {
1722                 if (!sd[i]) /* if already added above. */
1723                         continue;
1724
1725                 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1726                  * as the SCSI mid-layer does not handle such devices well.
1727                  * It relentlessly loops sending TUR at 3Hz, then READ(10)
1728                  * at 160Hz, and prevents the system from coming up.
1729                  */
1730                 if (sd[i]->volume_offline) {
1731                         hpsa_show_volume_status(h, sd[i]);
1732                         hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1733                         continue;
1734                 }
1735
1736                 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1737                                         h->ndevices, &entry);
1738                 if (device_change == DEVICE_NOT_FOUND) {
1739                         changes++;
1740                         if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1741                                 break;
1742                         sd[i] = NULL; /* prevent from being freed later. */
1743                 } else if (device_change == DEVICE_CHANGED) {
1744                         /* should never happen... */
1745                         changes++;
1746                         dev_warn(&h->pdev->dev,
1747                                 "device unexpectedly changed.\n");
1748                         /* but if it does happen, we just ignore that device */
1749                 }
1750         }
1751         hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1752
1753         /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1754          * any logical drives that need it enabled.
1755          */
1756         for (i = 0; i < h->ndevices; i++) {
1757                 if (h->dev[i] == NULL)
1758                         continue;
1759                 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1760         }
1761
1762         spin_unlock_irqrestore(&h->devlock, flags);
1763
1764         /* Monitor devices which are in one of several NOT READY states to be
1765          * brought online later. This must be done without holding h->devlock,
1766          * so don't touch h->dev[]
1767          */
1768         for (i = 0; i < nsds; i++) {
1769                 if (!sd[i]) /* if already added above. */
1770                         continue;
1771                 if (sd[i]->volume_offline)
1772                         hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1773         }
1774
1775         /* Don't notify the scsi mid layer of any changes the first time
1776          * through (or if there are no changes); scsi_scan_host will do it
1777          * later, the first time through.
1778          */
1779         if (!changes)
1780                 goto free_and_out;
1781
1782         sh = h->scsi_host;
1783         if (sh == NULL) {
1784                 dev_warn(&h->pdev->dev, "%s: scsi_host is null\n", __func__);
1785                 goto free_and_out;
1786         }
1787         /* Notify scsi mid layer of any removed devices */
1788         for (i = 0; i < nremoved; i++) {
1789                 if (removed[i] == NULL)
1790                         continue;
1791                 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1792                         struct scsi_device *sdev =
1793                                 scsi_device_lookup(sh, removed[i]->bus,
1794                                         removed[i]->target, removed[i]->lun);
1795                         if (sdev != NULL) {
1796                                 scsi_remove_device(sdev);
1797                                 scsi_device_put(sdev);
1798                         } else {
1799                                 /*
1800                                  * We don't expect to get here.
1801                                  * Future cmds to this device will get a
1802                                  * selection timeout, as if the device were gone.
1803                                  */
1804                                 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1805                                         "didn't find device for removal.");
1806                         }
1807                 }
1808                 kfree(removed[i]);
1809                 removed[i] = NULL;
1810         }
1811
1812         /* Notify scsi mid layer of any added devices */
1813         for (i = 0; i < nadded; i++) {
1814                 if (added[i] == NULL)
1815                         continue;
1816                 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1817                         continue;
1818                 if (scsi_add_device(sh, added[i]->bus,
1819                         added[i]->target, added[i]->lun) == 0)
1820                         continue;
1821                 dev_warn(&h->pdev->dev, "addition failed, device not added.\n");
1822                 /* now we have to remove it from h->dev,
1823                  * since it didn't get added to scsi mid layer
1824                  */
1825                 fixup_botched_add(h, added[i]);
1826                 h->drv_req_rescan = 1;
1827         }
1828
1829 free_and_out:
1830         kfree(added);
1831         kfree(removed);
1832 }
1833
1834 /*
1835  * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1836  * Assumes h->devlock is held.
1837  */
1838 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1839         int bus, int target, int lun)
1840 {
1841         int i;
1842         struct hpsa_scsi_dev_t *sd;
1843
1844         for (i = 0; i < h->ndevices; i++) {
1845                 sd = h->dev[i];
1846                 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1847                         return sd;
1848         }
1849         return NULL;
1850 }
1851
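/*
 * Called by the SCSI midlayer when a scsi_device is created; point
 * sdev->hostdata at the matching hpsa_scsi_dev_t if the device is exposed.
 */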
1852 static int hpsa_slave_alloc(struct scsi_device *sdev)
1853 {
1854         struct hpsa_scsi_dev_t *sd;
1855         unsigned long flags;
1856         struct ctlr_info *h;
1857
1858         h = sdev_to_hba(sdev);
1859         spin_lock_irqsave(&h->devlock, flags);
1860         sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1861                 sdev_id(sdev), sdev->lun);
1862         if (likely(sd)) {
1863                 atomic_set(&sd->ioaccel_cmds_out, 0);
1864                 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1865         } else
1866                 sdev->hostdata = NULL;
1867         spin_unlock_irqrestore(&h->devlock, flags);
1868         return 0;
1869 }
1870
1871 /* configure scsi device based on internal per-device structure */
1872 static int hpsa_slave_configure(struct scsi_device *sdev)
1873 {
1874         struct hpsa_scsi_dev_t *sd;
1875         int queue_depth;
1876
1877         sd = sdev->hostdata;
1878         sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1879
1880         if (sd)
1881                 queue_depth = sd->queue_depth != 0 ?
1882                         sd->queue_depth : sdev->host->can_queue;
1883         else
1884                 queue_depth = sdev->host->can_queue;
1885
1886         scsi_change_queue_depth(sdev, queue_depth);
1887
1888         return 0;
1889 }
1890
1891 static void hpsa_slave_destroy(struct scsi_device *sdev)
1892 {
1893         /* nothing to do. */
1894 }
1895
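/* Free the per-command SG chain blocks used by ioaccel2 commands. */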
1896 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1897 {
1898         int i;
1899
1900         if (!h->ioaccel2_cmd_sg_list)
1901                 return;
1902         for (i = 0; i < h->nr_cmds; i++) {
1903                 kfree(h->ioaccel2_cmd_sg_list[i]);
1904                 h->ioaccel2_cmd_sg_list[i] = NULL;
1905         }
1906         kfree(h->ioaccel2_cmd_sg_list);
1907         h->ioaccel2_cmd_sg_list = NULL;
1908 }
1909
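/* Allocate one ioaccel2 SG chain block per command slot. */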
1910 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1911 {
1912         int i;
1913
1914         if (h->chainsize <= 0)
1915                 return 0;
1916
1917         h->ioaccel2_cmd_sg_list =
1918                 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1919                                         GFP_KERNEL);
1920         if (!h->ioaccel2_cmd_sg_list)
1921                 return -ENOMEM;
1922         for (i = 0; i < h->nr_cmds; i++) {
1923                 h->ioaccel2_cmd_sg_list[i] =
1924                         kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1925                                         h->maxsgentries, GFP_KERNEL);
1926                 if (!h->ioaccel2_cmd_sg_list[i])
1927                         goto clean;
1928         }
1929         return 0;
1930
1931 clean:
1932         hpsa_free_ioaccel2_sg_chain_blocks(h);
1933         return -ENOMEM;
1934 }
1935
1936 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1937 {
1938         int i;
1939
1940         if (!h->cmd_sg_list)
1941                 return;
1942         for (i = 0; i < h->nr_cmds; i++) {
1943                 kfree(h->cmd_sg_list[i]);
1944                 h->cmd_sg_list[i] = NULL;
1945         }
1946         kfree(h->cmd_sg_list);
1947         h->cmd_sg_list = NULL;
1948 }
1949
1950 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
1951 {
1952         int i;
1953
1954         if (h->chainsize <= 0)
1955                 return 0;
1956
1957         h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1958                                 GFP_KERNEL);
1959         if (!h->cmd_sg_list) {
1960                 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1961                 return -ENOMEM;
1962         }
1963         for (i = 0; i < h->nr_cmds; i++) {
1964                 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1965                                                 h->chainsize, GFP_KERNEL);
1966                 if (!h->cmd_sg_list[i]) {
1967                         dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1968                         goto clean;
1969                 }
1970         }
1971         return 0;
1972
1973 clean:
1974         hpsa_free_sg_chain_blocks(h);
1975         return -ENOMEM;
1976 }
1977
1978 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
1979         struct io_accel2_cmd *cp, struct CommandList *c)
1980 {
1981         struct ioaccel2_sg_element *chain_block;
1982         u64 temp64;
1983         u32 chain_size;
1984
1985         chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
1986         chain_size = le32_to_cpu(cp->data_len);
1987         temp64 = pci_map_single(h->pdev, chain_block, chain_size,
1988                                 PCI_DMA_TODEVICE);
1989         if (dma_mapping_error(&h->pdev->dev, temp64)) {
1990                 /* prevent subsequent unmapping */
1991                 cp->sg->address = 0;
1992                 return -1;
1993         }
1994         cp->sg->address = cpu_to_le64(temp64);
1995         return 0;
1996 }
1997
1998 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
1999         struct io_accel2_cmd *cp)
2000 {
2001         struct ioaccel2_sg_element *chain_sg;
2002         u64 temp64;
2003         u32 chain_size;
2004
2005         chain_sg = cp->sg;
2006         temp64 = le64_to_cpu(chain_sg->address);
2007         chain_size = le32_to_cpu(cp->data_len);
2008         pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2009 }
2010
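/*
 * Commands needing more SG entries than fit in the command itself spill
 * into a chained block: map the block for DMA and point the last
 * in-command SG descriptor at it.
 */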
2011 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2012         struct CommandList *c)
2013 {
2014         struct SGDescriptor *chain_sg, *chain_block;
2015         u64 temp64;
2016         u32 chain_len;
2017
2018         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2019         chain_block = h->cmd_sg_list[c->cmdindex];
2020         chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2021         chain_len = sizeof(*chain_sg) *
2022                 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2023         chain_sg->Len = cpu_to_le32(chain_len);
2024         temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2025                                 PCI_DMA_TODEVICE);
2026         if (dma_mapping_error(&h->pdev->dev, temp64)) {
2027                 /* prevent subsequent unmapping */
2028                 chain_sg->Addr = cpu_to_le64(0);
2029                 return -1;
2030         }
2031         chain_sg->Addr = cpu_to_le64(temp64);
2032         return 0;
2033 }
2034
2035 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2036         struct CommandList *c)
2037 {
2038         struct SGDescriptor *chain_sg;
2039
2040         if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2041                 return;
2042
2043         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2044         pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2045                         le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2046 }
2047
2049 /* Decode the various types of errors on ioaccel2 path.
2050  * Return 1 for any error that should generate a RAID path retry.
2051  * Return 0 for errors that don't require a RAID path retry.
2052  */
2053 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2054                                         struct CommandList *c,
2055                                         struct scsi_cmnd *cmd,
2056                                         struct io_accel2_cmd *c2)
2057 {
2058         int data_len;
2059         int retry = 0;
2060         u32 ioaccel2_resid = 0;
2061
2062         switch (c2->error_data.serv_response) {
2063         case IOACCEL2_SERV_RESPONSE_COMPLETE:
2064                 switch (c2->error_data.status) {
2065                 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2066                         break;
2067                 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2068                         cmd->result |= SAM_STAT_CHECK_CONDITION;
2069                         if (c2->error_data.data_present !=
2070                                         IOACCEL2_SENSE_DATA_PRESENT) {
2071                                 memset(cmd->sense_buffer, 0,
2072                                         SCSI_SENSE_BUFFERSIZE);
2073                                 break;
2074                         }
2075                         /* copy the sense data */
2076                         data_len = c2->error_data.sense_data_len;
2077                         if (data_len > SCSI_SENSE_BUFFERSIZE)
2078                                 data_len = SCSI_SENSE_BUFFERSIZE;
2079                         if (data_len > sizeof(c2->error_data.sense_data_buff))
2080                                 data_len =
2081                                         sizeof(c2->error_data.sense_data_buff);
2082                         memcpy(cmd->sense_buffer,
2083                                 c2->error_data.sense_data_buff, data_len);
2084                         retry = 1;
2085                         break;
2086                 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2087                         retry = 1;
2088                         break;
2089                 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2090                         retry = 1;
2091                         break;
2092                 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2093                         retry = 1;
2094                         break;
2095                 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2096                         retry = 1;
2097                         break;
2098                 default:
2099                         retry = 1;
2100                         break;
2101                 }
2102                 break;
2103         case IOACCEL2_SERV_RESPONSE_FAILURE:
2104                 switch (c2->error_data.status) {
2105                 case IOACCEL2_STATUS_SR_IO_ERROR:
2106                 case IOACCEL2_STATUS_SR_IO_ABORTED:
2107                 case IOACCEL2_STATUS_SR_OVERRUN:
2108                         retry = 1;
2109                         break;
2110                 case IOACCEL2_STATUS_SR_UNDERRUN:
2111                         cmd->result = (DID_OK << 16);           /* host byte */
2112                         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2113                         ioaccel2_resid = get_unaligned_le32(
2114                                                 &c2->error_data.resid_cnt[0]);
2115                         scsi_set_resid(cmd, ioaccel2_resid);
2116                         break;
2117                 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2118                 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2119                 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2120                         /* We will get an event from ctlr to trigger rescan */
2121                         retry = 1;
2122                         break;
2123                 default:
2124                         retry = 1;
2125                 }
2126                 break;
2127         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2128                 break;
2129         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2130                 break;
2131         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2132                 retry = 1;
2133                 break;
2134         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2135                 break;
2136         default:
2137                 retry = 1;
2138                 break;
2139         }
2140
2141         return retry;   /* retry on raid path? */
2142 }
2143
2144 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2145                 struct CommandList *c)
2146 {
2147         bool do_wake = false;
2148
2149         /*
2150          * Prevent the following race in the abort handler:
2151          *
2152          * 1. LLD is requested to abort a SCSI command
2153          * 2. The SCSI command completes
2154          * 3. The struct CommandList associated with step 2 is made available
2155          * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2156          * 5. Abort handler follows scsi_cmnd->host_scribble and
2157          *    finds struct CommandList and tries to abort it
2158          * Now we have aborted the wrong command.
2159          *
2160          * Reset c->scsi_cmd here so that the abort or reset handler will know
2161          * this command has completed.  Then, check to see if the handler is
2162          * waiting for this command, and, if so, wake it.
2163          */
2164         c->scsi_cmd = SCSI_CMD_IDLE;
2165         mb();   /* Declare command idle before checking for pending events. */
2166         if (c->abort_pending) {
2167                 do_wake = true;
2168                 c->abort_pending = false;
2169         }
2170         if (c->reset_pending) {
2171                 unsigned long flags;
2172                 struct hpsa_scsi_dev_t *dev;
2173
2174                 /*
2175                  * There appears to be a reset pending; lock the lock and
2176                  * reconfirm.  If so, then decrement the count of outstanding
2177                  * commands and wake the reset command if this is the last one.
2178                  */
2179                 spin_lock_irqsave(&h->lock, flags);
2180                 dev = c->reset_pending;         /* Re-fetch under the lock. */
2181                 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2182                         do_wake = true;
2183                 c->reset_pending = NULL;
2184                 spin_unlock_irqrestore(&h->lock, flags);
2185         }
2186
2187         if (do_wake)
2188                 wake_up_all(&h->event_sync_wait_queue);
2189 }
2190
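/* Resolve pending abort/reset waiters, then return the command to the pool. */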
2191 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2192                                       struct CommandList *c)
2193 {
2194         hpsa_cmd_resolve_events(h, c);
2195         cmd_tagged_free(h, c);
2196 }
2197
2198 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2199                 struct CommandList *c, struct scsi_cmnd *cmd)
2200 {
2201         hpsa_cmd_resolve_and_free(h, c);
2202         cmd->scsi_done(cmd);
2203 }
2204
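/* Queue the command for resubmission down the normal RAID path. */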
2205 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2206 {
2207         INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2208         queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2209 }
2210
2211 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2212 {
2213         cmd->result = DID_ABORT << 16;
2214 }
2215
2216 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2217                                     struct scsi_cmnd *cmd)
2218 {
2219         hpsa_set_scsi_cmd_aborted(cmd);
2220         dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2221                          c->Request.CDB, c->err_info->ScsiStatus);
2222         hpsa_cmd_resolve_and_free(h, c);
2223 }
2224
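/*
 * Completion handling for the ioaccel2 path: complete the command on
 * success, otherwise decide whether it should be retried via the RAID path.
 */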
2225 static void process_ioaccel2_completion(struct ctlr_info *h,
2226                 struct CommandList *c, struct scsi_cmnd *cmd,
2227                 struct hpsa_scsi_dev_t *dev)
2228 {
2229         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2230
2231         /* check for good status */
2232         if (likely(c2->error_data.serv_response == 0 &&
2233                         c2->error_data.status == 0))
2234                 return hpsa_cmd_free_and_done(h, c, cmd);
2235
2236         /*
2237          * Any RAID offload error results in retry which will use
2238          * the normal I/O path so the controller can handle whatever's
2239          * wrong.
2240          */
2241         if (is_logical_dev_addr_mode(dev->scsi3addr) &&
2242                 c2->error_data.serv_response ==
2243                         IOACCEL2_SERV_RESPONSE_FAILURE) {
2244                 if (c2->error_data.status ==
2245                         IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2246                         dev->offload_enabled = 0;
2247
2248                 return hpsa_retry_cmd(h, c);
2249         }
2250
2251         if (handle_ioaccel_mode2_error(h, c, cmd, c2))
2252                 return hpsa_retry_cmd(h, c);
2253
2254         return hpsa_cmd_free_and_done(h, c, cmd);
2255 }
2256
2257 /* Returns 0 on success, < 0 otherwise. */
2258 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2259                                         struct CommandList *cp)
2260 {
2261         u8 tmf_status = cp->err_info->ScsiStatus;
2262
2263         switch (tmf_status) {
2264         case CISS_TMF_COMPLETE:
2265                 /*
2266                  * CISS_TMF_COMPLETE never happens; instead,
2267                  * ei->CommandStatus == 0 for this case.
2268                  */
2269         case CISS_TMF_SUCCESS:
2270                 return 0;
2271         case CISS_TMF_INVALID_FRAME:
2272         case CISS_TMF_NOT_SUPPORTED:
2273         case CISS_TMF_FAILED:
2274         case CISS_TMF_WRONG_LUN:
2275         case CISS_TMF_OVERLAPPED_TAG:
2276                 break;
2277         default:
2278                 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2279                                 tmf_status);
2280                 break;
2281         }
2282         return -tmf_status;
2283 }
2284
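/*
 * Main completion routine for SCSI commands: unmap DMA, decode the error
 * information, and hand the result back to the SCSI midlayer.
 */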
2285 static void complete_scsi_command(struct CommandList *cp)
2286 {
2287         struct scsi_cmnd *cmd;
2288         struct ctlr_info *h;
2289         struct ErrorInfo *ei;
2290         struct hpsa_scsi_dev_t *dev;
2291         struct io_accel2_cmd *c2;
2292
2293         u8 sense_key;
2294         u8 asc;      /* additional sense code */
2295         u8 ascq;     /* additional sense code qualifier */
2296         unsigned long sense_data_size;
2297
2298         ei = cp->err_info;
2299         cmd = cp->scsi_cmd;
2300         h = cp->h;
2301         dev = cmd->device->hostdata;
2302         c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2303
2304         scsi_dma_unmap(cmd); /* undo the DMA mappings */
2305         if ((cp->cmd_type == CMD_SCSI) &&
2306                 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2307                 hpsa_unmap_sg_chain_block(h, cp);
2308
2309         if ((cp->cmd_type == CMD_IOACCEL2) &&
2310                 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2311                 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2312
2313         cmd->result = (DID_OK << 16);           /* host byte */
2314         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2315
2316         if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2317                 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2318
2319         /*
2320          * We check for lockup status here as it may be set for
2321          * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2322          * fail_all_outstanding_cmds()
2323          */
2324         if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2325                 /* DID_NO_CONNECT will prevent a retry */
2326                 cmd->result = DID_NO_CONNECT << 16;
2327                 return hpsa_cmd_free_and_done(h, cp, cmd);
2328         }
2329
2330         if ((unlikely(hpsa_is_pending_event(cp)))) {
2331                 if (cp->reset_pending)
2332                         return hpsa_cmd_resolve_and_free(h, cp);
2333                 if (cp->abort_pending)
2334                         return hpsa_cmd_abort_and_free(h, cp, cmd);
2335         }
2336
2337         if (cp->cmd_type == CMD_IOACCEL2)
2338                 return process_ioaccel2_completion(h, cp, cmd, dev);
2339
2340         scsi_set_resid(cmd, ei->ResidualCnt);
2341         if (ei->CommandStatus == 0)
2342                 return hpsa_cmd_free_and_done(h, cp, cmd);
2343
2344         /* For I/O accelerator commands, copy over some fields to the normal
2345          * CISS header used below for error handling.
2346          */
2347         if (cp->cmd_type == CMD_IOACCEL1) {
2348                 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2349                 cp->Header.SGList = scsi_sg_count(cmd);
2350                 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2351                 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2352                         IOACCEL1_IOFLAGS_CDBLEN_MASK;
2353                 cp->Header.tag = c->tag;
2354                 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2355                 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2356
2357                 /* Any RAID offload error results in retry which will use
2358                  * the normal I/O path so the controller can handle whatever's
2359                  * wrong.
2360                  */
2361                 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
2362                         if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2363                                 dev->offload_enabled = 0;
2364                         return hpsa_retry_cmd(h, cp);
2365                 }
2366         }
2367
2368         /* an error has occurred */
2369         switch (ei->CommandStatus) {
2370
2371         case CMD_TARGET_STATUS:
2372                 cmd->result |= ei->ScsiStatus;
2373                 /* copy the sense data */
2374                 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2375                         sense_data_size = SCSI_SENSE_BUFFERSIZE;
2376                 else
2377                         sense_data_size = sizeof(ei->SenseInfo);
2378                 if (ei->SenseLen < sense_data_size)
2379                         sense_data_size = ei->SenseLen;
2380                 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2381                 if (ei->ScsiStatus)
2382                         decode_sense_data(ei->SenseInfo, sense_data_size,
2383                                 &sense_key, &asc, &ascq);
2384                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2385                         if (sense_key == ABORTED_COMMAND) {
2386                                 cmd->result |= DID_SOFT_ERROR << 16;
2387                                 break;
2388                         }
2389                         break;
2390                 }
2391                 /* Problem was not a check condition
2392                  * Pass it up to the upper layers...
2393                  */
2394                 if (ei->ScsiStatus) {
2395                         dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2396                                 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2397                                 "Returning result: 0x%x\n",
2398                                 cp, ei->ScsiStatus,
2399                                 sense_key, asc, ascq,
2400                                 cmd->result);
2401                 } else {  /* scsi status is zero??? How??? */
2402                         dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2403                                 "Returning no connection.\n", cp);
2404
2405                         /* Ordinarily, this case should never happen,
2406                          * but there is a bug in some released firmware
2407                          * revisions that allows it to happen if, for
2408                          * example, a 4100 backplane loses power and
2409                          * the tape drive is in it.  We assume that
2410                          * it's a fatal error of some kind because we
2411                          * can't show that it wasn't. We will make it
2412                          * look like selection timeout since that is
2413                          * the most common reason for this to occur,
2414                          * and it's severe enough.
2415                          */
2416
2417                         cmd->result = DID_NO_CONNECT << 16;
2418                 }
2419                 break;
2420
2421         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2422                 break;
2423         case CMD_DATA_OVERRUN:
2424                 dev_warn(&h->pdev->dev,
2425                         "CDB %16phN data overrun\n", cp->Request.CDB);
2426                 break;
2427         case CMD_INVALID: {
2428                 /* print_bytes(cp, sizeof(*cp), 1, 0);
2429                 print_cmd(cp); */
2430                 /* We get CMD_INVALID if you address a non-existent device
2431                  * instead of a selection timeout (no response).  You will
2432                  * see this if you yank out a drive, then try to access it.
2433                  * This is kind of a shame because it means that any other
2434                  * CMD_INVALID (e.g. driver bug) will get interpreted as a
2435                  * missing target. */
2436                 cmd->result = DID_NO_CONNECT << 16;
2437         }
2438                 break;
2439         case CMD_PROTOCOL_ERR:
2440                 cmd->result = DID_ERROR << 16;
2441                 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2442                                 cp->Request.CDB);
2443                 break;
2444         case CMD_HARDWARE_ERR:
2445                 cmd->result = DID_ERROR << 16;
2446                 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2447                         cp->Request.CDB);
2448                 break;
2449         case CMD_CONNECTION_LOST:
2450                 cmd->result = DID_ERROR << 16;
2451                 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2452                         cp->Request.CDB);
2453                 break;
2454         case CMD_ABORTED:
2455                 /* Return now to avoid calling scsi_done(). */
2456                 return hpsa_cmd_abort_and_free(h, cp, cmd);
2457         case CMD_ABORT_FAILED:
2458                 cmd->result = DID_ERROR << 16;
2459                 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2460                         cp->Request.CDB);
2461                 break;
2462         case CMD_UNSOLICITED_ABORT:
2463                 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2464                 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2465                         cp->Request.CDB);
2466                 break;
2467         case CMD_TIMEOUT:
2468                 cmd->result = DID_TIME_OUT << 16;
2469                 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2470                         cp->Request.CDB);
2471                 break;
2472         case CMD_UNABORTABLE:
2473                 cmd->result = DID_ERROR << 16;
2474                 dev_warn(&h->pdev->dev, "Command unabortable\n");
2475                 break;
2476         case CMD_TMF_STATUS:
2477                 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2478                         cmd->result = DID_ERROR << 16;
2479                 break;
2480         case CMD_IOACCEL_DISABLED:
2481                 /* This only handles the direct pass-through case since RAID
2482                  * offload is handled above.  Just attempt a retry.
2483                  */
2484                 cmd->result = DID_SOFT_ERROR << 16;
2485                 dev_warn(&h->pdev->dev,
2486                                 "cp %p had HP SSD Smart Path error\n", cp);
2487                 break;
2488         default:
2489                 cmd->result = DID_ERROR << 16;
2490                 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2491                                 cp, ei->CommandStatus);
2492         }
2493
2494         return hpsa_cmd_free_and_done(h, cp, cmd);
2495 }
2496
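/* Undo the DMA mappings set up for a driver-initiated command. */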
2497 static void hpsa_pci_unmap(struct pci_dev *pdev,
2498         struct CommandList *c, int sg_used, int data_direction)
2499 {
2500         int i;
2501
2502         for (i = 0; i < sg_used; i++)
2503                 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2504                                 le32_to_cpu(c->SG[i].Len),
2505                                 data_direction);
2506 }
2507
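/*
 * Map a single contiguous buffer into the command's first (and only) SG
 * descriptor.  Returns 0 on success, -1 on DMA mapping failure.
 */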
2508 static int hpsa_map_one(struct pci_dev *pdev,
2509                 struct CommandList *cp,
2510                 unsigned char *buf,
2511                 size_t buflen,
2512                 int data_direction)
2513 {
2514         u64 addr64;
2515
2516         if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2517                 cp->Header.SGList = 0;
2518                 cp->Header.SGTotal = cpu_to_le16(0);
2519                 return 0;
2520         }
2521
2522         addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2523         if (dma_mapping_error(&pdev->dev, addr64)) {
2524                 /* Prevent subsequent unmap of something never mapped */
2525                 cp->Header.SGList = 0;
2526                 cp->Header.SGTotal = cpu_to_le16(0);
2527                 return -1;
2528         }
2529         cp->SG[0].Addr = cpu_to_le64(addr64);
2530         cp->SG[0].Len = cpu_to_le32(buflen);
2531         cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2532         cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
2533         cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2534         return 0;
2535 }
2536
2537 #define NO_TIMEOUT ((unsigned long) -1)
2538 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
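/*
 * Send a driver-initiated command and sleep on an on-stack completion
 * until it finishes or times out (NO_TIMEOUT means wait indefinitely).
 */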
2539 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2540         struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2541 {
2542         DECLARE_COMPLETION_ONSTACK(wait);
2543
2544         c->waiting = &wait;
2545         __enqueue_cmd_and_start_io(h, c, reply_queue);
2546         if (timeout_msecs == NO_TIMEOUT) {
2547                 /* TODO: get rid of this no-timeout thing */
2548                 wait_for_completion_io(&wait);
2549                 return IO_OK;
2550         }
2551         if (!wait_for_completion_io_timeout(&wait,
2552                                         msecs_to_jiffies(timeout_msecs))) {
2553                 dev_warn(&h->pdev->dev, "Command timed out.\n");
2554                 return -ETIMEDOUT;
2555         }
2556         return IO_OK;
2557 }
2558
2559 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2560                                    int reply_queue, unsigned long timeout_msecs)
2561 {
2562         if (unlikely(lockup_detected(h))) {
2563                 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2564                 return IO_OK;
2565         }
2566         return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2567 }
2568
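/* Read this CPU's copy of the per-cpu controller lockup flag. */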
2569 static u32 lockup_detected(struct ctlr_info *h)
2570 {
2571         int cpu;
2572         u32 rc, *lockup_detected;
2573
2574         cpu = get_cpu();
2575         lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2576         rc = *lockup_detected;
2577         put_cpu();
2578         return rc;
2579 }
2580
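/*
 * Retry a driver-initiated command while the device reports unit attention
 * or busy, up to MAX_DRIVER_CMD_RETRIES times, sleeping with exponential
 * backoff (10 ms, doubling toward ~1 s) after the first few attempts.
 */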
2581 #define MAX_DRIVER_CMD_RETRIES 25
2582 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2583         struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2584 {
2585         int backoff_time = 10, retry_count = 0;
2586         int rc;
2587
2588         do {
2589                 memset(c->err_info, 0, sizeof(*c->err_info));
2590                 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2591                                                   timeout_msecs);
2592                 if (rc)
2593                         break;
2594                 retry_count++;
2595                 if (retry_count > 3) {
2596                         msleep(backoff_time);
2597                         if (backoff_time < 1000)
2598                                 backoff_time *= 2;
2599                 }
2600         } while ((check_for_unit_attention(h, c) ||
2601                         check_for_busy(h, c)) &&
2602                         retry_count <= MAX_DRIVER_CMD_RETRIES);
2603         hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2604         if (retry_count > MAX_DRIVER_CMD_RETRIES)
2605                 rc = -EIO;
2606         return rc;
2607 }
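
/*
 * Retry cadence, as implied by the loop above: the first four attempts
 * run back to back; from the fifth on, each retry is preceded by an
 * msleep() whose duration doubles from 10 ms (10, 20, 40, ...) until it
 * passes one second.  Retries happen only for UNIT ATTENTION or BUSY
 * conditions, and once retry_count exceeds MAX_DRIVER_CMD_RETRIES (25)
 * the command is failed with -EIO.
 */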
2608
2609 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2610                                 struct CommandList *c)
2611 {
2612         const u8 *cdb = c->Request.CDB;
2613         const u8 *lun = c->Header.LUN.LunAddrBytes;
2614
2615         dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2616         " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2617                 txt, lun[0], lun[1], lun[2], lun[3],
2618                 lun[4], lun[5], lun[6], lun[7],
2619                 cdb[0], cdb[1], cdb[2], cdb[3],
2620                 cdb[4], cdb[5], cdb[6], cdb[7],
2621                 cdb[8], cdb[9], cdb[10], cdb[11],
2622                 cdb[12], cdb[13], cdb[14], cdb[15]);
2623 }
2624
2625 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2626                         struct CommandList *cp)
2627 {
2628         const struct ErrorInfo *ei = cp->err_info;
2629         struct device *d = &cp->h->pdev->dev;
2630         u8 sense_key, asc, ascq;
2631         int sense_len;
2632
2633         switch (ei->CommandStatus) {
2634         case CMD_TARGET_STATUS:
2635                 if (ei->SenseLen > sizeof(ei->SenseInfo))
2636                         sense_len = sizeof(ei->SenseInfo);
2637                 else
2638                         sense_len = ei->SenseLen;
2639                 decode_sense_data(ei->SenseInfo, sense_len,
2640                                         &sense_key, &asc, &ascq);
2641                 hpsa_print_cmd(h, "SCSI status", cp);
2642                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2643                         dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2644                                 sense_key, asc, ascq);
2645                 else
2646                         dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2647                 if (ei->ScsiStatus == 0)
2648                         dev_warn(d, "SCSI status is abnormally zero.  "
2649                         "(probably indicates selection timeout "
2650                         "reported incorrectly due to a known "
2651                         "firmware bug, circa July, 2001.)\n");
2652                 break;
2653         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2654                 break;
2655         case CMD_DATA_OVERRUN:
2656                 hpsa_print_cmd(h, "overrun condition", cp);
2657                 break;
2658         case CMD_INVALID: {
2659                 /* controller unfortunately reports SCSI passthrus
2660                  * to non-existent targets as invalid commands.
2661                  */
2662                 hpsa_print_cmd(h, "invalid command", cp);
2663                 dev_warn(d, "probably means device no longer present\n");
2664                 }
2665                 break;
2666         case CMD_PROTOCOL_ERR:
2667                 hpsa_print_cmd(h, "protocol error", cp);
2668                 break;
2669         case CMD_HARDWARE_ERR:
2670                 hpsa_print_cmd(h, "hardware error", cp);
2671                 break;
2672         case CMD_CONNECTION_LOST:
2673                 hpsa_print_cmd(h, "connection lost", cp);
2674                 break;
2675         case CMD_ABORTED:
2676                 hpsa_print_cmd(h, "aborted", cp);
2677                 break;
2678         case CMD_ABORT_FAILED:
2679                 hpsa_print_cmd(h, "abort failed", cp);
2680                 break;
2681         case CMD_UNSOLICITED_ABORT:
2682                 hpsa_print_cmd(h, "unsolicited abort", cp);
2683                 break;
2684         case CMD_TIMEOUT:
2685                 hpsa_print_cmd(h, "timed out", cp);
2686                 break;
2687         case CMD_UNABORTABLE:
2688                 hpsa_print_cmd(h, "unabortable", cp);
2689                 break;
2690         case CMD_CTLR_LOCKUP:
2691                 hpsa_print_cmd(h, "controller lockup detected", cp);
2692                 break;
2693         default:
2694                 hpsa_print_cmd(h, "unknown status", cp);
2695                 dev_warn(d, "Unknown command status %x\n",
2696                                 ei->CommandStatus);
2697         }
2698 }
2699
2700 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2701                         u16 page, unsigned char *buf,
2702                         unsigned char bufsize)
2703 {
2704         int rc = IO_OK;
2705         struct CommandList *c;
2706         struct ErrorInfo *ei;
2707
2708         c = cmd_alloc(h);
2709
2710         if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2711                         page, scsi3addr, TYPE_CMD)) {
2712                 rc = -1;
2713                 goto out;
2714         }
2715         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2716                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2717         if (rc)
2718                 goto out;
2719         ei = c->err_info;
2720         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2721                 hpsa_scsi_interpret_error(h, c);
2722                 rc = -1;
2723         }
2724 out:
2725         cmd_free(h, c);
2726         return rc;
2727 }
2728
2729 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2730         u8 reset_type, int reply_queue)
2731 {
2732         int rc = IO_OK;
2733         struct CommandList *c;
2734         struct ErrorInfo *ei;
2735
2736         c = cmd_alloc(h);
2737
2739         /* fill_cmd can't fail here, no data buffer to map. */
2740         (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2741                         scsi3addr, TYPE_MSG);
2742         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2743         if (rc) {
2744                 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2745                 goto out;
2746         }
2747         /* no unmap needed here because no data xfer. */
2748
2749         ei = c->err_info;
2750         if (ei->CommandStatus != 0) {
2751                 hpsa_scsi_interpret_error(h, c);
2752                 rc = -1;
2753         }
2754 out:
2755         cmd_free(h, c);
2756         return rc;
2757 }
2758
2759 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2760                                struct hpsa_scsi_dev_t *dev,
2761                                unsigned char *scsi3addr)
2762 {
2763         int i;
2764         bool match = false;
2765         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2766         struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2767
2768         if (hpsa_is_cmd_idle(c))
2769                 return false;
2770
2771         switch (c->cmd_type) {
2772         case CMD_SCSI:
2773         case CMD_IOCTL_PEND:
2774                 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2775                                 sizeof(c->Header.LUN.LunAddrBytes));
2776                 break;
2777
2778         case CMD_IOACCEL1:
2779         case CMD_IOACCEL2:
2780                 if (c->phys_disk == dev) {
2781                         /* HBA mode match */
2782                         match = true;
2783                 } else {
2784                         /* Possible RAID mode -- check each phys dev. */
2785                         /* FIXME:  Do we need to take out a lock here?  If
2786                          * so, we could just call hpsa_get_pdisk_of_ioaccel2()
2787                          * instead. */
2788                         for (i = 0; i < dev->nphysical_disks && !match; i++) {
2789                                 /* FIXME: an alternate test might be
2790                                  *
2791                                  * match = dev->phys_disk[i]->ioaccel_handle
2792                                  *              == c2->scsi_nexus;      */
2793                                 match = dev->phys_disk[i] == c->phys_disk;
2794                         }
2795                 }
2796                 break;
2797
2798         case IOACCEL2_TMF:
2799                 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2800                         match = dev->phys_disk[i]->ioaccel_handle ==
2801                                         le32_to_cpu(ac->it_nexus);
2802                 }
2803                 break;
2804
2805         case 0:         /* The command is in the middle of being initialized. */
2806                 match = false;
2807                 break;
2808
2809         default:
2810                 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
2811                         c->cmd_type);
2812                 BUG();
2813         }
2814
2815         return match;
2816 }
2817
2818 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2819         unsigned char *scsi3addr, u8 reset_type, int reply_queue)
2820 {
2821         int i;
2822         int rc = 0;
2823
2824         /* We can really only handle one reset at a time */
2825         if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
2826                 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
2827                 return -EINTR;
2828         }
2829
2830         BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
2831
2832         for (i = 0; i < h->nr_cmds; i++) {
2833                 struct CommandList *c = h->cmd_pool + i;
2834                 int refcount = atomic_inc_return(&c->refcount);
2835
2836                 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
2837                         unsigned long flags;
2838
2839                         /*
2840                          * Mark the target command as having a reset pending,
2841                          * then take the lock so that the command cannot complete
2842                          * while we're considering it.  If the command is not
2843                          * idle then count it; otherwise revoke the event.
2844                          */
2845                         c->reset_pending = dev;
2846                         spin_lock_irqsave(&h->lock, flags);     /* Implied MB */
2847                         if (!hpsa_is_cmd_idle(c))
2848                                 atomic_inc(&dev->reset_cmds_out);
2849                         else
2850                                 c->reset_pending = NULL;
2851                         spin_unlock_irqrestore(&h->lock, flags);
2852                 }
2853
2854                 cmd_free(h, c);
2855         }
2856
2857         rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
2858         if (!rc)
2859                 wait_event(h->event_sync_wait_queue,
2860                         atomic_read(&dev->reset_cmds_out) == 0 ||
2861                         lockup_detected(h));
2862
2863         if (unlikely(lockup_detected(h))) {
2864                 dev_warn(&h->pdev->dev,
2865                          "Controller lockup detected during reset wait\n");
2866                 rc = -ENODEV;
2867         }
2868
2869         if (unlikely(rc))
2870                 atomic_set(&dev->reset_cmds_out, 0);
2871
2872         mutex_unlock(&h->reset_mutex);
2873         return rc;
2874 }
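
/*
 * Two guards make the scan in hpsa_do_reset() safe: atomic_inc_return()
 * yielding more than 1 means some owner already holds the command (a
 * free slot would yield exactly 1), and re-checking hpsa_is_cmd_idle()
 * under h->lock closes the window in which a command could complete
 * between the match test and the accounting, so only commands still in
 * flight bump dev->reset_cmds_out.
 */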
2875
2876 static void hpsa_get_raid_level(struct ctlr_info *h,
2877         unsigned char *scsi3addr, unsigned char *raid_level)
2878 {
2879         int rc;
2880         unsigned char *buf;
2881
2882         *raid_level = RAID_UNKNOWN;
2883         buf = kzalloc(64, GFP_KERNEL);
2884         if (!buf)
2885                 return;
2886         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2887         if (rc == 0)
2888                 *raid_level = buf[8];
2889         if (*raid_level > RAID_UNKNOWN)
2890                 *raid_level = RAID_UNKNOWN;
2891         kfree(buf);
2892         return;
2893 }
2894
2895 #define HPSA_MAP_DEBUG
2896 #ifdef HPSA_MAP_DEBUG
2897 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2898                                 struct raid_map_data *map_buff)
2899 {
2900         struct raid_map_disk_data *dd = &map_buff->data[0];
2901         int map, row, col;
2902         u16 map_cnt, row_cnt, disks_per_row;
2903
2904         if (rc != 0)
2905                 return;
2906
2907         /* Show details only if debugging has been activated. */
2908         if (h->raid_offload_debug < 2)
2909                 return;
2910
2911         dev_info(&h->pdev->dev, "structure_size = %u\n",
2912                                 le32_to_cpu(map_buff->structure_size));
2913         dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2914                         le32_to_cpu(map_buff->volume_blk_size));
2915         dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2916                         le64_to_cpu(map_buff->volume_blk_cnt));
2917         dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2918                         map_buff->phys_blk_shift);
2919         dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2920                         map_buff->parity_rotation_shift);
2921         dev_info(&h->pdev->dev, "strip_size = %u\n",
2922                         le16_to_cpu(map_buff->strip_size));
2923         dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2924                         le64_to_cpu(map_buff->disk_starting_blk));
2925         dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2926                         le64_to_cpu(map_buff->disk_blk_cnt));
2927         dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2928                         le16_to_cpu(map_buff->data_disks_per_row));
2929         dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2930                         le16_to_cpu(map_buff->metadata_disks_per_row));
2931         dev_info(&h->pdev->dev, "row_cnt = %u\n",
2932                         le16_to_cpu(map_buff->row_cnt));
2933         dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2934                         le16_to_cpu(map_buff->layout_map_count));
2935         dev_info(&h->pdev->dev, "flags = 0x%x\n",
2936                         le16_to_cpu(map_buff->flags));
2937         dev_info(&h->pdev->dev, "encryption = %s\n",
2938                         le16_to_cpu(map_buff->flags) &
2939                         RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
2940         dev_info(&h->pdev->dev, "dekindex = %u\n",
2941                         le16_to_cpu(map_buff->dekindex));
2942         map_cnt = le16_to_cpu(map_buff->layout_map_count);
2943         for (map = 0; map < map_cnt; map++) {
2944                 dev_info(&h->pdev->dev, "Map%u:\n", map);
2945                 row_cnt = le16_to_cpu(map_buff->row_cnt);
2946                 for (row = 0; row < row_cnt; row++) {
2947                         dev_info(&h->pdev->dev, "  Row%u:\n", row);
2948                         disks_per_row =
2949                                 le16_to_cpu(map_buff->data_disks_per_row);
2950                         for (col = 0; col < disks_per_row; col++, dd++)
2951                                 dev_info(&h->pdev->dev,
2952                                         "    D%02u: h=0x%04x xor=%u,%u\n",
2953                                         col, dd->ioaccel_handle,
2954                                         dd->xor_mult[0], dd->xor_mult[1]);
2955                         disks_per_row =
2956                                 le16_to_cpu(map_buff->metadata_disks_per_row);
2957                         for (col = 0; col < disks_per_row; col++, dd++)
2958                                 dev_info(&h->pdev->dev,
2959                                         "    M%02u: h=0x%04x xor=%u,%u\n",
2960                                         col, dd->ioaccel_handle,
2961                                         dd->xor_mult[0], dd->xor_mult[1]);
2962                 }
2963         }
2964 }
2965 #else
2966 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2967                         __attribute__((unused)) int rc,
2968                         __attribute__((unused)) struct raid_map_data *map_buff)
2969 {
2970 }
2971 #endif
2972
2973 static int hpsa_get_raid_map(struct ctlr_info *h,
2974         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2975 {
2976         int rc = 0;
2977         struct CommandList *c;
2978         struct ErrorInfo *ei;
2979
2980         c = cmd_alloc(h);
2981
2982         if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2983                         sizeof(this_device->raid_map), 0,
2984                         scsi3addr, TYPE_CMD)) {
2985                 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
2986                 cmd_free(h, c);
2987                 return -1;
2988         }
2989         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2990                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2991         if (rc)
2992                 goto out;
2993         ei = c->err_info;
2994         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2995                 hpsa_scsi_interpret_error(h, c);
2996                 rc = -1;
2997                 goto out;
2998         }
2999         cmd_free(h, c);
3000
3001         /* @todo in the future, dynamically allocate RAID map memory */
3002         if (le32_to_cpu(this_device->raid_map.structure_size) >
3003                                 sizeof(this_device->raid_map)) {
3004                 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3005                 rc = -1;
3006         }
3007         hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3008         return rc;
3009 out:
3010         cmd_free(h, c);
3011         return rc;
3012 }
3013
3014 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3015                 unsigned char scsi3addr[], u16 bmic_device_index,
3016                 struct bmic_identify_physical_device *buf, size_t bufsize)
3017 {
3018         int rc = IO_OK;
3019         struct CommandList *c;
3020         struct ErrorInfo *ei;
3021
3022         c = cmd_alloc(h);
3023         rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3024                 0, RAID_CTLR_LUNID, TYPE_CMD);
3025         if (rc)
3026                 goto out;
3027
3028         c->Request.CDB[2] = bmic_device_index & 0xff;
3029         c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3030
3031         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3032                                                 NO_TIMEOUT);
3033         ei = c->err_info;
3034         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3035                 hpsa_scsi_interpret_error(h, c);
3036                 rc = -1;
3037         }
3038 out:
3039         cmd_free(h, c);
3040         return rc;
3041 }
3042
3043 static int hpsa_vpd_page_supported(struct ctlr_info *h,
3044         unsigned char scsi3addr[], u8 page)
3045 {
3046         int rc;
3047         int i;
3048         int pages;
3049         unsigned char *buf, bufsize;
3050
3051         buf = kzalloc(256, GFP_KERNEL);
3052         if (!buf)
3053                 return 0;
3054
3055         /* Get the size of the page list first */
3056         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3057                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3058                                 buf, HPSA_VPD_HEADER_SZ);
3059         if (rc != 0)
3060                 goto exit_unsupported;
3061         pages = buf[3];
3062         if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3063                 bufsize = pages + HPSA_VPD_HEADER_SZ;
3064         else
3065                 bufsize = 255;
3066
3067         /* Get the whole VPD page list */
3068         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3069                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3070                                 buf, bufsize);
3071         if (rc != 0)
3072                 goto exit_unsupported;
3073
3074         pages = buf[3];
3075         for (i = 1; i <= pages; i++)
3076                 if (buf[3 + i] == page)
3077                         goto exit_supported;
3078 exit_unsupported:
3079         kfree(buf);
3080         return 0;
3081 exit_supported:
3082         kfree(buf);
3083         return 1;
3084 }
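
/*
 * Worked example with hypothetical buffer contents: for a
 * supported-pages response of buf[3] = 2, buf[4] = 0x00, buf[5] = 0xC1,
 * asking about page 0xC1 (the vendor page hpsa_get_raid_level() reads)
 * matches at i = 2 and returns 1; any page absent from
 * buf[4..3 + pages] returns 0.
 */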
3085
3086 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3087         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3088 {
3089         int rc;
3090         unsigned char *buf;
3091         u8 ioaccel_status;
3092
3093         this_device->offload_config = 0;
3094         this_device->offload_enabled = 0;
3095         this_device->offload_to_be_enabled = 0;
3096
3097         buf = kzalloc(64, GFP_KERNEL);
3098         if (!buf)
3099                 return;
3100         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3101                 goto out;
3102         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3103                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3104         if (rc != 0)
3105                 goto out;
3106
3107 #define IOACCEL_STATUS_BYTE 4
3108 #define OFFLOAD_CONFIGURED_BIT 0x01
3109 #define OFFLOAD_ENABLED_BIT 0x02
3110         ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3111         this_device->offload_config =
3112                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3113         if (this_device->offload_config) {
3114                 this_device->offload_enabled =
3115                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3116                 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3117                         this_device->offload_enabled = 0;
3118         }
3119         this_device->offload_to_be_enabled = this_device->offload_enabled;
3120 out:
3121         kfree(buf);
3122         return;
3123 }
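
/*
 * Decoding sketch for the ioaccel status byte read above (byte 4 of the
 * VPD page; the values are hypothetical): 0x01 means offload is
 * configured but not enabled, 0x03 means configured and enabled
 * (provided the RAID map can also be fetched), and 0x00 leaves every
 * offload flag clear.
 */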
3124
3125 /* Get the device id from inquiry page 0x83 */
3126 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3127         unsigned char *device_id, int buflen)
3128 {
3129         int rc;
3130         unsigned char *buf;
3131
3132         if (buflen > 16)
3133                 buflen = 16;
3134         buf = kzalloc(64, GFP_KERNEL);
3135         if (!buf)
3136                 return -ENOMEM;
3137         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
3138         if (rc == 0)
3139                 memcpy(device_id, &buf[8], buflen);
3140         kfree(buf);
3141         return rc != 0;
3142 }
3143
3144 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3145                 void *buf, int bufsize,
3146                 int extended_response)
3147 {
3148         int rc = IO_OK;
3149         struct CommandList *c;
3150         unsigned char scsi3addr[8];
3151         struct ErrorInfo *ei;
3152
3153         c = cmd_alloc(h);
3154
3155         /* address the controller */
3156         memset(scsi3addr, 0, sizeof(scsi3addr));
3157         if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3158                 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3159                 rc = -1;
3160                 goto out;
3161         }
3162         if (extended_response)
3163                 c->Request.CDB[1] = extended_response;
3164         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3165                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3166         if (rc)
3167                 goto out;
3168         ei = c->err_info;
3169         if (ei->CommandStatus != 0 &&
3170             ei->CommandStatus != CMD_DATA_UNDERRUN) {
3171                 hpsa_scsi_interpret_error(h, c);
3172                 rc = -1;
3173         } else {
3174                 struct ReportLUNdata *rld = buf;
3175
3176                 if (rld->extended_response_flag != extended_response) {
3177                         dev_err(&h->pdev->dev,
3178                                 "report luns requested format %u, got %u\n",
3179                                 extended_response,
3180                                 rld->extended_response_flag);
3181                         rc = -1;
3182                 }
3183         }
3184 out:
3185         cmd_free(h, c);
3186         return rc;
3187 }
3188
3189 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3190                 struct ReportExtendedLUNdata *buf, int bufsize)
3191 {
3192         return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3193                                                 HPSA_REPORT_PHYS_EXTENDED);
3194 }
3195
3196 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3197                 struct ReportLUNdata *buf, int bufsize)
3198 {
3199         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3200 }
3201
3202 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3203         int bus, int target, int lun)
3204 {
3205         device->bus = bus;
3206         device->target = target;
3207         device->lun = lun;
3208 }
3209
3210 /* Use VPD inquiry to get details of volume status */
3211 static int hpsa_get_volume_status(struct ctlr_info *h,
3212                                         unsigned char scsi3addr[])
3213 {
3214         int rc;
3215         int status;
3216         int size;
3217         unsigned char *buf;
3218
3219         buf = kzalloc(64, GFP_KERNEL);
3220         if (!buf)
3221                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3222
3223         /* Does controller have VPD for logical volume status? */
3224         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3225                 goto exit_failed;
3226
3227         /* Get the size of the VPD return buffer */
3228         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3229                                         buf, HPSA_VPD_HEADER_SZ);
3230         if (rc != 0)
3231                 goto exit_failed;
3232         size = buf[3];
3233
3234         /* Now get the whole VPD buffer */
3235         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3236                                         buf, size + HPSA_VPD_HEADER_SZ);
3237         if (rc != 0)
3238                 goto exit_failed;
3239         status = buf[4]; /* status byte */
3240
3241         kfree(buf);
3242         return status;
3243 exit_failed:
3244         kfree(buf);
3245         return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3246 }
3247
3248 /* Determine offline status of a volume.
3249  * Return either:
3250  *  0 (not offline)
3251  *  0xff (offline for unknown reasons)
3252  *  # (integer code indicating one of several NOT READY states
3253  *     describing why a volume is to be kept offline)
3254  */
3255 static int hpsa_volume_offline(struct ctlr_info *h,
3256                                         unsigned char scsi3addr[])
3257 {
3258         struct CommandList *c;
3259         unsigned char *sense;
3260         u8 sense_key, asc, ascq;
3261         int sense_len;
3262         int rc, ldstat = 0;
3263         u16 cmd_status;
3264         u8 scsi_status;
3265 #define ASC_LUN_NOT_READY 0x04
3266 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3267 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3268
3269         c = cmd_alloc(h);
3270
3271         (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3272         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3273         if (rc) {
3274                 cmd_free(h, c);
3275                 return 0;
3276         }
3277         sense = c->err_info->SenseInfo;
3278         if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3279                 sense_len = sizeof(c->err_info->SenseInfo);
3280         else
3281                 sense_len = c->err_info->SenseLen;
3282         decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3283         cmd_status = c->err_info->CommandStatus;
3284         scsi_status = c->err_info->ScsiStatus;
3285         cmd_free(h, c);
3286         /* Is the volume 'not ready'? */
3287         if (cmd_status != CMD_TARGET_STATUS ||
3288                 scsi_status != SAM_STAT_CHECK_CONDITION ||
3289                 sense_key != NOT_READY ||
3290                 asc != ASC_LUN_NOT_READY)  {
3291                 return 0;
3292         }
3293
3294         /* Determine the reason for not ready state */
3295         ldstat = hpsa_get_volume_status(h, scsi3addr);
3296
3297         /* Keep volume offline in certain cases: */
3298         switch (ldstat) {
3299         case HPSA_LV_UNDERGOING_ERASE:
3300         case HPSA_LV_NOT_AVAILABLE:
3301         case HPSA_LV_UNDERGOING_RPI:
3302         case HPSA_LV_PENDING_RPI:
3303         case HPSA_LV_ENCRYPTED_NO_KEY:
3304         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3305         case HPSA_LV_UNDERGOING_ENCRYPTION:
3306         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3307         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3308                 return ldstat;
3309         case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3310                 /* If VPD status page isn't available,
3311                  * use ASC/ASCQ to determine state
3312                  */
3313                 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3314                         (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3315                         return ldstat;
3316                 break;
3317         default:
3318                 break;
3319         }
3320         return 0;
3321 }
3322
3323 /*
3324  * Find out if a logical device supports aborts by simply trying one.
3325  * Smart Array may claim not to support aborts on logical drives, but
3326  * if an MSA2000 is connected, the drives on that box will be presented
3327  * by the Smart Array as logical drives, and aborts may be sent to
3328  * those devices successfully.  So the simplest way to find out is
3329  * to simply try an abort and see how the device responds.
3330  */
3331 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3332                                         unsigned char *scsi3addr)
3333 {
3334         struct CommandList *c;
3335         struct ErrorInfo *ei;
3336         int rc = 0;
3337
3338         u64 tag = (u64) -1; /* bogus tag */
3339
3340         /* Assume that physical devices support aborts */
3341         if (!is_logical_dev_addr_mode(scsi3addr))
3342                 return 1;
3343
3344         c = cmd_alloc(h);
3345
3346         (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3347         (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3348         /* no unmap needed here because no data xfer. */
3349         ei = c->err_info;
3350         switch (ei->CommandStatus) {
3351         case CMD_INVALID:
3352                 rc = 0;
3353                 break;
3354         case CMD_UNABORTABLE:
3355         case CMD_ABORT_FAILED:
3356                 rc = 1;
3357                 break;
3358         case CMD_TMF_STATUS:
3359                 rc = hpsa_evaluate_tmf_status(h, c);
3360                 break;
3361         default:
3362                 rc = 0;
3363                 break;
3364         }
3365         cmd_free(h, c);
3366         return rc;
3367 }
3368
3369 static int hpsa_update_device_info(struct ctlr_info *h,
3370         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3371         unsigned char *is_OBDR_device)
3372 {
3373
3374 #define OBDR_SIG_OFFSET 43
3375 #define OBDR_TAPE_SIG "$DR-10"
3376 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3377 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3378
3379         unsigned char *inq_buff;
3380         unsigned char *obdr_sig;
3381         int rc = 0;
3382
3383         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3384         if (!inq_buff) {
3385                 rc = -ENOMEM;
3386                 goto bail_out;
3387         }
3388
3389         /* Do an inquiry to the device to see what it is. */
3390         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3391                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3392                 /* Inquiry failed (msg printed already) */
3393                 dev_err(&h->pdev->dev,
3394                         "hpsa_update_device_info: inquiry failed\n");
3395                 rc = -EIO;
3396                 goto bail_out;
3397         }
3398
3399         this_device->devtype = (inq_buff[0] & 0x1f);
3400         memcpy(this_device->scsi3addr, scsi3addr, 8);
3401         memcpy(this_device->vendor, &inq_buff[8],
3402                 sizeof(this_device->vendor));
3403         memcpy(this_device->model, &inq_buff[16],
3404                 sizeof(this_device->model));
3405         memset(this_device->device_id, 0,
3406                 sizeof(this_device->device_id));
3407         hpsa_get_device_id(h, scsi3addr, this_device->device_id,
3408                 sizeof(this_device->device_id));
3409
3410         if (this_device->devtype == TYPE_DISK &&
3411                 is_logical_dev_addr_mode(scsi3addr)) {
3412                 int volume_offline;
3413
3414                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3415                 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3416                         hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3417                 volume_offline = hpsa_volume_offline(h, scsi3addr);
3418                 if (volume_offline < 0 || volume_offline > 0xff)
3419                         volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3420                 this_device->volume_offline = volume_offline & 0xff;
3421         } else {
3422                 this_device->raid_level = RAID_UNKNOWN;
3423                 this_device->offload_config = 0;
3424                 this_device->offload_enabled = 0;
3425                 this_device->offload_to_be_enabled = 0;
3426                 this_device->hba_ioaccel_enabled = 0;
3427                 this_device->volume_offline = 0;
3428                 this_device->queue_depth = h->nr_cmds;
3429         }
3430
3431         if (is_OBDR_device) {
3432                 /* See if this is a One-Button-Disaster-Recovery device
3433                  * by looking for "$DR-10" at offset 43 in inquiry data.
3434                  */
3435                 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3436                 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3437                                         strncmp(obdr_sig, OBDR_TAPE_SIG,
3438                                                 OBDR_SIG_LEN) == 0);
3439         }
3440         kfree(inq_buff);
3441         return 0;
3442
3443 bail_out:
3444         kfree(inq_buff);
3445         return rc;
3446 }
3447
3448 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3449                         struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3450 {
3451         unsigned long flags;
3452         int rc, entry;
3453         /*
3454          * See if this device supports aborts.  If we already know
3455          * the device, we already know if it supports aborts, otherwise
3456          * we have to find out if it supports aborts by trying one.
3457          */
3458         spin_lock_irqsave(&h->devlock, flags);
3459         rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3460         if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3461                 entry >= 0 && entry < h->ndevices) {
3462                 dev->supports_aborts = h->dev[entry]->supports_aborts;
3463                 spin_unlock_irqrestore(&h->devlock, flags);
3464         } else {
3465                 spin_unlock_irqrestore(&h->devlock, flags);
3466                 dev->supports_aborts =
3467                                 hpsa_device_supports_aborts(h, scsi3addr);
3468                 if (dev->supports_aborts < 0)
3469                         dev->supports_aborts = 0;
3470         }
3471 }
3472
3473 static unsigned char *ext_target_model[] = {
3474         "MSA2012",
3475         "MSA2024",
3476         "MSA2312",
3477         "MSA2324",
3478         "P2000 G3 SAS",
3479         "MSA 2040 SAS",
3480         NULL,
3481 };
3482
3483 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
3484 {
3485         int i;
3486
3487         for (i = 0; ext_target_model[i]; i++)
3488                 if (strncmp(device->model, ext_target_model[i],
3489                         strlen(ext_target_model[i])) == 0)
3490                         return 1;
3491         return 0;
3492 }
3493
3494 /* Helper function to assign bus, target, lun mapping of devices.
3495  * Puts non-external target logical volumes on bus 0, external target logical
3496  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
3497  * Logical drive target and lun are assigned at this time, but
3498  * physical device lun and target assignment are deferred (assigned
3499  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3500  */
3501 static void figure_bus_target_lun(struct ctlr_info *h,
3502         u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3503 {
3504         u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
3505
3506         if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3507                 /* physical device, target and lun filled in later */
3508                 if (is_hba_lunid(lunaddrbytes))
3509                         hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
3510                 else
3511                         /* defer target, lun assignment for physical devices */
3512                         hpsa_set_bus_target_lun(device, 2, -1, -1);
3513                 return;
3514         }
3515         /* It's a logical device */
3516         if (is_ext_target(h, device)) {
3517                 /* External target: put logical volumes on bus 1 and
3518                  * match the target/lun numbers the box reports.
3519                  * Other Smart Arrays: bus 0, target 0, lun = lunid & 0x3fff.
3520                  */
3521                 hpsa_set_bus_target_lun(device,
3522                         1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
3523                 return;
3524         }
3525         hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
3526 }
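
/*
 * Worked example (the lunid value is hypothetical): an external target
 * logical volume reporting lunid 0x00050001 is assigned bus 1,
 * target (0x00050001 >> 16) & 0x3fff = 5, lun 0x00050001 & 0x00ff = 1;
 * a non-external logical volume with the same lunid lands on bus 0,
 * target 0, lun 0x00050001 & 0x3fff = 1.
 */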
3527
3528 /*
3529  * If there is no lun 0 on a target, linux won't find any devices.
3530  * For the external targets (arrays), we have to manually detect the enclosure
3531  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
3532  * it for some reason.  *tmpdevice is the target we're adding,
3533  * this_device is a pointer into the current element of currentsd[]
3534  * that we're building up in update_scsi_devices(), below.
3535  * lunzerobits is a bitmap that tracks which targets already have a
3536  * lun 0 assigned.
3537  * Returns 1 if an enclosure was added, 0 if not.
3538  */
3539 static int add_ext_target_dev(struct ctlr_info *h,
3540         struct hpsa_scsi_dev_t *tmpdevice,
3541         struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
3542         unsigned long lunzerobits[], int *n_ext_target_devs)
3543 {
3544         unsigned char scsi3addr[8];
3545
3546         if (test_bit(tmpdevice->target, lunzerobits))
3547                 return 0; /* There is already a lun 0 on this target. */
3548
3549         if (!is_logical_dev_addr_mode(lunaddrbytes))
3550                 return 0; /* It's the logical targets that may lack lun 0. */
3551
3552         if (!is_ext_target(h, tmpdevice))
3553                 return 0; /* Only external target devices have this problem. */
3554
3555         if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
3556                 return 0;
3557
3558         memset(scsi3addr, 0, 8);
3559         scsi3addr[3] = tmpdevice->target;
3560         if (is_hba_lunid(scsi3addr))
3561                 return 0; /* Don't add the RAID controller here. */
3562
3563         if (is_scsi_rev_5(h))
3564                 return 0; /* p1210m doesn't need to do this. */
3565
3566         if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
3567                 dev_warn(&h->pdev->dev, "Maximum number of external "
3568                         "target devices exceeded.  Check your hardware "
3569                         "configuration.");
3570                 return 0;
3571         }
3572
3573         if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
3574                 return 0;
3575         (*n_ext_target_devs)++;
3576         hpsa_set_bus_target_lun(this_device,
3577                                 tmpdevice->bus, tmpdevice->target, 0);
3578         hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
3579         set_bit(tmpdevice->target, lunzerobits);
3580         return 1;
3581 }
3582
3583 /*
3584  * Get address of physical disk used for an ioaccel2 mode command:
3585  *      1. Extract ioaccel2 handle from the command.
3586  *      2. Find a matching ioaccel2 handle from list of physical disks.
3587  *      3. Return:
3588  *              1 and set scsi3addr to address of matching physical
3589  *              0 if no matching physical disk was found.
3590  */
3591 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3592         struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3593 {
3594         struct io_accel2_cmd *c2 =
3595                         &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3596         unsigned long flags;
3597         int i;
3598
3599         spin_lock_irqsave(&h->devlock, flags);
3600         for (i = 0; i < h->ndevices; i++)
3601                 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3602                         memcpy(scsi3addr, h->dev[i]->scsi3addr,
3603                                 sizeof(h->dev[i]->scsi3addr));
3604                         spin_unlock_irqrestore(&h->devlock, flags);
3605                         return 1;
3606                 }
3607         spin_unlock_irqrestore(&h->devlock, flags);
3608         return 0;
3609 }
3610
3611 /*
3612  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
3613  * logdev.  The number of luns in physdev and logdev are returned in
3614  * *nphysicals and *nlogicals, respectively.
3615  * Returns 0 on success, -1 otherwise.
3616  */
3617 static int hpsa_gather_lun_info(struct ctlr_info *h,
3618         struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3619         struct ReportLUNdata *logdev, u32 *nlogicals)
3620 {
3621         if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3622                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3623                 return -1;
3624         }
3625         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3626         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3627                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3628                         HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3629                 *nphysicals = HPSA_MAX_PHYS_LUN;
3630         }
3631         if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3632                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3633                 return -1;
3634         }
3635         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3636         /* Reject Logicals in excess of our max capability. */
3637         if (*nlogicals > HPSA_MAX_LUN) {
3638                 dev_warn(&h->pdev->dev,
3639                         "maximum logical LUNs (%d) exceeded.  "
3640                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
3641                         *nlogicals - HPSA_MAX_LUN);
3642                 *nlogicals = HPSA_MAX_LUN;
3643         }
3644         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3645                 dev_warn(&h->pdev->dev,
3646                         "maximum logical + physical LUNs (%d) exceeded. "
3647                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3648                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3649                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3650         }
3651         return 0;
3652 }
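
/*
 * Sizing note with a worked example: LUNListLength is a big-endian byte
 * count and the divisors above reflect the entry sizes (24 bytes per
 * extended physical entry, 8 bytes per logical lunid).  A physical
 * LUNListLength of 96 therefore yields *nphysicals = 96 / 24 = 4, and a
 * logical LUNListLength of 24 yields *nlogicals = 24 / 8 = 3.
 */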
3653
3654 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3655         int i, int nphysicals, int nlogicals,
3656         struct ReportExtendedLUNdata *physdev_list,
3657         struct ReportLUNdata *logdev_list)
3658 {
3659         /* Helper function, figure out where the LUN ID info is coming from
3660          * given index i, lists of physical and logical devices, where in
3661          * the list the raid controller is supposed to appear (first or last)
3662          */
3663
3664         int logicals_start = nphysicals + (raid_ctlr_position == 0);
3665         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3666
3667         if (i == raid_ctlr_position)
3668                 return RAID_CTLR_LUNID;
3669
3670         if (i < logicals_start)
3671                 return &physdev_list->LUN[i -
3672                                 (raid_ctlr_position == 0)].lunid[0];
3673
3674         if (i < last_device)
3675                 return &logdev_list->LUN[i - nphysicals -
3676                         (raid_ctlr_position == 0)][0];
3677         BUG();
3678         return NULL;
3679 }
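
/*
 * Index layout example (the counts are hypothetical): with
 * nphysicals = 2, nlogicals = 1 and raid_ctlr_position = 3 (non-rev-5
 * controllers place the controller last), i = 0..1 map to
 * physdev_list->LUN[0..1], i = 2 maps to logdev_list->LUN[0], and i = 3
 * returns RAID_CTLR_LUNID.  On SCSI rev-5 controllers
 * raid_ctlr_position is 0, so the controller comes first and every
 * other index shifts up by one.
 */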
3680
3681 /* get physical drive ioaccel handle and queue depth */
3682 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3683                 struct hpsa_scsi_dev_t *dev,
3684                 u8 *lunaddrbytes,
3685                 struct bmic_identify_physical_device *id_phys)
3686 {
3687         int rc;
3688         struct ext_report_lun_entry *rle =
3689                 (struct ext_report_lun_entry *) lunaddrbytes;
3690
3691         dev->ioaccel_handle = rle->ioaccel_handle;
3692         if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
3693                 dev->hba_ioaccel_enabled = 1;
3694         memset(id_phys, 0, sizeof(*id_phys));
3695         rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3696                         GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3697                         sizeof(*id_phys));
3698         if (!rc)
3699                 /* Reserve space for FW operations */
3700 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3701 #define DRIVE_QUEUE_DEPTH 7
3702                 dev->queue_depth =
3703                         le16_to_cpu(id_phys->current_queue_depth_limit) -
3704                                 DRIVE_CMDS_RESERVED_FOR_FW;
3705         else
3706                 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3707 }
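
/*
 * Queue-depth example (the limit value is hypothetical): if the BMIC
 * identify data reports current_queue_depth_limit = 30, the device gets
 * a queue depth of 30 - DRIVE_CMDS_RESERVED_FOR_FW = 28; if the BMIC
 * command fails, the conservative DRIVE_QUEUE_DEPTH of 7 is used
 * instead.
 */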
3708
3709 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
3710         u8 *lunaddrbytes,
3711         struct bmic_identify_physical_device *id_phys)
3712 {
3713         if (PHYS_IOACCEL(lunaddrbytes)
3714                 && this_device->ioaccel_handle)
3715                 this_device->hba_ioaccel_enabled = 1;
3716
3717         memcpy(&this_device->active_path_index,
3718                 &id_phys->active_path_number,
3719                 sizeof(this_device->active_path_index));
3720         memcpy(&this_device->path_map,
3721                 &id_phys->redundant_path_present_map,
3722                 sizeof(this_device->path_map));
3723         memcpy(&this_device->box,
3724                 &id_phys->alternate_paths_phys_box_on_port,
3725                 sizeof(this_device->box));
3726         memcpy(&this_device->phys_connector,
3727                 &id_phys->alternate_paths_phys_connector,
3728                 sizeof(this_device->phys_connector));
3729         memcpy(&this_device->bay,
3730                 &id_phys->phys_bay_in_box,
3731                 sizeof(this_device->bay));
3732 }
3733
3734 static void hpsa_update_scsi_devices(struct ctlr_info *h)
3735 {
3736         /* The idea here is that we may get notified that some
3737          * devices have changed, so we do report physical LUNs and
3738          * report logical LUNs commands and adjust our list of
3739          * devices accordingly.
3740          *
3741          * The scsi3addr's of devices won't change so long as the
3742          * adapter is not reset.  That means we can rescan and
3743          * tell which devices we already know about, vs. new
3744          * devices, vs.  disappearing devices.
3745          */
3746         struct ReportExtendedLUNdata *physdev_list = NULL;
3747         struct ReportLUNdata *logdev_list = NULL;
3748         struct bmic_identify_physical_device *id_phys = NULL;
3749         u32 nphysicals = 0;
3750         u32 nlogicals = 0;
3751         u32 ndev_allocated = 0;
3752         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3753         int ncurrent = 0;
3754         int i, n_ext_target_devs, ndevs_to_allocate;
3755         int raid_ctlr_position;
3756         DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3757
3758         currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3759         physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3760         logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3761         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3762         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3763
3764         if (!currentsd || !physdev_list || !logdev_list ||
3765                 !tmpdevice || !id_phys) {
3766                 dev_err(&h->pdev->dev, "out of memory\n");
3767                 goto out;
3768         }
3769         memset(lunzerobits, 0, sizeof(lunzerobits));
3770
3771         h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
3772
3773         if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3774                         logdev_list, &nlogicals)) {
3775                 h->drv_req_rescan = 1;
3776                 goto out;
3777         }
3778
3779         /* We might see up to the maximum number of logical and physical disks
3780          * plus external target devices, and a device for the local RAID
3781          * controller.
3782          */
3783         ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3784
3785         /* Allocate the per device structures */
3786         for (i = 0; i < ndevs_to_allocate; i++) {
3787                 if (i >= HPSA_MAX_DEVICES) {
3788                         dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3789                                 "  %d devices ignored.\n", HPSA_MAX_DEVICES,
3790                                 ndevs_to_allocate - HPSA_MAX_DEVICES);
3791                         break;
3792                 }
3793
3794                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3795                 if (!currentsd[i]) {
3796                         dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3797                                 __FILE__, __LINE__);
3798                         h->drv_req_rescan = 1;
3799                         goto out;
3800                 }
3801                 ndev_allocated++;
3802         }
3803
3804         if (is_scsi_rev_5(h))
3805                 raid_ctlr_position = 0;
3806         else
3807                 raid_ctlr_position = nphysicals + nlogicals;
3808
3809         /* adjust our table of devices */
3810         n_ext_target_devs = 0;
3811         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3812                 u8 *lunaddrbytes, is_OBDR = 0;
3813                 int rc = 0;
3814
3815                 /* Figure out where the LUN ID info is coming from */
3816                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3817                         i, nphysicals, nlogicals, physdev_list, logdev_list);
3818
3819                 /* skip masked non-disk devices */
3820                 if (MASKED_DEVICE(lunaddrbytes))
3821                         if (i < nphysicals + (raid_ctlr_position == 0) &&
3822                                 NON_DISK_PHYS_DEV(lunaddrbytes))
3823                                 continue;
3824
3825                 /* Get device type, vendor, model, device id */
3826                 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3827                                                         &is_OBDR);
3828                 if (rc == -ENOMEM) {
3829                         dev_warn(&h->pdev->dev,
3830                                 "Out of memory, rescan deferred.\n");
3831                         h->drv_req_rescan = 1;
3832                         goto out;
3833                 }
3834                 if (rc) {
3835                         dev_warn(&h->pdev->dev,
3836                                 "Inquiry failed, skipping device.\n");
3837                         continue;
3838                 }
3839
3840                 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3841                 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
3842                 this_device = currentsd[ncurrent];
3843
3844                 /*
3845                  * For external target devices, we have to insert a LUN 0 which
3846                  * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3847                  * is nonetheless an enclosure device there.  We have to
3848                  * present that otherwise linux won't find anything if
3849                  * there is no lun 0.
3850                  */
3851                 if (add_ext_target_dev(h, tmpdevice, this_device,
3852                                 lunaddrbytes, lunzerobits,
3853                                 &n_ext_target_devs)) {
3854                         ncurrent++;
3855                         this_device = currentsd[ncurrent];
3856                 }
3857
3858                 *this_device = *tmpdevice;
3859
3860                 /* do not expose masked devices */
3861                 if (MASKED_DEVICE(lunaddrbytes) &&
3862                         i < nphysicals + (raid_ctlr_position == 0)) {
3863                         this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3864                 } else {
3865                         this_device->expose_state =
3866                                         HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
3867                 }
3868
3869                 switch (this_device->devtype) {
3870                 case TYPE_ROM:
3871                         /* We don't *really* support actual CD-ROM devices,
3872                          * just "One Button Disaster Recovery" tape drive
3873                          * which temporarily pretends to be a CD-ROM drive.
3874                          * So we check that the device is really an OBDR tape
3875                          * device by checking for "$DR-10" in bytes 43-48 of
3876                          * the inquiry data.
3877                          */
3878                         if (is_OBDR)
3879                                 ncurrent++;
3880                         break;
3881                 case TYPE_DISK:
3882                         if (i < nphysicals + (raid_ctlr_position == 0)) {
3883                                 /* The disk is in HBA mode. */
3884                                 /* Never use RAID mapper in HBA mode. */
3885                                 this_device->offload_enabled = 0;
3886                                 hpsa_get_ioaccel_drive_info(h, this_device,
3887                                         lunaddrbytes, id_phys);
3888                                 hpsa_get_path_info(this_device, lunaddrbytes,
3889                                                         id_phys);
3890                         }
3891                         ncurrent++;
3892                         break;
3893                 case TYPE_TAPE:
3894                 case TYPE_MEDIUM_CHANGER:
3895                 case TYPE_ENCLOSURE:
3896                         ncurrent++;
3897                         break;
3898                 case TYPE_RAID:
3899                         /* Only present the Smart Array HBA as a RAID controller.
3900                          * If it's a RAID controller other than the HBA itself
3901                          * (an external RAID controller, MSA500 or similar)
3902                          * don't present it.
3903                          */
3904                         if (!is_hba_lunid(lunaddrbytes))
3905                                 break;
3906                         ncurrent++;
3907                         break;
3908                 default:
3909                         break;
3910                 }
3911                 if (ncurrent >= HPSA_MAX_DEVICES)
3912                         break;
3913         }
3914         adjust_hpsa_scsi_table(h, currentsd, ncurrent);
3915 out:
3916         kfree(tmpdevice);
3917         for (i = 0; i < ndev_allocated; i++)
3918                 kfree(currentsd[i]);
3919         kfree(currentsd);
3920         kfree(physdev_list);
3921         kfree(logdev_list);
3922         kfree(id_phys);
3923 }
3924
3925 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3926                                    struct scatterlist *sg)
3927 {
3928         u64 addr64 = (u64) sg_dma_address(sg);
3929         unsigned int len = sg_dma_len(sg);
3930
3931         desc->Addr = cpu_to_le64(addr64);
3932         desc->Len = cpu_to_le32(len);
3933         desc->Ext = 0;
3934 }
3935
3936 /*
3937  * hpsa_scatter_gather takes a struct scsi_cmnd (cmd), performs the PCI
3938  * DMA mapping, and fills in the scatter-gather entries of the
3939  * hpsa command (cp).
3940  */
3941 static int hpsa_scatter_gather(struct ctlr_info *h,
3942                 struct CommandList *cp,
3943                 struct scsi_cmnd *cmd)
3944 {
3945         struct scatterlist *sg;
3946         int use_sg, i, sg_limit, chained, last_sg;
3947         struct SGDescriptor *curr_sg;
3948
3949         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3950
3951         use_sg = scsi_dma_map(cmd);
3952         if (use_sg < 0)
3953                 return use_sg;
3954
3955         if (!use_sg)
3956                 goto sglist_finished;
3957
3958         /*
3959          * If the number of entries is greater than the max for a single list,
3960          * then we have a chained list; we will set up all but one entry in the
3961          * first list (the last entry is saved for link information);
3962          * otherwise, we don't have a chained list and we'll set up each of
3963          * the entries in the one list.
3964          */
3965         curr_sg = cp->SG;
3966         chained = use_sg > h->max_cmd_sg_entries;
3967         sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
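        /*
         * Worked example (illustrative values): with max_cmd_sg_entries == 32
         * and use_sg == 40, sg_limit is 31, so 31 descriptors go in the inline
         * list, the 32nd slot becomes the chain pointer, and the remaining
         * 40 - 31 = 9 descriptors land in the chained block handled below.
         */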
3968         last_sg = scsi_sg_count(cmd) - 1;
3969         scsi_for_each_sg(cmd, sg, sg_limit, i) {
3970                 hpsa_set_sg_descriptor(curr_sg, sg);
3971                 curr_sg++;
3972         }
3973
3974         if (chained) {
3975                 /*
3976                  * Continue with the chained list.  Set curr_sg to the chained
3977                  * list.  Modify the limit to the total count less the entries
3978                  * we've already set up.  Resume the scan at the list entry
3979                  * where the previous loop left off.
3980                  */
3981                 curr_sg = h->cmd_sg_list[cp->cmdindex];
3982                 sg_limit = use_sg - sg_limit;
3983                 for_each_sg(sg, sg, sg_limit, i) {
3984                         hpsa_set_sg_descriptor(curr_sg, sg);
3985                         curr_sg++;
3986                 }
3987         }
3988
3989         /* Back the pointer up to the last entry and mark it as "last". */
3990         (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
3991
3992         if (use_sg + chained > h->maxSG)
3993                 h->maxSG = use_sg + chained;
3994
3995         if (chained) {
3996                 cp->Header.SGList = h->max_cmd_sg_entries;
3997                 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3998                 if (hpsa_map_sg_chain_block(h, cp)) {
3999                         scsi_dma_unmap(cmd);
4000                         return -1;
4001                 }
4002                 return 0;
4003         }
4004
4005 sglist_finished:
4006
4007         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
4008         cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4009         return 0;
4010 }
4011
4012 #define IO_ACCEL_INELIGIBLE (1)
4013 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4014 {
4015         int is_write = 0;
4016         u32 block;
4017         u32 block_cnt;
4018
4019         /* Perform some CDB fixups if needed using 10 byte reads/writes only */
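        /* Note: a 6-byte CDB carries a 21-bit LBA (byte 1 bits 4:0 plus bytes 2-3). */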
4020         switch (cdb[0]) {
4021         case WRITE_6:
4022         case WRITE_12:
4023                 is_write = 1;
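                /* fall through */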
4024         case READ_6:
4025         case READ_12:
4026                 if (*cdb_len == 6) {
4027                         block = ((cdb[1] & 0x1F) << 16) | get_unaligned_be16(&cdb[2]);
4028                         block_cnt = cdb[4];
4029                         if (block_cnt == 0)
4030                                 block_cnt = 256;
4031                 } else {
4032                         BUG_ON(*cdb_len != 12);
4033                         block = get_unaligned_be32(&cdb[2]);
4034                         block_cnt = get_unaligned_be32(&cdb[6]);
4035                 }
4036                 if (block_cnt > 0xffff)
4037                         return IO_ACCEL_INELIGIBLE;
4038
4039                 cdb[0] = is_write ? WRITE_10 : READ_10;
4040                 cdb[1] = 0;
4041                 cdb[2] = (u8) (block >> 24);
4042                 cdb[3] = (u8) (block >> 16);
4043                 cdb[4] = (u8) (block >> 8);
4044                 cdb[5] = (u8) (block);
4045                 cdb[6] = 0;
4046                 cdb[7] = (u8) (block_cnt >> 8);
4047                 cdb[8] = (u8) (block_cnt);
4048                 cdb[9] = 0;
4049                 *cdb_len = 10;
4050                 break;
4051         }
4052         return 0;
4053 }
4054
4055 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4056         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4057         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4058 {
4059         struct scsi_cmnd *cmd = c->scsi_cmd;
4060         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4061         unsigned int len;
4062         unsigned int total_len = 0;
4063         struct scatterlist *sg;
4064         u64 addr64;
4065         int use_sg, i;
4066         struct SGDescriptor *curr_sg;
4067         u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4068
4069         /* TODO: implement chaining support */
4070         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4071                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4072                 return IO_ACCEL_INELIGIBLE;
4073         }
4074
4075         BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4076
4077         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4078                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4079                 return IO_ACCEL_INELIGIBLE;
4080         }
4081
4082         c->cmd_type = CMD_IOACCEL1;
4083
4084         /* Adjust the DMA address to point to the accelerated command buffer */
4085         c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4086                                 (c->cmdindex * sizeof(*cp));
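        /* ioaccel1 command buffers must be 128-byte aligned */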
4087         BUG_ON(c->busaddr & 0x0000007F);
4088
4089         use_sg = scsi_dma_map(cmd);
4090         if (use_sg < 0) {
4091                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4092                 return use_sg;
4093         }
4094
4095         if (use_sg) {
4096                 curr_sg = cp->SG;
4097                 scsi_for_each_sg(cmd, sg, use_sg, i) {
4098                         addr64 = (u64) sg_dma_address(sg);
4099                         len  = sg_dma_len(sg);
4100                         total_len += len;
4101                         curr_sg->Addr = cpu_to_le64(addr64);
4102                         curr_sg->Len = cpu_to_le32(len);
4103                         curr_sg->Ext = cpu_to_le32(0);
4104                         curr_sg++;
4105                 }
4106                 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4107
4108                 switch (cmd->sc_data_direction) {
4109                 case DMA_TO_DEVICE:
4110                         control |= IOACCEL1_CONTROL_DATA_OUT;
4111                         break;
4112                 case DMA_FROM_DEVICE:
4113                         control |= IOACCEL1_CONTROL_DATA_IN;
4114                         break;
4115                 case DMA_NONE:
4116                         control |= IOACCEL1_CONTROL_NODATAXFER;
4117                         break;
4118                 default:
4119                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4120                                 cmd->sc_data_direction);
4121                         BUG();
4122                         break;
4123                 }
4124         } else {
4125                 control |= IOACCEL1_CONTROL_NODATAXFER;
4126         }
4127
4128         c->Header.SGList = use_sg;
4129         /* Fill out the command structure to submit */
4130         cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4131         cp->transfer_len = cpu_to_le32(total_len);
4132         cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4133                         (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4134         cp->control = cpu_to_le32(control);
4135         memcpy(cp->CDB, cdb, cdb_len);
4136         memcpy(cp->CISS_LUN, scsi3addr, 8);
4137         /* Tag was already set at init time. */
4138         enqueue_cmd_and_start_io(h, c);
4139         return 0;
4140 }
4141
4142 /*
4143  * Queue a command directly to a device behind the controller using the
4144  * I/O accelerator path.
4145  */
4146 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4147         struct CommandList *c)
4148 {
4149         struct scsi_cmnd *cmd = c->scsi_cmd;
4150         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4151
4152         c->phys_disk = dev;
4153
4154         return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4155                 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4156 }
4157
4158 /*
4159  * Set encryption parameters for the ioaccel2 request
4160  */
4161 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4162         struct CommandList *c, struct io_accel2_cmd *cp)
4163 {
4164         struct scsi_cmnd *cmd = c->scsi_cmd;
4165         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4166         struct raid_map_data *map = &dev->raid_map;
4167         u64 first_block;
4168
4169         /* Are we doing encryption on this device? */
4170         if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4171                 return;
4172         /* Set the data encryption key index. */
4173         cp->dekindex = map->dekindex;
4174
4175         /* Set the encryption enable flag, encoded into direction field. */
4176         cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4177
4178         /* Set encryption tweak values based on logical block address
4179          * If block size is 512, tweak value is LBA.
4180          * For other block sizes, tweak is (LBA * block size) / 512.
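         * e.g., with a 4096-byte block size, LBA 100 yields tweak (100 * 4096) / 512 = 800.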
4181          */
4182         switch (cmd->cmnd[0]) {
4183         /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4184         case WRITE_6:
4185         case READ_6:
4186                 first_block = ((cmd->cmnd[1] & 0x1F) << 16) | get_unaligned_be16(&cmd->cmnd[2]);
4187                 break;
4188         case WRITE_10:
4189         case READ_10:
4190         /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4191         case WRITE_12:
4192         case READ_12:
4193                 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4194                 break;
4195         case WRITE_16:
4196         case READ_16:
4197                 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4198                 break;
4199         default:
4200                 dev_err(&h->pdev->dev,
4201                         "ERROR: %s: size (0x%x) not supported for encryption\n",
4202                         __func__, cmd->cmnd[0]);
4203                 BUG();
4204                 break;
4205         }
4206
4207         if (le32_to_cpu(map->volume_blk_size) != 512)
4208                 first_block = first_block *
4209                                 le32_to_cpu(map->volume_blk_size)/512;
4210
4211         cp->tweak_lower = cpu_to_le32(first_block);
4212         cp->tweak_upper = cpu_to_le32(first_block >> 32);
4213 }
4214
4215 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4216         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4217         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4218 {
4219         struct scsi_cmnd *cmd = c->scsi_cmd;
4220         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4221         struct ioaccel2_sg_element *curr_sg;
4222         int use_sg, i;
4223         struct scatterlist *sg;
4224         u64 addr64;
4225         u32 len;
4226         u32 total_len = 0;
4227
4228         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4229
4230         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4231                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4232                 return IO_ACCEL_INELIGIBLE;
4233         }
4234
4235         c->cmd_type = CMD_IOACCEL2;
4236         /* Adjust the DMA address to point to the accelerated command buffer */
4237         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4238                                 (c->cmdindex * sizeof(*cp));
4239         BUG_ON(c->busaddr & 0x0000007F);
4240
4241         memset(cp, 0, sizeof(*cp));
4242         cp->IU_type = IOACCEL2_IU_TYPE;
4243
4244         use_sg = scsi_dma_map(cmd);
4245         if (use_sg < 0) {
4246                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4247                 return use_sg;
4248         }
4249
4250         if (use_sg) {
4251                 curr_sg = cp->sg;
4252                 if (use_sg > h->ioaccel_maxsg) {
4253                         addr64 = le64_to_cpu(
4254                                 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4255                         curr_sg->address = cpu_to_le64(addr64);
4256                         curr_sg->length = 0;
4257                         curr_sg->reserved[0] = 0;
4258                         curr_sg->reserved[1] = 0;
4259                         curr_sg->reserved[2] = 0;
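                        /* 0x80 marks this element as a chain pointer to the block below */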
4260                         curr_sg->chain_indicator = 0x80;
4261
4262                         curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4263                 }
4264                 scsi_for_each_sg(cmd, sg, use_sg, i) {
4265                         addr64 = (u64) sg_dma_address(sg);
4266                         len  = sg_dma_len(sg);
4267                         total_len += len;
4268                         curr_sg->address = cpu_to_le64(addr64);
4269                         curr_sg->length = cpu_to_le32(len);
4270                         curr_sg->reserved[0] = 0;
4271                         curr_sg->reserved[1] = 0;
4272                         curr_sg->reserved[2] = 0;
4273                         curr_sg->chain_indicator = 0;
4274                         curr_sg++;
4275                 }
4276
4277                 switch (cmd->sc_data_direction) {
4278                 case DMA_TO_DEVICE:
4279                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4280                         cp->direction |= IOACCEL2_DIR_DATA_OUT;
4281                         break;
4282                 case DMA_FROM_DEVICE:
4283                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4284                         cp->direction |= IOACCEL2_DIR_DATA_IN;
4285                         break;
4286                 case DMA_NONE:
4287                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4288                         cp->direction |= IOACCEL2_DIR_NO_DATA;
4289                         break;
4290                 default:
4291                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4292                                 cmd->sc_data_direction);
4293                         BUG();
4294                         break;
4295                 }
4296         } else {
4297                 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4298                 cp->direction |= IOACCEL2_DIR_NO_DATA;
4299         }
4300
4301         /* Set encryption parameters, if necessary */
4302         set_encrypt_ioaccel2(h, c, cp);
4303
4304         cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4305         cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4306         memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4307
4308         cp->data_len = cpu_to_le32(total_len);
4309         cp->err_ptr = cpu_to_le64(c->busaddr +
4310                         offsetof(struct io_accel2_cmd, error_data));
4311         cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4312
4313         /* fill in sg elements */
4314         if (use_sg > h->ioaccel_maxsg) {
4315                 cp->sg_count = 1;
4316                 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4317                         atomic_dec(&phys_disk->ioaccel_cmds_out);
4318                         scsi_dma_unmap(cmd);
4319                         return -1;
4320                 }
4321         } else
4322                 cp->sg_count = (u8) use_sg;
4323
4324         enqueue_cmd_and_start_io(h, c);
4325         return 0;
4326 }
4327
4328 /*
4329  * Queue a command to the correct I/O accelerator path.
4330  */
4331 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4332         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4333         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4334 {
4335         /* Try to honor the device's queue depth */
4336         if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4337                                         phys_disk->queue_depth) {
4338                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4339                 return IO_ACCEL_INELIGIBLE;
4340         }
4341         if (h->transMethod & CFGTBL_Trans_io_accel1)
4342                 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4343                                                 cdb, cdb_len, scsi3addr,
4344                                                 phys_disk);
4345         else
4346                 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4347                                                 cdb, cdb_len, scsi3addr,
4348                                                 phys_disk);
4349 }
4350
4351 static void raid_map_helper(struct raid_map_data *map,
4352                 int offload_to_mirror, u32 *map_index, u32 *current_group)
4353 {
4354         if (offload_to_mirror == 0) {
4355                 /* use physical disk in the first mirrored group. */
4356                 *map_index %= le16_to_cpu(map->data_disks_per_row);
4357                 return;
4358         }
4359         do {
4360                 /* determine mirror group that *map_index indicates */
4361                 *current_group = *map_index /
4362                         le16_to_cpu(map->data_disks_per_row);
4363                 if (offload_to_mirror == *current_group)
4364                         continue;
4365                 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4366                         /* select map index from next group */
4367                         *map_index += le16_to_cpu(map->data_disks_per_row);
4368                         (*current_group)++;
4369                 } else {
4370                         /* select map index from first group */
4371                         *map_index %= le16_to_cpu(map->data_disks_per_row);
4372                         *current_group = 0;
4373                 }
4374         } while (offload_to_mirror != *current_group);
4375 }
4376
4377 /*
4378  * Attempt to perform offload RAID mapping for a logical volume I/O.
4379  */
4380 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4381         struct CommandList *c)
4382 {
4383         struct scsi_cmnd *cmd = c->scsi_cmd;
4384         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4385         struct raid_map_data *map = &dev->raid_map;
4386         struct raid_map_disk_data *dd = &map->data[0];
4387         int is_write = 0;
4388         u32 map_index;
4389         u64 first_block, last_block;
4390         u32 block_cnt;
4391         u32 blocks_per_row;
4392         u64 first_row, last_row;
4393         u32 first_row_offset, last_row_offset;
4394         u32 first_column, last_column;
4395         u64 r0_first_row, r0_last_row;
4396         u32 r5or6_blocks_per_row;
4397         u64 r5or6_first_row, r5or6_last_row;
4398         u32 r5or6_first_row_offset, r5or6_last_row_offset;
4399         u32 r5or6_first_column, r5or6_last_column;
4400         u32 total_disks_per_row;
4401         u32 stripesize;
4402         u32 first_group, last_group, current_group;
4403         u32 map_row;
4404         u32 disk_handle;
4405         u64 disk_block;
4406         u32 disk_block_cnt;
4407         u8 cdb[16];
4408         u8 cdb_len;
4409         u16 strip_size;
4410 #if BITS_PER_LONG == 32
4411         u64 tmpdiv;
4412 #endif
4413         int offload_to_mirror;
4414
4415         /* check for valid opcode, get LBA and block count */
4416         switch (cmd->cmnd[0]) {
4417         case WRITE_6:
4418                 is_write = 1;
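                /* fall through */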
4419         case READ_6:
4420                 first_block = ((cmd->cmnd[1] & 0x1F) << 16) | get_unaligned_be16(&cmd->cmnd[2]);
4421                 block_cnt = cmd->cmnd[4];
4422                 if (block_cnt == 0)
4423                         block_cnt = 256;
4424                 break;
4425         case WRITE_10:
4426                 is_write = 1;
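                /* fall through */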
4427         case READ_10:
4428                 first_block =
4429                         (((u64) cmd->cmnd[2]) << 24) |
4430                         (((u64) cmd->cmnd[3]) << 16) |
4431                         (((u64) cmd->cmnd[4]) << 8) |
4432                         cmd->cmnd[5];
4433                 block_cnt =
4434                         (((u32) cmd->cmnd[7]) << 8) |
4435                         cmd->cmnd[8];
4436                 break;
4437         case WRITE_12:
4438                 is_write = 1;
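                /* fall through */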
4439         case READ_12:
4440                 first_block =
4441                         (((u64) cmd->cmnd[2]) << 24) |
4442                         (((u64) cmd->cmnd[3]) << 16) |
4443                         (((u64) cmd->cmnd[4]) << 8) |
4444                         cmd->cmnd[5];
4445                 block_cnt =
4446                         (((u32) cmd->cmnd[6]) << 24) |
4447                         (((u32) cmd->cmnd[7]) << 16) |
4448                         (((u32) cmd->cmnd[8]) << 8) |
4449                         cmd->cmnd[9];
4450                 break;
4451         case WRITE_16:
4452                 is_write = 1;
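                /* fall through */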
4453         case READ_16:
4454                 first_block =
4455                         (((u64) cmd->cmnd[2]) << 56) |
4456                         (((u64) cmd->cmnd[3]) << 48) |
4457                         (((u64) cmd->cmnd[4]) << 40) |
4458                         (((u64) cmd->cmnd[5]) << 32) |
4459                         (((u64) cmd->cmnd[6]) << 24) |
4460                         (((u64) cmd->cmnd[7]) << 16) |
4461                         (((u64) cmd->cmnd[8]) << 8) |
4462                         cmd->cmnd[9];
4463                 block_cnt =
4464                         (((u32) cmd->cmnd[10]) << 24) |
4465                         (((u32) cmd->cmnd[11]) << 16) |
4466                         (((u32) cmd->cmnd[12]) << 8) |
4467                         cmd->cmnd[13];
4468                 break;
4469         default:
4470                 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4471         }
4472         last_block = first_block + block_cnt - 1;
4473
4474         /* check for write to non-RAID-0 */
4475         if (is_write && dev->raid_level != 0)
4476                 return IO_ACCEL_INELIGIBLE;
4477
4478         /* check for invalid block or wraparound */
4479         if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4480                 last_block < first_block)
4481                 return IO_ACCEL_INELIGIBLE;
4482
4483         /* calculate stripe information for the request */
4484         blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4485                                 le16_to_cpu(map->strip_size);
4486         strip_size = le16_to_cpu(map->strip_size);
4487 #if BITS_PER_LONG == 32
4488         tmpdiv = first_block;
4489         (void) do_div(tmpdiv, blocks_per_row);
4490         first_row = tmpdiv;
4491         tmpdiv = last_block;
4492         (void) do_div(tmpdiv, blocks_per_row);
4493         last_row = tmpdiv;
4494         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4495         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4496         tmpdiv = first_row_offset;
4497         (void) do_div(tmpdiv, strip_size);
4498         first_column = tmpdiv;
4499         tmpdiv = last_row_offset;
4500         (void) do_div(tmpdiv, strip_size);
4501         last_column = tmpdiv;
4502 #else
4503         first_row = first_block / blocks_per_row;
4504         last_row = last_block / blocks_per_row;
4505         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4506         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4507         first_column = first_row_offset / strip_size;
4508         last_column = last_row_offset / strip_size;
4509 #endif
4510
4511         /* if this isn't a single row/column then give to the controller */
4512         if ((first_row != last_row) || (first_column != last_column))
4513                 return IO_ACCEL_INELIGIBLE;
4514
4515         /* proceeding with driver mapping */
4516         total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4517                                 le16_to_cpu(map->metadata_disks_per_row);
4518         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4519                                 le16_to_cpu(map->row_cnt);
4520         map_index = (map_row * total_disks_per_row) + first_column;
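        /*
         * Worked example (illustrative values): with 3 data + 1 parity disks
         * per row (total_disks_per_row == 4), row_cnt == 4 and no parity
         * rotation, first_row == 5 and first_column == 2 give
         * map_row = 5 % 4 = 1 and map_index = 1 * 4 + 2 = 6.
         */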
4521
4522         switch (dev->raid_level) {
4523         case HPSA_RAID_0:
4524                 break; /* nothing special to do */
4525         case HPSA_RAID_1:
4526                 /* Handles load balance across RAID 1 members.
4527                  * (2-drive R1 and R10 with even # of drives.)
4528                  * Appropriate for SSDs, not optimal for HDDs
4529                  */
4530                 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4531                 if (dev->offload_to_mirror)
4532                         map_index += le16_to_cpu(map->data_disks_per_row);
4533                 dev->offload_to_mirror = !dev->offload_to_mirror;
4534                 break;
4535         case HPSA_RAID_ADM:
4536                 /* Handles N-way mirrors (R1-ADM)
4537                  * and R10 with # of drives divisible by 3.
4538                  */
4539                 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4540
4541                 offload_to_mirror = dev->offload_to_mirror;
4542                 raid_map_helper(map, offload_to_mirror,
4543                                 &map_index, &current_group);
4544                 /* set mirror group to use next time */
4545                 offload_to_mirror =
4546                         (offload_to_mirror >=
4547                         le16_to_cpu(map->layout_map_count) - 1)
4548                         ? 0 : offload_to_mirror + 1;
4549                 dev->offload_to_mirror = offload_to_mirror;
4550                 /* Avoid direct use of dev->offload_to_mirror within this
4551                  * function since multiple threads might simultaneously
4552                  * increment it beyond the range of map->layout_map_count - 1.
4553                  */
4554                 break;
4555         case HPSA_RAID_5:
4556         case HPSA_RAID_6:
4557                 if (le16_to_cpu(map->layout_map_count) <= 1)
4558                         break;
4559
4560                 /* Verify first and last block are in same RAID group */
4561                 r5or6_blocks_per_row =
4562                         le16_to_cpu(map->strip_size) *
4563                         le16_to_cpu(map->data_disks_per_row);
4564                 BUG_ON(r5or6_blocks_per_row == 0);
4565                 stripesize = r5or6_blocks_per_row *
4566                         le16_to_cpu(map->layout_map_count);
4567 #if BITS_PER_LONG == 32
4568                 tmpdiv = first_block;
4569                 first_group = do_div(tmpdiv, stripesize);
4570                 tmpdiv = first_group;
4571                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4572                 first_group = tmpdiv;
4573                 tmpdiv = last_block;
4574                 last_group = do_div(tmpdiv, stripesize);
4575                 tmpdiv = last_group;
4576                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4577                 last_group = tmpdiv;
4578 #else
4579                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4580                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
4581 #endif
4582                 if (first_group != last_group)
4583                         return IO_ACCEL_INELIGIBLE;
4584
4585                 /* Verify request is in a single row of RAID 5/6 */
4586 #if BITS_PER_LONG == 32
4587                 tmpdiv = first_block;
4588                 (void) do_div(tmpdiv, stripesize);
4589                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4590                 tmpdiv = last_block;
4591                 (void) do_div(tmpdiv, stripesize);
4592                 r5or6_last_row = r0_last_row = tmpdiv;
4593 #else
4594                 first_row = r5or6_first_row = r0_first_row =
4595                                                 first_block / stripesize;
4596                 r5or6_last_row = r0_last_row = last_block / stripesize;
4597 #endif
4598                 if (r5or6_first_row != r5or6_last_row)
4599                         return IO_ACCEL_INELIGIBLE;
4600
4601
4602                 /* Verify request is in a single column */
4603 #if BITS_PER_LONG == 32
4604                 tmpdiv = first_block;
4605                 first_row_offset = do_div(tmpdiv, stripesize);
4606                 tmpdiv = first_row_offset;
4607                 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4608                 r5or6_first_row_offset = first_row_offset;
4609                 tmpdiv = last_block;
4610                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4611                 tmpdiv = r5or6_last_row_offset;
4612                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4613                 tmpdiv = r5or6_first_row_offset;
4614                 (void) do_div(tmpdiv, strip_size);
4615                 first_column = r5or6_first_column = tmpdiv;
4616                 tmpdiv = r5or6_last_row_offset;
4617                 (void) do_div(tmpdiv, strip_size);
4618                 r5or6_last_column = tmpdiv;
4619 #else
4620                 first_row_offset = r5or6_first_row_offset =
4621                         (u32)((first_block % stripesize) %
4622                                                 r5or6_blocks_per_row);
4623
4624                 r5or6_last_row_offset =
4625                         (u32)((last_block % stripesize) %
4626                                                 r5or6_blocks_per_row);
4627
4628                 first_column = r5or6_first_column =
4629                         r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4630                 r5or6_last_column =
4631                         r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4632 #endif
4633                 if (r5or6_first_column != r5or6_last_column)
4634                         return IO_ACCEL_INELIGIBLE;
4635
4636                 /* Request is eligible */
4637                 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4638                         le16_to_cpu(map->row_cnt);
4639
4640                 map_index = (first_group *
4641                         (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4642                         (map_row * total_disks_per_row) + first_column;
4643                 break;
4644         default:
4645                 return IO_ACCEL_INELIGIBLE;
4646         }
4647
4648         if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4649                 return IO_ACCEL_INELIGIBLE;
4650
4651         c->phys_disk = dev->phys_disk[map_index];
4652
4653         disk_handle = dd[map_index].ioaccel_handle;
4654         disk_block = le64_to_cpu(map->disk_starting_blk) +
4655                         first_row * le16_to_cpu(map->strip_size) +
4656                         (first_row_offset - first_column *
4657                         le16_to_cpu(map->strip_size));
4658         disk_block_cnt = block_cnt;
4659
4660         /* handle differing logical/physical block sizes */
4661         if (map->phys_blk_shift) {
4662                 disk_block <<= map->phys_blk_shift;
4663                 disk_block_cnt <<= map->phys_blk_shift;
4664         }
4665         BUG_ON(disk_block_cnt > 0xffff);
4666
4667         /* build the new CDB for the physical disk I/O */
4668         if (disk_block > 0xffffffff) {
4669                 cdb[0] = is_write ? WRITE_16 : READ_16;
4670                 cdb[1] = 0;
4671                 cdb[2] = (u8) (disk_block >> 56);
4672                 cdb[3] = (u8) (disk_block >> 48);
4673                 cdb[4] = (u8) (disk_block >> 40);
4674                 cdb[5] = (u8) (disk_block >> 32);
4675                 cdb[6] = (u8) (disk_block >> 24);
4676                 cdb[7] = (u8) (disk_block >> 16);
4677                 cdb[8] = (u8) (disk_block >> 8);
4678                 cdb[9] = (u8) (disk_block);
4679                 cdb[10] = (u8) (disk_block_cnt >> 24);
4680                 cdb[11] = (u8) (disk_block_cnt >> 16);
4681                 cdb[12] = (u8) (disk_block_cnt >> 8);
4682                 cdb[13] = (u8) (disk_block_cnt);
4683                 cdb[14] = 0;
4684                 cdb[15] = 0;
4685                 cdb_len = 16;
4686         } else {
4687                 cdb[0] = is_write ? WRITE_10 : READ_10;
4688                 cdb[1] = 0;
4689                 cdb[2] = (u8) (disk_block >> 24);
4690                 cdb[3] = (u8) (disk_block >> 16);
4691                 cdb[4] = (u8) (disk_block >> 8);
4692                 cdb[5] = (u8) (disk_block);
4693                 cdb[6] = 0;
4694                 cdb[7] = (u8) (disk_block_cnt >> 8);
4695                 cdb[8] = (u8) (disk_block_cnt);
4696                 cdb[9] = 0;
4697                 cdb_len = 10;
4698         }
4699         return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
4700                                                 dev->scsi3addr,
4701                                                 dev->phys_disk[map_index]);
4702 }
4703
4704 /*
4705  * Submit commands down the "normal" RAID stack path
4706  * All callers to hpsa_ciss_submit must check lockup_detected:
4707  * optionally before, and always after, calling cmd_alloc.
4708  */
4709 static int hpsa_ciss_submit(struct ctlr_info *h,
4710         struct CommandList *c, struct scsi_cmnd *cmd,
4711         unsigned char scsi3addr[])
4712 {
4713         cmd->host_scribble = (unsigned char *) c;
4714         c->cmd_type = CMD_SCSI;
4715         c->scsi_cmd = cmd;
4716         c->Header.ReplyQueue = 0;  /* unused in simple mode */
4717         memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4718         c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4719
4720         /* Fill in the request block... */
4721
4722         c->Request.Timeout = 0;
4723         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4724         c->Request.CDBLen = cmd->cmd_len;
4725         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4726         switch (cmd->sc_data_direction) {
4727         case DMA_TO_DEVICE:
4728                 c->Request.type_attr_dir =
4729                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4730                 break;
4731         case DMA_FROM_DEVICE:
4732                 c->Request.type_attr_dir =
4733                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4734                 break;
4735         case DMA_NONE:
4736                 c->Request.type_attr_dir =
4737                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4738                 break;
4739         case DMA_BIDIRECTIONAL:
4740                 /* This can happen if a buggy application does a scsi passthru
4741                  * and sets both inlen and outlen to non-zero. ( see
4742                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4743                  */
4744
4745                 c->Request.type_attr_dir =
4746                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4747                 /* This is technically wrong, and hpsa controllers should
4748                  * reject it with CMD_INVALID, which is the most correct
4749                  * response, but non-fibre backends appear to let it
4750                  * slide by, and give the same results as if this field
4751                  * were set correctly.  Either way is acceptable for
4752                  * our purposes here.
4753                  */
4754
4755                 break;
4756
4757         default:
4758                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4759                         cmd->sc_data_direction);
4760                 BUG();
4761                 break;
4762         }
4763
4764         if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4765                 hpsa_cmd_resolve_and_free(h, c);
4766                 return SCSI_MLQUEUE_HOST_BUSY;
4767         }
4768         enqueue_cmd_and_start_io(h, c);
4769         /* the cmd'll come back via intr handler in complete_scsi_command()  */
4770         return 0;
4771 }
4772
4773 static void hpsa_cmd_init(struct ctlr_info *h, int index,
4774                                 struct CommandList *c)
4775 {
4776         dma_addr_t cmd_dma_handle, err_dma_handle;
4777
4778         /* Zero out all of commandlist except the last field, refcount */
4779         memset(c, 0, offsetof(struct CommandList, refcount));
4780         c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4781         cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4782         c->err_info = h->errinfo_pool + index;
4783         memset(c->err_info, 0, sizeof(*c->err_info));
4784         err_dma_handle = h->errinfo_pool_dhandle
4785             + index * sizeof(*c->err_info);
4786         c->cmdindex = index;
4787         c->busaddr = (u32) cmd_dma_handle;
4788         c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4789         c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4790         c->h = h;
4791         c->scsi_cmd = SCSI_CMD_IDLE;
4792 }
4793
4794 static void hpsa_preinitialize_commands(struct ctlr_info *h)
4795 {
4796         int i;
4797
4798         for (i = 0; i < h->nr_cmds; i++) {
4799                 struct CommandList *c = h->cmd_pool + i;
4800
4801                 hpsa_cmd_init(h, i, c);
4802                 atomic_set(&c->refcount, 0);
4803         }
4804 }
4805
4806 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4807                                 struct CommandList *c)
4808 {
4809         dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4810
4811         BUG_ON(c->cmdindex != index);
4812
4813         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4814         memset(c->err_info, 0, sizeof(*c->err_info));
4815         c->busaddr = (u32) cmd_dma_handle;
4816 }
4817
4818 static int hpsa_ioaccel_submit(struct ctlr_info *h,
4819                 struct CommandList *c, struct scsi_cmnd *cmd,
4820                 unsigned char *scsi3addr)
4821 {
4822         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4823         int rc = IO_ACCEL_INELIGIBLE;
4824
4825         cmd->host_scribble = (unsigned char *) c;
4826
4827         if (dev->offload_enabled) {
4828                 hpsa_cmd_init(h, c->cmdindex, c);
4829                 c->cmd_type = CMD_SCSI;
4830                 c->scsi_cmd = cmd;
4831                 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4832                 if (rc < 0)     /* scsi_dma_map failed. */
4833                         rc = SCSI_MLQUEUE_HOST_BUSY;
4834         } else if (dev->hba_ioaccel_enabled) {
4835                 hpsa_cmd_init(h, c->cmdindex, c);
4836                 c->cmd_type = CMD_SCSI;
4837                 c->scsi_cmd = cmd;
4838                 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4839                 if (rc < 0)     /* scsi_dma_map failed. */
4840                         rc = SCSI_MLQUEUE_HOST_BUSY;
4841         }
4842         return rc;
4843 }
4844
4845 static void hpsa_command_resubmit_worker(struct work_struct *work)
4846 {
4847         struct scsi_cmnd *cmd;
4848         struct hpsa_scsi_dev_t *dev;
4849         struct CommandList *c = container_of(work, struct CommandList, work);
4850
4851         cmd = c->scsi_cmd;
4852         dev = cmd->device->hostdata;
4853         if (!dev) {
4854                 cmd->result = DID_NO_CONNECT << 16;
4855                 return hpsa_cmd_free_and_done(c->h, c, cmd);
4856         }
4857         if (c->reset_pending)
4858                 return hpsa_cmd_resolve_and_free(c->h, c);
4859         if (c->abort_pending)
4860                 return hpsa_cmd_abort_and_free(c->h, c, cmd);
4861         if (c->cmd_type == CMD_IOACCEL2) {
4862                 struct ctlr_info *h = c->h;
4863                 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4864                 int rc;
4865
4866                 if (c2->error_data.serv_response ==
4867                                 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4868                         rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4869                         if (rc == 0)
4870                                 return;
4871                         if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4872                                 /*
4873                                  * If we get here, it means dma mapping failed.
4874                                  * Try again via scsi mid layer, which will
4875                                  * then get SCSI_MLQUEUE_HOST_BUSY.
4876                                  */
4877                                 cmd->result = DID_IMM_RETRY << 16;
4878                                 return hpsa_cmd_free_and_done(h, c, cmd);
4879                         }
4880                         /* else, fall thru and resubmit down CISS path */
4881                 }
4882         }
4883         hpsa_cmd_partial_init(c->h, c->cmdindex, c);
4884         if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4885                 /*
4886                  * If we get here, it means dma mapping failed. Try
4887                  * again via scsi mid layer, which will then get
4888                  * SCSI_MLQUEUE_HOST_BUSY.
4889                  *
4890                  * hpsa_ciss_submit will have already freed c
4891                  * if it encountered a dma mapping failure.
4892                  */
4893                 cmd->result = DID_IMM_RETRY << 16;
4894                 cmd->scsi_done(cmd);
4895         }
4896 }
4897
4898 /* Running in struct Scsi_Host->host_lock-less mode */
4899 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4900 {
4901         struct ctlr_info *h;
4902         struct hpsa_scsi_dev_t *dev;
4903         unsigned char scsi3addr[8];
4904         struct CommandList *c;
4905         int rc = 0;
4906
4907         /* Get the ptr to our adapter structure out of cmd->host. */
4908         h = sdev_to_hba(cmd->device);
4909
4910         BUG_ON(cmd->request->tag < 0);
4911
4912         dev = cmd->device->hostdata;
4913         if (!dev) {
4914                 cmd->result = DID_NO_CONNECT << 16;
4915                 cmd->scsi_done(cmd);
4916                 return 0;
4917         }
4918
4919         memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4920
4921         if (unlikely(lockup_detected(h))) {
4922                 cmd->result = DID_NO_CONNECT << 16;
4923                 cmd->scsi_done(cmd);
4924                 return 0;
4925         }
4926         c = cmd_tagged_alloc(h, cmd);
4927
4928         /*
4929          * Call alternate submit routine for I/O accelerated commands.
4930          * Retries always go down the normal I/O path.
4931          */
4932         if (likely(cmd->retries == 0 &&
4933                 cmd->request->cmd_type == REQ_TYPE_FS &&
4934                 h->acciopath_status)) {
4935                 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
4936                 if (rc == 0)
4937                         return 0;
4938                 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4939                         hpsa_cmd_resolve_and_free(h, c);
4940                         return SCSI_MLQUEUE_HOST_BUSY;
4941                 }
4942         }
4943         return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4944 }
4945
4946 static void hpsa_scan_complete(struct ctlr_info *h)
4947 {
4948         unsigned long flags;
4949
4950         spin_lock_irqsave(&h->scan_lock, flags);
4951         h->scan_finished = 1;
4952         wake_up_all(&h->scan_wait_queue);
4953         spin_unlock_irqrestore(&h->scan_lock, flags);
4954 }
4955
4956 static void hpsa_scan_start(struct Scsi_Host *sh)
4957 {
4958         struct ctlr_info *h = shost_to_hba(sh);
4959         unsigned long flags;
4960
4961         /*
4962          * Don't let rescans be initiated on a controller known to be locked
4963          * up.  If the controller locks up *during* a rescan, that thread is
4964          * probably hosed, but at least we can prevent new rescan threads from
4965          * piling up on a locked up controller.
4966          */
4967         if (unlikely(lockup_detected(h)))
4968                 return hpsa_scan_complete(h);
4969
4970         /* wait until any scan already in progress is finished. */
4971         while (1) {
4972                 spin_lock_irqsave(&h->scan_lock, flags);
4973                 if (h->scan_finished)
4974                         break;
4975                 spin_unlock_irqrestore(&h->scan_lock, flags);
4976                 wait_event(h->scan_wait_queue, h->scan_finished);
4977                 /* Note: We don't need to worry about a race between this
4978                  * thread and driver unload because the midlayer will
4979                  * have incremented the reference count, so unload won't
4980                  * happen if we're in here.
4981                  */
4982         }
4983         h->scan_finished = 0; /* mark scan as in progress */
4984         spin_unlock_irqrestore(&h->scan_lock, flags);
4985
4986         if (unlikely(lockup_detected(h)))
4987                 return hpsa_scan_complete(h);
4988
4989         hpsa_update_scsi_devices(h);
4990
4991         hpsa_scan_complete(h);
4992 }
4993
4994 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4995 {
4996         struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4997
4998         if (!logical_drive)
4999                 return -ENODEV;
5000
5001         if (qdepth < 1)
5002                 qdepth = 1;
5003         else if (qdepth > logical_drive->queue_depth)
5004                 qdepth = logical_drive->queue_depth;
5005
5006         return scsi_change_queue_depth(sdev, qdepth);
5007 }
5008
5009 static int hpsa_scan_finished(struct Scsi_Host *sh,
5010         unsigned long elapsed_time)
5011 {
5012         struct ctlr_info *h = shost_to_hba(sh);
5013         unsigned long flags;
5014         int finished;
5015
5016         spin_lock_irqsave(&h->scan_lock, flags);
5017         finished = h->scan_finished;
5018         spin_unlock_irqrestore(&h->scan_lock, flags);
5019         return finished;
5020 }
5021
5022 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5023 {
5024         struct Scsi_Host *sh;
5025         int error;
5026
5027         sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5028         if (sh == NULL) {
5029                 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5030                 return -ENOMEM;
5031         }
5032
5033         sh->io_port = 0;
5034         sh->n_io_port = 0;
5035         sh->this_id = -1;
5036         sh->max_channel = 3;
5037         sh->max_cmd_len = MAX_COMMAND_SIZE;
5038         sh->max_lun = HPSA_MAX_LUN;
5039         sh->max_id = HPSA_MAX_LUN;
5040         sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5041         sh->cmd_per_lun = sh->can_queue;
5042         sh->sg_tablesize = h->maxsgentries;
5043         sh->hostdata[0] = (unsigned long) h;
5044         sh->irq = h->intr[h->intr_mode];
5045         sh->unique_id = sh->irq;
5046         error = scsi_init_shared_tag_map(sh, sh->can_queue);
5047         if (error) {
5048                 dev_err(&h->pdev->dev,
5049                         "%s: scsi_init_shared_tag_map failed for controller %d\n",
5050                         __func__, h->ctlr);
5051                 scsi_host_put(sh);
5052                 return error;
5053         }
5054         h->scsi_host = sh;
5055         return 0;
5056 }
5057
5058 static int hpsa_scsi_add_host(struct ctlr_info *h)
5059 {
5060         int rv;
5061
5062         rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5063         if (rv) {
5064                 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5065                 return rv;
5066         }
5067         scsi_scan_host(h->scsi_host);
5068         return 0;
5069 }
5070
5071 /*
5072  * The block layer has already gone to the trouble of picking out a unique,
5073  * small-integer tag for this request.  We use an offset from that value as
5074  * an index to select our command block.  (The offset allows us to reserve the
5075  * low-numbered entries for our own uses.)
5076  */
5077 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5078 {
5079         int idx = scmd->request->tag;
5080
5081         if (idx < 0)
5082                 return idx;
5083
5084         /* Offset to leave space for internal cmds. */
5085         return idx + HPSA_NRESERVED_CMDS;
5086 }
5087
5088 /*
5089  * Send a TEST_UNIT_READY command to the specified LUN using the specified
5090  * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5091  */
5092 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5093                                 struct CommandList *c, unsigned char lunaddr[],
5094                                 int reply_queue)
5095 {
5096         int rc;
5097
5098         /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5099         (void) fill_cmd(c, TEST_UNIT_READY, h,
5100                         NULL, 0, 0, lunaddr, TYPE_CMD);
5101         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5102         if (rc)
5103                 return rc;
5104         /* no unmap needed here because no data xfer. */
5105
5106         /* Check if the unit is already ready. */
5107         if (c->err_info->CommandStatus == CMD_SUCCESS)
5108                 return 0;
5109
5110         /*
5111          * The first command sent after reset will receive "unit attention" to
5112          * indicate that the LUN has been reset...this is actually what we're
5113          * looking for (but, success is good too).
5114          */
5115         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5116                 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5117                         (c->err_info->SenseInfo[2] == NO_SENSE ||
5118                          c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5119                 return 0;
5120
5121         return 1;
5122 }
5123
5124 /*
5125  * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5126  * returns zero when the unit is ready, and non-zero when giving up.
5127  */
5128 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5129                                 struct CommandList *c,
5130                                 unsigned char lunaddr[], int reply_queue)
5131 {
5132         int rc;
5133         int count = 0;
5134         int waittime = 1; /* seconds */
5135
5136         /* Send test unit ready until device ready, or give up. */
5137         for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5138
5139                 /*
5140                  * Wait for a bit.  do this first, because if we send
5141                  * the TUR right away, the reset will just abort it.
5142                  */
5143                 msleep(1000 * waittime);
5144
5145                 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5146                 if (!rc)
5147                         break;
5148
5149                 /* Increase wait time with each try, up to a point. */
5150                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5151                         waittime *= 2;
5152
5153                 dev_warn(&h->pdev->dev,
5154                          "waiting %d secs for device to become ready.\n",
5155                          waittime);
5156         }
5157
5158         return rc;
5159 }
5160
5161 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5162                                            unsigned char lunaddr[],
5163                                            int reply_queue)
5164 {
5165         int first_queue;
5166         int last_queue;
5167         int rq;
5168         int rc = 0;
5169         struct CommandList *c;
5170
5171         c = cmd_alloc(h);
5172
5173         /*
5174          * If no specific reply queue was requested, then send the TUR
5175          * repeatedly, requesting a reply on each reply queue; otherwise execute
5176          * the loop exactly once using only the specified queue.
5177          */
5178         if (reply_queue == DEFAULT_REPLY_QUEUE) {
5179                 first_queue = 0;
5180                 last_queue = h->nreply_queues - 1;
5181         } else {
5182                 first_queue = reply_queue;
5183                 last_queue = reply_queue;
5184         }
5185
5186         for (rq = first_queue; rq <= last_queue; rq++) {
5187                 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5188                 if (rc)
5189                         break;
5190         }
5191
5192         if (rc)
5193                 dev_warn(&h->pdev->dev, "giving up on device.\n");
5194         else
5195                 dev_warn(&h->pdev->dev, "device is ready.\n");
5196
5197         cmd_free(h, c);
5198         return rc;
5199 }
5200
5201 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5202  * complaining.  Doing a host- or bus-reset can't do anything good here.
5203  */
5204 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5205 {
5206         int rc;
5207         struct ctlr_info *h;
5208         struct hpsa_scsi_dev_t *dev;
5209         u8 reset_type;
5210         char msg[48];
5211
5212         /* find the controller to which the command to be reset was sent */
5213         h = sdev_to_hba(scsicmd->device);
5214         if (h == NULL) /* paranoia */
5215                 return FAILED;
5216
5217         if (lockup_detected(h))
5218                 return FAILED;
5219
5220         dev = scsicmd->device->hostdata;
5221         if (!dev) {
5222                 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5223                 return FAILED;
5224         }
5225
5226         /* if controller locked up, we can guarantee command won't complete */
5227         if (lockup_detected(h)) {
5228                 snprintf(msg, sizeof(msg),
5229                          "cmd %d RESET FAILED, lockup detected",
5230                          hpsa_get_cmd_index(scsicmd));
5231                 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5232                 return FAILED;
5233         }
5234
5235         /* this reset request might be the result of a lockup; check */
5236         if (detect_controller_lockup(h)) {
5237                 snprintf(msg, sizeof(msg),
5238                          "cmd %d RESET FAILED, new lockup detected",
5239                          hpsa_get_cmd_index(scsicmd));
5240                 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5241                 return FAILED;
5242         }
5243
5244         /* Do not attempt on controller */
5245         if (is_hba_lunid(dev->scsi3addr))
5246                 return SUCCESS;
5247
5248         if (is_logical_dev_addr_mode(dev->scsi3addr))
5249                 reset_type = HPSA_DEVICE_RESET_MSG;
5250         else
5251                 reset_type = HPSA_PHYS_TARGET_RESET;
5252
5253         snprintf(msg, sizeof(msg), "resetting %s",
5254                 reset_type == HPSA_DEVICE_RESET_MSG ? "logical" : "physical");
5255         hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5256
5257         h->reset_in_progress = 1;
5258
5259         /* send a reset to the SCSI LUN which the command was sent to */
5260         rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5261                            DEFAULT_REPLY_QUEUE);
5262         sprintf(msg, "reset %s %s",
5263                 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5264                 rc == 0 ? "completed successfully" : "failed");
5265         hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5266         h->reset_in_progress = 0;
5267         return rc == 0 ? SUCCESS : FAILED;
5268 }
5269
5270 static void swizzle_abort_tag(u8 *tag)
5271 {
5272         u8 original_tag[8];
5273
5274         memcpy(original_tag, tag, 8);
5275         tag[0] = original_tag[3];
5276         tag[1] = original_tag[2];
5277         tag[2] = original_tag[1];
5278         tag[3] = original_tag[0];
5279         tag[4] = original_tag[7];
5280         tag[5] = original_tag[6];
5281         tag[6] = original_tag[5];
5282         tag[7] = original_tag[4];
5283 }
5284
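/*
 * Illustration (standalone sketch, not driver code): the swizzle above
 * byte-reverses each 32-bit half of the 8-byte abort tag in place,
 * i.e. it swaps the endianness of the two halves for firmware that
 * expects the other byte order.  The tag bytes below are made up.
 */
#if 0
#include <stdio.h>
#include <string.h>

static void swizzle(unsigned char *tag)
{
        unsigned char t[8];

        memcpy(t, tag, 8);
        tag[0] = t[3]; tag[1] = t[2]; tag[2] = t[1]; tag[3] = t[0];
        tag[4] = t[7]; tag[5] = t[6]; tag[6] = t[5]; tag[7] = t[4];
}

int main(void)
{
        unsigned char tag[8] = { 0x01, 0x02, 0x03, 0x04,
                                 0x05, 0x06, 0x07, 0x08 };
        int i;

        swizzle(tag);
        for (i = 0; i < 8; i++)
                printf("%02x ", tag[i]);        /* 04 03 02 01 08 07 06 05 */
        printf("\n");
        return 0;
}
#endif
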
5285 static void hpsa_get_tag(struct ctlr_info *h,
5286         struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5287 {
5288         u64 tag;
5289         if (c->cmd_type == CMD_IOACCEL1) {
5290                 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5291                         &h->ioaccel_cmd_pool[c->cmdindex];
5292                 tag = le64_to_cpu(cm1->tag);
5293                 *tagupper = cpu_to_le32(tag >> 32);
5294                 *taglower = cpu_to_le32(tag);
5295                 return;
5296         }
5297         if (c->cmd_type == CMD_IOACCEL2) {
5298                 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5299                         &h->ioaccel2_cmd_pool[c->cmdindex];
5300                 /* upper tag not used in ioaccel2 mode */
5301                 memset(tagupper, 0, sizeof(*tagupper));
5302                 *taglower = cm2->Tag;
5303                 return;
5304         }
5305         tag = le64_to_cpu(c->Header.tag);
5306         *tagupper = cpu_to_le32(tag >> 32);
5307         *taglower = cpu_to_le32(tag);
5308 }
5309
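/*
 * Illustration (standalone sketch, not driver code): after the
 * le64_to_cpu() conversion, hpsa_get_tag() splits the 64-bit tag into
 * 32-bit halves with a shift and a truncating assignment.  The tag
 * value below is made up.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t tag = 0x1122334455667788ULL;
        uint32_t upper = (uint32_t)(tag >> 32); /* 0x11223344 */
        uint32_t lower = (uint32_t)tag;         /* 0x55667788 */

        printf("Tag:0x%08x:%08x\n", upper, lower);
        return 0;
}
#endif
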
5310 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5311         struct CommandList *abort, int reply_queue)
5312 {
5313         int rc = IO_OK;
5314         struct CommandList *c;
5315         struct ErrorInfo *ei;
5316         __le32 tagupper, taglower;
5317
5318         c = cmd_alloc(h);
5319
5320         /* fill_cmd can't fail here, no buffer to map */
5321         (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5322                 0, 0, scsi3addr, TYPE_MSG);
5323         if (h->needs_abort_tags_swizzled)
5324                 swizzle_abort_tag(&c->Request.CDB[4]);
5325         (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5326         hpsa_get_tag(h, abort, &taglower, &tagupper);
5327         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5328                 __func__, tagupper, taglower);
5329         /* no unmap needed here because no data xfer. */
5330
5331         ei = c->err_info;
5332         switch (ei->CommandStatus) {
5333         case CMD_SUCCESS:
5334                 break;
5335         case CMD_TMF_STATUS:
5336                 rc = hpsa_evaluate_tmf_status(h, c);
5337                 break;
5338         case CMD_UNABORTABLE: /* Very common, don't make noise. */
5339                 rc = -1;
5340                 break;
5341         default:
5342                 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5343                         __func__, tagupper, taglower);
5344                 hpsa_scsi_interpret_error(h, c);
5345                 rc = -1;
5346                 break;
5347         }
5348         cmd_free(h, c);
5349         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5350                 __func__, tagupper, taglower);
5351         return rc;
5352 }
5353
5354 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5355         struct CommandList *command_to_abort, int reply_queue)
5356 {
5357         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5358         struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5359         struct io_accel2_cmd *c2a =
5360                 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5361         struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5362         struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5363
5364         /*
5365          * We're overlaying struct hpsa_tmf_struct on top of something which
5366          * was allocated as a struct io_accel2_cmd, so we better be sure it
5367          * actually fits, and doesn't overrun the error info space.
5368          */
5369         BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5370                         sizeof(struct io_accel2_cmd));
5371         BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5372                         offsetof(struct hpsa_tmf_struct, error_len) +
5373                                 sizeof(ac->error_len));
5374
5375         c->cmd_type = IOACCEL2_TMF;
5376         c->scsi_cmd = SCSI_CMD_BUSY;
5377
5378         /* Adjust the DMA address to point to the accelerated command buffer */
5379         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5380                                 (c->cmdindex * sizeof(struct io_accel2_cmd));
5381         BUG_ON(c->busaddr & 0x0000007F);
5382
5383         memset(ac, 0, sizeof(*c2)); /* yes this is correct */
5384         ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5385         ac->reply_queue = reply_queue;
5386         ac->tmf = IOACCEL2_TMF_ABORT;
5387         ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5388         memset(ac->lun_id, 0, sizeof(ac->lun_id));
5389         ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5390         ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5391         ac->error_ptr = cpu_to_le64(c->busaddr +
5392                         offsetof(struct io_accel2_cmd, error_data));
5393         ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5394 }
5395
5396 /* ioaccel2 path firmware cannot handle abort task requests.
5397  * Change abort requests to physical target reset, and send to the
5398  * address of the physical disk used for the ioaccel 2 command.
5399  * Return 0 on success (IO_OK)
5400  *       -1 on failure
5401  */
5402
5403 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5404         unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5405 {
5406         int rc = IO_OK;
5407         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5408         struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5409         unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5410         unsigned char *psa = &phys_scsi3addr[0];
5411
5412         /* Get a pointer to the hpsa logical device. */
5413         scmd = abort->scsi_cmd;
5414         dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5415         if (dev == NULL) {
5416                 dev_warn(&h->pdev->dev,
5417                         "Cannot abort: no device pointer for command.\n");
5418                 return -1; /* not abortable */
5419         }
5420
5421         if (h->raid_offload_debug > 0)
5422                 dev_info(&h->pdev->dev,
5423                         "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5424                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
5425                         "Reset as abort",
5426                         scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5427                         scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5428
5429         if (!dev->offload_enabled) {
5430                 dev_warn(&h->pdev->dev,
5431                         "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5432                 return -1; /* not abortable */
5433         }
5434
5435         /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5436         if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5437                 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5438                 return -1; /* not abortable */
5439         }
5440
5441         /* send the reset */
5442         if (h->raid_offload_debug > 0)
5443                 dev_info(&h->pdev->dev,
5444                         "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5445                         psa[0], psa[1], psa[2], psa[3],
5446                         psa[4], psa[5], psa[6], psa[7]);
5447         rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
5448         if (rc != 0) {
5449                 dev_warn(&h->pdev->dev,
5450                         "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5451                         psa[0], psa[1], psa[2], psa[3],
5452                         psa[4], psa[5], psa[6], psa[7]);
5453                 return rc; /* failed to reset */
5454         }
5455
5456         /* wait for device to recover */
5457         if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
5458                 dev_warn(&h->pdev->dev,
5459                         "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5460                         psa[0], psa[1], psa[2], psa[3],
5461                         psa[4], psa[5], psa[6], psa[7]);
5462                 return -1;  /* failed to recover */
5463         }
5464
5465         /* device recovered */
5466         dev_info(&h->pdev->dev,
5467                 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5468                 psa[0], psa[1], psa[2], psa[3],
5469                 psa[4], psa[5], psa[6], psa[7]);
5470
5471         return rc; /* success */
5472 }
5473
5474 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5475         struct CommandList *abort, int reply_queue)
5476 {
5477         int rc = IO_OK;
5478         struct CommandList *c;
5479         __le32 taglower, tagupper;
5480         struct hpsa_scsi_dev_t *dev;
5481         struct io_accel2_cmd *c2;
5482
5483         dev = abort->scsi_cmd->device->hostdata;
5484         if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5485                 return -1;
5486
5487         c = cmd_alloc(h);
5488         setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5489         c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5490         (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5491         hpsa_get_tag(h, abort, &taglower, &tagupper);
5492         dev_dbg(&h->pdev->dev,
5493                 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5494                 __func__, tagupper, taglower);
5495         /* no unmap needed here because no data xfer. */
5496
5497         dev_dbg(&h->pdev->dev,
5498                 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5499                 __func__, tagupper, taglower, c2->error_data.serv_response);
5500         switch (c2->error_data.serv_response) {
5501         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5502         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5503                 rc = 0;
5504                 break;
5505         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5506         case IOACCEL2_SERV_RESPONSE_FAILURE:
5507         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5508                 rc = -1;
5509                 break;
5510         default:
5511                 dev_warn(&h->pdev->dev,
5512                         "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5513                         __func__, tagupper, taglower,
5514                         c2->error_data.serv_response);
5515                 rc = -1;
5516         }
5517         cmd_free(h, c);
5518         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5519                 tagupper, taglower);
5520         return rc;
5521 }
5522
5523 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
5524         unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5525 {
5526         /*
5527          * ioaccel2 commands should be aborted via the
5528          * accelerated path, since RAID path is unaware of these commands,
5529          * but not all underlying firmware can handle abort TMF.
5530          * Change abort to physical device reset when abort TMF is unsupported.
5531          */
5532         if (abort->cmd_type == CMD_IOACCEL2) {
5533                 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5534                         return hpsa_send_abort_ioaccel2(h, abort,
5535                                                 reply_queue);
5536                 else
5537                         return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
5538                                                         abort, reply_queue);
5539         }
5540         return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
5541 }
5542
5543 /* Find out which reply queue a command was meant to return on */
5544 static int hpsa_extract_reply_queue(struct ctlr_info *h,
5545                                         struct CommandList *c)
5546 {
5547         if (c->cmd_type == CMD_IOACCEL2)
5548                 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5549         return c->Header.ReplyQueue;
5550 }
5551
5552 /*
5553  * Limit concurrency of abort commands to prevent
5554  * over-subscription of the reserved command pool.
5555  */
5556 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5557 {
5558 #define ABORT_CMD_WAIT_MSECS 5000
5559         return !wait_event_timeout(h->abort_cmd_wait_queue,
5560                         atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5561                         msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5562 }
5563
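/*
 * Illustration (standalone sketch, not driver code): the throttle above
 * relies on atomic_dec_if_positive(), which decrements only while the
 * counter is positive and reports failure with a negative result.  A
 * userspace analogue of that primitive using C11 atomics (the function
 * name is hypothetical):
 */
#if 0
#include <stdatomic.h>

static int dec_if_positive(atomic_int *v)
{
        int c = atomic_load(v);
        int dec;

        do {
                dec = c - 1;
                if (dec < 0)
                        break;  /* counter already zero: don't decrement */
        } while (!atomic_compare_exchange_weak(v, &c, dec));
        return dec;     /* >= 0 iff the decrement happened */
}
#endif
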
5564 /* Send an abort for the specified command.  If the device and
5565  * controller support it, send a task abort request.
5566  */
5568 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5569 {
5571         int rc;
5572         struct ctlr_info *h;
5573         struct hpsa_scsi_dev_t *dev;
5574         struct CommandList *abort; /* pointer to command to be aborted */
5575         struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
5576         char msg[256];          /* For debug messaging. */
5577         int ml = 0;
5578         __le32 tagupper, taglower;
5579         int refcount, reply_queue;
5580
5581         if (sc == NULL)
5582                 return FAILED;
5583
5584         if (sc->device == NULL)
5585                 return FAILED;
5586
5587         /* Find the controller of the command to be aborted */
5588         h = sdev_to_hba(sc->device);
5589         if (h == NULL)
5590                 return FAILED;
5591
5592         /* Find the device of the command to be aborted */
5593         dev = sc->device->hostdata;
5594         if (!dev) {
5595                 dev_err(&h->pdev->dev, "%s: device lookup failed\n",
5596                                 __func__);
5597                 return FAILED;
5598         }
5599
5600         /* If controller locked up, we can guarantee command won't complete */
5601         if (lockup_detected(h)) {
5602                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5603                                         "ABORT FAILED, lockup detected");
5604                 return FAILED;
5605         }
5606
5607         /* This is a good time to check if controller lockup has occurred */
5608         if (detect_controller_lockup(h)) {
5609                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5610                                         "ABORT FAILED, new lockup detected");
5611                 return FAILED;
5612         }
5613
5614         /* Check that controller supports some kind of task abort */
5615         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5616                 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5617                 return FAILED;
5618
5619         memset(msg, 0, sizeof(msg));
5620         ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
5621                 h->scsi_host->host_no, sc->device->channel,
5622                 sc->device->id, sc->device->lun,
5623                 "Aborting command", sc);
5624
5625         /* Get SCSI command to be aborted */
5626         abort = (struct CommandList *) sc->host_scribble;
5627         if (abort == NULL) {
5628                 /* This can happen if the command already completed. */
5629                 return SUCCESS;
5630         }
5631         refcount = atomic_inc_return(&abort->refcount);
5632         if (refcount == 1) { /* Command is done already. */
5633                 cmd_free(h, abort);
5634                 return SUCCESS;
5635         }
5636
5637         /* Don't bother trying the abort if we know it won't work. */
5638         if (abort->cmd_type != CMD_IOACCEL2 &&
5639                 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5640                 cmd_free(h, abort);
5641                 return FAILED;
5642         }
5643
5644         /*
5645          * Check that we're aborting the right command.
5646          * It's possible the CommandList already completed and got re-used.
5647          */
5648         if (abort->scsi_cmd != sc) {
5649                 cmd_free(h, abort);
5650                 return SUCCESS;
5651         }
5652
5653         abort->abort_pending = true;
5654         hpsa_get_tag(h, abort, &taglower, &tagupper);
5655         reply_queue = hpsa_extract_reply_queue(h, abort);
5656         ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
5657         as  = abort->scsi_cmd;
5658         if (as != NULL)
5659                 ml += sprintf(msg+ml,
5660                         "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5661                         as->cmd_len, as->cmnd[0], as->cmnd[1],
5662                         as->serial_number);
5663         dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
5664         hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
5665
5666         /*
5667          * The command is either in flight or already completed by the
5668          * firmware (but not yet reported to the scsi mid layer); we can't
5669          * distinguish which.  Send the abort down.
5670          */
5671         if (wait_for_available_abort_cmd(h)) {
5672                 dev_warn(&h->pdev->dev,
5673                         "%s FAILED, timeout waiting for an abort command to become available.\n",
5674                         msg);
5675                 cmd_free(h, abort);
5676                 return FAILED;
5677         }
5678         rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
5679         atomic_inc(&h->abort_cmds_available);
5680         wake_up_all(&h->abort_cmd_wait_queue);
5681         if (rc != 0) {
5682                 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
5683                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5684                                 "FAILED to abort command");
5685                 cmd_free(h, abort);
5686                 return FAILED;
5687         }
5688         dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
5689         wait_event(h->event_sync_wait_queue,
5690                    abort->scsi_cmd != sc || lockup_detected(h));
5691         cmd_free(h, abort);
5692         return !lockup_detected(h) ? SUCCESS : FAILED;
5693 }
5694
5695 /*
5696  * For operations with an associated SCSI command, a command block is
5697  * allocated at init and looked up by cmd_tagged_alloc() using the block
5698  * request tag as an index into a table of entries.  cmd_tagged_free() is
5699  * the complement, although cmd_free() may be called instead.
5700  */
5701 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5702                                             struct scsi_cmnd *scmd)
5703 {
5704         int idx = hpsa_get_cmd_index(scmd);
5705         struct CommandList *c = h->cmd_pool + idx;
5706
5707         if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5708                 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5709                         idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5710                 /* The index value comes from the block layer, so if it's out of
5711                  * bounds, it's probably not our bug.
5712                  */
5713                 BUG();
5714         }
5715
5716         atomic_inc(&c->refcount);
5717         if (unlikely(!hpsa_is_cmd_idle(c))) {
5718                 /*
5719                  * We expect that the SCSI layer will hand us a unique tag
5720                  * value.  Thus, there should never be a collision here between
5721                  * two requests...because if the selected command isn't idle
5722                  * then someone is going to be very disappointed.
5723                  */
5724                 dev_err(&h->pdev->dev,
5725                         "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5726                         idx);
5727                 if (c->scsi_cmd != NULL)
5728                         scsi_print_command(c->scsi_cmd);
5729                 scsi_print_command(scmd);
5730         }
5731
5732         hpsa_cmd_partial_init(h, idx, c);
5733         return c;
5734 }
5735
5736 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5737 {
5738         /*
5739          * Release our reference to the block.  We don't need to do anything
5740          * else to free it, because it is accessed by index.  (There's no point
5741          * in checking the result of the decrement, since we cannot guarantee
5742          * that there isn't a concurrent abort which is also accessing it.)
5743          */
5744         (void)atomic_dec(&c->refcount);
5745 }
5746
5747 /*
5748  * For operations that cannot sleep, a command block is allocated at init,
5749  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5750  * which ones are free or in use.  cmd_free() is the complement.
5751  * This function never gives up and never returns NULL.  If it hangs,
5753  * another thread must call cmd_free() to free some tags.
5754  */
5755
5756 static struct CommandList *cmd_alloc(struct ctlr_info *h)
5757 {
5758         struct CommandList *c;
5759         int refcount, i;
5760         int offset = 0;
5761
5762         /*
5763          * There is some *extremely* small but non-zero chance that
5764          * multiple threads could get in here, and one thread could
5765          * be scanning through the list of bits looking for a free
5766          * one, but the free ones are always behind him, and other
5767          * threads sneak in behind him and eat them before he can
5768          * get to them, so that while there is always a free one, a
5769          * very unlucky thread might be starved anyway, never able to
5770          * beat the other threads.  In reality, this happens so
5771          * infrequently as to be indistinguishable from never.
5772          *
5773          * Note that we start allocating commands before the SCSI host structure
5774          * is initialized.  Since the search starts at bit zero, this
5775          * all works, since we have at least one command structure available;
5776          * however, it means that the structures with the low indexes have to be
5777          * reserved for driver-initiated requests, while requests from the block
5778          * layer will use the higher indexes.
5779          */
5780
5781         for (;;) {
5782                 i = find_next_zero_bit(h->cmd_pool_bits,
5783                                         HPSA_NRESERVED_CMDS,
5784                                         offset);
5785                 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
5786                         offset = 0;
5787                         continue;
5788                 }
5789                 c = h->cmd_pool + i;
5790                 refcount = atomic_inc_return(&c->refcount);
5791                 if (unlikely(refcount > 1)) {
5792                         cmd_free(h, c); /* already in use */
5793                         offset = (i + 1) % HPSA_NRESERVED_CMDS;
5794                         continue;
5795                 }
5796                 set_bit(i & (BITS_PER_LONG - 1),
5797                         h->cmd_pool_bits + (i / BITS_PER_LONG));
5798                 break; /* it's ours now. */
5799         }
5800         hpsa_cmd_partial_init(h, i, c);
5801         return c;
5802 }
5803
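/*
 * Illustration (standalone sketch, not driver code): cmd_alloc() claims
 * a slot by bumping its refcount and treating "refcount was already
 * nonzero" as a collision, then records the claim in a bitmap.  A
 * simplified analogue of the claim step (names and sizes hypothetical;
 * the bitmap update is not atomic here, unlike set_bit() above):
 */
#if 0
#include <stdatomic.h>

#define NSLOTS 32

static atomic_int refcount[NSLOTS];
static unsigned long bitmap;    /* one bit per slot */

static int claim_slot(int i)
{
        if (atomic_fetch_add(&refcount[i], 1) != 0) {
                atomic_fetch_sub(&refcount[i], 1);      /* already in use */
                return -1;
        }
        bitmap |= 1UL << i;     /* it's ours now */
        return i;
}
#endif
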
5804 /*
5805  * This is the complementary operation to cmd_alloc().  Note, however, in some
5806  * corner cases it may also be used to free blocks allocated by
5807  * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
5808  * the clear-bit is harmless.
5809  */
5810 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5811 {
5812         if (atomic_dec_and_test(&c->refcount)) {
5813                 int i;
5814
5815                 i = c - h->cmd_pool;
5816                 clear_bit(i & (BITS_PER_LONG - 1),
5817                           h->cmd_pool_bits + (i / BITS_PER_LONG));
5818         }
5819 }
5820
5821 #ifdef CONFIG_COMPAT
5822
5823 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5824         void __user *arg)
5825 {
5826         IOCTL32_Command_struct __user *arg32 =
5827             (IOCTL32_Command_struct __user *) arg;
5828         IOCTL_Command_struct arg64;
5829         IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5830         int err;
5831         u32 cp;
5832
5833         memset(&arg64, 0, sizeof(arg64));
5834         err = 0;
5835         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5836                            sizeof(arg64.LUN_info));
5837         err |= copy_from_user(&arg64.Request, &arg32->Request,
5838                            sizeof(arg64.Request));
5839         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5840                            sizeof(arg64.error_info));
5841         err |= get_user(arg64.buf_size, &arg32->buf_size);
5842         err |= get_user(cp, &arg32->buf);
5843         arg64.buf = compat_ptr(cp);
5844         err |= copy_to_user(p, &arg64, sizeof(arg64));
5845
5846         if (err)
5847                 return -EFAULT;
5848
5849         err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
5850         if (err)
5851                 return err;
5852         err |= copy_in_user(&arg32->error_info, &p->error_info,
5853                          sizeof(arg32->error_info));
5854         if (err)
5855                 return -EFAULT;
5856         return err;
5857 }
5858
5859 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
5860         int cmd, void __user *arg)
5861 {
5862         BIG_IOCTL32_Command_struct __user *arg32 =
5863             (BIG_IOCTL32_Command_struct __user *) arg;
5864         BIG_IOCTL_Command_struct arg64;
5865         BIG_IOCTL_Command_struct __user *p =
5866             compat_alloc_user_space(sizeof(arg64));
5867         int err;
5868         u32 cp;
5869
5870         memset(&arg64, 0, sizeof(arg64));
5871         err = 0;
5872         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5873                            sizeof(arg64.LUN_info));
5874         err |= copy_from_user(&arg64.Request, &arg32->Request,
5875                            sizeof(arg64.Request));
5876         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5877                            sizeof(arg64.error_info));
5878         err |= get_user(arg64.buf_size, &arg32->buf_size);
5879         err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5880         err |= get_user(cp, &arg32->buf);
5881         arg64.buf = compat_ptr(cp);
5882         err |= copy_to_user(p, &arg64, sizeof(arg64));
5883
5884         if (err)
5885                 return -EFAULT;
5886
5887         err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
5888         if (err)
5889                 return err;
5890         err |= copy_in_user(&arg32->error_info, &p->error_info,
5891                          sizeof(arg32->error_info));
5892         if (err)
5893                 return -EFAULT;
5894         return err;
5895 }
5896
5897 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5898 {
5899         switch (cmd) {
5900         case CCISS_GETPCIINFO:
5901         case CCISS_GETINTINFO:
5902         case CCISS_SETINTINFO:
5903         case CCISS_GETNODENAME:
5904         case CCISS_SETNODENAME:
5905         case CCISS_GETHEARTBEAT:
5906         case CCISS_GETBUSTYPES:
5907         case CCISS_GETFIRMVER:
5908         case CCISS_GETDRIVVER:
5909         case CCISS_REVALIDVOLS:
5910         case CCISS_DEREGDISK:
5911         case CCISS_REGNEWDISK:
5912         case CCISS_REGNEWD:
5913         case CCISS_RESCANDISK:
5914         case CCISS_GETLUNINFO:
5915                 return hpsa_ioctl(dev, cmd, arg);
5916
5917         case CCISS_PASSTHRU32:
5918                 return hpsa_ioctl32_passthru(dev, cmd, arg);
5919         case CCISS_BIG_PASSTHRU32:
5920                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
5921
5922         default:
5923                 return -ENOIOCTLCMD;
5924         }
5925 }
5926 #endif
5927
5928 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
5929 {
5930         struct hpsa_pci_info pciinfo;
5931
5932         if (!argp)
5933                 return -EINVAL;
5934         pciinfo.domain = pci_domain_nr(h->pdev->bus);
5935         pciinfo.bus = h->pdev->bus->number;
5936         pciinfo.dev_fn = h->pdev->devfn;
5937         pciinfo.board_id = h->board_id;
5938         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5939                 return -EFAULT;
5940         return 0;
5941 }
5942
5943 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5944 {
5945         DriverVer_type DriverVer;
5946         unsigned char vmaj, vmin, vsubmin;
5947         int rc;
5948
5949         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5950                 &vmaj, &vmin, &vsubmin);
5951         if (rc != 3) {
5952                 dev_info(&h->pdev->dev, "driver version string '%s' "
5953                         "unrecognized.\n", HPSA_DRIVER_VERSION);
5954                 vmaj = 0;
5955                 vmin = 0;
5956                 vsubmin = 0;
5957         }
5958         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
5959         if (!argp)
5960                 return -EINVAL;
5961         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
5962                 return -EFAULT;
5963         return 0;
5964 }
5965
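/*
 * Illustration (standalone sketch, not driver code): the version is
 * packed one byte per component, so "3.4.10" encodes as 0x03040a and
 * each component can be recovered with shifts and masks.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned char vmaj = 3, vmin = 4, vsubmin = 10;
        unsigned int ver = (vmaj << 16) | (vmin << 8) | vsubmin;

        printf("0x%06x -> %u.%u.%u\n", ver,
               (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
        /* prints 0x03040a -> 3.4.10 */
        return 0;
}
#endif
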
5966 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5967 {
5968         IOCTL_Command_struct iocommand;
5969         struct CommandList *c;
5970         char *buff = NULL;
5971         u64 temp64;
5972         int rc = 0;
5973
5974         if (!argp)
5975                 return -EINVAL;
5976         if (!capable(CAP_SYS_RAWIO))
5977                 return -EPERM;
5978         if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
5979                 return -EFAULT;
5980         if ((iocommand.buf_size < 1) &&
5981             (iocommand.Request.Type.Direction != XFER_NONE)) {
5982                 return -EINVAL;
5983         }
5984         if (iocommand.buf_size > 0) {
5985                 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
5986                 if (buff == NULL)
5987                         return -ENOMEM;
5988                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5989                         /* Copy the data into the buffer we created */
5990                         if (copy_from_user(buff, iocommand.buf,
5991                                 iocommand.buf_size)) {
5992                                 rc = -EFAULT;
5993                                 goto out_kfree;
5994                         }
5995                 } else {
5996                         memset(buff, 0, iocommand.buf_size);
5997                 }
5998         }
5999         c = cmd_alloc(h);
6000
6001         /* Fill in the command type */
6002         c->cmd_type = CMD_IOCTL_PEND;
6003         c->scsi_cmd = SCSI_CMD_BUSY;
6004         /* Fill in Command Header */
6005         c->Header.ReplyQueue = 0; /* unused in simple mode */
6006         if (iocommand.buf_size > 0) {   /* buffer to fill */
6007                 c->Header.SGList = 1;
6008                 c->Header.SGTotal = cpu_to_le16(1);
6009         } else  { /* no buffers to fill */
6010                 c->Header.SGList = 0;
6011                 c->Header.SGTotal = cpu_to_le16(0);
6012         }
6013         memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6014
6015         /* Fill in Request block */
6016         memcpy(&c->Request, &iocommand.Request,
6017                 sizeof(c->Request));
6018
6019         /* Fill in the scatter gather information */
6020         if (iocommand.buf_size > 0) {
6021                 temp64 = pci_map_single(h->pdev, buff,
6022                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6023                 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6024                         c->SG[0].Addr = cpu_to_le64(0);
6025                         c->SG[0].Len = cpu_to_le32(0);
6026                         rc = -ENOMEM;
6027                         goto out;
6028                 }
6029                 c->SG[0].Addr = cpu_to_le64(temp64);
6030                 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6031                 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6032         }
6033         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6034         if (iocommand.buf_size > 0)
6035                 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6036         check_ioctl_unit_attention(h, c);
6037         if (rc) {
6038                 rc = -EIO;
6039                 goto out;
6040         }
6041
6042         /* Copy the error information out */
6043         memcpy(&iocommand.error_info, c->err_info,
6044                 sizeof(iocommand.error_info));
6045         if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6046                 rc = -EFAULT;
6047                 goto out;
6048         }
6049         if ((iocommand.Request.Type.Direction & XFER_READ) &&
6050                 iocommand.buf_size > 0) {
6051                 /* Copy the data out of the buffer we created */
6052                 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6053                         rc = -EFAULT;
6054                         goto out;
6055                 }
6056         }
6057 out:
6058         cmd_free(h, c);
6059 out_kfree:
6060         kfree(buff);
6061         return rc;
6062 }
6063
6064 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6065 {
6066         BIG_IOCTL_Command_struct *ioc;
6067         struct CommandList *c;
6068         unsigned char **buff = NULL;
6069         int *buff_size = NULL;
6070         u64 temp64;
6071         BYTE sg_used = 0;
6072         int status = 0;
6073         u32 left;
6074         u32 sz;
6075         BYTE __user *data_ptr;
6076
6077         if (!argp)
6078                 return -EINVAL;
6079         if (!capable(CAP_SYS_RAWIO))
6080                 return -EPERM;
6081         ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6083         if (!ioc) {
6084                 status = -ENOMEM;
6085                 goto cleanup1;
6086         }
6087         if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6088                 status = -EFAULT;
6089                 goto cleanup1;
6090         }
6091         if ((ioc->buf_size < 1) &&
6092             (ioc->Request.Type.Direction != XFER_NONE)) {
6093                 status = -EINVAL;
6094                 goto cleanup1;
6095         }
6096         /* Check kmalloc limits using all SGs */
6097         if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6098                 status = -EINVAL;
6099                 goto cleanup1;
6100         }
6101         if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6102                 status = -EINVAL;
6103                 goto cleanup1;
6104         }
6105         buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
6106         if (!buff) {
6107                 status = -ENOMEM;
6108                 goto cleanup1;
6109         }
6110         buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
6111         if (!buff_size) {
6112                 status = -ENOMEM;
6113                 goto cleanup1;
6114         }
6115         left = ioc->buf_size;
6116         data_ptr = ioc->buf;
6117         while (left) {
6118                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6119                 buff_size[sg_used] = sz;
6120                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6121                 if (buff[sg_used] == NULL) {
6122                         status = -ENOMEM;
6123                         goto cleanup1;
6124                 }
6125                 if (ioc->Request.Type.Direction & XFER_WRITE) {
6126                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6127                                 status = -EFAULT;
6128                                 goto cleanup1;
6129                         }
6130                 } else
6131                         memset(buff[sg_used], 0, sz);
6132                 left -= sz;
6133                 data_ptr += sz;
6134                 sg_used++;
6135         }
6136         c = cmd_alloc(h);
6137
6138         c->cmd_type = CMD_IOCTL_PEND;
6139         c->scsi_cmd = SCSI_CMD_BUSY;
6140         c->Header.ReplyQueue = 0;
6141         c->Header.SGList = (u8) sg_used;
6142         c->Header.SGTotal = cpu_to_le16(sg_used);
6143         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6144         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6145         if (ioc->buf_size > 0) {
6146                 int i;
6147                 for (i = 0; i < sg_used; i++) {
6148                         temp64 = pci_map_single(h->pdev, buff[i],
6149                                     buff_size[i], PCI_DMA_BIDIRECTIONAL);
6150                         if (dma_mapping_error(&h->pdev->dev,
6151                                                         (dma_addr_t) temp64)) {
6152                                 c->SG[i].Addr = cpu_to_le64(0);
6153                                 c->SG[i].Len = cpu_to_le32(0);
6154                                 hpsa_pci_unmap(h->pdev, c, i,
6155                                         PCI_DMA_BIDIRECTIONAL);
6156                                 status = -ENOMEM;
6157                                 goto cleanup0;
6158                         }
6159                         c->SG[i].Addr = cpu_to_le64(temp64);
6160                         c->SG[i].Len = cpu_to_le32(buff_size[i]);
6161                         c->SG[i].Ext = cpu_to_le32(0);
6162                 }
6163                 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6164         }
6165         status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6166         if (sg_used)
6167                 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6168         check_ioctl_unit_attention(h, c);
6169         if (status) {
6170                 status = -EIO;
6171                 goto cleanup0;
6172         }
6173
6174         /* Copy the error information out */
6175         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6176         if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6177                 status = -EFAULT;
6178                 goto cleanup0;
6179         }
6180         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6181                 int i;
6182
6183                 /* Copy the data out of the buffer we created */
6184                 BYTE __user *ptr = ioc->buf;
6185                 for (i = 0; i < sg_used; i++) {
6186                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
6187                                 status = -EFAULT;
6188                                 goto cleanup0;
6189                         }
6190                         ptr += buff_size[i];
6191                 }
6192         }
6193         status = 0;
6194 cleanup0:
6195         cmd_free(h, c);
6196 cleanup1:
6197         if (buff) {
6198                 int i;
6199
6200                 for (i = 0; i < sg_used; i++)
6201                         kfree(buff[i]);
6202                 kfree(buff);
6203         }
6204         kfree(buff_size);
6205         kfree(ioc);
6206         return status;
6207 }
6208
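/*
 * Illustration (standalone sketch, not driver code): the big-passthru
 * path carves the user buffer into chunks of at most malloc_size bytes,
 * one kernel buffer per scatter-gather entry.  The sizes below are made
 * up.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned int left = 10000, malloc_size = 4096;
        unsigned int sz, sg_used = 0;

        while (left) {
                sz = left > malloc_size ? malloc_size : left;
                printf("SG[%u]: %u bytes\n", sg_used, sz);
                left -= sz;
                sg_used++;
        }
        /* prints 4096, 4096 and 1808 bytes across three entries */
        return 0;
}
#endif
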
6209 static void check_ioctl_unit_attention(struct ctlr_info *h,
6210         struct CommandList *c)
6211 {
6212         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6213                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6214                 (void) check_for_unit_attention(h, c);
6215 }
6216
6217 /*
6218  * ioctl
6219  */
6220 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6221 {
6222         struct ctlr_info *h;
6223         void __user *argp = (void __user *)arg;
6224         int rc;
6225
6226         h = sdev_to_hba(dev);
6227
6228         switch (cmd) {
6229         case CCISS_DEREGDISK:
6230         case CCISS_REGNEWDISK:
6231         case CCISS_REGNEWD:
6232                 hpsa_scan_start(h->scsi_host);
6233                 return 0;
6234         case CCISS_GETPCIINFO:
6235                 return hpsa_getpciinfo_ioctl(h, argp);
6236         case CCISS_GETDRIVVER:
6237                 return hpsa_getdrivver_ioctl(h, argp);
6238         case CCISS_PASSTHRU:
6239                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6240                         return -EAGAIN;
6241                 rc = hpsa_passthru_ioctl(h, argp);
6242                 atomic_inc(&h->passthru_cmds_avail);
6243                 return rc;
6244         case CCISS_BIG_PASSTHRU:
6245                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6246                         return -EAGAIN;
6247                 rc = hpsa_big_passthru_ioctl(h, argp);
6248                 atomic_inc(&h->passthru_cmds_avail);
6249                 return rc;
6250         default:
6251                 return -ENOTTY;
6252         }
6253 }
6254
6255 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6256                                 u8 reset_type)
6257 {
6258         struct CommandList *c;
6259
6260         c = cmd_alloc(h);
6261
6262         /* fill_cmd can't fail here, no data buffer to map */
6263         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6264                 RAID_CTLR_LUNID, TYPE_MSG);
6265         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6266         c->waiting = NULL;
6267         enqueue_cmd_and_start_io(h, c);
6268         /* Don't wait for completion, the reset won't complete.  Don't free
6269          * the command either.  This is the last command we will send before
6270          * re-initializing everything, so it doesn't matter and won't leak.
6271          */
6273 }
6274
6275 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6276         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6277         int cmd_type)
6278 {
6279         int pci_dir = XFER_NONE;
6280         u64 tag; /* for commands to be aborted */
6281
6282         c->cmd_type = CMD_IOCTL_PEND;
6283         c->scsi_cmd = SCSI_CMD_BUSY;
6284         c->Header.ReplyQueue = 0;
6285         if (buff != NULL && size > 0) {
6286                 c->Header.SGList = 1;
6287                 c->Header.SGTotal = cpu_to_le16(1);
6288         } else {
6289                 c->Header.SGList = 0;
6290                 c->Header.SGTotal = cpu_to_le16(0);
6291         }
6292         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6293
6294         if (cmd_type == TYPE_CMD) {
6295                 switch (cmd) {
6296                 case HPSA_INQUIRY:
6297                         /* are we trying to read a vital product page */
6298                         if (page_code & VPD_PAGE) {
6299                                 c->Request.CDB[1] = 0x01;
6300                                 c->Request.CDB[2] = (page_code & 0xff);
6301                         }
6302                         c->Request.CDBLen = 6;
6303                         c->Request.type_attr_dir =
6304                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6305                         c->Request.Timeout = 0;
6306                         c->Request.CDB[0] = HPSA_INQUIRY;
6307                         c->Request.CDB[4] = size & 0xFF;
6308                         break;
6309                 case HPSA_REPORT_LOG:
6310                 case HPSA_REPORT_PHYS:
6311                         /* Talking to the controller, so it's a physical
6312                            command: mode = 00, target = 0.  Nothing to write.
6313                          */
6314                         c->Request.CDBLen = 12;
6315                         c->Request.type_attr_dir =
6316                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6317                         c->Request.Timeout = 0;
6318                         c->Request.CDB[0] = cmd;
6319                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6320                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6321                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6322                         c->Request.CDB[9] = size & 0xFF;
6323                         break;
6324                 case HPSA_CACHE_FLUSH:
6325                         c->Request.CDBLen = 12;
6326                         c->Request.type_attr_dir =
6327                                         TYPE_ATTR_DIR(cmd_type,
6328                                                 ATTR_SIMPLE, XFER_WRITE);
6329                         c->Request.Timeout = 0;
6330                         c->Request.CDB[0] = BMIC_WRITE;
6331                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6332                         c->Request.CDB[7] = (size >> 8) & 0xFF;
6333                         c->Request.CDB[8] = size & 0xFF;
6334                         break;
6335                 case TEST_UNIT_READY:
6336                         c->Request.CDBLen = 6;
6337                         c->Request.type_attr_dir =
6338                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6339                         c->Request.Timeout = 0;
6340                         break;
6341                 case HPSA_GET_RAID_MAP:
6342                         c->Request.CDBLen = 12;
6343                         c->Request.type_attr_dir =
6344                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6345                         c->Request.Timeout = 0;
6346                         c->Request.CDB[0] = HPSA_CISS_READ;
6347                         c->Request.CDB[1] = cmd;
6348                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6349                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6350                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6351                         c->Request.CDB[9] = size & 0xFF;
6352                         break;
6353                 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6354                         c->Request.CDBLen = 10;
6355                         c->Request.type_attr_dir =
6356                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6357                         c->Request.Timeout = 0;
6358                         c->Request.CDB[0] = BMIC_READ;
6359                         c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6360                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6361                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6362                         break;
6363                 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6364                         c->Request.CDBLen = 10;
6365                         c->Request.type_attr_dir =
6366                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6367                         c->Request.Timeout = 0;
6368                         c->Request.CDB[0] = BMIC_READ;
6369                         c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6370                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6371                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6372                         break;
6373                 default:
6374                         dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6375                         BUG();
6376                         return -1;
6377                 }
6378         } else if (cmd_type == TYPE_MSG) {
6379                 switch (cmd) {
6380
6381                 case  HPSA_PHYS_TARGET_RESET:
6382                         c->Request.CDBLen = 16;
6383                         c->Request.type_attr_dir =
6384                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6385                         c->Request.Timeout = 0; /* Don't time out */
6386                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6387                         c->Request.CDB[0] = HPSA_RESET;
6388                         c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6389                         /* Physical target reset needs no control bytes 4-7 */
6390                         c->Request.CDB[4] = 0x00;
6391                         c->Request.CDB[5] = 0x00;
6392                         c->Request.CDB[6] = 0x00;
6393                         c->Request.CDB[7] = 0x00;
6394                         break;
6395                 case  HPSA_DEVICE_RESET_MSG:
6396                         c->Request.CDBLen = 16;
6397                         c->Request.type_attr_dir =
6398                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6399                         c->Request.Timeout = 0; /* Don't time out */
6400                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6401                         c->Request.CDB[0] =  cmd;
6402                         c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6403                         /* If bytes 4-7 are zero, reset the LunID device */
6405                         c->Request.CDB[4] = 0x00;
6406                         c->Request.CDB[5] = 0x00;
6407                         c->Request.CDB[6] = 0x00;
6408                         c->Request.CDB[7] = 0x00;
6409                         break;
6410                 case  HPSA_ABORT_MSG:
6411                         memcpy(&tag, buff, sizeof(tag));
6412                         dev_dbg(&h->pdev->dev,
6413                                 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6414                                 tag, c->Header.tag);
6415                         c->Request.CDBLen = 16;
6416                         c->Request.type_attr_dir =
6417                                         TYPE_ATTR_DIR(cmd_type,
6418                                                 ATTR_SIMPLE, XFER_WRITE);
6419                         c->Request.Timeout = 0; /* Don't time out */
6420                         c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6421                         c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6422                         c->Request.CDB[2] = 0x00; /* reserved */
6423                         c->Request.CDB[3] = 0x00; /* reserved */
6424                         /* Tag to abort goes in CDB[4]-CDB[11] */
6425                         memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
6426                         c->Request.CDB[12] = 0x00; /* reserved */
6427                         c->Request.CDB[13] = 0x00; /* reserved */
6428                         c->Request.CDB[14] = 0x00; /* reserved */
6429                         c->Request.CDB[15] = 0x00; /* reserved */
6430                         break;
6431                 default:
6432                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
6433                                 cmd);
6434                         BUG();
6435                 }
6436         } else {
6437                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6438                 BUG();
6439         }
6440
6441         switch (GET_DIR(c->Request.type_attr_dir)) {
6442         case XFER_READ:
6443                 pci_dir = PCI_DMA_FROMDEVICE;
6444                 break;
6445         case XFER_WRITE:
6446                 pci_dir = PCI_DMA_TODEVICE;
6447                 break;
6448         case XFER_NONE:
6449                 pci_dir = PCI_DMA_NONE;
6450                 break;
6451         default:
6452                 pci_dir = PCI_DMA_BIDIRECTIONAL;
6453         }
6454         if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6455                 return -1;
6456         return 0;
6457 }
6458
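/*
 * Illustration (standalone sketch, not driver code): fill_cmd() stores
 * multi-byte CDB fields MSB-first, e.g. the 32-bit allocation length in
 * bytes 6..9 of the 12-byte report CDBs above.  The size below is made
 * up.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned char cdb[12] = { 0 };
        unsigned int size = 0x00012034;

        cdb[6] = (size >> 24) & 0xFF;   /* MSB */
        cdb[7] = (size >> 16) & 0xFF;
        cdb[8] = (size >> 8) & 0xFF;
        cdb[9] = size & 0xFF;           /* LSB */
        printf("%02x %02x %02x %02x\n", cdb[6], cdb[7], cdb[8], cdb[9]);
        /* prints 00 01 20 34 */
        return 0;
}
#endif
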
6459 /*
6460  * Map (physical) PCI mem into (virtual) kernel space
6461  */
6462 static void __iomem *remap_pci_mem(ulong base, ulong size)
6463 {
6464         ulong page_base = ((ulong) base) & PAGE_MASK;
6465         ulong page_offs = ((ulong) base) - page_base;
6466         void __iomem *page_remapped = ioremap_nocache(page_base,
6467                 page_offs + size);
6468
6469         return page_remapped ? (page_remapped + page_offs) : NULL;
6470 }
6471
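/*
 * Illustration (standalone sketch, not driver code): ioremap wants a
 * page-aligned base, so the helper rounds down to a page boundary and
 * re-adds the offset afterwards.  Assume 4 KiB pages; the address is
 * made up.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned long base = 0xfe501234UL;
        unsigned long page_mask = ~0xfffUL;             /* 4 KiB pages */
        unsigned long page_base = base & page_mask;     /* 0xfe501000 */
        unsigned long page_offs = base - page_base;     /* 0x234 */

        printf("page_base=%#lx page_offs=%#lx\n", page_base, page_offs);
        return 0;
}
#endif
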
6472 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6473 {
6474         return h->access.command_completed(h, q);
6475 }
6476
6477 static inline bool interrupt_pending(struct ctlr_info *h)
6478 {
6479         return h->access.intr_pending(h);
6480 }
6481
6482 static inline long interrupt_not_for_us(struct ctlr_info *h)
6483 {
6484         return (h->access.intr_pending(h) == 0) ||
6485                 (h->interrupts_enabled == 0);
6486 }
6487
6488 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6489         u32 raw_tag)
6490 {
6491         if (unlikely(tag_index >= h->nr_cmds)) {
6492                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6493                 return 1;
6494         }
6495         return 0;
6496 }
6497
6498 static inline void finish_cmd(struct CommandList *c)
6499 {
6500         dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6501         if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6502                         || c->cmd_type == CMD_IOACCEL2))
6503                 complete_scsi_command(c);
6504         else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6505                 complete(c->waiting);
6506 }
6507
6508 /* process completion of an indexed ("direct lookup") command */
6509 static inline void process_indexed_cmd(struct ctlr_info *h,
6510         u32 raw_tag)
6511 {
6512         u32 tag_index;
6513         struct CommandList *c;
6514
6515         tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6516         if (!bad_tag(h, tag_index, raw_tag)) {
6517                 c = h->cmd_pool + tag_index;
6518                 finish_cmd(c);
6519         }
6520 }
6521
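/*
 * Illustration (standalone sketch, not driver code): a "direct lookup"
 * tag carries the command-pool index in its upper bits, so completion
 * recovers the index with a right shift.  The shift value here is a
 * hypothetical stand-in for DIRECT_LOOKUP_SHIFT.
 */
#if 0
#include <stdio.h>

#define LOOKUP_SHIFT 3  /* hypothetical; the driver uses DIRECT_LOOKUP_SHIFT */

int main(void)
{
        unsigned int index = 57;
        unsigned int raw_tag = index << LOOKUP_SHIFT;   /* encode at submit */

        printf("%u\n", raw_tag >> LOOKUP_SHIFT);        /* decode: prints 57 */
        return 0;
}
#endif
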
6522 /* Some controllers, like p400, will give us one interrupt
6523  * after a soft reset, even if we turned interrupts off.
6524  * Only need to check for this in the hpsa_xxx_discard_completions
6525  * functions.
6526  */
6527 static int ignore_bogus_interrupt(struct ctlr_info *h)
6528 {
6529         if (likely(!reset_devices))
6530                 return 0;
6531
6532         if (likely(h->interrupts_enabled))
6533                 return 0;
6534
6535         dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6536                 "(known firmware bug).  Ignoring.\n");
6537
6538         return 1;
6539 }
6540
6541 /*
6542  * Convert &h->q[x] (passed to interrupt handlers) back to h.
6543  * Relies on (h->q[x] == x) being true for x such that
6544  * 0 <= x < MAX_REPLY_QUEUES.
6545  */
6546 static struct ctlr_info *queue_to_hba(u8 *queue)
6547 {
6548         return container_of((queue - *queue), struct ctlr_info, q[0]);
6549 }
6550
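/*
 * Illustration (standalone sketch, not driver code): because h->q[x]
 * holds the value x, (queue - *queue) points back at q[0], and
 * container_of() recovers the enclosing structure.  A userspace
 * analogue with a hypothetical struct:
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct hba {
        int id;
        unsigned char q[4];
};

int main(void)
{
        struct hba h = { .id = 42, .q = { 0, 1, 2, 3 } };
        unsigned char *queue = &h.q[2];         /* as handed to an ISR */
        struct hba *back = container_of(queue - *queue, struct hba, q[0]);

        printf("%d\n", back->id);       /* prints 42 */
        return 0;
}
#endif
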
6551 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6552 {
6553         struct ctlr_info *h = queue_to_hba(queue);
6554         u8 q = *(u8 *) queue;
6555         u32 raw_tag;
6556
6557         if (ignore_bogus_interrupt(h))
6558                 return IRQ_NONE;
6559
6560         if (interrupt_not_for_us(h))
6561                 return IRQ_NONE;
6562         h->last_intr_timestamp = get_jiffies_64();
6563         while (interrupt_pending(h)) {
6564                 raw_tag = get_next_completion(h, q);
6565                 while (raw_tag != FIFO_EMPTY)
6566                         raw_tag = next_command(h, q);
6567         }
6568         return IRQ_HANDLED;
6569 }
6570
6571 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6572 {
6573         struct ctlr_info *h = queue_to_hba(queue);
6574         u32 raw_tag;
6575         u8 q = *(u8 *) queue;
6576
6577         if (ignore_bogus_interrupt(h))
6578                 return IRQ_NONE;
6579
6580         h->last_intr_timestamp = get_jiffies_64();
6581         raw_tag = get_next_completion(h, q);
6582         while (raw_tag != FIFO_EMPTY)
6583                 raw_tag = next_command(h, q);
6584         return IRQ_HANDLED;
6585 }
6586
6587 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6588 {
6589         struct ctlr_info *h = queue_to_hba((u8 *) queue);
6590         u32 raw_tag;
6591         u8 q = *(u8 *) queue;
6592
6593         if (interrupt_not_for_us(h))
6594                 return IRQ_NONE;
6595         h->last_intr_timestamp = get_jiffies_64();
6596         while (interrupt_pending(h)) {
6597                 raw_tag = get_next_completion(h, q);
6598                 while (raw_tag != FIFO_EMPTY) {
6599                         process_indexed_cmd(h, raw_tag);
6600                         raw_tag = next_command(h, q);
6601                 }
6602         }
6603         return IRQ_HANDLED;
6604 }
6605
6606 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6607 {
6608         struct ctlr_info *h = queue_to_hba(queue);
6609         u32 raw_tag;
6610         u8 q = *(u8 *) queue;
6611
6612         h->last_intr_timestamp = get_jiffies_64();
6613         raw_tag = get_next_completion(h, q);
6614         while (raw_tag != FIFO_EMPTY) {
6615                 process_indexed_cmd(h, raw_tag);
6616                 raw_tag = next_command(h, q);
6617         }
6618         return IRQ_HANDLED;
6619 }
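/*
 * The four handlers above differ along two axes: the INTx variants must
 * check interrupt_not_for_us() because the line may be shared, while an
 * MSI/MSI-X vector is exclusively ours; and the _discard_completions
 * variants drain the reply queues without completing commands, which is
 * only wanted around a controller reset.
 */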
6620
6621 /* Send a message CDB to the firmware. Careful, this only works
6622  * in simple mode, not performant mode due to the tag lookup.
6623  * We only ever use this immediately after a controller reset.
6624  */
6625 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6626                         unsigned char type)
6627 {
6628         struct Command {
6629                 struct CommandListHeader CommandHeader;
6630                 struct RequestBlock Request;
6631                 struct ErrDescriptor ErrorDescriptor;
6632         };
6633         struct Command *cmd;
6634         static const size_t cmd_sz = sizeof(*cmd) +
6635                                         sizeof(struct ErrorInfo);
6636         dma_addr_t paddr64;
6637         __le32 paddr32;
6638         u32 tag;
6639         void __iomem *vaddr;
6640         int i, err;
6641
6642         vaddr = pci_ioremap_bar(pdev, 0);
6643         if (vaddr == NULL)
6644                 return -ENOMEM;
6645
6646         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6647          * CCISS commands, so they must be allocated from the lower 4GiB of
6648          * memory.
6649          */
6650         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6651         if (err) {
6652                 iounmap(vaddr);
6653                 return err;
6654         }
6655
6656         cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6657         if (cmd == NULL) {
6658                 iounmap(vaddr);
6659                 return -ENOMEM;
6660         }
6661
6662         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
6663          * although there's no guarantee, we assume that the address is at
6664          * least 4-byte aligned (most likely, it's page-aligned).
6665          */
6666         paddr32 = cpu_to_le32(paddr64);
6667
6668         cmd->CommandHeader.ReplyQueue = 0;
6669         cmd->CommandHeader.SGList = 0;
6670         cmd->CommandHeader.SGTotal = cpu_to_le16(0);
6671         cmd->CommandHeader.tag = cpu_to_le64(paddr64);
6672         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6673
6674         cmd->Request.CDBLen = 16;
6675         cmd->Request.type_attr_dir =
6676                         TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
6677         cmd->Request.Timeout = 0; /* Don't time out */
6678         cmd->Request.CDB[0] = opcode;
6679         cmd->Request.CDB[1] = type;
6680         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
6681         cmd->ErrorDescriptor.Addr =
6682                         cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
6683         cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
6684
6685         writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
6686
6687         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6688                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
6689                 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
6690                         break;
6691                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6692         }
6693
6694         iounmap(vaddr);
6695
6696         /* we leak the DMA buffer here ... no choice since the controller could
6697          *  still complete the command.
6698          */
6699         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6700                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6701                         opcode, type);
6702                 return -ETIMEDOUT;
6703         }
6704
6705         pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6706
6707         if (tag & HPSA_ERROR_BIT) {
6708                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6709                         opcode, type);
6710                 return -EIO;
6711         }
6712
6713         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6714                 opcode, type);
6715         return 0;
6716 }
6717
6718 #define hpsa_noop(p) hpsa_message(p, 3, 0)
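/* e.g. hpsa_noop(pdev) sends message opcode 3 (a no-op) with subtype 0;
 * hpsa_init_reset_devices() below retries it to check that the controller
 * responds after a hard reset.
 */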
6719
6720 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
6721         void __iomem *vaddr, u32 use_doorbell)
6722 {
6723
6724         if (use_doorbell) {
6725                 /* For everything after the P600, the PCI power state method
6726                  * of resetting the controller doesn't work, so we have this
6727                  * other way using the doorbell register.
6728                  */
6729                 dev_info(&pdev->dev, "using doorbell to reset controller\n");
6730                 writel(use_doorbell, vaddr + SA5_DOORBELL);
6731
6732                 /* PMC hardware guys tell us we need a 10 second delay after
6733                  * doorbell reset and before any attempt to talk to the board
6734                  * at all to ensure that this actually works and doesn't fall
6735                  * over in some weird corner cases.
6736                  */
6737                 msleep(10000);
6738         } else { /* Try to do it the PCI power state way */
6739
6740                 /* Quoting from the Open CISS Specification: "The Power
6741                  * Management Control/Status Register (CSR) controls the power
6742                  * state of the device.  The normal operating state is D0,
6743                  * CSR=00h.  The software off state is D3, CSR=03h.  To reset
6744                  * the controller, place the interface device in D3 then to D0,
6745                  * this causes a secondary PCI reset which will reset the
6746                  * controller." */
6747
6748                 int rc = 0;
6749
6750                 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
6751
6752                 /* enter the D3hot power management state */
6753                 rc = pci_set_power_state(pdev, PCI_D3hot);
6754                 if (rc)
6755                         return rc;
6756
6757                 msleep(500);
6758
6759                 /* enter the D0 power management state */
6760                 rc = pci_set_power_state(pdev, PCI_D0);
6761                 if (rc)
6762                         return rc;
6763
6764                 /*
6765                  * The P600 requires a small delay when changing states.
6766                  * Otherwise we may think the board did not reset and we bail.
6767                  * This is for kdump only and is particular to the P600.
6768                  */
6769                 msleep(500);
6770         }
6771         return 0;
6772 }
6773
6774 static void init_driver_version(char *driver_version, int len)
6775 {
6776         memset(driver_version, 0, len);
6777         strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
6778 }
6779
6780 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
6781 {
6782         char *driver_version;
6783         int i, size = sizeof(cfgtable->driver_version);
6784
6785         driver_version = kmalloc(size, GFP_KERNEL);
6786         if (!driver_version)
6787                 return -ENOMEM;
6788
6789         init_driver_version(driver_version, size);
6790         for (i = 0; i < size; i++)
6791                 writeb(driver_version[i], &cfgtable->driver_version[i]);
6792         kfree(driver_version);
6793         return 0;
6794 }
6795
6796 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6797                                           unsigned char *driver_ver)
6798 {
6799         int i;
6800
6801         for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6802                 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6803 }
6804
6805 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
6806 {
6807
6808         char *driver_ver, *old_driver_ver;
6809         int rc, size = sizeof(cfgtable->driver_version);
6810
6811         old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6812         if (!old_driver_ver)
6813                 return -ENOMEM;
6814         driver_ver = old_driver_ver + size;
6815
6816         /* After a reset, the 32 bytes of "driver version" in the cfgtable
6817          * should have been changed, otherwise we know the reset failed.
6818          */
6819         init_driver_version(old_driver_ver, size);
6820         read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6821         rc = !memcmp(driver_ver, old_driver_ver, size);
6822         kfree(old_driver_ver);
6823         return rc;
6824 }
6825 /* This does a hard reset of the controller using PCI power management
6826  * states or using the doorbell register.
6827  */
6828 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
6829 {
6830         u64 cfg_offset;
6831         u32 cfg_base_addr;
6832         u64 cfg_base_addr_index;
6833         void __iomem *vaddr;
6834         unsigned long paddr;
6835         u32 misc_fw_support;
6836         int rc;
6837         struct CfgTable __iomem *cfgtable;
6838         u32 use_doorbell;
6839         u16 command_register;
6840
6841         /* For controllers as old as the P600, this is very nearly
6842          * the same thing as
6843          *
6844          * pci_save_state(pci_dev);
6845          * pci_set_power_state(pci_dev, PCI_D3hot);
6846          * pci_set_power_state(pci_dev, PCI_D0);
6847          * pci_restore_state(pci_dev);
6848          *
6849          * For controllers newer than the P600, the pci power state
6850          * method of resetting doesn't work so we have another way
6851          * using the doorbell register.
6852          */
6853
6854         if (!ctlr_is_resettable(board_id)) {
6855                 dev_warn(&pdev->dev, "Controller not resettable\n");
6856                 return -ENODEV;
6857         }
6858
6859         /* if controller is soft- but not hard resettable... */
6860         if (!ctlr_is_hard_resettable(board_id))
6861                 return -ENOTSUPP; /* try soft reset later. */
6862
6863         /* Save the PCI command register */
6864         pci_read_config_word(pdev, 4, &command_register);
6865         pci_save_state(pdev);
6866
6867         /* find the first memory BAR, so we can find the cfg table */
6868         rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6869         if (rc)
6870                 return rc;
6871         vaddr = remap_pci_mem(paddr, 0x250);
6872         if (!vaddr)
6873                 return -ENOMEM;
6874
6875         /* find cfgtable in order to check if reset via doorbell is supported */
6876         rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6877                                         &cfg_base_addr_index, &cfg_offset);
6878         if (rc)
6879                 goto unmap_vaddr;
6880         cfgtable = remap_pci_mem(pci_resource_start(pdev,
6881                        cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6882         if (!cfgtable) {
6883                 rc = -ENOMEM;
6884                 goto unmap_vaddr;
6885         }
6886         rc = write_driver_ver_to_cfgtable(cfgtable);
6887         if (rc)
6888                 goto unmap_cfgtable;
6889
6890         /* If reset via doorbell register is supported, use that.
6891          * There are two such methods.  Favor the newest method.
6892          */
6893         misc_fw_support = readl(&cfgtable->misc_fw_support);
6894         use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6895         if (use_doorbell) {
6896                 use_doorbell = DOORBELL_CTLR_RESET2;
6897         } else {
6898                 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6899                 if (use_doorbell) {
6900                         dev_warn(&pdev->dev,
6901                                 "Soft reset not supported. Firmware update is required.\n");
6902                         rc = -ENOTSUPP; /* try soft reset */
6903                         goto unmap_cfgtable;
6904                 }
6905         }
6906
6907         rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6908         if (rc)
6909                 goto unmap_cfgtable;
6910
6911         pci_restore_state(pdev);
6912         pci_write_config_word(pdev, 4, command_register);
6913
6914         /* Some devices (notably the HP Smart Array 5i Controller)
6915            need a little pause here */
6916         msleep(HPSA_POST_RESET_PAUSE_MSECS);
6917
6918         rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6919         if (rc) {
6920                 dev_warn(&pdev->dev,
6921                         "Failed waiting for board to become ready after hard reset\n");
6922                 goto unmap_cfgtable;
6923         }
6924
6925         rc = controller_reset_failed(cfgtable);
6926         if (rc < 0)
6927                 goto unmap_cfgtable;
6928         if (rc) {
6929                 dev_warn(&pdev->dev, "Unable to successfully reset "
6930                         "controller. Will try soft reset.\n");
6931                 rc = -ENOTSUPP;
6932         } else {
6933                 dev_info(&pdev->dev, "board ready after hard reset.\n");
6934         }
6935
6936 unmap_cfgtable:
6937         iounmap(cfgtable);
6938
6939 unmap_vaddr:
6940         iounmap(vaddr);
6941         return rc;
6942 }
6943
6944 /*
6945  * We cannot read the structure directly; for portability we must use
6946  * the io functions.
6947  * This is for debug only.
6948  */
6949 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6950 {
6951 #ifdef HPSA_DEBUG
6952         int i;
6953         char temp_name[17];
6954
6955         dev_info(dev, "Controller Configuration information\n");
6956         dev_info(dev, "------------------------------------\n");
6957         for (i = 0; i < 4; i++)
6958                 temp_name[i] = readb(&(tb->Signature[i]));
6959         temp_name[4] = '\0';
6960         dev_info(dev, "   Signature = %s\n", temp_name);
6961         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
6962         dev_info(dev, "   Transport methods supported = 0x%x\n",
6963                readl(&(tb->TransportSupport)));
6964         dev_info(dev, "   Transport methods active = 0x%x\n",
6965                readl(&(tb->TransportActive)));
6966         dev_info(dev, "   Requested transport Method = 0x%x\n",
6967                readl(&(tb->HostWrite.TransportRequest)));
6968         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
6969                readl(&(tb->HostWrite.CoalIntDelay)));
6970         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
6971                readl(&(tb->HostWrite.CoalIntCount)));
6972         dev_info(dev, "   Max outstanding commands = %d\n",
6973                readl(&(tb->CmdsOutMax)));
6974         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6975         for (i = 0; i < 16; i++)
6976                 temp_name[i] = readb(&(tb->ServerName[i]));
6977         temp_name[16] = '\0';
6978         dev_info(dev, "   Server Name = %s\n", temp_name);
6979         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
6980                 readl(&(tb->HeartBeat)));
6981 #endif                          /* HPSA_DEBUG */
6982 }
6983
6984 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6985 {
6986         int i, offset, mem_type, bar_type;
6987
6988         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6989                 return 0;
6990         offset = 0;
6991         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6992                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6993                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6994                         offset += 4;
6995                 else {
6996                         mem_type = pci_resource_flags(pdev, i) &
6997                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6998                         switch (mem_type) {
6999                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
7000                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7001                                 offset += 4;    /* 32 bit */
7002                                 break;
7003                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
7004                                 offset += 8;
7005                                 break;
7006                         default:        /* reserved in PCI 2.2 */
7007                                 dev_warn(&pdev->dev,
7008                                        "base address is invalid\n");
7009                                 return -1;
7010                                 break;
7011                         }
7012                 }
7013                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7014                         return i + 1;
7015         }
7016         return -1;
7017 }
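/*
 * Worked example: BAR registers are 4 bytes apart starting at
 * PCI_BASE_ADDRESS_0 (config offset 0x10), but a 64-bit memory BAR spans
 * two registers.  If resource 0 is a 64-bit memory BAR, looking up the BAR
 * at config offset 0x18 accumulates offset 8 at i == 0 and returns
 * resource index 1.
 */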
7018
7019 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7020 {
7021         if (h->msix_vector) {
7022                 if (h->pdev->msix_enabled)
7023                         pci_disable_msix(h->pdev);
7024                 h->msix_vector = 0;
7025         } else if (h->msi_vector) {
7026                 if (h->pdev->msi_enabled)
7027                         pci_disable_msi(h->pdev);
7028                 h->msi_vector = 0;
7029         }
7030 }
7031
7032 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7033  * controllers that are capable. If not, we use legacy INTx mode.
7034  */
7035 static void hpsa_interrupt_mode(struct ctlr_info *h)
7036 {
7037 #ifdef CONFIG_PCI_MSI
7038         int err, i;
7039         struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7040
7041         for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7042                 hpsa_msix_entries[i].vector = 0;
7043                 hpsa_msix_entries[i].entry = i;
7044         }
7045
7046         /* Some boards advertise MSI but don't really support it */
7047         if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7048             (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
7049                 goto default_int_mode;
7050         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
7051                 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
7052                 h->msix_vector = MAX_REPLY_QUEUES;
7053                 if (h->msix_vector > num_online_cpus())
7054                         h->msix_vector = num_online_cpus();
7055                 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7056                                             1, h->msix_vector);
7057                 if (err < 0) {
7058                         dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7059                         h->msix_vector = 0;
7060                         goto single_msi_mode;
7061                 } else if (err < h->msix_vector) {
7062                         dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
7063                                "available\n", err);
7064                 }
7065                 h->msix_vector = err;
7066                 for (i = 0; i < h->msix_vector; i++)
7067                         h->intr[i] = hpsa_msix_entries[i].vector;
7068                 return;
7069         }
7070 single_msi_mode:
7071         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
7072                 dev_info(&h->pdev->dev, "MSI capable controller\n");
7073                 if (!pci_enable_msi(h->pdev))
7074                         h->msi_vector = 1;
7075                 else
7076                         dev_warn(&h->pdev->dev, "MSI init failed\n");
7077         }
7078 default_int_mode:
7079 #endif                          /* CONFIG_PCI_MSI */
7080         /* if we get here we're going to use the default interrupt mode */
7081         h->intr[h->intr_mode] = h->pdev->irq;
7082 }
7083
7084 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
7085 {
7086         int i;
7087         u32 subsystem_vendor_id, subsystem_device_id;
7088
7089         subsystem_vendor_id = pdev->subsystem_vendor;
7090         subsystem_device_id = pdev->subsystem_device;
7091         *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7092                     subsystem_vendor_id;
7093
7094         for (i = 0; i < ARRAY_SIZE(products); i++)
7095                 if (*board_id == products[i].board_id)
7096                         return i;
7097
7098         if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7099                 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7100                 !hpsa_allow_any) {
7101                 dev_warn(&pdev->dev, "unrecognized board ID: "
7102                         "0x%08x, ignoring.\n", *board_id);
7103                 return -ENODEV;
7104         }
7105         return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7106 }
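/*
 * Worked example (IDs used only for illustration): board_id packs the PCI
 * subsystem IDs as (subsystem_device << 16) | subsystem_vendor, so a board
 * with subsystem vendor 0x103C and subsystem device 0x3241 yields a
 * board_id of 0x3241103C, which is then matched against the products[]
 * table.
 */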
7107
7108 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7109                                     unsigned long *memory_bar)
7110 {
7111         int i;
7112
7113         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7114                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7115                         /* addressing mode bits already removed */
7116                         *memory_bar = pci_resource_start(pdev, i);
7117                         dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7118                                 *memory_bar);
7119                         return 0;
7120                 }
7121         dev_warn(&pdev->dev, "no memory BAR found\n");
7122         return -ENODEV;
7123 }
7124
7125 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7126                                      int wait_for_ready)
7127 {
7128         int i, iterations;
7129         u32 scratchpad;
7130         if (wait_for_ready)
7131                 iterations = HPSA_BOARD_READY_ITERATIONS;
7132         else
7133                 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7134
7135         for (i = 0; i < iterations; i++) {
7136                 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7137                 if (wait_for_ready) {
7138                         if (scratchpad == HPSA_FIRMWARE_READY)
7139                                 return 0;
7140                 } else {
7141                         if (scratchpad != HPSA_FIRMWARE_READY)
7142                                 return 0;
7143                 }
7144                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7145         }
7146         dev_warn(&pdev->dev, "board not ready, timed out.\n");
7147         return -ENODEV;
7148 }
7149
7150 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7151                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7152                                u64 *cfg_offset)
7153 {
7154         *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7155         *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7156         *cfg_base_addr &= (u32) 0x0000ffff;
7157         *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7158         if (*cfg_base_addr_index == -1) {
7159                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7160                 return -ENODEV;
7161         }
7162         return 0;
7163 }
7164
7165 static void hpsa_free_cfgtables(struct ctlr_info *h)
7166 {
7167         if (h->transtable) {
7168                 iounmap(h->transtable);
7169                 h->transtable = NULL;
7170         }
7171         if (h->cfgtable) {
7172                 iounmap(h->cfgtable);
7173                 h->cfgtable = NULL;
7174         }
7175 }
7176
7177 /* Find and map CISS config table and transfer table
7178  * several items must be unmapped (freed) later
7179  */
7180 static int hpsa_find_cfgtables(struct ctlr_info *h)
7181 {
7182         u64 cfg_offset;
7183         u32 cfg_base_addr;
7184         u64 cfg_base_addr_index;
7185         u32 trans_offset;
7186         int rc;
7187
7188         rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7189                 &cfg_base_addr_index, &cfg_offset);
7190         if (rc)
7191                 return rc;
7192         h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7193                        cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7194         if (!h->cfgtable) {
7195                 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7196                 return -ENOMEM;
7197         }
7198         rc = write_driver_ver_to_cfgtable(h->cfgtable);
7199         if (rc)
7200                 return rc;
7201         /* Find performant mode table. */
7202         trans_offset = readl(&h->cfgtable->TransMethodOffset);
7203         h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7204                                 cfg_base_addr_index)+cfg_offset+trans_offset,
7205                                 sizeof(*h->transtable));
7206         if (!h->transtable) {
7207                 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7208                 hpsa_free_cfgtables(h);
7209                 return -ENOMEM;
7210         }
7211         return 0;
7212 }
7213
7214 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7215 {
7216 #define MIN_MAX_COMMANDS 16
7217         BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7218
7219         h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7220
7221         /* Limit commands in memory limited kdump scenario. */
7222         if (reset_devices && h->max_commands > 32)
7223                 h->max_commands = 32;
7224
7225         if (h->max_commands < MIN_MAX_COMMANDS) {
7226                 dev_warn(&h->pdev->dev,
7227                         "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
7228                         h->max_commands,
7229                         MIN_MAX_COMMANDS);
7230                 h->max_commands = MIN_MAX_COMMANDS;
7231         }
7232 }
7233
7234 /* If the controller reports that the total max sg entries is greater than 512,
7235  * then we know that chained SG blocks work.  (Original smart arrays did not
7236  * support chained SG blocks and would return zero for max sg entries.)
7237  */
7238 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7239 {
7240         return h->maxsgentries > 512;
7241 }
7242
7243 /* Interrogate the hardware for some limits:
7244  * max commands, max SG elements without chaining, and with chaining,
7245  * SG chain block size, etc.
7246  */
7247 static void hpsa_find_board_params(struct ctlr_info *h)
7248 {
7249         hpsa_get_max_perf_mode_cmds(h);
7250         h->nr_cmds = h->max_commands;
7251         h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7252         h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7253         if (hpsa_supports_chained_sg_blocks(h)) {
7254                 /* Limit in-command s/g elements to 32 to save dma'able memory. */
7255                 h->max_cmd_sg_entries = 32;
7256                 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7257                 h->maxsgentries--; /* save one for chain pointer */
7258         } else {
7259                 /*
7260                  * Original smart arrays supported at most 31 s/g entries
7261                  * embedded inline in the command (trying to use more
7262                  * would lock up the controller)
7263                  */
7264                 h->max_cmd_sg_entries = 31;
7265                 h->maxsgentries = 31; /* default to traditional values */
7266                 h->chainsize = 0;
7267         }
7268
7269         /* Find out what task management functions are supported and cache them */
7270         h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7271         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7272                 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7273         if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7274                 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7275         if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7276                 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7277 }
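/*
 * Worked example (reported value hypothetical): a controller advertising
 * 2048 scatter-gather entries supports chaining, so we embed at most 32 SG
 * elements in the command itself, leave chainsize = 2048 - 32 = 2016
 * entries for chain blocks, and expose 2047 entries after reserving one
 * slot for the chain pointer.
 */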
7278
7279 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7280 {
7281         if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7282                 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7283                 return false;
7284         }
7285         return true;
7286 }
7287
7288 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7289 {
7290         u32 driver_support;
7291
7292         driver_support = readl(&(h->cfgtable->driver_support));
7293         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7294 #ifdef CONFIG_X86
7295         driver_support |= ENABLE_SCSI_PREFETCH;
7296 #endif
7297         driver_support |= ENABLE_UNIT_ATTN;
7298         writel(driver_support, &(h->cfgtable->driver_support));
7299 }
7300
7301 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
7302  * in a prefetch beyond physical memory.
7303  */
7304 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7305 {
7306         u32 dma_prefetch;
7307
7308         if (h->board_id != 0x3225103C)
7309                 return;
7310         dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7311         dma_prefetch |= 0x8000;
7312         writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7313 }
7314
7315 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7316 {
7317         int i;
7318         u32 doorbell_value;
7319         unsigned long flags;
7320         /* wait until the clear_event_notify bit 6 is cleared by the controller. */
7321         for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7322                 spin_lock_irqsave(&h->lock, flags);
7323                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7324                 spin_unlock_irqrestore(&h->lock, flags);
7325                 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7326                         goto done;
7327                 /* delay and try again */
7328                 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7329         }
7330         return -ENODEV;
7331 done:
7332         return 0;
7333 }
7334
7335 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7336 {
7337         int i;
7338         u32 doorbell_value;
7339         unsigned long flags;
7340
7341         /* under certain very rare conditions, this can take a while.
7342          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7343          * as we enter this code.)
7344          */
7345         for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7346                 if (h->remove_in_progress)
7347                         goto done;
7348                 spin_lock_irqsave(&h->lock, flags);
7349                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7350                 spin_unlock_irqrestore(&h->lock, flags);
7351                 if (!(doorbell_value & CFGTBL_ChangeReq))
7352                         goto done;
7353                 /* delay and try again */
7354                 msleep(MODE_CHANGE_WAIT_INTERVAL);
7355         }
7356         return -ENODEV;
7357 done:
7358         return 0;
7359 }
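/*
 * Timing sketch: both waits above poll the doorbell register rather than
 * sleeping on an interrupt.  With the intervals defined near the top of
 * this file, a clear-event ack is polled for up to 30000 * 20 ms = 600 s
 * and a mode change for up to 2000 * 10 ms = 20 s.
 */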
7360
7361 /* return -ENODEV or other reason on error, 0 on success */
7362 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7363 {
7364         u32 trans_support;
7365
7366         trans_support = readl(&(h->cfgtable->TransportSupport));
7367         if (!(trans_support & SIMPLE_MODE))
7368                 return -ENOTSUPP;
7369
7370         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7371
7372         /* Update the field, and then ring the doorbell */
7373         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7374         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7375         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7376         if (hpsa_wait_for_mode_change_ack(h))
7377                 goto error;
7378         print_cfg_table(&h->pdev->dev, h->cfgtable);
7379         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7380                 goto error;
7381         h->transMethod = CFGTBL_Trans_Simple;
7382         return 0;
7383 error:
7384         dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7385         return -ENODEV;
7386 }
7387
7388 /* free items allocated or mapped by hpsa_pci_init */
7389 static void hpsa_free_pci_init(struct ctlr_info *h)
7390 {
7391         hpsa_free_cfgtables(h);                 /* pci_init 4 */
7392         iounmap(h->vaddr);                      /* pci_init 3 */
7393         h->vaddr = NULL;
7394         hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
7395         /*
7396          * call pci_disable_device before pci_release_regions per
7397          * Documentation/PCI/pci.txt
7398          */
7399         pci_disable_device(h->pdev);            /* pci_init 1 */
7400         pci_release_regions(h->pdev);           /* pci_init 2 */
7401 }
7402
7403 /* several items must be freed later */
7404 static int hpsa_pci_init(struct ctlr_info *h)
7405 {
7406         int prod_index, err;
7407
7408         prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7409         if (prod_index < 0)
7410                 return prod_index;
7411         h->product_name = products[prod_index].product_name;
7412         h->access = *(products[prod_index].access);
7413
7414         h->needs_abort_tags_swizzled =
7415                 ctlr_needs_abort_tags_swizzled(h->board_id);
7416
7417         pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7418                                PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7419
7420         err = pci_enable_device(h->pdev);
7421         if (err) {
7422                 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7423                 pci_disable_device(h->pdev);
7424                 return err;
7425         }
7426
7427         err = pci_request_regions(h->pdev, HPSA);
7428         if (err) {
7429                 dev_err(&h->pdev->dev,
7430                         "failed to obtain PCI resources\n");
7431                 pci_disable_device(h->pdev);
7432                 return err;
7433         }
7434
7435         pci_set_master(h->pdev);
7436
7437         hpsa_interrupt_mode(h);
7438         err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7439         if (err)
7440                 goto clean2;    /* intmode+region, pci */
7441         h->vaddr = remap_pci_mem(h->paddr, 0x250);
7442         if (!h->vaddr) {
7443                 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7444                 err = -ENOMEM;
7445                 goto clean2;    /* intmode+region, pci */
7446         }
7447         err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7448         if (err)
7449                 goto clean3;    /* vaddr, intmode+region, pci */
7450         err = hpsa_find_cfgtables(h);
7451         if (err)
7452                 goto clean3;    /* vaddr, intmode+region, pci */
7453         hpsa_find_board_params(h);
7454
7455         if (!hpsa_CISS_signature_present(h)) {
7456                 err = -ENODEV;
7457                 goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7458         }
7459         hpsa_set_driver_support_bits(h);
7460         hpsa_p600_dma_prefetch_quirk(h);
7461         err = hpsa_enter_simple_mode(h);
7462         if (err)
7463                 goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7464         return 0;
7465
7466 clean4: /* cfgtables, vaddr, intmode+region, pci */
7467         hpsa_free_cfgtables(h);
7468 clean3: /* vaddr, intmode+region, pci */
7469         iounmap(h->vaddr);
7470         h->vaddr = NULL;
7471 clean2: /* intmode+region, pci */
7472         hpsa_disable_interrupt_mode(h);
7473         /*
7474          * call pci_disable_device before pci_release_regions per
7475          * Documentation/PCI/pci.txt
7476          */
7477         pci_disable_device(h->pdev);
7478         pci_release_regions(h->pdev);
7479         return err;
7480 }
7481
7482 static void hpsa_hba_inquiry(struct ctlr_info *h)
7483 {
7484         int rc;
7485
7486 #define HBA_INQUIRY_BYTE_COUNT 64
7487         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7488         if (!h->hba_inquiry_data)
7489                 return;
7490         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7491                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7492         if (rc != 0) {
7493                 kfree(h->hba_inquiry_data);
7494                 h->hba_inquiry_data = NULL;
7495         }
7496 }
7497
7498 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7499 {
7500         int rc, i;
7501         void __iomem *vaddr;
7502
7503         if (!reset_devices)
7504                 return 0;
7505
7506         /* The kdump kernel is loading and we don't know what state the
7507          * pci interface is in. dev->enable_cnt is zero, so we call
7508          * enable+disable, wait a while, and switch it back on.
7509          */
7510         rc = pci_enable_device(pdev);
7511         if (rc) {
7512                 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7513                 return -ENODEV;
7514         }
7515         pci_disable_device(pdev);
7516         msleep(260);                    /* a randomly chosen number */
7517         rc = pci_enable_device(pdev);
7518         if (rc) {
7519                 dev_warn(&pdev->dev, "failed to enable device.\n");
7520                 return -ENODEV;
7521         }
7522
7523         pci_set_master(pdev);
7524
7525         vaddr = pci_ioremap_bar(pdev, 0);
7526         if (vaddr == NULL) {
7527                 rc = -ENOMEM;
7528                 goto out_disable;
7529         }
7530         writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7531         iounmap(vaddr);
7532
7533         /* Reset the controller with a PCI power-cycle or via doorbell */
7534         rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7535
7536         /* -ENOTSUPP here means we cannot reset the controller
7537          * but it's already (and still) up and running in
7538          * "performant mode".  Or, it might be 640x, which can't reset
7539          * due to concerns about shared bbwc between 6402/6404 pair.
7540          */
7541         if (rc)
7542                 goto out_disable;
7543
7544         /* Now try to get the controller to respond to a no-op */
7545         dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7546         for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7547                 if (hpsa_noop(pdev) == 0)
7548                         break;
7549                 else
7550                         dev_warn(&pdev->dev, "no-op failed%s\n",
7551                                         (i < 11 ? "; re-trying" : ""));
7552         }
7553
7554 out_disable:
7555
7556         pci_disable_device(pdev);
7557         return rc;
7558 }
7559
7560 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7561 {
7562         kfree(h->cmd_pool_bits);
7563         h->cmd_pool_bits = NULL;
7564         if (h->cmd_pool) {
7565                 pci_free_consistent(h->pdev,
7566                                 h->nr_cmds * sizeof(struct CommandList),
7567                                 h->cmd_pool,
7568                                 h->cmd_pool_dhandle);
7569                 h->cmd_pool = NULL;
7570                 h->cmd_pool_dhandle = 0;
7571         }
7572         if (h->errinfo_pool) {
7573                 pci_free_consistent(h->pdev,
7574                                 h->nr_cmds * sizeof(struct ErrorInfo),
7575                                 h->errinfo_pool,
7576                                 h->errinfo_pool_dhandle);
7577                 h->errinfo_pool = NULL;
7578                 h->errinfo_pool_dhandle = 0;
7579         }
7580 }
7581
7582 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7583 {
7584         h->cmd_pool_bits = kzalloc(
7585                 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7586                 sizeof(unsigned long), GFP_KERNEL);
7587         h->cmd_pool = pci_alloc_consistent(h->pdev,
7588                     h->nr_cmds * sizeof(*h->cmd_pool),
7589                     &(h->cmd_pool_dhandle));
7590         h->errinfo_pool = pci_alloc_consistent(h->pdev,
7591                     h->nr_cmds * sizeof(*h->errinfo_pool),
7592                     &(h->errinfo_pool_dhandle));
7593         if ((h->cmd_pool_bits == NULL)
7594             || (h->cmd_pool == NULL)
7595             || (h->errinfo_pool == NULL)) {
7596                 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
7597                 goto clean_up;
7598         }
7599         hpsa_preinitialize_commands(h);
7600         return 0;
7601 clean_up:
7602         hpsa_free_cmd_pool(h);
7603         return -ENOMEM;
7604 }
7605
7606 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7607 {
7608         int i, cpu;
7609
7610         cpu = cpumask_first(cpu_online_mask);
7611         for (i = 0; i < h->msix_vector; i++) {
7612                 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
7613                 cpu = cpumask_next(cpu, cpu_online_mask);
7614         }
7615 }
7616
7617 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7618 static void hpsa_free_irqs(struct ctlr_info *h)
7619 {
7620         int i;
7621
7622         if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7623                 /* Single reply queue, only one irq to free */
7624                 i = h->intr_mode;
7625                 irq_set_affinity_hint(h->intr[i], NULL);
7626                 free_irq(h->intr[i], &h->q[i]);
7627                 h->q[i] = 0;
7628                 return;
7629         }
7630
7631         for (i = 0; i < h->msix_vector; i++) {
7632                 irq_set_affinity_hint(h->intr[i], NULL);
7633                 free_irq(h->intr[i], &h->q[i]);
7634                 h->q[i] = 0;
7635         }
7636         for (; i < MAX_REPLY_QUEUES; i++)
7637                 h->q[i] = 0;
7638 }
7639
7640 /* returns 0 on success; cleans up and returns -Enn on error */
7641 static int hpsa_request_irqs(struct ctlr_info *h,
7642         irqreturn_t (*msixhandler)(int, void *),
7643         irqreturn_t (*intxhandler)(int, void *))
7644 {
7645         int rc, i;
7646
7647         /*
7648          * initialize h->q[x] = x so that interrupt handlers know which
7649          * queue to process.
7650          */
7651         for (i = 0; i < MAX_REPLY_QUEUES; i++)
7652                 h->q[i] = (u8) i;
7653
7654         if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
7655                 /* If performant mode and MSI-X, use multiple reply queues */
7656                 for (i = 0; i < h->msix_vector; i++) {
7657                         sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
7658                         rc = request_irq(h->intr[i], msixhandler,
7659                                         0, h->intrname[i],
7660                                         &h->q[i]);
7661                         if (rc) {
7662                                 int j;
7663
7664                                 dev_err(&h->pdev->dev,
7665                                         "failed to get irq %d for %s\n",
7666                                        h->intr[i], h->devname);
7667                                 for (j = 0; j < i; j++) {
7668                                         free_irq(h->intr[j], &h->q[j]);
7669                                         h->q[j] = 0;
7670                                 }
7671                                 for (; j < MAX_REPLY_QUEUES; j++)
7672                                         h->q[j] = 0;
7673                                 return rc;
7674                         }
7675                 }
7676                 hpsa_irq_affinity_hints(h);
7677         } else {
7678                 /* Use single reply pool */
7679                 if (h->msix_vector > 0 || h->msi_vector) {
7680                         if (h->msix_vector)
7681                                 sprintf(h->intrname[h->intr_mode],
7682                                         "%s-msix", h->devname);
7683                         else
7684                                 sprintf(h->intrname[h->intr_mode],
7685                                         "%s-msi", h->devname);
7686                         rc = request_irq(h->intr[h->intr_mode],
7687                                 msixhandler, 0,
7688                                 h->intrname[h->intr_mode],
7689                                 &h->q[h->intr_mode]);
7690                 } else {
7691                         sprintf(h->intrname[h->intr_mode],
7692                                 "%s-intx", h->devname);
7693                         rc = request_irq(h->intr[h->intr_mode],
7694                                 intxhandler, IRQF_SHARED,
7695                                 h->intrname[h->intr_mode],
7696                                 &h->q[h->intr_mode]);
7697                 }
7698                 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
7699         }
7700         if (rc) {
7701                 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
7702                        h->intr[h->intr_mode], h->devname);
7703                 hpsa_free_irqs(h);
7704                 return -ENODEV;
7705         }
7706         return 0;
7707 }
7708
7709 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
7710 {
7711         int rc;
7712         hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
7713
7714         dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
7715         rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7716         if (rc) {
7717                 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
7718                 return rc;
7719         }
7720
7721         dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
7722         rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7723         if (rc) {
7724                 dev_warn(&h->pdev->dev, "Board failed to become ready "
7725                         "after soft reset.\n");
7726                 return rc;
7727         }
7728
7729         return 0;
7730 }
7731
7732 static void hpsa_free_reply_queues(struct ctlr_info *h)
7733 {
7734         int i;
7735
7736         for (i = 0; i < h->nreply_queues; i++) {
7737                 if (!h->reply_queue[i].head)
7738                         continue;
7739                 pci_free_consistent(h->pdev,
7740                                         h->reply_queue_size,
7741                                         h->reply_queue[i].head,
7742                                         h->reply_queue[i].busaddr);
7743                 h->reply_queue[i].head = NULL;
7744                 h->reply_queue[i].busaddr = 0;
7745         }
7746         h->reply_queue_size = 0;
7747 }
7748
7749 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7750 {
7751         hpsa_free_performant_mode(h);           /* init_one 7 */
7752         hpsa_free_sg_chain_blocks(h);           /* init_one 6 */
7753         hpsa_free_cmd_pool(h);                  /* init_one 5 */
7754         hpsa_free_irqs(h);                      /* init_one 4 */
7755         scsi_host_put(h->scsi_host);            /* init_one 3 */
7756         h->scsi_host = NULL;                    /* init_one 3 */
7757         hpsa_free_pci_init(h);                  /* init_one 2_5 */
7758         free_percpu(h->lockup_detected);        /* init_one 2 */
7759         h->lockup_detected = NULL;              /* init_one 2 */
7760         if (h->resubmit_wq) {
7761                 destroy_workqueue(h->resubmit_wq);      /* init_one 1 */
7762                 h->resubmit_wq = NULL;
7763         }
7764         if (h->rescan_ctlr_wq) {
7765                 destroy_workqueue(h->rescan_ctlr_wq);
7766                 h->rescan_ctlr_wq = NULL;
7767         }
7768         kfree(h);                               /* init_one 1 */
7769 }
7770
7771 /* Called when controller lockup is detected. */
7772 static void fail_all_outstanding_cmds(struct ctlr_info *h)
7773 {
7774         int i, refcount;
7775         struct CommandList *c;
7776         int failcount = 0;
7777
7778         flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
7779         for (i = 0; i < h->nr_cmds; i++) {
7780                 c = h->cmd_pool + i;
7781                 refcount = atomic_inc_return(&c->refcount);
7782                 if (refcount > 1) {
7783                         c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
7784                         finish_cmd(c);
7785                         atomic_dec(&h->commands_outstanding);
7786                         failcount++;
7787                 }
7788                 cmd_free(h, c);
7789         }
7790         dev_warn(&h->pdev->dev,
7791                 "failed %d commands in fail_all\n", failcount);
7792 }
7793
7794 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7795 {
7796         int cpu;
7797
7798         for_each_online_cpu(cpu) {
7799                 u32 *lockup_detected;
7800                 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7801                 *lockup_detected = value;
7802         }
7803         wmb(); /* be sure the per-cpu variables are out to memory */
7804 }
7805
7806 static void controller_lockup_detected(struct ctlr_info *h)
7807 {
7808         unsigned long flags;
7809         u32 lockup_detected;
7810
7811         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7812         spin_lock_irqsave(&h->lock, flags);
7813         lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7814         if (!lockup_detected) {
7815                 /* no heartbeat, but controller gave us a zero. */
7816                 dev_warn(&h->pdev->dev,
7817                         "lockup detected after %d seconds but scratchpad register is zero\n",
7818                         h->heartbeat_sample_interval / HZ);
7819                 lockup_detected = 0xffffffff;
7820         }
7821         set_lockup_detected_for_all_cpus(h, lockup_detected);
7822         spin_unlock_irqrestore(&h->lock, flags);
7823         dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
7824                         lockup_detected, h->heartbeat_sample_interval / HZ);
7825         pci_disable_device(h->pdev);
7826         fail_all_outstanding_cmds(h);
7827 }
7828
7829 static int detect_controller_lockup(struct ctlr_info *h)
7830 {
7831         u64 now;
7832         u32 heartbeat;
7833         unsigned long flags;
7834
7835         now = get_jiffies_64();
7836         /* If we've received an interrupt recently, we're ok. */
7837         if (time_after64(h->last_intr_timestamp +
7838                                 (h->heartbeat_sample_interval), now))
7839                 return false;
7840
7841         /*
7842          * If we've already checked the heartbeat recently, we're ok.
7843          * This could happen if someone sends us a signal. We
7844          * otherwise don't care about signals in this thread.
7845          */
7846         if (time_after64(h->last_heartbeat_timestamp +
7847                                 (h->heartbeat_sample_interval), now))
7848                 return false;
7849
7850         /* If heartbeat has not changed since we last looked, we're not ok. */
7851         spin_lock_irqsave(&h->lock, flags);
7852         heartbeat = readl(&h->cfgtable->HeartBeat);
7853         spin_unlock_irqrestore(&h->lock, flags);
7854         if (h->last_heartbeat == heartbeat) {
7855                 controller_lockup_detected(h);
7856                 return true;
7857         }
7858
7859         /* We're ok. */
7860         h->last_heartbeat = heartbeat;
7861         h->last_heartbeat_timestamp = now;
7862         return false;
7863 }
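/*
 * Summary of the checks above: the controller is presumed alive if an
 * interrupt arrived within the last heartbeat_sample_interval, or if the
 * heartbeat was already sampled that recently; only a heartbeat counter
 * that has not advanced since the previous sample is treated as a lockup.
 */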
7864
7865 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
7866 {
7867         int i;
7868         char *event_type;
7869
7870         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7871                 return;
7872
7873         /* Ask the controller to clear the events we're handling. */
7874         if ((h->transMethod & (CFGTBL_Trans_io_accel1
7875                         | CFGTBL_Trans_io_accel2)) &&
7876                 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7877                  h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7878
7879                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7880                         event_type = "state change";
7881                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7882                         event_type = "configuration change";
7883                 /* Stop sending new RAID offload reqs via the IO accelerator */
7884                 scsi_block_requests(h->scsi_host);
7885                 for (i = 0; i < h->ndevices; i++)
7886                         h->dev[i]->offload_enabled = 0;
7887                 hpsa_drain_accel_commands(h);
7888                 /* Set 'accelerator path config change' bit */
7889                 dev_warn(&h->pdev->dev,
7890                         "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7891                         h->events, event_type);
7892                 writel(h->events, &(h->cfgtable->clear_event_notify));
7893                 /* Set the "clear event notify field update" bit 6 */
7894                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7895                 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7896                 hpsa_wait_for_clear_event_notify_ack(h);
7897                 scsi_unblock_requests(h->scsi_host);
7898         } else {
7899                 /* Acknowledge controller notification events. */
7900                 writel(h->events, &(h->cfgtable->clear_event_notify));
7901                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7902                 hpsa_wait_for_clear_event_notify_ack(h);
7903 #if 0
7904                 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7905                 hpsa_wait_for_mode_change_ack(h);
7906 #endif
7907         }
7908         return;
7909 }
7910
7911 /* Check a register on the controller to see if there are configuration
7912  * changes (added/changed/removed logical drives, etc.) which mean that
7913  * we should rescan the controller for devices.
7914  * Also check flag for driver-initiated rescan.
7915  */
7916 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
7917 {
7918         if (h->drv_req_rescan) {
7919                 h->drv_req_rescan = 0;
7920                 return 1;
7921         }
7922
7923         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7924                 return 0;
7925
7926         h->events = readl(&(h->cfgtable->event_notify));
7927         return h->events & RESCAN_REQUIRED_EVENT_BITS;
7928 }
7929
7930 /*
7931  * Check if any of the offline devices have become ready
7932  */
7933 static int hpsa_offline_devices_ready(struct ctlr_info *h)
7934 {
7935         unsigned long flags;
7936         struct offline_device_entry *d;
7937         struct list_head *this, *tmp;
7938
7939         spin_lock_irqsave(&h->offline_device_lock, flags);
7940         list_for_each_safe(this, tmp, &h->offline_device_list) {
7941                 d = list_entry(this, struct offline_device_entry,
7942                                 offline_list);
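                     /*
                      * Drop the lock across hpsa_volume_offline(), which
                      * issues commands to the controller and can sleep.
                      */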
7943                 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7944                 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7945                         spin_lock_irqsave(&h->offline_device_lock, flags);
7946                         list_del(&d->offline_list);
7947                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
7948                         return 1;
7949                 }
7950                 spin_lock_irqsave(&h->offline_device_lock, flags);
7951         }
7952         spin_unlock_irqrestore(&h->offline_device_lock, flags);
7953         return 0;
7954 }
7955
7956 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
7957 {
7958         unsigned long flags;
7959         struct ctlr_info *h = container_of(to_delayed_work(work),
7960                                         struct ctlr_info, rescan_ctlr_work);
7961
7963         if (h->remove_in_progress)
7964                 return;
7965
7966         if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7967                 scsi_host_get(h->scsi_host);
7968                 hpsa_ack_ctlr_events(h);
7969                 hpsa_scan_start(h->scsi_host);
7970                 scsi_host_put(h->scsi_host);
7971         }
7972         spin_lock_irqsave(&h->lock, flags);
7973         if (!h->remove_in_progress)
7974                 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7975                                 h->heartbeat_sample_interval);
7976         spin_unlock_irqrestore(&h->lock, flags);
7977 }
7978
7979 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7980 {
7981         unsigned long flags;
7982         struct ctlr_info *h = container_of(to_delayed_work(work),
7983                                         struct ctlr_info, monitor_ctlr_work);
7984
7985         detect_controller_lockup(h);
7986         if (lockup_detected(h))
7987                 return;
7988
7989         spin_lock_irqsave(&h->lock, flags);
7990         if (!h->remove_in_progress)
7991                 schedule_delayed_work(&h->monitor_ctlr_work,
7992                                 h->heartbeat_sample_interval);
7993         spin_unlock_irqrestore(&h->lock, flags);
7994 }
7995
7996 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7997                                                 char *name)
7998 {
7999         struct workqueue_struct *wq = NULL;
8000
8001         wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8002         if (!wq)
8003                 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8004
8005         return wq;
8006 }
8007
8008 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8009 {
8010         int dac, rc;
8011         struct ctlr_info *h;
8012         int try_soft_reset = 0;
8013         unsigned long flags;
8014         u32 board_id;
8015
8016         if (number_of_controllers == 0)
8017                 printk(KERN_INFO DRIVER_NAME "\n");
8018
8019         rc = hpsa_lookup_board_id(pdev, &board_id);
8020         if (rc < 0) {
8021                 dev_warn(&pdev->dev, "Board ID not found\n");
8022                 return rc;
8023         }
8024
8025         rc = hpsa_init_reset_devices(pdev, board_id);
8026         if (rc) {
8027                 if (rc != -ENOTSUPP)
8028                         return rc;
8029                 /* If the reset fails in a particular way (it has no way to do
8030                  * a proper hard reset, so returns -ENOTSUPP) we can try to do
8031                  * a soft reset once we get the controller configured up to the
8032                  * point that it can accept a command.
8033                  */
8034                 try_soft_reset = 1;
8035                 rc = 0;
8036         }
8037
8038 reinit_after_soft_reset:
8039
8040         /* Command structures must be aligned on a 32-byte boundary because
8041          * the 5 lower bits of the address are used by the hardware and by
8042          * the driver.  See comments in hpsa.h for more info.
8043          */
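        /*
         * (Illustrative: a 32-byte-aligned bus address always ends in
         * 0x00, 0x20, 0x40, ..., so bits 0-4 are guaranteed to be zero
         * and can carry per-command information instead.)
         */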
8044         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8045         h = kzalloc(sizeof(*h), GFP_KERNEL);
8046         if (!h) {
8047                 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8048                 return -ENOMEM;
8049         }
8050
8051         h->pdev = pdev;
8052
8053         h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8054         INIT_LIST_HEAD(&h->offline_device_list);
8055         spin_lock_init(&h->lock);
8056         spin_lock_init(&h->offline_device_lock);
8057         spin_lock_init(&h->scan_lock);
8058         atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8059         atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
8060
8061         /* Allocate and clear per-cpu variable lockup_detected */
8062         h->lockup_detected = alloc_percpu(u32);
8063         if (!h->lockup_detected) {
8064                 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8065                 rc = -ENOMEM;
8066                 goto clean1;    /* aer/h */
8067         }
8068         set_lockup_detected_for_all_cpus(h, 0);
8069
8070         rc = hpsa_pci_init(h);
8071         if (rc)
8072                 goto clean2;    /* lu, aer/h */
8073
8074         /* relies on h-> settings made by hpsa_pci_init, including
8075          * interrupt_mode h->intr */
8076         rc = hpsa_scsi_host_alloc(h);
8077         if (rc)
8078                 goto clean2_5;  /* pci, lu, aer/h */
8079
8080         sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8081         h->ctlr = number_of_controllers;
8082         number_of_controllers++;
8083
8084         /* configure PCI DMA stuff */
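        /*
         * Try a 64-bit DMA mask first and fall back to 32-bit; dac
         * ("dual address cycle") records which mask was accepted.
         */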
8085         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8086         if (rc == 0) {
8087                 dac = 1;
8088         } else {
8089                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8090                 if (rc == 0) {
8091                         dac = 0;
8092                 } else {
8093                         dev_err(&pdev->dev, "no suitable DMA available\n");
8094                         goto clean3;    /* shost, pci, lu, aer/h */
8095                 }
8096         }
8097
8098         /* make sure the board interrupts are off */
8099         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8100
8101         rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8102         if (rc)
8103                 goto clean3;    /* shost, pci, lu, aer/h */
8104         rc = hpsa_alloc_cmd_pool(h);
8105         if (rc)
8106                 goto clean4;    /* irq, shost, pci, lu, aer/h */
8107         rc = hpsa_alloc_sg_chain_blocks(h);
8108         if (rc)
8109                 goto clean5;    /* cmd, irq, shost, pci, lu, aer/h */
8110         init_waitqueue_head(&h->scan_wait_queue);
8111         init_waitqueue_head(&h->abort_cmd_wait_queue);
8112         init_waitqueue_head(&h->event_sync_wait_queue);
8113         mutex_init(&h->reset_mutex);
8114         h->scan_finished = 1; /* no scan currently in progress */
8115
8116         pci_set_drvdata(pdev, h);
8117         h->ndevices = 0;
8118
8119         spin_lock_init(&h->devlock);
8120         rc = hpsa_put_ctlr_into_performant_mode(h);
8121         if (rc)
8122                 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8123
8124         /* hook into SCSI subsystem */
8125         rc = hpsa_scsi_add_host(h);
8126         if (rc)
8127                 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8128
8129         /* create the resubmit workqueue */
8130         h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8131         if (!h->rescan_ctlr_wq) {
8132                 rc = -ENOMEM;
8133                 goto clean7;
8134         }
8135
8136         h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8137         if (!h->resubmit_wq) {
8138                 rc = -ENOMEM;
8139                 goto clean7;    /* perf, sg, cmd, irq, shost, pci, lu, wq/aer/h */
8140         }
8141
8142         /*
8143          * At this point, the controller is ready to take commands.
8144          * Now, if reset_devices and the hard reset didn't work, try
8145          * the soft reset and see if that works.
8146          */
8147         if (try_soft_reset) {
8148
8149                 /* This is kind of gross.  We may or may not get a completion
8150                  * from the soft reset command, and if we do, then the value
8151                  * from the fifo may or may not be valid.  So, we wait 10 secs
8152          * after the reset, throwing away any completions we get during
8153                  * that time.  Unregister the interrupt handler and register
8154                  * fake ones to scoop up any residual completions.
8155                  */
8156                 spin_lock_irqsave(&h->lock, flags);
8157                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8158                 spin_unlock_irqrestore(&h->lock, flags);
8159                 hpsa_free_irqs(h);
8160                 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8161                                         hpsa_intx_discard_completions);
8162                 if (rc) {
8163                         dev_warn(&h->pdev->dev,
8164                                 "Failed to request_irq after soft reset.\n");
8165                         /*
8166                          * cannot goto clean7 or free_irqs will be called
8167                          * again. Instead, do its work
8168                          */
8169                         hpsa_free_performant_mode(h);   /* clean7 */
8170                         hpsa_free_sg_chain_blocks(h);   /* clean6 */
8171                         hpsa_free_cmd_pool(h);          /* clean5 */
8172                         /*
8173                          * skip hpsa_free_irqs(h) clean4 since that
8174                          * was just called before request_irqs failed
8175                          */
8176                         goto clean3;
8177                 }
8178
8179                 rc = hpsa_kdump_soft_reset(h);
8180                 if (rc)
8181                         /* Neither hard nor soft reset worked, we're hosed. */
8182                         goto clean7;
8183
8184                 dev_info(&h->pdev->dev, "Board READY.\n");
8185                 dev_info(&h->pdev->dev,
8186                         "Waiting for stale completions to drain.\n");
8187                 h->access.set_intr_mask(h, HPSA_INTR_ON);
8188                 msleep(10000);
8189                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8190
8191                 rc = controller_reset_failed(h->cfgtable);
8192                 if (rc)
8193                         dev_info(&h->pdev->dev,
8194                                 "Soft reset appears to have failed.\n");
8195
8196                 /* Since the controller has been reset, we have to go back and re-init
8197                  * everything.  Easiest to just forget what we've done and do it
8198                  * all over again.
8199                  */
8200                 hpsa_undo_allocations_after_kdump_soft_reset(h);
8201                 try_soft_reset = 0;
8202                 if (rc)
8203                         /* don't goto clean, we already unallocated */
8204                         return -ENODEV;
8205
8206                 goto reinit_after_soft_reset;
8207         }
8208
8209         /* Enable Accelerated IO path at driver layer */
8210         h->acciopath_status = 1;
8211
8213         /* Turn the interrupts on so we can service requests */
8214         h->access.set_intr_mask(h, HPSA_INTR_ON);
8215
8216         hpsa_hba_inquiry(h);
8217
8218         /* Monitor the controller for firmware lockups */
8219         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8220         INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8221         schedule_delayed_work(&h->monitor_ctlr_work,
8222                                 h->heartbeat_sample_interval);
8223         INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8224         queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8225                                 h->heartbeat_sample_interval);
8226         return 0;
8227
8228 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8229         hpsa_free_performant_mode(h);
8230         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8231 clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
8232         hpsa_free_sg_chain_blocks(h);
8233 clean5: /* cmd, irq, shost, pci, lu, aer/h */
8234         hpsa_free_cmd_pool(h);
8235 clean4: /* irq, shost, pci, lu, aer/h */
8236         hpsa_free_irqs(h);
8237 clean3: /* shost, pci, lu, aer/h */
8238         scsi_host_put(h->scsi_host);
8239         h->scsi_host = NULL;
8240 clean2_5: /* pci, lu, aer/h */
8241         hpsa_free_pci_init(h);
8242 clean2: /* lu, aer/h */
8243         if (h->lockup_detected) {
8244                 free_percpu(h->lockup_detected);
8245                 h->lockup_detected = NULL;
8246         }
8247 clean1: /* wq/aer/h */
8248         if (h->resubmit_wq) {
8249                 destroy_workqueue(h->resubmit_wq);
8250                 h->resubmit_wq = NULL;
8251         }
8252         if (h->rescan_ctlr_wq) {
8253                 destroy_workqueue(h->rescan_ctlr_wq);
8254                 h->rescan_ctlr_wq = NULL;
8255         }
8256         kfree(h);
8257         return rc;
8258 }
8259
8260 static void hpsa_flush_cache(struct ctlr_info *h)
8261 {
8262         char *flush_buf;
8263         struct CommandList *c;
8264         int rc;
8265
8266         if (unlikely(lockup_detected(h)))
8267                 return;
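        /* The cache-flush command takes a small (4-byte) zeroed buffer. */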
8268         flush_buf = kzalloc(4, GFP_KERNEL);
8269         if (!flush_buf)
8270                 return;
8271
8272         c = cmd_alloc(h);
8273
8274         if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8275                 RAID_CTLR_LUNID, TYPE_CMD))
8276                 goto out;
8277         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8278                                         PCI_DMA_TODEVICE, NO_TIMEOUT);
8279         if (rc == 0 && c->err_info->CommandStatus == 0)
8280                 goto done;
8281 out:
8282         dev_warn(&h->pdev->dev,
8283                 "error flushing cache on controller\n");
8284 done:
8285         cmd_free(h, c);
8286         kfree(flush_buf);
8288 }
8289
8290 static void hpsa_shutdown(struct pci_dev *pdev)
8291 {
8292         struct ctlr_info *h;
8293
8294         h = pci_get_drvdata(pdev);
8295         /* Flush the cache so that all data in the battery-backed cache
8296          * is written to disk, then turn the board interrupts off and
8297          * free the interrupt resources.
8298          */
8299         hpsa_flush_cache(h);
8300         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8301         hpsa_free_irqs(h);                      /* init_one 4 */
8302         hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
8303 }
8304
8305 static void hpsa_free_device_info(struct ctlr_info *h)
8306 {
8307         int i;
8308
8309         for (i = 0; i < h->ndevices; i++) {
8310                 kfree(h->dev[i]);
8311                 h->dev[i] = NULL;
8312         }
8313 }
8314
8315 static void hpsa_remove_one(struct pci_dev *pdev)
8316 {
8317         struct ctlr_info *h;
8318         unsigned long flags;
8319
8320         if (pci_get_drvdata(pdev) == NULL) {
8321                 dev_err(&pdev->dev, "unable to remove device\n");
8322                 return;
8323         }
8324         h = pci_get_drvdata(pdev);
8325
8326         /* Get rid of any controller monitoring work items */
8327         spin_lock_irqsave(&h->lock, flags);
8328         h->remove_in_progress = 1;
8329         spin_unlock_irqrestore(&h->lock, flags);
8330         cancel_delayed_work_sync(&h->monitor_ctlr_work);
8331         cancel_delayed_work_sync(&h->rescan_ctlr_work);
8332         destroy_workqueue(h->rescan_ctlr_wq);
8333         destroy_workqueue(h->resubmit_wq);
8334
8335         /*
8336          * Call before disabling interrupts.
8337          * scsi_remove_host can trigger I/O operations especially
8338          * when multipath is enabled. There can be SYNCHRONIZE CACHE
8339          * operations which cannot complete and will hang the system.
8340          */
8341         if (h->scsi_host)
8342                 scsi_remove_host(h->scsi_host);         /* init_one 8 */
8343         /* includes hpsa_free_irqs - init_one 4 */
8344         /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8345         hpsa_shutdown(pdev);
8346
8347         hpsa_free_device_info(h);               /* scan */
8348
8349         kfree(h->hba_inquiry_data);                     /* init_one 10 */
8350         h->hba_inquiry_data = NULL;                     /* init_one 10 */
8351         hpsa_free_ioaccel2_sg_chain_blocks(h);
8352         hpsa_free_performant_mode(h);                   /* init_one 7 */
8353         hpsa_free_sg_chain_blocks(h);                   /* init_one 6 */
8354         hpsa_free_cmd_pool(h);                          /* init_one 5 */
8355
8356         /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
8357
8358         scsi_host_put(h->scsi_host);                    /* init_one 3 */
8359         h->scsi_host = NULL;                            /* init_one 3 */
8360
8361         /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8362         hpsa_free_pci_init(h);                          /* init_one 2.5 */
8363
8364         free_percpu(h->lockup_detected);                /* init_one 2 */
8365         h->lockup_detected = NULL;                      /* init_one 2 */
8366         /* (void) pci_disable_pcie_error_reporting(pdev); */    /* init_one 1 */
8367         kfree(h);                                       /* init_one 1 */
8368 }
8369
8370 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8371         __attribute__((unused)) pm_message_t state)
8372 {
8373         return -ENOSYS;
8374 }
8375
8376 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8377 {
8378         return -ENOSYS;
8379 }
8380
8381 static struct pci_driver hpsa_pci_driver = {
8382         .name = HPSA,
8383         .probe = hpsa_init_one,
8384         .remove = hpsa_remove_one,
8385         .id_table = hpsa_pci_device_id,
8386         .shutdown = hpsa_shutdown,
8387         .suspend = hpsa_suspend,
8388         .resume = hpsa_resume,
8389 };
8390
8391 /* Fill in bucket_map[], given nsgs (the max number of
8392  * scatter gather elements supported) and bucket[],
8393  * an array of num_buckets integers.  The bucket[] array
8394  * contains num_buckets different DMA transfer sizes (in 16-
8395  * byte increments) which the controller uses to fetch
8396  * commands.  This function fills in bucket_map[], which
8397  * maps a given number of scatter gather elements to one of
8398  * those DMA transfer sizes.  The point of it is to allow the
8399  * controller to only do as much DMA as needed to fetch the
8400  * command, with the DMA transfer size encoded in the lower
8401  * bits of the command address.
8402  */
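     /*
      * Worked example (illustrative, using the bft[] table from
      * hpsa_enter_performant_mode() below): with bucket[] beginning
      * {5, 6, 8, 10, ...} and min_blocks = 4, a command with i = 3 SG
      * entries needs size = 3 + 4 = 7 blocks; the first bucket >= 7 is
      * bucket[2] = 8, so bucket_map[3] = 2.
      */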
8403 static void  calc_bucket_map(int bucket[], int num_buckets,
8404         int nsgs, int min_blocks, u32 *bucket_map)
8405 {
8406         int i, j, b, size;
8407
8408         /* Note, bucket_map must have nsgs+1 entries. */
8409         for (i = 0; i <= nsgs; i++) {
8410                 /* Compute size of a command with i SG entries */
8411                 size = i + min_blocks;
8412                 b = num_buckets; /* Assume the biggest bucket */
8413                 /* Find the bucket that is just big enough */
8414                 for (j = 0; j < num_buckets; j++) {
8415                         if (bucket[j] >= size) {
8416                                 b = j;
8417                                 break;
8418                         }
8419                 }
8420                 /* for a command with i SG entries, use bucket b. */
8421                 bucket_map[i] = b;
8422         }
8423 }
8424
8425 /*
8426  * return -ENODEV on err, 0 on success (or no action)
8427  * allocates numerous items that must be freed later
8428  */
8429 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
8430 {
8431         int i;
8432         unsigned long register_value;
8433         unsigned long transMethod = CFGTBL_Trans_Performant |
8434                         (trans_support & CFGTBL_Trans_use_short_tags) |
8435                                 CFGTBL_Trans_enable_directed_msix |
8436                         (trans_support & (CFGTBL_Trans_io_accel1 |
8437                                 CFGTBL_Trans_io_accel2));
8438         struct access_method access = SA5_performant_access;
8439
8440         /* This is a bit complicated.  There are 8 registers on
8441          * the controller which we write to tell it the 8 different
8442          * command sizes it may see.  It's a way of
8443          * reducing the DMA done to fetch each command.  Encoded into
8444          * each command's tag are 3 bits which communicate to the controller
8445          * which of the eight sizes that command fits within.  The size of
8446          * each command depends on how many scatter gather entries there are.
8447          * Each SG entry requires 16 bytes.  The eight registers are programmed
8448          * with the number of 16-byte blocks a command of that size requires.
8449          * The smallest command possible requires 5 such 16-byte blocks;
8450          * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
8451          * blocks.  Note, this only extends to the SG entries contained
8452          * within the command block, and does not extend to chained blocks
8453          * of SG elements.   bft[] contains the eight values we write to
8454          * the registers.  They are not evenly distributed, but have more
8455          * sizes for small commands, and fewer sizes for larger commands.
8456          */
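        /*
         * For example (illustrative): a command whose size maps to
         * bucket 2 carries the value 2 in those 3 tag bits, and the
         * controller fetches bft[2] * 16 = 128 bytes of it.
         */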
8457         int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
8458 #define MIN_IOACCEL2_BFT_ENTRY 5
8459 #define HPSA_IOACCEL2_HEADER_SZ 4
8460         int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8461                         13, 14, 15, 16, 17, 18, 19,
8462                         HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8463         BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8464         BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8465         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8466                                  16 * MIN_IOACCEL2_BFT_ENTRY);
8467         BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
8468         BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
8469         /*  5 = 1 s/g entry or 4k
8470          *  6 = 2 s/g entry or 8k
8471          *  8 = 4 s/g entry or 16k
8472          * 10 = 6 s/g entry or 24k
8473          */
8474
8475         /* If the controller supports either ioaccel method then
8476          * we can also use the RAID stack submit path that does not
8477          * perform the superfluous readl() after each command submission.
8478          */
8479         if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8480                 access = SA5_performant_access_no_read;
8481
8482         /* Controller spec: zero out this buffer. */
8483         for (i = 0; i < h->nreply_queues; i++)
8484                 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
8485
8486         bft[7] = SG_ENTRIES_IN_CMD + 4;
8487         calc_bucket_map(bft, ARRAY_SIZE(bft),
8488                                 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
8489         for (i = 0; i < 8; i++)
8490                 writel(bft[i], &h->transtable->BlockFetch[i]);
8491
8492         /* size of controller ring buffer */
8493         writel(h->max_commands, &h->transtable->RepQSize);
8494         writel(h->nreply_queues, &h->transtable->RepQCount);
8495         writel(0, &h->transtable->RepQCtrAddrLow32);
8496         writel(0, &h->transtable->RepQCtrAddrHigh32);
8497
8498         for (i = 0; i < h->nreply_queues; i++) {
8499                 writel(0, &h->transtable->RepQAddr[i].upper);
8500                 writel(h->reply_queue[i].busaddr,
8501                         &h->transtable->RepQAddr[i].lower);
8502         }
8503
8504         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
8505         writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
8506         /*
8507          * Enable outbound interrupt coalescing in accelerator mode.
8508          */
8509         if (trans_support & CFGTBL_Trans_io_accel1) {
8510                 access = SA5_ioaccel_mode1_access;
8511                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8512                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
8513         } else if (trans_support & CFGTBL_Trans_io_accel2) {
8514                 access = SA5_ioaccel_mode2_access;
8515                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8516                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
8517         }
8520         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8521         if (hpsa_wait_for_mode_change_ack(h)) {
8522                 dev_err(&h->pdev->dev,
8523                         "performant mode problem - doorbell timeout\n");
8524                 return -ENODEV;
8525         }
8526         register_value = readl(&(h->cfgtable->TransportActive));
8527         if (!(register_value & CFGTBL_Trans_Performant)) {
8528                 dev_err(&h->pdev->dev,
8529                         "performant mode problem - transport not active\n");
8530                 return -ENODEV;
8531         }
8532         /* Change the access methods to the performant access methods */
8533         h->access = access;
8534         h->transMethod = transMethod;
8535
8536         if (!((trans_support & CFGTBL_Trans_io_accel1) ||
8537                 (trans_support & CFGTBL_Trans_io_accel2)))
8538                 return 0;
8539
8540         if (trans_support & CFGTBL_Trans_io_accel1) {
8541                 /* Set up I/O accelerator mode */
8542                 for (i = 0; i < h->nreply_queues; i++) {
8543                         writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
8544                         h->reply_queue[i].current_entry =
8545                                 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
8546                 }
8547                 bft[7] = h->ioaccel_maxsg + 8;
8548                 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
8549                                 h->ioaccel1_blockFetchTable);
8550
8551                 /* initialize all reply queue entries to unused */
8552                 for (i = 0; i < h->nreply_queues; i++)
8553                         memset(h->reply_queue[i].head,
8554                                 (u8) IOACCEL_MODE1_REPLY_UNUSED,
8555                                 h->reply_queue_size);
8556
8557                 /* set all the constant fields in the accelerator command
8558                  * frames once at init time to save CPU cycles later.
8559                  */
8560                 for (i = 0; i < h->nr_cmds; i++) {
8561                         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
8562
8563                         cp->function = IOACCEL1_FUNCTION_SCSIIO;
8564                         cp->err_info = (u32) (h->errinfo_pool_dhandle +
8565                                         (i * sizeof(struct ErrorInfo)));
8566                         cp->err_info_len = sizeof(struct ErrorInfo);
8567                         cp->sgl_offset = IOACCEL1_SGLOFFSET;
8568                         cp->host_context_flags =
8569                                 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
8570                         cp->timeout_sec = 0;
8571                         cp->ReplyQueue = 0;
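                             /* Encode the pool index in the tag; the shift
                              * leaves the low tag bits free for the controller
                              * (see the hpsa_enter_performant_mode() comments).
                              */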
8572                         cp->tag =
8573                                 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
8574                         cp->host_addr =
8575                                 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
8576                                         (i * sizeof(struct io_accel1_cmd)));
8577                 }
8578         } else if (trans_support & CFGTBL_Trans_io_accel2) {
8579                 u64 cfg_offset, cfg_base_addr_index;
8580                 u32 bft2_offset, cfg_base_addr;
8581                 int rc;
8582
8583                 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
8584                         &cfg_base_addr_index, &cfg_offset);
                     if (rc)
                             return rc;
8585                 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
8586                 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
8587                 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
8588                                 4, h->ioaccel2_blockFetchTable);
8589                 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
8590                 BUILD_BUG_ON(offsetof(struct CfgTable,
8591                                 io_accel_request_size_offset) != 0xb8);
8592                 h->ioaccel2_bft2_regs =
8593                         remap_pci_mem(pci_resource_start(h->pdev,
8594                                         cfg_base_addr_index) +
8595                                         cfg_offset + bft2_offset,
8596                                         ARRAY_SIZE(bft2) *
8597                                         sizeof(*h->ioaccel2_bft2_regs));
                     if (!h->ioaccel2_bft2_regs)
                             return -ENOMEM;
8598                 for (i = 0; i < ARRAY_SIZE(bft2); i++)
8599                         writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
8600         }
8601         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8602         if (hpsa_wait_for_mode_change_ack(h)) {
8603                 dev_err(&h->pdev->dev,
8604                         "performant mode problem - enabling ioaccel mode\n");
8605                 return -ENODEV;
8606         }
8607         return 0;
8608 }
8609
8610 /* Free ioaccel1 mode command blocks and block fetch table */
8611 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8612 {
8613         if (h->ioaccel_cmd_pool) {
8614                 pci_free_consistent(h->pdev,
8615                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8616                         h->ioaccel_cmd_pool,
8617                         h->ioaccel_cmd_pool_dhandle);
8618                 h->ioaccel_cmd_pool = NULL;
8619                 h->ioaccel_cmd_pool_dhandle = 0;
8620         }
8621         kfree(h->ioaccel1_blockFetchTable);
8622         h->ioaccel1_blockFetchTable = NULL;
8623 }
8624
8625 /* Allocate ioaccel1 mode command blocks and block fetch table */
8626 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8627 {
8628         h->ioaccel_maxsg =
8629                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8630         if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
8631                 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
8632
8633         /* Command structures must be aligned on a 128-byte boundary
8634          * because the 7 lower bits of the address are used by the
8635          * hardware.
8636          */
8637         BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
8638                         IOACCEL1_COMMANDLIST_ALIGNMENT);
8639         h->ioaccel_cmd_pool =
8640                 pci_alloc_consistent(h->pdev,
8641                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8642                         &(h->ioaccel_cmd_pool_dhandle));
8643
8644         h->ioaccel1_blockFetchTable =
8645                 kmalloc(((h->ioaccel_maxsg + 1) *
8646                                 sizeof(u32)), GFP_KERNEL);
8647
8648         if ((h->ioaccel_cmd_pool == NULL) ||
8649                 (h->ioaccel1_blockFetchTable == NULL))
8650                 goto clean_up;
8651
8652         memset(h->ioaccel_cmd_pool, 0,
8653                 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
8654         return 0;
8655
8656 clean_up:
8657         hpsa_free_ioaccel1_cmd_and_bft(h);
8658         return -ENOMEM;
8659 }
8660
8661 /* Free ioaccel2 mode command blocks and block fetch table */
8662 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8663 {
8664         hpsa_free_ioaccel2_sg_chain_blocks(h);
8665
8666         if (h->ioaccel2_cmd_pool) {
8667                 pci_free_consistent(h->pdev,
8668                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8669                         h->ioaccel2_cmd_pool,
8670                         h->ioaccel2_cmd_pool_dhandle);
8671                 h->ioaccel2_cmd_pool = NULL;
8672                 h->ioaccel2_cmd_pool_dhandle = 0;
8673         }
8674         kfree(h->ioaccel2_blockFetchTable);
8675         h->ioaccel2_blockFetchTable = NULL;
8676 }
8677
8678 /* Allocate ioaccel2 mode command blocks and block fetch table */
8679 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8680 {
8681         int rc;
8682
8685         h->ioaccel_maxsg =
8686                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8687         if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
8688                 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
8689
8690         BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
8691                         IOACCEL2_COMMANDLIST_ALIGNMENT);
8692         h->ioaccel2_cmd_pool =
8693                 pci_alloc_consistent(h->pdev,
8694                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8695                         &(h->ioaccel2_cmd_pool_dhandle));
8696
8697         h->ioaccel2_blockFetchTable =
8698                 kmalloc(((h->ioaccel_maxsg + 1) *
8699                                 sizeof(u32)), GFP_KERNEL);
8700
8701         if ((h->ioaccel2_cmd_pool == NULL) ||
8702                 (h->ioaccel2_blockFetchTable == NULL)) {
8703                 rc = -ENOMEM;
8704                 goto clean_up;
8705         }
8706
8707         rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8708         if (rc)
8709                 goto clean_up;
8710
8711         memset(h->ioaccel2_cmd_pool, 0,
8712                 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
8713         return 0;
8714
8715 clean_up:
8716         hpsa_free_ioaccel2_cmd_and_bft(h);
8717         return rc;
8718 }
8719
8720 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
8721 static void hpsa_free_performant_mode(struct ctlr_info *h)
8722 {
8723         kfree(h->blockFetchTable);
8724         h->blockFetchTable = NULL;
8725         hpsa_free_reply_queues(h);
8726         hpsa_free_ioaccel1_cmd_and_bft(h);
8727         hpsa_free_ioaccel2_cmd_and_bft(h);
8728 }
8729
8730 /* return -ENODEV on error, 0 on success (or no action)
8731  * allocates numerous items that must be freed later
8732  */
8733 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
8734 {
8735         u32 trans_support;
8736         unsigned long transMethod = CFGTBL_Trans_Performant |
8737                                         CFGTBL_Trans_use_short_tags;
8738         int i, rc;
8739
8740         if (hpsa_simple_mode)
8741                 return 0;
8742
8743         trans_support = readl(&(h->cfgtable->TransportSupport));
8744         if (!(trans_support & PERFORMANT_MODE))
8745                 return 0;
8746
8747         /* Check for I/O accelerator mode support */
8748         if (trans_support & CFGTBL_Trans_io_accel1) {
8749                 transMethod |= CFGTBL_Trans_io_accel1 |
8750                                 CFGTBL_Trans_enable_directed_msix;
8751                 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
8752                 if (rc)
8753                         return rc;
8754         } else if (trans_support & CFGTBL_Trans_io_accel2) {
8755                 transMethod |= CFGTBL_Trans_io_accel2 |
8756                                 CFGTBL_Trans_enable_directed_msix;
8757                 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
8758                 if (rc)
8759                         return rc;
8760         }
8761
8762         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
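        /* One reply queue per MSI-X vector, else a single shared queue. */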
8763         hpsa_get_max_perf_mode_cmds(h);
8764         /* Performant mode ring buffer and supporting data structures */
8765         h->reply_queue_size = h->max_commands * sizeof(u64);
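        /* Each reply-queue entry is one 8-byte (u64) tag. */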
8766
8767         for (i = 0; i < h->nreply_queues; i++) {
8768                 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
8769                                                 h->reply_queue_size,
8770                                                 &(h->reply_queue[i].busaddr));
8771                 if (!h->reply_queue[i].head) {
8772                         rc = -ENOMEM;
8773                         goto clean1;    /* rq, ioaccel */
8774                 }
8775                 h->reply_queue[i].size = h->max_commands;
8776                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
8777                 h->reply_queue[i].current_entry = 0;
8778         }
8779
8780         /* Need a block fetch table for performant mode */
8781         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
8782                                 sizeof(u32)), GFP_KERNEL);
8783         if (!h->blockFetchTable) {
8784                 rc = -ENOMEM;
8785                 goto clean1;    /* rq, ioaccel */
8786         }
8787
8788         rc = hpsa_enter_performant_mode(h, trans_support);
8789         if (rc)
8790                 goto clean2;    /* bft, rq, ioaccel */
8791         return 0;
8792
8793 clean2: /* bft, rq, ioaccel */
8794         kfree(h->blockFetchTable);
8795         h->blockFetchTable = NULL;
8796 clean1: /* rq, ioaccel */
8797         hpsa_free_reply_queues(h);
8798         hpsa_free_ioaccel1_cmd_and_bft(h);
8799         hpsa_free_ioaccel2_cmd_and_bft(h);
8800         return rc;
8801 }
8802
8803 static int is_accelerated_cmd(struct CommandList *c)
8804 {
8805         return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
8806 }
8807
8808 static void hpsa_drain_accel_commands(struct ctlr_info *h)
8809 {
8810         struct CommandList *c = NULL;
8811         int i, accel_cmds_out;
8812         int refcount;
8813
8814         do { /* wait for all outstanding ioaccel commands to drain out */
8815                 accel_cmds_out = 0;
8816                 for (i = 0; i < h->nr_cmds; i++) {
8817                         c = h->cmd_pool + i;
8818                         refcount = atomic_inc_return(&c->refcount);
8819                         if (refcount > 1) /* Command is allocated */
8820                                 accel_cmds_out += is_accelerated_cmd(c);
8821                         cmd_free(h, c);
8822                 }
8823                 if (accel_cmds_out <= 0)
8824                         break;
8825                 msleep(100);
8826         } while (1);
8827 }
8828
8829 /*
8830  *  This is it.  Register the PCI driver information for the cards we
8831  *  control; the OS will call our registered routines when it finds one.
8832  */
8833 static int __init hpsa_init(void)
8834 {
8835         return pci_register_driver(&hpsa_pci_driver);
8836 }
8837
8838 static void __exit hpsa_cleanup(void)
8839 {
8840         pci_unregister_driver(&hpsa_pci_driver);
8841 }
8842
8843 static void __attribute__((unused)) verify_offsets(void)
8844 {
8845 #define VERIFY_OFFSET(member, offset) \
8846         BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
8847
8848         VERIFY_OFFSET(structure_size, 0);
8849         VERIFY_OFFSET(volume_blk_size, 4);
8850         VERIFY_OFFSET(volume_blk_cnt, 8);
8851         VERIFY_OFFSET(phys_blk_shift, 16);
8852         VERIFY_OFFSET(parity_rotation_shift, 17);
8853         VERIFY_OFFSET(strip_size, 18);
8854         VERIFY_OFFSET(disk_starting_blk, 20);
8855         VERIFY_OFFSET(disk_blk_cnt, 28);
8856         VERIFY_OFFSET(data_disks_per_row, 36);
8857         VERIFY_OFFSET(metadata_disks_per_row, 38);
8858         VERIFY_OFFSET(row_cnt, 40);
8859         VERIFY_OFFSET(layout_map_count, 42);
8860         VERIFY_OFFSET(flags, 44);
8861         VERIFY_OFFSET(dekindex, 46);
8862         /* VERIFY_OFFSET(reserved, 48 */
8863         VERIFY_OFFSET(data, 64);
8864
8865 #undef VERIFY_OFFSET
8866
8867 #define VERIFY_OFFSET(member, offset) \
8868         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
8869
8870         VERIFY_OFFSET(IU_type, 0);
8871         VERIFY_OFFSET(direction, 1);
8872         VERIFY_OFFSET(reply_queue, 2);
8873         /* VERIFY_OFFSET(reserved1, 3);  */
8874         VERIFY_OFFSET(scsi_nexus, 4);
8875         VERIFY_OFFSET(Tag, 8);
8876         VERIFY_OFFSET(cdb, 16);
8877         VERIFY_OFFSET(cciss_lun, 32);
8878         VERIFY_OFFSET(data_len, 40);
8879         VERIFY_OFFSET(cmd_priority_task_attr, 44);
8880         VERIFY_OFFSET(sg_count, 45);
8881         /* VERIFY_OFFSET(reserved3 */
8882         VERIFY_OFFSET(err_ptr, 48);
8883         VERIFY_OFFSET(err_len, 56);
8884         /* VERIFY_OFFSET(reserved4  */
8885         VERIFY_OFFSET(sg, 64);
8886
8887 #undef VERIFY_OFFSET
8888
8889 #define VERIFY_OFFSET(member, offset) \
8890         BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
8891
8892         VERIFY_OFFSET(dev_handle, 0x00);
8893         VERIFY_OFFSET(reserved1, 0x02);
8894         VERIFY_OFFSET(function, 0x03);
8895         VERIFY_OFFSET(reserved2, 0x04);
8896         VERIFY_OFFSET(err_info, 0x0C);
8897         VERIFY_OFFSET(reserved3, 0x10);
8898         VERIFY_OFFSET(err_info_len, 0x12);
8899         VERIFY_OFFSET(reserved4, 0x13);
8900         VERIFY_OFFSET(sgl_offset, 0x14);
8901         VERIFY_OFFSET(reserved5, 0x15);
8902         VERIFY_OFFSET(transfer_len, 0x1C);
8903         VERIFY_OFFSET(reserved6, 0x20);
8904         VERIFY_OFFSET(io_flags, 0x24);
8905         VERIFY_OFFSET(reserved7, 0x26);
8906         VERIFY_OFFSET(LUN, 0x34);
8907         VERIFY_OFFSET(control, 0x3C);
8908         VERIFY_OFFSET(CDB, 0x40);
8909         VERIFY_OFFSET(reserved8, 0x50);
8910         VERIFY_OFFSET(host_context_flags, 0x60);
8911         VERIFY_OFFSET(timeout_sec, 0x62);
8912         VERIFY_OFFSET(ReplyQueue, 0x64);
8913         VERIFY_OFFSET(reserved9, 0x65);
8914         VERIFY_OFFSET(tag, 0x68);
8915         VERIFY_OFFSET(host_addr, 0x70);
8916         VERIFY_OFFSET(CISS_LUN, 0x78);
8917         VERIFY_OFFSET(SG, 0x78 + 8);
8918 #undef VERIFY_OFFSET
8919 }
8920
8921 module_init(hpsa_init);
8922 module_exit(hpsa_cleanup);