hpsa: generalize external arrays
drivers/scsi/hpsa.c (firefly-linux-kernel-4.4.55.git)
1 /*
2  *    Disk Array driver for HP Smart Array SAS controllers
3  *    Copyright 2014-2015 PMC-Sierra, Inc.
4  *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
5  *
6  *    This program is free software; you can redistribute it and/or modify
7  *    it under the terms of the GNU General Public License as published by
8  *    the Free Software Foundation; version 2 of the License.
9  *
10  *    This program is distributed in the hope that it will be useful,
11  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
14  *
15  *    Questions/Comments/Bugfixes to storagedev@pmcs.com
16  *
17  */
18
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/pci-aspm.h>
24 #include <linux/kernel.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/fs.h>
28 #include <linux/timer.h>
29 #include <linux/init.h>
30 #include <linux/spinlock.h>
31 #include <linux/compat.h>
32 #include <linux/blktrace_api.h>
33 #include <linux/uaccess.h>
34 #include <linux/io.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/completion.h>
37 #include <linux/moduleparam.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
42 #include <scsi/scsi_tcq.h>
43 #include <scsi/scsi_eh.h>
44 #include <scsi/scsi_dbg.h>
45 #include <linux/cciss_ioctl.h>
46 #include <linux/string.h>
47 #include <linux/bitmap.h>
48 #include <linux/atomic.h>
49 #include <linux/jiffies.h>
50 #include <linux/percpu-defs.h>
51 #include <linux/percpu.h>
52 #include <asm/unaligned.h>
53 #include <asm/div64.h>
54 #include "hpsa_cmd.h"
55 #include "hpsa.h"
56
57 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
58 #define HPSA_DRIVER_VERSION "3.4.10-0"
59 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
60 #define HPSA "hpsa"
61
62 /* How long to wait for CISS doorbell communication */
63 #define CLEAR_EVENT_WAIT_INTERVAL 20    /* ms for each msleep() call */
64 #define MODE_CHANGE_WAIT_INTERVAL 10    /* ms for each msleep() call */
65 #define MAX_CLEAR_EVENT_WAIT 30000      /* times 20 ms = 600 s */
66 #define MAX_MODE_CHANGE_WAIT 2000       /* times 10 ms = 20 s */
67 #define MAX_IOCTL_CONFIG_WAIT 1000
68
69 /* define how many times we will try a command because of bus resets */
70 #define MAX_CMD_RETRIES 3
71
72 /* Embedded module documentation macros - see modules.h */
73 MODULE_AUTHOR("Hewlett-Packard Company");
74 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
75         HPSA_DRIVER_VERSION);
76 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
77 MODULE_VERSION(HPSA_DRIVER_VERSION);
78 MODULE_LICENSE("GPL");
79
80 static int hpsa_allow_any;
81 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
82 MODULE_PARM_DESC(hpsa_allow_any,
83                 "Allow hpsa driver to access unknown HP Smart Array hardware");
84 static int hpsa_simple_mode;
85 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
86 MODULE_PARM_DESC(hpsa_simple_mode,
87         "Use 'simple mode' rather than 'performant mode'");
88
89 /* define the PCI info for the cards we can control */
90 static const struct pci_device_id hpsa_pci_device_id[] = {
91         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
92         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
93         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
94         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
95         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
96         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
97         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
98         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
99         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
100         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
101         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
102         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
103         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
104         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
105         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
106         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
107         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
108         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
109         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
110         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
111         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
112         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
113         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
114         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
115         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
116         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
117         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
118         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
119         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
120         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
121         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
122         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
123         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
124         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
125         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
126         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
127         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
128         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
129         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
130         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
131         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
132         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
133         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
134         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
135         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
136         {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
137         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
138         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
139         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
140         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
141         {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
142         {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
143                 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
144         {0,}
145 };
146
147 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
148
149 /*  board_id = Subsystem Device ID and Vendor ID (see the worked example after the table)
150  *  product = Marketing Name for the board
151  *  access = Address of the struct of function pointers
152  */
153 static struct board_type products[] = {
154         {0x3241103C, "Smart Array P212", &SA5_access},
155         {0x3243103C, "Smart Array P410", &SA5_access},
156         {0x3245103C, "Smart Array P410i", &SA5_access},
157         {0x3247103C, "Smart Array P411", &SA5_access},
158         {0x3249103C, "Smart Array P812", &SA5_access},
159         {0x324A103C, "Smart Array P712m", &SA5_access},
160         {0x324B103C, "Smart Array P711m", &SA5_access},
161         {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
162         {0x3350103C, "Smart Array P222", &SA5_access},
163         {0x3351103C, "Smart Array P420", &SA5_access},
164         {0x3352103C, "Smart Array P421", &SA5_access},
165         {0x3353103C, "Smart Array P822", &SA5_access},
166         {0x3354103C, "Smart Array P420i", &SA5_access},
167         {0x3355103C, "Smart Array P220i", &SA5_access},
168         {0x3356103C, "Smart Array P721m", &SA5_access},
169         {0x1921103C, "Smart Array P830i", &SA5_access},
170         {0x1922103C, "Smart Array P430", &SA5_access},
171         {0x1923103C, "Smart Array P431", &SA5_access},
172         {0x1924103C, "Smart Array P830", &SA5_access},
173         {0x1926103C, "Smart Array P731m", &SA5_access},
174         {0x1928103C, "Smart Array P230i", &SA5_access},
175         {0x1929103C, "Smart Array P530", &SA5_access},
176         {0x21BD103C, "Smart Array P244br", &SA5_access},
177         {0x21BE103C, "Smart Array P741m", &SA5_access},
178         {0x21BF103C, "Smart HBA H240ar", &SA5_access},
179         {0x21C0103C, "Smart Array P440ar", &SA5_access},
180         {0x21C1103C, "Smart Array P840ar", &SA5_access},
181         {0x21C2103C, "Smart Array P440", &SA5_access},
182         {0x21C3103C, "Smart Array P441", &SA5_access},
183         {0x21C4103C, "Smart Array", &SA5_access},
184         {0x21C5103C, "Smart Array P841", &SA5_access},
185         {0x21C6103C, "Smart HBA H244br", &SA5_access},
186         {0x21C7103C, "Smart HBA H240", &SA5_access},
187         {0x21C8103C, "Smart HBA H241", &SA5_access},
188         {0x21C9103C, "Smart Array", &SA5_access},
189         {0x21CA103C, "Smart Array P246br", &SA5_access},
190         {0x21CB103C, "Smart Array P840", &SA5_access},
191         {0x21CC103C, "Smart Array", &SA5_access},
192         {0x21CD103C, "Smart Array", &SA5_access},
193         {0x21CE103C, "Smart HBA", &SA5_access},
194         {0x05809005, "SmartHBA-SA", &SA5_access},
195         {0x05819005, "SmartHBA-SA 8i", &SA5_access},
196         {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
197         {0x05839005, "SmartHBA-SA 8e", &SA5_access},
198         {0x05849005, "SmartHBA-SA 16i", &SA5_access},
199         {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
200         {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
201         {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
202         {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
203         {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
204         {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
205         {0xFFFF103C, "Unknown Smart Array", &SA5_access},
206 };
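/*
 * For illustration (a sketch inferred from the comment above, not from code
 * shown in this excerpt): each key in products[] packs the PCI subsystem
 * device ID into the upper 16 bits and the subsystem vendor ID into the
 * lower 16 bits, e.g.
 *
 *     subsystem device 0x3241, subsystem vendor 0x103C
 *         board_id = (0x3241 << 16) | 0x103C = 0x3241103C
 *
 * which is the "Smart Array P212" entry above and lines up with the
 * {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241} row in
 * hpsa_pci_device_id[].
 */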
207
208 #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
209 static const struct scsi_cmnd hpsa_cmd_busy;
210 #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
211 static const struct scsi_cmnd hpsa_cmd_idle;
212 static int number_of_controllers;
213
214 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
215 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
216 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
217
218 #ifdef CONFIG_COMPAT
219 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
220         void __user *arg);
221 #endif
222
223 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
224 static struct CommandList *cmd_alloc(struct ctlr_info *h);
225 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
226 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
227                                             struct scsi_cmnd *scmd);
228 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
229         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
230         int cmd_type);
231 static void hpsa_free_cmd_pool(struct ctlr_info *h);
232 #define VPD_PAGE (1 << 8)
233 #define HPSA_SIMPLE_ERROR_BITS 0x03
234
235 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
236 static void hpsa_scan_start(struct Scsi_Host *);
237 static int hpsa_scan_finished(struct Scsi_Host *sh,
238         unsigned long elapsed_time);
239 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
240
241 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
242 static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
243 static int hpsa_slave_alloc(struct scsi_device *sdev);
244 static int hpsa_slave_configure(struct scsi_device *sdev);
245 static void hpsa_slave_destroy(struct scsi_device *sdev);
246
247 static void hpsa_update_scsi_devices(struct ctlr_info *h);
248 static int check_for_unit_attention(struct ctlr_info *h,
249         struct CommandList *c);
250 static void check_ioctl_unit_attention(struct ctlr_info *h,
251         struct CommandList *c);
252 /* performant mode helper functions */
253 static void calc_bucket_map(int *bucket, int num_buckets,
254         int nsgs, int min_blocks, u32 *bucket_map);
255 static void hpsa_free_performant_mode(struct ctlr_info *h);
256 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
257 static inline u32 next_command(struct ctlr_info *h, u8 q);
258 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
259                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
260                                u64 *cfg_offset);
261 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
262                                     unsigned long *memory_bar);
263 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
264 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
265                                      int wait_for_ready);
266 static inline void finish_cmd(struct CommandList *c);
267 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
268 #define BOARD_NOT_READY 0
269 #define BOARD_READY 1
270 static void hpsa_drain_accel_commands(struct ctlr_info *h);
271 static void hpsa_flush_cache(struct ctlr_info *h);
272 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
273         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
274         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
275 static void hpsa_command_resubmit_worker(struct work_struct *work);
276 static u32 lockup_detected(struct ctlr_info *h);
277 static int detect_controller_lockup(struct ctlr_info *h);
278
279 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
280 {
281         unsigned long *priv = shost_priv(sdev->host);
282         return (struct ctlr_info *) *priv;
283 }
284
285 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
286 {
287         unsigned long *priv = shost_priv(sh);
288         return (struct ctlr_info *) *priv;
289 }
290
291 static inline bool hpsa_is_cmd_idle(struct CommandList *c)
292 {
293         return c->scsi_cmd == SCSI_CMD_IDLE;
294 }
295
296 static inline bool hpsa_is_pending_event(struct CommandList *c)
297 {
298         return c->abort_pending || c->reset_pending;
299 }
300
301 /* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
302 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
303                         u8 *sense_key, u8 *asc, u8 *ascq)
304 {
305         struct scsi_sense_hdr sshdr;
306         bool rc;
307
308         *sense_key = -1;
309         *asc = -1;
310         *ascq = -1;
311
312         if (sense_data_len < 1)
313                 return;
314
315         rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
316         if (rc) {
317                 *sense_key = sshdr.sense_key;
318                 *asc = sshdr.asc;
319                 *ascq = sshdr.ascq;
320         }
321 }
322
323 static int check_for_unit_attention(struct ctlr_info *h,
324         struct CommandList *c)
325 {
326         u8 sense_key, asc, ascq;
327         int sense_len;
328
329         if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
330                 sense_len = sizeof(c->err_info->SenseInfo);
331         else
332                 sense_len = c->err_info->SenseLen;
333
334         decode_sense_data(c->err_info->SenseInfo, sense_len,
335                                 &sense_key, &asc, &ascq);
336         if (sense_key != UNIT_ATTENTION || asc == 0xff)
337                 return 0;
338
339         switch (asc) {
340         case STATE_CHANGED:
341                 dev_warn(&h->pdev->dev,
342                         "%s: a state change detected, command retried\n",
343                         h->devname);
344                 break;
345         case LUN_FAILED:
346                 dev_warn(&h->pdev->dev,
347                         "%s: LUN failure detected\n", h->devname);
348                 break;
349         case REPORT_LUNS_CHANGED:
350                 dev_warn(&h->pdev->dev,
351                         "%s: report LUN data changed\n", h->devname);
352         /*
353          * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
354          * target (array) devices.
355          */
356                 break;
357         case POWER_OR_RESET:
358                 dev_warn(&h->pdev->dev,
359                         "%s: a power on or device reset detected\n",
360                         h->devname);
361                 break;
362         case UNIT_ATTENTION_CLEARED:
363                 dev_warn(&h->pdev->dev,
364                         "%s: unit attention cleared by another initiator\n",
365                         h->devname);
366                 break;
367         default:
368                 dev_warn(&h->pdev->dev,
369                         "%s: unknown unit attention detected\n",
370                         h->devname);
371                 break;
372         }
373         return 1;
374 }
375
376 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
377 {
378         if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
379                 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
380                  c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
381                 return 0;
382         dev_warn(&h->pdev->dev, HPSA "device busy");
383         return 1;
384 }
385
386 static u32 lockup_detected(struct ctlr_info *h);
387 static ssize_t host_show_lockup_detected(struct device *dev,
388                 struct device_attribute *attr, char *buf)
389 {
390         int ld;
391         struct ctlr_info *h;
392         struct Scsi_Host *shost = class_to_shost(dev);
393
394         h = shost_to_hba(shost);
395         ld = lockup_detected(h);
396
397         return sprintf(buf, "ld=%d\n", ld);
398 }
399
400 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
401                                          struct device_attribute *attr,
402                                          const char *buf, size_t count)
403 {
404         int status, len;
405         struct ctlr_info *h;
406         struct Scsi_Host *shost = class_to_shost(dev);
407         char tmpbuf[10];
408
409         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
410                 return -EACCES;
411         len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
412         strncpy(tmpbuf, buf, len);
413         tmpbuf[len] = '\0';
414         if (sscanf(tmpbuf, "%d", &status) != 1)
415                 return -EINVAL;
416         h = shost_to_hba(shost);
417         h->acciopath_status = !!status;
418         dev_warn(&h->pdev->dev,
419                 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
420                 h->acciopath_status ? "enabled" : "disabled");
421         return count;
422 }
423
424 static ssize_t host_store_raid_offload_debug(struct device *dev,
425                                          struct device_attribute *attr,
426                                          const char *buf, size_t count)
427 {
428         int debug_level, len;
429         struct ctlr_info *h;
430         struct Scsi_Host *shost = class_to_shost(dev);
431         char tmpbuf[10];
432
433         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
434                 return -EACCES;
435         len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
436         strncpy(tmpbuf, buf, len);
437         tmpbuf[len] = '\0';
438         if (sscanf(tmpbuf, "%d", &debug_level) != 1)
439                 return -EINVAL;
440         if (debug_level < 0)
441                 debug_level = 0;
442         h = shost_to_hba(shost);
443         h->raid_offload_debug = debug_level;
444         dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
445                 h->raid_offload_debug);
446         return count;
447 }
448
449 static ssize_t host_store_rescan(struct device *dev,
450                                  struct device_attribute *attr,
451                                  const char *buf, size_t count)
452 {
453         struct ctlr_info *h;
454         struct Scsi_Host *shost = class_to_shost(dev);
455         h = shost_to_hba(shost);
456         hpsa_scan_start(h->scsi_host);
457         return count;
458 }
459
460 static ssize_t host_show_firmware_revision(struct device *dev,
461              struct device_attribute *attr, char *buf)
462 {
463         struct ctlr_info *h;
464         struct Scsi_Host *shost = class_to_shost(dev);
465         unsigned char *fwrev;
466
467         h = shost_to_hba(shost);
468         if (!h->hba_inquiry_data)
469                 return 0;
470         fwrev = &h->hba_inquiry_data[32];
471         return snprintf(buf, 20, "%c%c%c%c\n",
472                 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
473 }
474
475 static ssize_t host_show_commands_outstanding(struct device *dev,
476              struct device_attribute *attr, char *buf)
477 {
478         struct Scsi_Host *shost = class_to_shost(dev);
479         struct ctlr_info *h = shost_to_hba(shost);
480
481         return snprintf(buf, 20, "%d\n",
482                         atomic_read(&h->commands_outstanding));
483 }
484
485 static ssize_t host_show_transport_mode(struct device *dev,
486         struct device_attribute *attr, char *buf)
487 {
488         struct ctlr_info *h;
489         struct Scsi_Host *shost = class_to_shost(dev);
490
491         h = shost_to_hba(shost);
492         return snprintf(buf, 20, "%s\n",
493                 h->transMethod & CFGTBL_Trans_Performant ?
494                         "performant" : "simple");
495 }
496
497 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
498         struct device_attribute *attr, char *buf)
499 {
500         struct ctlr_info *h;
501         struct Scsi_Host *shost = class_to_shost(dev);
502
503         h = shost_to_hba(shost);
504         return snprintf(buf, 30, "HP SSD Smart Path %s\n",
505                 (h->acciopath_status == 1) ?  "enabled" : "disabled");
506 }
507
508 /* List of controllers which cannot be hard reset on kexec with reset_devices */
509 static u32 unresettable_controller[] = {
510         0x324a103C, /* Smart Array P712m */
511         0x324b103C, /* Smart Array P711m */
512         0x3223103C, /* Smart Array P800 */
513         0x3234103C, /* Smart Array P400 */
514         0x3235103C, /* Smart Array P400i */
515         0x3211103C, /* Smart Array E200i */
516         0x3212103C, /* Smart Array E200 */
517         0x3213103C, /* Smart Array E200i */
518         0x3214103C, /* Smart Array E200i */
519         0x3215103C, /* Smart Array E200i */
520         0x3237103C, /* Smart Array E500 */
521         0x323D103C, /* Smart Array P700m */
522         0x40800E11, /* Smart Array 5i */
523         0x409C0E11, /* Smart Array 6400 */
524         0x409D0E11, /* Smart Array 6400 EM */
525         0x40700E11, /* Smart Array 5300 */
526         0x40820E11, /* Smart Array 532 */
527         0x40830E11, /* Smart Array 5312 */
528         0x409A0E11, /* Smart Array 641 */
529         0x409B0E11, /* Smart Array 642 */
530         0x40910E11, /* Smart Array 6i */
531 };
532
533 /* List of controllers which cannot even be soft reset */
534 static u32 soft_unresettable_controller[] = {
535         0x40800E11, /* Smart Array 5i */
536         0x40700E11, /* Smart Array 5300 */
537         0x40820E11, /* Smart Array 532 */
538         0x40830E11, /* Smart Array 5312 */
539         0x409A0E11, /* Smart Array 641 */
540         0x409B0E11, /* Smart Array 642 */
541         0x40910E11, /* Smart Array 6i */
542         /* Exclude 640x boards.  These are two pci devices in one slot
543          * which share a battery backed cache module.  One controls the
544          * cache, the other accesses the cache through the one that controls
545          * it.  If we reset the one controlling the cache, the other will
546          * likely not be happy.  Just forbid resetting this conjoined mess.
547          * The 640x isn't really supported by hpsa anyway.
548          */
549         0x409C0E11, /* Smart Array 6400 */
550         0x409D0E11, /* Smart Array 6400 EM */
551 };
552
553 static u32 needs_abort_tags_swizzled[] = {
554         0x323D103C, /* Smart Array P700m */
555         0x324a103C, /* Smart Array P712m */
556         0x324b103C, /* Smart Array P711m */
557 };
558
559 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
560 {
561         int i;
562
563         for (i = 0; i < nelems; i++)
564                 if (a[i] == board_id)
565                         return 1;
566         return 0;
567 }
568
569 static int ctlr_is_hard_resettable(u32 board_id)
570 {
571         return !board_id_in_array(unresettable_controller,
572                         ARRAY_SIZE(unresettable_controller), board_id);
573 }
574
575 static int ctlr_is_soft_resettable(u32 board_id)
576 {
577         return !board_id_in_array(soft_unresettable_controller,
578                         ARRAY_SIZE(soft_unresettable_controller), board_id);
579 }
580
581 static int ctlr_is_resettable(u32 board_id)
582 {
583         return ctlr_is_hard_resettable(board_id) ||
584                 ctlr_is_soft_resettable(board_id);
585 }
586
587 static int ctlr_needs_abort_tags_swizzled(u32 board_id)
588 {
589         return board_id_in_array(needs_abort_tags_swizzled,
590                         ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
591 }
592
593 static ssize_t host_show_resettable(struct device *dev,
594         struct device_attribute *attr, char *buf)
595 {
596         struct ctlr_info *h;
597         struct Scsi_Host *shost = class_to_shost(dev);
598
599         h = shost_to_hba(shost);
600         return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
601 }
602
603 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
604 {
605         return (scsi3addr[3] & 0xC0) == 0x40;
606 }
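/*
 * The check above reads the two high-order bits of byte 3 of the 8-byte
 * CISS address: a value of 0x40 (binary 01) in those bits is the
 * logical-volume addressing mode used for logical drives, so any other
 * value is not a logical-device address.
 */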
607
608 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
609         "1(+0)ADM", "UNKNOWN"
610 };
611 #define HPSA_RAID_0     0
612 #define HPSA_RAID_4     1
613 #define HPSA_RAID_1     2       /* also used for RAID 10 */
614 #define HPSA_RAID_5     3       /* also used for RAID 50 */
615 #define HPSA_RAID_51    4
616 #define HPSA_RAID_6     5       /* also used for RAID 60 */
617 #define HPSA_RAID_ADM   6       /* also used for RAID 1+0 ADM */
618 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
619
620 static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
621 {
622         return !device->physical_device;
623 }
624
625 static ssize_t raid_level_show(struct device *dev,
626              struct device_attribute *attr, char *buf)
627 {
628         ssize_t l = 0;
629         unsigned char rlevel;
630         struct ctlr_info *h;
631         struct scsi_device *sdev;
632         struct hpsa_scsi_dev_t *hdev;
633         unsigned long flags;
634
635         sdev = to_scsi_device(dev);
636         h = sdev_to_hba(sdev);
637         spin_lock_irqsave(&h->lock, flags);
638         hdev = sdev->hostdata;
639         if (!hdev) {
640                 spin_unlock_irqrestore(&h->lock, flags);
641                 return -ENODEV;
642         }
643
644         /* Is this even a logical drive? */
645         if (!is_logical_device(hdev)) {
646                 spin_unlock_irqrestore(&h->lock, flags);
647                 l = snprintf(buf, PAGE_SIZE, "N/A\n");
648                 return l;
649         }
650
651         rlevel = hdev->raid_level;
652         spin_unlock_irqrestore(&h->lock, flags);
653         if (rlevel > RAID_UNKNOWN)
654                 rlevel = RAID_UNKNOWN;
655         l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
656         return l;
657 }
658
659 static ssize_t lunid_show(struct device *dev,
660              struct device_attribute *attr, char *buf)
661 {
662         struct ctlr_info *h;
663         struct scsi_device *sdev;
664         struct hpsa_scsi_dev_t *hdev;
665         unsigned long flags;
666         unsigned char lunid[8];
667
668         sdev = to_scsi_device(dev);
669         h = sdev_to_hba(sdev);
670         spin_lock_irqsave(&h->lock, flags);
671         hdev = sdev->hostdata;
672         if (!hdev) {
673                 spin_unlock_irqrestore(&h->lock, flags);
674                 return -ENODEV;
675         }
676         memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
677         spin_unlock_irqrestore(&h->lock, flags);
678         return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
679                 lunid[0], lunid[1], lunid[2], lunid[3],
680                 lunid[4], lunid[5], lunid[6], lunid[7]);
681 }
682
683 static ssize_t unique_id_show(struct device *dev,
684              struct device_attribute *attr, char *buf)
685 {
686         struct ctlr_info *h;
687         struct scsi_device *sdev;
688         struct hpsa_scsi_dev_t *hdev;
689         unsigned long flags;
690         unsigned char sn[16];
691
692         sdev = to_scsi_device(dev);
693         h = sdev_to_hba(sdev);
694         spin_lock_irqsave(&h->lock, flags);
695         hdev = sdev->hostdata;
696         if (!hdev) {
697                 spin_unlock_irqrestore(&h->lock, flags);
698                 return -ENODEV;
699         }
700         memcpy(sn, hdev->device_id, sizeof(sn));
701         spin_unlock_irqrestore(&h->lock, flags);
702         return snprintf(buf, 16 * 2 + 2,
703                         "%02X%02X%02X%02X%02X%02X%02X%02X"
704                         "%02X%02X%02X%02X%02X%02X%02X%02X\n",
705                         sn[0], sn[1], sn[2], sn[3],
706                         sn[4], sn[5], sn[6], sn[7],
707                         sn[8], sn[9], sn[10], sn[11],
708                         sn[12], sn[13], sn[14], sn[15]);
709 }
710
711 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
712              struct device_attribute *attr, char *buf)
713 {
714         struct ctlr_info *h;
715         struct scsi_device *sdev;
716         struct hpsa_scsi_dev_t *hdev;
717         unsigned long flags;
718         int offload_enabled;
719
720         sdev = to_scsi_device(dev);
721         h = sdev_to_hba(sdev);
722         spin_lock_irqsave(&h->lock, flags);
723         hdev = sdev->hostdata;
724         if (!hdev) {
725                 spin_unlock_irqrestore(&h->lock, flags);
726                 return -ENODEV;
727         }
728         offload_enabled = hdev->offload_enabled;
729         spin_unlock_irqrestore(&h->lock, flags);
730         return snprintf(buf, 20, "%d\n", offload_enabled);
731 }
732
733 #define MAX_PATHS 8
734 #define PATH_STRING_LEN 50
735
736 static ssize_t path_info_show(struct device *dev,
737              struct device_attribute *attr, char *buf)
738 {
739         struct ctlr_info *h;
740         struct scsi_device *sdev;
741         struct hpsa_scsi_dev_t *hdev;
742         unsigned long flags;
743         int i;
744         int output_len = 0;
745         u8 box;
746         u8 bay;
747         u8 path_map_index = 0;
748         char *active;
749         unsigned char phys_connector[2];
750         unsigned char path[MAX_PATHS][PATH_STRING_LEN];
751
752         memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
753         sdev = to_scsi_device(dev);
754         h = sdev_to_hba(sdev);
755         spin_lock_irqsave(&h->devlock, flags);
756         hdev = sdev->hostdata;
757         if (!hdev) {
758                 spin_unlock_irqrestore(&h->devlock, flags);
759                 return -ENODEV;
760         }
761
762         bay = hdev->bay;
763         for (i = 0; i < MAX_PATHS; i++) {
764                 path_map_index = 1<<i;
765                 if (i == hdev->active_path_index)
766                         active = "Active";
767                 else if (hdev->path_map & path_map_index)
768                         active = "Inactive";
769                 else
770                         continue;
771
772                 output_len = snprintf(path[i],
773                                 PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
774                                 h->scsi_host->host_no,
775                                 hdev->bus, hdev->target, hdev->lun,
776                                 scsi_device_type(hdev->devtype));
777
778                 if (hdev->external ||
779                         hdev->devtype == TYPE_RAID ||
780                         is_logical_device(hdev)) {
781                         output_len += snprintf(path[i] + output_len,
782                                                 PATH_STRING_LEN, "%s\n",
783                                                 active);
784                         continue;
785                 }
786
787                 box = hdev->box[i];
788                 memcpy(&phys_connector, &hdev->phys_connector[i],
789                         sizeof(phys_connector));
790                 if (phys_connector[0] < '0')
791                         phys_connector[0] = '0';
792                 if (phys_connector[1] < '0')
793                         phys_connector[1] = '0';
794                 if (hdev->phys_connector[i] > 0)
795                         output_len += snprintf(path[i] + output_len,
796                                 PATH_STRING_LEN,
797                                 "PORT: %.2s ",
798                                 phys_connector);
799                 if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
800                         if (box == 0 || box == 0xFF) {
801                                 output_len += snprintf(path[i] + output_len,
802                                         PATH_STRING_LEN,
803                                         "BAY: %hhu %s\n",
804                                         bay, active);
805                         } else {
806                                 output_len += snprintf(path[i] + output_len,
807                                         PATH_STRING_LEN,
808                                         "BOX: %hhu BAY: %hhu %s\n",
809                                         box, bay, active);
810                         }
811                 } else if (box != 0 && box != 0xFF) {
812                         output_len += snprintf(path[i] + output_len,
813                                 PATH_STRING_LEN, "BOX: %hhu %s\n",
814                                 box, active);
815                 } else
816                         output_len += snprintf(path[i] + output_len,
817                                 PATH_STRING_LEN, "%s\n", active);
818         }
819
820         spin_unlock_irqrestore(&h->devlock, flags);
821         return snprintf(buf, output_len+1, "%s%s%s%s%s%s%s%s",
822                 path[0], path[1], path[2], path[3],
823                 path[4], path[5], path[6], path[7]);
824 }
825
826 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
827 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
828 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
829 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
830 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
831                         host_show_hp_ssd_smart_path_enabled, NULL);
832 static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
833 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
834                 host_show_hp_ssd_smart_path_status,
835                 host_store_hp_ssd_smart_path_status);
836 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
837                         host_store_raid_offload_debug);
838 static DEVICE_ATTR(firmware_revision, S_IRUGO,
839         host_show_firmware_revision, NULL);
840 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
841         host_show_commands_outstanding, NULL);
842 static DEVICE_ATTR(transport_mode, S_IRUGO,
843         host_show_transport_mode, NULL);
844 static DEVICE_ATTR(resettable, S_IRUGO,
845         host_show_resettable, NULL);
846 static DEVICE_ATTR(lockup_detected, S_IRUGO,
847         host_show_lockup_detected, NULL);
848
849 static struct device_attribute *hpsa_sdev_attrs[] = {
850         &dev_attr_raid_level,
851         &dev_attr_lunid,
852         &dev_attr_unique_id,
853         &dev_attr_hp_ssd_smart_path_enabled,
854         &dev_attr_path_info,
855         &dev_attr_lockup_detected,
856         NULL,
857 };
858
859 static struct device_attribute *hpsa_shost_attrs[] = {
860         &dev_attr_rescan,
861         &dev_attr_firmware_revision,
862         &dev_attr_commands_outstanding,
863         &dev_attr_transport_mode,
864         &dev_attr_resettable,
865         &dev_attr_hp_ssd_smart_path_status,
866         &dev_attr_raid_offload_debug,
867         NULL,
868 };
869
870 #define HPSA_NRESERVED_CMDS     (HPSA_CMDS_RESERVED_FOR_ABORTS + \
871                 HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
872
873 static struct scsi_host_template hpsa_driver_template = {
874         .module                 = THIS_MODULE,
875         .name                   = HPSA,
876         .proc_name              = HPSA,
877         .queuecommand           = hpsa_scsi_queue_command,
878         .scan_start             = hpsa_scan_start,
879         .scan_finished          = hpsa_scan_finished,
880         .change_queue_depth     = hpsa_change_queue_depth,
881         .this_id                = -1,
882         .use_clustering         = ENABLE_CLUSTERING,
883         .eh_abort_handler       = hpsa_eh_abort_handler,
884         .eh_device_reset_handler = hpsa_eh_device_reset_handler,
885         .ioctl                  = hpsa_ioctl,
886         .slave_alloc            = hpsa_slave_alloc,
887         .slave_configure        = hpsa_slave_configure,
888         .slave_destroy          = hpsa_slave_destroy,
889 #ifdef CONFIG_COMPAT
890         .compat_ioctl           = hpsa_compat_ioctl,
891 #endif
892         .sdev_attrs = hpsa_sdev_attrs,
893         .shost_attrs = hpsa_shost_attrs,
894         .max_sectors = 8192,
895         .no_write_same = 1,
896 };
897
898 static inline u32 next_command(struct ctlr_info *h, u8 q)
899 {
900         u32 a;
901         struct reply_queue_buffer *rq = &h->reply_queue[q];
902
903         if (h->transMethod & CFGTBL_Trans_io_accel1)
904                 return h->access.command_completed(h, q);
905
906         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
907                 return h->access.command_completed(h, q);
908
909         if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
910                 a = rq->head[rq->current_entry];
911                 rq->current_entry++;
912                 atomic_dec(&h->commands_outstanding);
913         } else {
914                 a = FIFO_EMPTY;
915         }
916         /* Check for wraparound */
917         if (rq->current_entry == h->max_commands) {
918                 rq->current_entry = 0;
919                 rq->wraparound ^= 1;
920         }
921         return a;
922 }
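/*
 * A note on the ring handling above: bit 0 of each reply queue entry is
 * used as a cycle flag, and rq->wraparound holds the flag value expected
 * for the current pass through the ring.  When the low bit of
 * head[current_entry] matches, the entry is a fresh completion and is
 * consumed; otherwise FIFO_EMPTY is returned.  Once current_entry reaches
 * h->max_commands it wraps back to 0 and the expected flag is toggled, so
 * entries left over from the previous pass are not mistaken for new
 * completions.
 */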
923
924 /*
925  * There are some special bits in the bus address of the
926  * command that we have to set for the controller to know
927  * how to process the command:
928  *
929  * Normal performant mode:
930  * bit 0: 1 means performant mode, 0 means simple mode.
931  * bits 1-3 = block fetch table entry
932  * bits 4-6 = command type (== 0)
933  *
934  * ioaccel1 mode:
935  * bit 0 = "performant mode" bit.
936  * bits 1-3 = block fetch table entry
937  * bits 4-6 = command type (== 110)
938  * (command type is needed because ioaccel1 mode
939  * commands are submitted through the same register as normal
940  * mode commands, so this is how the controller knows whether
941  * the command is normal mode or ioaccel1 mode.)
942  *
943  * ioaccel2 mode:
944  * bit 0 = "performant mode" bit.
945  * bits 1-4 = block fetch table entry (note extra bit)
946  * bits 4-6 = not needed, because ioaccel2 mode has
947  * a separate special register for submitting commands.
948  */
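/*
 * Worked example of the layout above, for normal performant mode: a
 * command whose SG count selects block fetch table entry 3 gets
 * busaddr |= 1 | (3 << 1), i.e. a low nibble of 0x7 (performant-mode bit
 * plus fetch entry) with the command-type bits 4-6 left at zero.  The
 * ioaccel1 path below builds the same value but also ORs in
 * IOACCEL1_BUSADDR_CMDTYPE to set bits 4-6 to the ioaccel1 command type.
 */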
949
950 /*
951  * set_performant_mode: Modify the tag for cciss performant
952  * set bit 0 for pull model, bits 3-1 for block fetch
953  * register number
954  */
955 #define DEFAULT_REPLY_QUEUE (-1)
956 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
957                                         int reply_queue)
958 {
959         if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
960                 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
961                 if (unlikely(!h->msix_vector))
962                         return;
963                 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
964                         c->Header.ReplyQueue =
965                                 raw_smp_processor_id() % h->nreply_queues;
966                 else
967                         c->Header.ReplyQueue = reply_queue % h->nreply_queues;
968         }
969 }
970
971 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
972                                                 struct CommandList *c,
973                                                 int reply_queue)
974 {
975         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
976
977         /*
978          * Tell the controller to post the reply to the queue for this
979          * processor.  This seems to give the best I/O throughput.
980          */
981         if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
982                 cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
983         else
984                 cp->ReplyQueue = reply_queue % h->nreply_queues;
985         /*
986          * Set the bits in the address sent down to include:
987          *  - performant mode bit (bit 0)
988          *  - pull count (bits 1-3)
989          *  - command type (bits 4-6)
990          */
991         c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
992                                         IOACCEL1_BUSADDR_CMDTYPE;
993 }
994
995 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
996                                                 struct CommandList *c,
997                                                 int reply_queue)
998 {
999         struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1000                 &h->ioaccel2_cmd_pool[c->cmdindex];
1001
1002         /* Tell the controller to post the reply to the queue for this
1003          * processor.  This seems to give the best I/O throughput.
1004          */
1005         if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1006                 cp->reply_queue = smp_processor_id() % h->nreply_queues;
1007         else
1008                 cp->reply_queue = reply_queue % h->nreply_queues;
1009         /* Set the bits in the address sent down to include:
1010          *  - performant mode bit not used in ioaccel mode 2
1011          *  - pull count (bits 0-3)
1012          *  - command type isn't needed for ioaccel2
1013          */
1014         c->busaddr |= h->ioaccel2_blockFetchTable[0];
1015 }
1016
1017 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1018                                                 struct CommandList *c,
1019                                                 int reply_queue)
1020 {
1021         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1022
1023         /*
1024          * Tell the controller to post the reply to the queue for this
1025          * processor.  This seems to give the best I/O throughput.
1026          */
1027         if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1028                 cp->reply_queue = smp_processor_id() % h->nreply_queues;
1029         else
1030                 cp->reply_queue = reply_queue % h->nreply_queues;
1031         /*
1032          * Set the bits in the address sent down to include:
1033          *  - performant mode bit not used in ioaccel mode 2
1034          *  - pull count (bits 0-3)
1035          *  - command type isn't needed for ioaccel2
1036          */
1037         c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1038 }
1039
1040 static int is_firmware_flash_cmd(u8 *cdb)
1041 {
1042         return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1043 }
1044
1045 /*
1046  * During firmware flash, the heartbeat register may not update as frequently
1047  * as it should.  So we dial down lockup detection during firmware flash, and
1048  * dial it back up when firmware flash completes.
1049  */
1050 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1051 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1052 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1053                 struct CommandList *c)
1054 {
1055         if (!is_firmware_flash_cmd(c->Request.CDB))
1056                 return;
1057         atomic_inc(&h->firmware_flash_in_progress);
1058         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1059 }
1060
1061 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1062                 struct CommandList *c)
1063 {
1064         if (is_firmware_flash_cmd(c->Request.CDB) &&
1065                 atomic_dec_and_test(&h->firmware_flash_in_progress))
1066                 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1067 }
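/*
 * Net effect of the two helpers above: while at least one firmware flash
 * command is in flight, the lockup detector samples the heartbeat only
 * every 240 seconds (240 * HZ) instead of every 30 seconds (30 * HZ); the
 * atomic counter ensures the normal interval is restored only when the
 * last outstanding flash command completes.
 */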
1068
1069 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1070         struct CommandList *c, int reply_queue)
1071 {
1072         dial_down_lockup_detection_during_fw_flash(h, c);
1073         atomic_inc(&h->commands_outstanding);
1074         switch (c->cmd_type) {
1075         case CMD_IOACCEL1:
1076                 set_ioaccel1_performant_mode(h, c, reply_queue);
1077                 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1078                 break;
1079         case CMD_IOACCEL2:
1080                 set_ioaccel2_performant_mode(h, c, reply_queue);
1081                 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1082                 break;
1083         case IOACCEL2_TMF:
1084                 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1085                 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1086                 break;
1087         default:
1088                 set_performant_mode(h, c, reply_queue);
1089                 h->access.submit_command(h, c);
1090         }
1091 }
1092
1093 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1094 {
1095         if (unlikely(hpsa_is_pending_event(c)))
1096                 return finish_cmd(c);
1097
1098         __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1099 }
1100
1101 static inline int is_hba_lunid(unsigned char scsi3addr[])
1102 {
1103         return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1104 }
1105
1106 static inline int is_scsi_rev_5(struct ctlr_info *h)
1107 {
1108         if (!h->hba_inquiry_data)
1109                 return 0;
1110         if ((h->hba_inquiry_data[2] & 0x07) == 5)
1111                 return 1;
1112         return 0;
1113 }
1114
1115 static int hpsa_find_target_lun(struct ctlr_info *h,
1116         unsigned char scsi3addr[], int bus, int *target, int *lun)
1117 {
1118         /* finds an unused bus, target, lun for a new physical device
1119          * assumes h->devlock is held
1120          */
1121         int i, found = 0;
1122         DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1123
1124         bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1125
1126         for (i = 0; i < h->ndevices; i++) {
1127                 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1128                         __set_bit(h->dev[i]->target, lun_taken);
1129         }
1130
1131         i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1132         if (i < HPSA_MAX_DEVICES) {
1133                 /* *bus = 1; */
1134                 *target = i;
1135                 *lun = 0;
1136                 found = 1;
1137         }
1138         return !found;
1139 }
1140
1141 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1142         struct hpsa_scsi_dev_t *dev, char *description)
1143 {
1144         if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1145                 return;
1146
1147         dev_printk(level, &h->pdev->dev,
1148                         "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
1149                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1150                         description,
1151                         scsi_device_type(dev->devtype),
1152                         dev->vendor,
1153                         dev->model,
1154                         dev->raid_level > RAID_UNKNOWN ?
1155                                 "RAID-?" : raid_label[dev->raid_level],
1156                         dev->offload_config ? '+' : '-',
1157                         dev->offload_enabled ? '+' : '-',
1158                         dev->expose_device);
1159 }
1160
1161 /* Add an entry into h->dev[] array. */
1162 static int hpsa_scsi_add_entry(struct ctlr_info *h,
1163                 struct hpsa_scsi_dev_t *device,
1164                 struct hpsa_scsi_dev_t *added[], int *nadded)
1165 {
1166         /* assumes h->devlock is held */
1167         int n = h->ndevices;
1168         int i;
1169         unsigned char addr1[8], addr2[8];
1170         struct hpsa_scsi_dev_t *sd;
1171
1172         if (n >= HPSA_MAX_DEVICES) {
1173                 dev_err(&h->pdev->dev, "too many devices, some will be "
1174                         "inaccessible.\n");
1175                 return -1;
1176         }
1177
1178         /* physical devices do not have lun or target assigned until now. */
1179         if (device->lun != -1)
1180                 /* Logical device, lun is already assigned. */
1181                 goto lun_assigned;
1182
1183         /* If this device is a non-zero lun of a multi-lun device,
1184          * byte 4 of the 8-byte LUN addr will contain the logical
1185          * unit no, zero otherwise.
1186          */
1187         if (device->scsi3addr[4] == 0) {
1188                 /* This is not a non-zero lun of a multi-lun device */
1189                 if (hpsa_find_target_lun(h, device->scsi3addr,
1190                         device->bus, &device->target, &device->lun) != 0)
1191                         return -1;
1192                 goto lun_assigned;
1193         }
1194
1195         /* This is a non-zero lun of a multi-lun device.
1196          * Search through our list and find the device which
1197          * has the same 8 byte LUN address, excepting byte 4 and 5.
1198          * Assign the same bus and target for this new LUN.
1199          * Use the logical unit number from the firmware.
1200          */
1201         memcpy(addr1, device->scsi3addr, 8);
1202         addr1[4] = 0;
1203         addr1[5] = 0;
1204         for (i = 0; i < n; i++) {
1205                 sd = h->dev[i];
1206                 memcpy(addr2, sd->scsi3addr, 8);
1207                 addr2[4] = 0;
1208                 addr2[5] = 0;
1209                 /* differ only in byte 4 and 5? */
1210                 if (memcmp(addr1, addr2, 8) == 0) {
1211                         device->bus = sd->bus;
1212                         device->target = sd->target;
1213                         device->lun = device->scsi3addr[4];
1214                         break;
1215                 }
1216         }
1217         if (device->lun == -1) {
1218                 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1219                         " suspect firmware bug or unsupported hardware "
1220                         "configuration.\n");
1221                 return -1;
1222         }
1223
1224 lun_assigned:
1225
1226         h->dev[n] = device;
1227         h->ndevices++;
1228         added[*nadded] = device;
1229         (*nadded)++;
1230         hpsa_show_dev_msg(KERN_INFO, h, device,
1231                 device->expose_device ? "added" : "masked");
1232         device->offload_to_be_enabled = device->offload_enabled;
1233         device->offload_enabled = 0;
1234         return 0;
1235 }
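/*
 * Example of the multi-LUN handling above, using made-up addresses for
 * illustration: if an external array exposes LUN 0 at
 * 01 02 03 04 00 00 00 00 and LUN 2 of the same device at
 * 01 02 03 04 02 00 00 00, the two 8-byte addresses differ only in byte 4,
 * so the second entry inherits the bus/target already assigned to the
 * first and gets lun = scsi3addr[4] = 2.
 */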
1236
1237 /* Update an entry in h->dev[] array. */
1238 static void hpsa_scsi_update_entry(struct ctlr_info *h,
1239         int entry, struct hpsa_scsi_dev_t *new_entry)
1240 {
1241         int offload_enabled;
1242         /* assumes h->devlock is held */
1243         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1244
1245         /* Raid level changed. */
1246         h->dev[entry]->raid_level = new_entry->raid_level;
1247
1248         /* Raid offload parameters changed.  Careful about the ordering. */
1249         if (new_entry->offload_config && new_entry->offload_enabled) {
1250                 /*
1251                  * if drive is newly offload_enabled, we want to copy the
1252                  * raid map data first.  If previously offload_enabled and
1253                  * offload_config were set, raid map data had better be
1254                  * the same as it was before.  if raid map data is changed
1255                  * then it had better be the case that
1256                  * h->dev[entry]->offload_enabled is currently 0.
1257                  */
1258                 h->dev[entry]->raid_map = new_entry->raid_map;
1259                 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1260         }
1261         if (new_entry->hba_ioaccel_enabled) {
1262                 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1263                 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1264         }
1265         h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1266         h->dev[entry]->offload_config = new_entry->offload_config;
1267         h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1268         h->dev[entry]->queue_depth = new_entry->queue_depth;
1269
1270         /*
1271          * We can turn off ioaccel offload now, but need to delay turning
1272          * it on until we can update h->dev[entry]->phys_disk[], but we
1273          * can't do that until all the devices are updated.
1274          */
1275         h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1276         if (!new_entry->offload_enabled)
1277                 h->dev[entry]->offload_enabled = 0;
1278
1279         offload_enabled = h->dev[entry]->offload_enabled;
1280         h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
1281         hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1282         h->dev[entry]->offload_enabled = offload_enabled;
1283 }
1284
1285 /* Replace an entry from h->dev[] array. */
1286 static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1287         int entry, struct hpsa_scsi_dev_t *new_entry,
1288         struct hpsa_scsi_dev_t *added[], int *nadded,
1289         struct hpsa_scsi_dev_t *removed[], int *nremoved)
1290 {
1291         /* assumes h->devlock is held */
1292         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1293         removed[*nremoved] = h->dev[entry];
1294         (*nremoved)++;
1295
1296         /*
1297          * New physical devices won't have target/lun assigned yet
1298          * so we need to preserve the values in the slot we are replacing.
1299          */
1300         if (new_entry->target == -1) {
1301                 new_entry->target = h->dev[entry]->target;
1302                 new_entry->lun = h->dev[entry]->lun;
1303         }
1304
1305         h->dev[entry] = new_entry;
1306         added[*nadded] = new_entry;
1307         (*nadded)++;
1308         hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1309         new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1310         new_entry->offload_enabled = 0;
1311 }
1312
1313 /* Remove an entry from h->dev[] array. */
1314 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1315         struct hpsa_scsi_dev_t *removed[], int *nremoved)
1316 {
1317         /* assumes h->devlock is held */
1318         int i;
1319         struct hpsa_scsi_dev_t *sd;
1320
1321         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1322
1323         sd = h->dev[entry];
1324         removed[*nremoved] = h->dev[entry];
1325         (*nremoved)++;
1326
1327         for (i = entry; i < h->ndevices-1; i++)
1328                 h->dev[i] = h->dev[i+1];
1329         h->ndevices--;
1330         hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1331 }
1332
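/* Compare two 8-byte SCSI-3 addresses byte for byte. */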
1333 #define SCSI3ADDR_EQ(a, b) ( \
1334         (a)[7] == (b)[7] && \
1335         (a)[6] == (b)[6] && \
1336         (a)[5] == (b)[5] && \
1337         (a)[4] == (b)[4] && \
1338         (a)[3] == (b)[3] && \
1339         (a)[2] == (b)[2] && \
1340         (a)[1] == (b)[1] && \
1341         (a)[0] == (b)[0])
1342
1343 static void fixup_botched_add(struct ctlr_info *h,
1344         struct hpsa_scsi_dev_t *added)
1345 {
1346         /* called when scsi_add_device fails in order to re-adjust
1347          * h->dev[] to match the mid layer's view.
1348          */
1349         unsigned long flags;
1350         int i, j;
1351
1352         spin_lock_irqsave(&h->lock, flags);
1353         for (i = 0; i < h->ndevices; i++) {
1354                 if (h->dev[i] == added) {
1355                         for (j = i; j < h->ndevices-1; j++)
1356                                 h->dev[j] = h->dev[j+1];
1357                         h->ndevices--;
1358                         break;
1359                 }
1360         }
1361         spin_unlock_irqrestore(&h->lock, flags);
1362         kfree(added);
1363 }
1364
1365 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1366         struct hpsa_scsi_dev_t *dev2)
1367 {
1368         /* we compare everything except lun and target as these
1369          * are not yet assigned.  Compare parts likely
1370          * to differ first
1371          */
1372         if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1373                 sizeof(dev1->scsi3addr)) != 0)
1374                 return 0;
1375         if (memcmp(dev1->device_id, dev2->device_id,
1376                 sizeof(dev1->device_id)) != 0)
1377                 return 0;
1378         if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1379                 return 0;
1380         if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1381                 return 0;
1382         if (dev1->devtype != dev2->devtype)
1383                 return 0;
1384         if (dev1->bus != dev2->bus)
1385                 return 0;
1386         return 1;
1387 }
1388
1389 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1390         struct hpsa_scsi_dev_t *dev2)
1391 {
1392         /* Device attributes that can change, but don't mean
1393          * that the device is a different device, nor that the OS
1394          * needs to be told anything about the change.
1395          */
1396         if (dev1->raid_level != dev2->raid_level)
1397                 return 1;
1398         if (dev1->offload_config != dev2->offload_config)
1399                 return 1;
1400         if (dev1->offload_enabled != dev2->offload_enabled)
1401                 return 1;
1402         if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1403                 if (dev1->queue_depth != dev2->queue_depth)
1404                         return 1;
1405         return 0;
1406 }
1407
1408 /* Find needle in haystack.  If exact match found, return DEVICE_SAME,
1409  * and return needle location in *index.  If scsi3addr matches, but not
1410  * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1411  * location in *index.
1412  * In the case of a minor device attribute change, such as RAID level, just
1413  * return DEVICE_UPDATED, along with the updated device's location in *index.
1414  * If needle not found, return DEVICE_NOT_FOUND.
1415  */
1416 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1417         struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1418         int *index)
1419 {
1420         int i;
1421 #define DEVICE_NOT_FOUND 0
1422 #define DEVICE_CHANGED 1
1423 #define DEVICE_SAME 2
1424 #define DEVICE_UPDATED 3
1425         if (needle == NULL)
1426                 return DEVICE_NOT_FOUND;
1427
1428         for (i = 0; i < haystack_size; i++) {
1429                 if (haystack[i] == NULL) /* previously removed. */
1430                         continue;
1431                 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1432                         *index = i;
1433                         if (device_is_the_same(needle, haystack[i])) {
1434                                 if (device_updated(needle, haystack[i]))
1435                                         return DEVICE_UPDATED;
1436                                 return DEVICE_SAME;
1437                         } else {
1438                                 /* Keep offline devices offline */
1439                                 if (needle->volume_offline)
1440                                         return DEVICE_NOT_FOUND;
1441                                 return DEVICE_CHANGED;
1442                         }
1443                 }
1444         }
1445         *index = -1;
1446         return DEVICE_NOT_FOUND;
1447 }
1448
1449 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1450                                         unsigned char scsi3addr[])
1451 {
1452         struct offline_device_entry *device;
1453         unsigned long flags;
1454
1455         /* Check to see if device is already on the list */
1456         spin_lock_irqsave(&h->offline_device_lock, flags);
1457         list_for_each_entry(device, &h->offline_device_list, offline_list) {
1458                 if (memcmp(device->scsi3addr, scsi3addr,
1459                         sizeof(device->scsi3addr)) == 0) {
1460                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1461                         return;
1462                 }
1463         }
1464         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1465
1466         /* Device is not on the list, add it. */
1467         device = kmalloc(sizeof(*device), GFP_KERNEL);
1468         if (!device) {
1469                 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1470                 return;
1471         }
1472         memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1473         spin_lock_irqsave(&h->offline_device_lock, flags);
1474         list_add_tail(&device->offline_list, &h->offline_device_list);
1475         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1476 }
1477
1478 /* Print a message explaining various offline volume states */
1479 static void hpsa_show_volume_status(struct ctlr_info *h,
1480         struct hpsa_scsi_dev_t *sd)
1481 {
1482         if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1483                 dev_info(&h->pdev->dev,
1484                         "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1485                         h->scsi_host->host_no,
1486                         sd->bus, sd->target, sd->lun);
1487         switch (sd->volume_offline) {
1488         case HPSA_LV_OK:
1489                 break;
1490         case HPSA_LV_UNDERGOING_ERASE:
1491                 dev_info(&h->pdev->dev,
1492                         "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1493                         h->scsi_host->host_no,
1494                         sd->bus, sd->target, sd->lun);
1495                 break;
1496         case HPSA_LV_NOT_AVAILABLE:
1497                 dev_info(&h->pdev->dev,
1498                         "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1499                         h->scsi_host->host_no,
1500                         sd->bus, sd->target, sd->lun);
1501                 break;
1502         case HPSA_LV_UNDERGOING_RPI:
1503                 dev_info(&h->pdev->dev,
1504                         "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1505                         h->scsi_host->host_no,
1506                         sd->bus, sd->target, sd->lun);
1507                 break;
1508         case HPSA_LV_PENDING_RPI:
1509                 dev_info(&h->pdev->dev,
1510                         "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1511                         h->scsi_host->host_no,
1512                         sd->bus, sd->target, sd->lun);
1513                 break;
1514         case HPSA_LV_ENCRYPTED_NO_KEY:
1515                 dev_info(&h->pdev->dev,
1516                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1517                         h->scsi_host->host_no,
1518                         sd->bus, sd->target, sd->lun);
1519                 break;
1520         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1521                 dev_info(&h->pdev->dev,
1522                         "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1523                         h->scsi_host->host_no,
1524                         sd->bus, sd->target, sd->lun);
1525                 break;
1526         case HPSA_LV_UNDERGOING_ENCRYPTION:
1527                 dev_info(&h->pdev->dev,
1528                         "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1529                         h->scsi_host->host_no,
1530                         sd->bus, sd->target, sd->lun);
1531                 break;
1532         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1533                 dev_info(&h->pdev->dev,
1534                         "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1535                         h->scsi_host->host_no,
1536                         sd->bus, sd->target, sd->lun);
1537                 break;
1538         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1539                 dev_info(&h->pdev->dev,
1540                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1541                         h->scsi_host->host_no,
1542                         sd->bus, sd->target, sd->lun);
1543                 break;
1544         case HPSA_LV_PENDING_ENCRYPTION:
1545                 dev_info(&h->pdev->dev,
1546                         "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1547                         h->scsi_host->host_no,
1548                         sd->bus, sd->target, sd->lun);
1549                 break;
1550         case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1551                 dev_info(&h->pdev->dev,
1552                         "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1553                         h->scsi_host->host_no,
1554                         sd->bus, sd->target, sd->lun);
1555                 break;
1556         }
1557 }
1558
1559 /*
1560  * Build the list of physical drive pointers for a logical drive with
1561  * RAID offload configured.
1562  */
1563 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1564                                 struct hpsa_scsi_dev_t *dev[], int ndevices,
1565                                 struct hpsa_scsi_dev_t *logical_drive)
1566 {
1567         struct raid_map_data *map = &logical_drive->raid_map;
1568         struct raid_map_disk_data *dd = &map->data[0];
1569         int i, j;
1570         int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1571                                 le16_to_cpu(map->metadata_disks_per_row);
1572         int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1573                                 le16_to_cpu(map->layout_map_count) *
1574                                 total_disks_per_row;
1575         int nphys_disk = le16_to_cpu(map->layout_map_count) *
1576                                 total_disks_per_row;
1577         int qdepth;
1578
1579         if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1580                 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1581
1582         logical_drive->nphysical_disks = nraid_map_entries;
1583
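        /*
         * Walk the RAID map entries: match each one to a physical disk by
         * ioaccel handle and accumulate a queue depth, capped at h->nr_cmds.
         */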
1584         qdepth = 0;
1585         for (i = 0; i < nraid_map_entries; i++) {
1586                 logical_drive->phys_disk[i] = NULL;
1587                 if (!logical_drive->offload_config)
1588                         continue;
1589                 for (j = 0; j < ndevices; j++) {
1590                         if (dev[j] == NULL)
1591                                 continue;
1592                         if (dev[j]->devtype != TYPE_DISK)
1593                                 continue;
1594                         if (is_logical_device(dev[j]))
1595                                 continue;
1596                         if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1597                                 continue;
1598
1599                         logical_drive->phys_disk[i] = dev[j];
1600                         if (i < nphys_disk)
1601                                 qdepth = min(h->nr_cmds, qdepth +
1602                                     logical_drive->phys_disk[i]->queue_depth);
1603                         break;
1604                 }
1605
1606                 /*
1607                  * This can happen if a physical drive is removed and
1608                  * the logical drive is degraded.  In that case, the RAID
1609                  * map data will refer to a physical disk which isn't actually
1610                  * present.  offload_enabled should already be 0, but we
1611                  * turn it off here just in case.
1612                  */
1613                 if (!logical_drive->phys_disk[i]) {
1614                         logical_drive->offload_enabled = 0;
1615                         logical_drive->offload_to_be_enabled = 0;
1616                         logical_drive->queue_depth = 8;
1617                 }
1618         }
1619         if (nraid_map_entries)
1620                 /*
1621                  * This depth is correct for reads, too high for full stripe
1622                  * writes, and way too high for partial stripe writes.
1623                  */
1624                 logical_drive->queue_depth = qdepth;
1625         else
1626                 logical_drive->queue_depth = h->nr_cmds;
1627 }
1628
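/*
 * Rebuild phys_disk[] pointers for every logical drive whose ioaccel
 * offload is not currently enabled.
 */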
1629 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1630                                 struct hpsa_scsi_dev_t *dev[], int ndevices)
1631 {
1632         int i;
1633
1634         for (i = 0; i < ndevices; i++) {
1635                 if (dev[i] == NULL)
1636                         continue;
1637                 if (dev[i]->devtype != TYPE_DISK)
1638                         continue;
1639                 if (!is_logical_device(dev[i]))
1640                         continue;
1641
1642                 /*
1643                  * If offload is currently enabled, the RAID map and
1644                  * phys_disk[] assignment had *better* not be changing,
1645                  * and since they aren't changing, we do not need to
1646                  * update them.
1647                  */
1648                 if (dev[i]->offload_enabled)
1649                         continue;
1650
1651                 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1652         }
1653 }
1654
1655 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1656 {
1657         int rc = 0;
1658
1659         if (!h->scsi_host)
1660                 return 1;
1661
1662         rc = scsi_add_device(h->scsi_host, device->bus,
1663                                         device->target, device->lun);
1664         return rc;
1665 }
1666
1667 static void hpsa_remove_device(struct ctlr_info *h,
1668                         struct hpsa_scsi_dev_t *device)
1669 {
1670         struct scsi_device *sdev = NULL;
1671
1672         if (!h->scsi_host)
1673                 return;
1674
1675         sdev = scsi_device_lookup(h->scsi_host, device->bus,
1676                                                 device->target, device->lun);
1677
1678         if (sdev) {
1679                 scsi_remove_device(sdev);
1680                 scsi_device_put(sdev);
1681         } else {
1682                 /*
1683                  * We don't expect to get here.  Future commands
1684                  * to this device will get a selection timeout as
1685                  * if the device were gone.
1686                  */
1687                 hpsa_show_dev_msg(KERN_WARNING, h, device,
1688                                         "didn't find device for removal.");
1689         }
1690 }
1691
1692 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1693         struct hpsa_scsi_dev_t *sd[], int nsds)
1694 {
1695         /* sd contains scsi3 addresses, devtypes, and inquiry
1696          * data.  This function takes what's in sd to be the current
1697          * reality and updates h->dev[] to reflect that reality.
1698          */
1699         int i, entry, device_change, changes = 0;
1700         struct hpsa_scsi_dev_t *csd;
1701         unsigned long flags;
1702         struct hpsa_scsi_dev_t **added, **removed;
1703         int nadded, nremoved;
1704
1705         /*
1706          * A reset can cause a device's status to change;
1707          * re-schedule the scan to see what happened.
1708          */
1709         if (h->reset_in_progress) {
1710                 h->drv_req_rescan = 1;
1711                 return;
1712         }
1713
1714         added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1715         removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1716
1717         if (!added || !removed) {
1718                 dev_warn(&h->pdev->dev, "out of memory in "
1719                         "adjust_hpsa_scsi_table\n");
1720                 goto free_and_out;
1721         }
1722
1723         spin_lock_irqsave(&h->devlock, flags);
1724
1725         /* find any devices in h->dev[] that are not in
1726          * sd[] and remove them from h->dev[], and for any
1727          * devices which have changed, remove the old device
1728          * info and add the new device info.
1729          * If minor device attributes change, just update
1730          * the existing device structure.
1731          */
1732         i = 0;
1733         nremoved = 0;
1734         nadded = 0;
1735         while (i < h->ndevices) {
1736                 csd = h->dev[i];
1737                 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1738                 if (device_change == DEVICE_NOT_FOUND) {
1739                         changes++;
1740                         hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1741                         continue; /* remove ^^^, hence i not incremented */
1742                 } else if (device_change == DEVICE_CHANGED) {
1743                         changes++;
1744                         hpsa_scsi_replace_entry(h, i, sd[entry],
1745                                 added, &nadded, removed, &nremoved);
1746                         /* Set it to NULL to prevent it from being freed
1747                          * at the bottom of hpsa_update_scsi_devices()
1748                          */
1749                         sd[entry] = NULL;
1750                 } else if (device_change == DEVICE_UPDATED) {
1751                         hpsa_scsi_update_entry(h, i, sd[entry]);
1752                 }
1753                 i++;
1754         }
1755
1756         /* Now, make sure every device listed in sd[] is also
1757          * listed in h->dev[], adding them if they aren't found
1758          */
1759
1760         for (i = 0; i < nsds; i++) {
1761                 if (!sd[i]) /* if already added above. */
1762                         continue;
1763
1764                 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1765                  * as the SCSI mid-layer does not handle such devices well.
1766                  * It relentlessly loops sending TUR at 3Hz, then READ(10)
1767                  * at 160Hz, and prevents the system from coming up.
1768                  */
1769                 if (sd[i]->volume_offline) {
1770                         hpsa_show_volume_status(h, sd[i]);
1771                         hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1772                         continue;
1773                 }
1774
1775                 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1776                                         h->ndevices, &entry);
1777                 if (device_change == DEVICE_NOT_FOUND) {
1778                         changes++;
1779                         if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1780                                 break;
1781                         sd[i] = NULL; /* prevent from being freed later. */
1782                 } else if (device_change == DEVICE_CHANGED) {
1783                         /* should never happen... */
1784                         changes++;
1785                         dev_warn(&h->pdev->dev,
1786                                 "device unexpectedly changed.\n");
1787                         /* but if it does happen, we just ignore that device */
1788                 }
1789         }
1790         hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1791
1792         /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1793          * any logical drives that need it enabled.
1794          */
1795         for (i = 0; i < h->ndevices; i++) {
1796                 if (h->dev[i] == NULL)
1797                         continue;
1798                 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1799         }
1800
1801         spin_unlock_irqrestore(&h->devlock, flags);
1802
1803         /* Monitor devices which are in one of several NOT READY states to be
1804          * brought online later. This must be done without holding h->devlock,
1805          * so don't touch h->dev[]
1806          */
1807         for (i = 0; i < nsds; i++) {
1808                 if (!sd[i]) /* if already added above. */
1809                         continue;
1810                 if (sd[i]->volume_offline)
1811                         hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1812         }
1813
1814         /* Don't notify the SCSI mid layer of any changes the first time
1815          * through (or if there are no changes); scsi_scan_host will do it
1816          * later, the first time through.
1817          */
1818         if (!changes)
1819                 goto free_and_out;
1820
1821         /* Notify scsi mid layer of any removed devices */
1822         for (i = 0; i < nremoved; i++) {
1823                 if (removed[i] == NULL)
1824                         continue;
1825                 if (removed[i]->expose_device)
1826                         hpsa_remove_device(h, removed[i]);
1827                 kfree(removed[i]);
1828                 removed[i] = NULL;
1829         }
1830
1831         /* Notify scsi mid layer of any added devices */
1832         for (i = 0; i < nadded; i++) {
1833                 int rc = 0;
1834
1835                 if (added[i] == NULL)
1836                         continue;
1837                 if (!(added[i]->expose_device))
1838                         continue;
1839                 rc = hpsa_add_device(h, added[i]);
1840                 if (!rc)
1841                         continue;
1842                 dev_warn(&h->pdev->dev,
1843                         "addition failed %d, device not added.", rc);
1844                 /* Now we have to remove it from h->dev,
1845                  * since it didn't get added to the SCSI mid layer.
1846                  */
1847                 fixup_botched_add(h, added[i]);
1848                 h->drv_req_rescan = 1;
1849         }
1850
1851 free_and_out:
1852         kfree(added);
1853         kfree(removed);
1854 }
1855
1856 /*
1857  * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
1858  * Assumes h->devlock is held.
1859  */
1860 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1861         int bus, int target, int lun)
1862 {
1863         int i;
1864         struct hpsa_scsi_dev_t *sd;
1865
1866         for (i = 0; i < h->ndevices; i++) {
1867                 sd = h->dev[i];
1868                 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1869                         return sd;
1870         }
1871         return NULL;
1872 }
1873
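/*
 * Called by the SCSI midlayer when a new scsi_device is created; stash our
 * per-device structure in sdev->hostdata (NULL if the device isn't exposed).
 */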
1874 static int hpsa_slave_alloc(struct scsi_device *sdev)
1875 {
1876         struct hpsa_scsi_dev_t *sd;
1877         unsigned long flags;
1878         struct ctlr_info *h;
1879
1880         h = sdev_to_hba(sdev);
1881         spin_lock_irqsave(&h->devlock, flags);
1882         sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1883                 sdev_id(sdev), sdev->lun);
1884         if (likely(sd)) {
1885                 atomic_set(&sd->ioaccel_cmds_out, 0);
1886                 sdev->hostdata = sd->expose_device ? sd : NULL;
1887         } else
1888                 sdev->hostdata = NULL;
1889         spin_unlock_irqrestore(&h->devlock, flags);
1890         return 0;
1891 }
1892
1893 /* configure scsi device based on internal per-device structure */
1894 static int hpsa_slave_configure(struct scsi_device *sdev)
1895 {
1896         struct hpsa_scsi_dev_t *sd;
1897         int queue_depth;
1898
1899         sd = sdev->hostdata;
1900         sdev->no_uld_attach = !sd || !sd->expose_device;
1901
1902         if (sd)
1903                 queue_depth = sd->queue_depth != 0 ?
1904                         sd->queue_depth : sdev->host->can_queue;
1905         else
1906                 queue_depth = sdev->host->can_queue;
1907
1908         scsi_change_queue_depth(sdev, queue_depth);
1909
1910         return 0;
1911 }
1912
1913 static void hpsa_slave_destroy(struct scsi_device *sdev)
1914 {
1915         /* nothing to do. */
1916 }
1917
1918 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1919 {
1920         int i;
1921
1922         if (!h->ioaccel2_cmd_sg_list)
1923                 return;
1924         for (i = 0; i < h->nr_cmds; i++) {
1925                 kfree(h->ioaccel2_cmd_sg_list[i]);
1926                 h->ioaccel2_cmd_sg_list[i] = NULL;
1927         }
1928         kfree(h->ioaccel2_cmd_sg_list);
1929         h->ioaccel2_cmd_sg_list = NULL;
1930 }
1931
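/* Allocate one ioaccel2 SG chain block (maxsgentries elements) per command. */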
1932 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1933 {
1934         int i;
1935
1936         if (h->chainsize <= 0)
1937                 return 0;
1938
1939         h->ioaccel2_cmd_sg_list =
1940                 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1941                                         GFP_KERNEL);
1942         if (!h->ioaccel2_cmd_sg_list)
1943                 return -ENOMEM;
1944         for (i = 0; i < h->nr_cmds; i++) {
1945                 h->ioaccel2_cmd_sg_list[i] =
1946                         kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1947                                         h->maxsgentries, GFP_KERNEL);
1948                 if (!h->ioaccel2_cmd_sg_list[i])
1949                         goto clean;
1950         }
1951         return 0;
1952
1953 clean:
1954         hpsa_free_ioaccel2_sg_chain_blocks(h);
1955         return -ENOMEM;
1956 }
1957
1958 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1959 {
1960         int i;
1961
1962         if (!h->cmd_sg_list)
1963                 return;
1964         for (i = 0; i < h->nr_cmds; i++) {
1965                 kfree(h->cmd_sg_list[i]);
1966                 h->cmd_sg_list[i] = NULL;
1967         }
1968         kfree(h->cmd_sg_list);
1969         h->cmd_sg_list = NULL;
1970 }
1971
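/* Allocate one SG chain block (chainsize descriptors) per command. */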
1972 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
1973 {
1974         int i;
1975
1976         if (h->chainsize <= 0)
1977                 return 0;
1978
1979         h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1980                                 GFP_KERNEL);
1981         if (!h->cmd_sg_list) {
1982                 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1983                 return -ENOMEM;
1984         }
1985         for (i = 0; i < h->nr_cmds; i++) {
1986                 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1987                                                 h->chainsize, GFP_KERNEL);
1988                 if (!h->cmd_sg_list[i]) {
1989                         dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1990                         goto clean;
1991                 }
1992         }
1993         return 0;
1994
1995 clean:
1996         hpsa_free_sg_chain_blocks(h);
1997         return -ENOMEM;
1998 }
1999
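/*
 * DMA-map the command's ioaccel2 SG chain block and record its bus address
 * in the first SG element of the ioaccel2 command.
 */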
2000 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2001         struct io_accel2_cmd *cp, struct CommandList *c)
2002 {
2003         struct ioaccel2_sg_element *chain_block;
2004         u64 temp64;
2005         u32 chain_size;
2006
2007         chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2008         chain_size = le32_to_cpu(cp->sg[0].length);
2009         temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2010                                 PCI_DMA_TODEVICE);
2011         if (dma_mapping_error(&h->pdev->dev, temp64)) {
2012                 /* prevent subsequent unmapping */
2013                 cp->sg->address = 0;
2014                 return -1;
2015         }
2016         cp->sg->address = cpu_to_le64(temp64);
2017         return 0;
2018 }
2019
2020 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2021         struct io_accel2_cmd *cp)
2022 {
2023         struct ioaccel2_sg_element *chain_sg;
2024         u64 temp64;
2025         u32 chain_size;
2026
2027         chain_sg = cp->sg;
2028         temp64 = le64_to_cpu(chain_sg->address);
2029         chain_size = le32_to_cpu(cp->sg[0].length);
2030         pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2031 }
2032
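/*
 * DMA-map the command's SG chain block and hook it into the last in-command
 * SG descriptor, marked with HPSA_SG_CHAIN.
 */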
2033 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2034         struct CommandList *c)
2035 {
2036         struct SGDescriptor *chain_sg, *chain_block;
2037         u64 temp64;
2038         u32 chain_len;
2039
2040         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2041         chain_block = h->cmd_sg_list[c->cmdindex];
2042         chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2043         chain_len = sizeof(*chain_sg) *
2044                 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2045         chain_sg->Len = cpu_to_le32(chain_len);
2046         temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2047                                 PCI_DMA_TODEVICE);
2048         if (dma_mapping_error(&h->pdev->dev, temp64)) {
2049                 /* prevent subsequent unmapping */
2050                 chain_sg->Addr = cpu_to_le64(0);
2051                 return -1;
2052         }
2053         chain_sg->Addr = cpu_to_le64(temp64);
2054         return 0;
2055 }
2056
2057 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2058         struct CommandList *c)
2059 {
2060         struct SGDescriptor *chain_sg;
2061
2062         if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2063                 return;
2064
2065         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2066         pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2067                         le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2068 }
2069
2070
2071 /* Decode the various types of errors on ioaccel2 path.
2072  * Return 1 for any error that should generate a RAID path retry.
2073  * Return 0 for errors that don't require a RAID path retry.
2074  */
2075 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2076                                         struct CommandList *c,
2077                                         struct scsi_cmnd *cmd,
2078                                         struct io_accel2_cmd *c2)
2079 {
2080         int data_len;
2081         int retry = 0;
2082         u32 ioaccel2_resid = 0;
2083
2084         switch (c2->error_data.serv_response) {
2085         case IOACCEL2_SERV_RESPONSE_COMPLETE:
2086                 switch (c2->error_data.status) {
2087                 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2088                         break;
2089                 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2090                         cmd->result |= SAM_STAT_CHECK_CONDITION;
2091                         if (c2->error_data.data_present !=
2092                                         IOACCEL2_SENSE_DATA_PRESENT) {
2093                                 memset(cmd->sense_buffer, 0,
2094                                         SCSI_SENSE_BUFFERSIZE);
2095                                 break;
2096                         }
2097                         /* copy the sense data */
2098                         data_len = c2->error_data.sense_data_len;
2099                         if (data_len > SCSI_SENSE_BUFFERSIZE)
2100                                 data_len = SCSI_SENSE_BUFFERSIZE;
2101                         if (data_len > sizeof(c2->error_data.sense_data_buff))
2102                                 data_len =
2103                                         sizeof(c2->error_data.sense_data_buff);
2104                         memcpy(cmd->sense_buffer,
2105                                 c2->error_data.sense_data_buff, data_len);
2106                         retry = 1;
2107                         break;
2108                 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2109                         retry = 1;
2110                         break;
2111                 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2112                         retry = 1;
2113                         break;
2114                 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2115                         retry = 1;
2116                         break;
2117                 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2118                         retry = 1;
2119                         break;
2120                 default:
2121                         retry = 1;
2122                         break;
2123                 }
2124                 break;
2125         case IOACCEL2_SERV_RESPONSE_FAILURE:
2126                 switch (c2->error_data.status) {
2127                 case IOACCEL2_STATUS_SR_IO_ERROR:
2128                 case IOACCEL2_STATUS_SR_IO_ABORTED:
2129                 case IOACCEL2_STATUS_SR_OVERRUN:
2130                         retry = 1;
2131                         break;
2132                 case IOACCEL2_STATUS_SR_UNDERRUN:
2133                         cmd->result = (DID_OK << 16);           /* host byte */
2134                         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2135                         ioaccel2_resid = get_unaligned_le32(
2136                                                 &c2->error_data.resid_cnt[0]);
2137                         scsi_set_resid(cmd, ioaccel2_resid);
2138                         break;
2139                 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2140                 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2141                 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2142                         /* We will get an event from ctlr to trigger rescan */
2143                         retry = 1;
2144                         break;
2145                 default:
2146                         retry = 1;
2147                 }
2148                 break;
2149         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2150                 break;
2151         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2152                 break;
2153         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2154                 retry = 1;
2155                 break;
2156         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2157                 break;
2158         default:
2159                 retry = 1;
2160                 break;
2161         }
2162
2163         return retry;   /* retry on raid path? */
2164 }
2165
2166 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2167                 struct CommandList *c)
2168 {
2169         bool do_wake = false;
2170
2171         /*
2172          * Prevent the following race in the abort handler:
2173          *
2174          * 1. LLD is requested to abort a SCSI command
2175          * 2. The SCSI command completes
2176          * 3. The struct CommandList associated with step 2 is made available
2177          * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2178          * 5. Abort handler follows scsi_cmnd->host_scribble and
2179          *    finds the struct CommandList and tries to abort it.
2180          * Now we have aborted the wrong command.
2181          *
2182          * Reset c->scsi_cmd here so that the abort or reset handler will know
2183          * this command has completed.  Then, check to see if the handler is
2184          * waiting for this command, and, if so, wake it.
2185          */
2186         c->scsi_cmd = SCSI_CMD_IDLE;
2187         mb();   /* Declare command idle before checking for pending events. */
2188         if (c->abort_pending) {
2189                 do_wake = true;
2190                 c->abort_pending = false;
2191         }
2192         if (c->reset_pending) {
2193                 unsigned long flags;
2194                 struct hpsa_scsi_dev_t *dev;
2195
2196                 /*
2197                  * There appears to be a reset pending; lock the lock and
2198                  * reconfirm.  If so, then decrement the count of outstanding
2199                  * commands and wake the reset command if this is the last one.
2200                  */
2201                 spin_lock_irqsave(&h->lock, flags);
2202                 dev = c->reset_pending;         /* Re-fetch under the lock. */
2203                 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2204                         do_wake = true;
2205                 c->reset_pending = NULL;
2206                 spin_unlock_irqrestore(&h->lock, flags);
2207         }
2208
2209         if (do_wake)
2210                 wake_up_all(&h->event_sync_wait_queue);
2211 }
2212
2213 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2214                                       struct CommandList *c)
2215 {
2216         hpsa_cmd_resolve_events(h, c);
2217         cmd_tagged_free(h, c);
2218 }
2219
2220 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2221                 struct CommandList *c, struct scsi_cmnd *cmd)
2222 {
2223         hpsa_cmd_resolve_and_free(h, c);
2224         cmd->scsi_done(cmd);
2225 }
2226
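/*
 * Queue the command to be resubmitted down the normal (RAID) path,
 * on the current CPU, via the resubmit workqueue.
 */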
2227 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2228 {
2229         INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2230         queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2231 }
2232
2233 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2234 {
2235         cmd->result = DID_ABORT << 16;
2236 }
2237
2238 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2239                                     struct scsi_cmnd *cmd)
2240 {
2241         hpsa_set_scsi_cmd_aborted(cmd);
2242         dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2243                          c->Request.CDB, c->err_info->ScsiStatus);
2244         hpsa_cmd_resolve_and_free(h, c);
2245 }
2246
2247 static void process_ioaccel2_completion(struct ctlr_info *h,
2248                 struct CommandList *c, struct scsi_cmnd *cmd,
2249                 struct hpsa_scsi_dev_t *dev)
2250 {
2251         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2252
2253         /* check for good status */
2254         if (likely(c2->error_data.serv_response == 0 &&
2255                         c2->error_data.status == 0))
2256                 return hpsa_cmd_free_and_done(h, c, cmd);
2257
2258         /*
2259          * Any RAID offload error results in retry which will use
2260          * the normal I/O path so the controller can handle whatever's
2261          * wrong.
2262          */
2263         if (is_logical_device(dev) &&
2264                 c2->error_data.serv_response ==
2265                         IOACCEL2_SERV_RESPONSE_FAILURE) {
2266                 if (c2->error_data.status ==
2267                         IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2268                         dev->offload_enabled = 0;
2269
2270                 return hpsa_retry_cmd(h, c);
2271         }
2272
2273         if (handle_ioaccel_mode2_error(h, c, cmd, c2))
2274                 return hpsa_retry_cmd(h, c);
2275
2276         return hpsa_cmd_free_and_done(h, c, cmd);
2277 }
2278
2279 /* Returns 0 on success, < 0 otherwise. */
2280 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2281                                         struct CommandList *cp)
2282 {
2283         u8 tmf_status = cp->err_info->ScsiStatus;
2284
2285         switch (tmf_status) {
2286         case CISS_TMF_COMPLETE:
2287                 /*
2288                  * CISS_TMF_COMPLETE never happens in practice; instead,
2289                  * ei->CommandStatus == 0 for this case, so fall through.
2290                  */
2291         case CISS_TMF_SUCCESS:
2292                 return 0;
2293         case CISS_TMF_INVALID_FRAME:
2294         case CISS_TMF_NOT_SUPPORTED:
2295         case CISS_TMF_FAILED:
2296         case CISS_TMF_WRONG_LUN:
2297         case CISS_TMF_OVERLAPPED_TAG:
2298                 break;
2299         default:
2300                 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2301                                 tmf_status);
2302                 break;
2303         }
2304         return -tmf_status;
2305 }
2306
2307 static void complete_scsi_command(struct CommandList *cp)
2308 {
2309         struct scsi_cmnd *cmd;
2310         struct ctlr_info *h;
2311         struct ErrorInfo *ei;
2312         struct hpsa_scsi_dev_t *dev;
2313         struct io_accel2_cmd *c2;
2314
2315         u8 sense_key;
2316         u8 asc;      /* additional sense code */
2317         u8 ascq;     /* additional sense code qualifier */
2318         unsigned long sense_data_size;
2319
2320         ei = cp->err_info;
2321         cmd = cp->scsi_cmd;
2322         h = cp->h;
2323         dev = cmd->device->hostdata;
2324         c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2325
2326         scsi_dma_unmap(cmd); /* undo the DMA mappings */
2327         if ((cp->cmd_type == CMD_SCSI) &&
2328                 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2329                 hpsa_unmap_sg_chain_block(h, cp);
2330
2331         if ((cp->cmd_type == CMD_IOACCEL2) &&
2332                 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2333                 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2334
2335         cmd->result = (DID_OK << 16);           /* host byte */
2336         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2337
2338         if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2339                 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2340
2341         /*
2342          * We check for lockup status here as it may be set for
2343          * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2344          * fail_all_outstanding_cmds()
2345          */
2346         if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2347                 /* DID_NO_CONNECT will prevent a retry */
2348                 cmd->result = DID_NO_CONNECT << 16;
2349                 return hpsa_cmd_free_and_done(h, cp, cmd);
2350         }
2351
2352         if ((unlikely(hpsa_is_pending_event(cp)))) {
2353                 if (cp->reset_pending)
2354                         return hpsa_cmd_resolve_and_free(h, cp);
2355                 if (cp->abort_pending)
2356                         return hpsa_cmd_abort_and_free(h, cp, cmd);
2357         }
2358
2359         if (cp->cmd_type == CMD_IOACCEL2)
2360                 return process_ioaccel2_completion(h, cp, cmd, dev);
2361
2362         scsi_set_resid(cmd, ei->ResidualCnt);
2363         if (ei->CommandStatus == 0)
2364                 return hpsa_cmd_free_and_done(h, cp, cmd);
2365
2366         /* For I/O accelerator commands, copy over some fields to the normal
2367          * CISS header used below for error handling.
2368          */
2369         if (cp->cmd_type == CMD_IOACCEL1) {
2370                 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2371                 cp->Header.SGList = scsi_sg_count(cmd);
2372                 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2373                 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2374                         IOACCEL1_IOFLAGS_CDBLEN_MASK;
2375                 cp->Header.tag = c->tag;
2376                 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2377                 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2378
2379                 /* Any RAID offload error results in retry which will use
2380                  * the normal I/O path so the controller can handle whatever's
2381                  * wrong.
2382                  */
2383                 if (is_logical_device(dev)) {
2384                         if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2385                                 dev->offload_enabled = 0;
2386                         return hpsa_retry_cmd(h, cp);
2387                 }
2388         }
2389
2390         /* an error has occurred */
2391         switch (ei->CommandStatus) {
2392
2393         case CMD_TARGET_STATUS:
2394                 cmd->result |= ei->ScsiStatus;
2395                 /* copy the sense data */
2396                 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2397                         sense_data_size = SCSI_SENSE_BUFFERSIZE;
2398                 else
2399                         sense_data_size = sizeof(ei->SenseInfo);
2400                 if (ei->SenseLen < sense_data_size)
2401                         sense_data_size = ei->SenseLen;
2402                 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2403                 if (ei->ScsiStatus)
2404                         decode_sense_data(ei->SenseInfo, sense_data_size,
2405                                 &sense_key, &asc, &ascq);
2406                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2407                         if (sense_key == ABORTED_COMMAND) {
2408                                 cmd->result |= DID_SOFT_ERROR << 16;
2409                                 break;
2410                         }
2411                         break;
2412                 }
2413                 /* Problem was not a check condition
2414                  * Pass it up to the upper layers...
2415                  */
2416                 if (ei->ScsiStatus) {
2417                         dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2418                                 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2419                                 "Returning result: 0x%x\n",
2420                                 cp, ei->ScsiStatus,
2421                                 sense_key, asc, ascq,
2422                                 cmd->result);
2423                 } else {  /* scsi status is zero??? How??? */
2424                         dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2425                                 "Returning no connection.\n", cp);
2426
2427                         /* Ordinarily, this case should never happen,
2428                          * but there is a bug in some released firmware
2429                          * revisions that allows it to happen if, for
2430                          * example, a 4100 backplane loses power and
2431                          * the tape drive is in it.  We assume that
2432                          * it's a fatal error of some kind because we
2433                          * can't show that it wasn't. We will make it
2434                          * look like selection timeout since that is
2435                          * the most common reason for this to occur,
2436                          * and it's severe enough.
2437                          */
2438
2439                         cmd->result = DID_NO_CONNECT << 16;
2440                 }
2441                 break;
2442
2443         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2444                 break;
2445         case CMD_DATA_OVERRUN:
2446                 dev_warn(&h->pdev->dev,
2447                         "CDB %16phN data overrun\n", cp->Request.CDB);
2448                 break;
2449         case CMD_INVALID: {
2450                 /* print_bytes(cp, sizeof(*cp), 1, 0);
2451                 print_cmd(cp); */
2452                 /* We get CMD_INVALID if you address a non-existent device
2453                  * instead of a selection timeout (no response).  You will
2454                  * see this if you yank out a drive, then try to access it.
2455                  * This is kind of a shame because it means that any other
2456                  * CMD_INVALID (e.g. driver bug) will get interpreted as a
2457                  * missing target. */
2458                 cmd->result = DID_NO_CONNECT << 16;
2459         }
2460                 break;
2461         case CMD_PROTOCOL_ERR:
2462                 cmd->result = DID_ERROR << 16;
2463                 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2464                                 cp->Request.CDB);
2465                 break;
2466         case CMD_HARDWARE_ERR:
2467                 cmd->result = DID_ERROR << 16;
2468                 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2469                         cp->Request.CDB);
2470                 break;
2471         case CMD_CONNECTION_LOST:
2472                 cmd->result = DID_ERROR << 16;
2473                 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2474                         cp->Request.CDB);
2475                 break;
2476         case CMD_ABORTED:
2477                 /* Return now to avoid calling scsi_done(). */
2478                 return hpsa_cmd_abort_and_free(h, cp, cmd);
2479         case CMD_ABORT_FAILED:
2480                 cmd->result = DID_ERROR << 16;
2481                 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2482                         cp->Request.CDB);
2483                 break;
2484         case CMD_UNSOLICITED_ABORT:
2485                 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2486                 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2487                         cp->Request.CDB);
2488                 break;
2489         case CMD_TIMEOUT:
2490                 cmd->result = DID_TIME_OUT << 16;
2491                 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2492                         cp->Request.CDB);
2493                 break;
2494         case CMD_UNABORTABLE:
2495                 cmd->result = DID_ERROR << 16;
2496                 dev_warn(&h->pdev->dev, "Command unabortable\n");
2497                 break;
2498         case CMD_TMF_STATUS:
2499                 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2500                         cmd->result = DID_ERROR << 16;
2501                 break;
2502         case CMD_IOACCEL_DISABLED:
2503                 /* This only handles the direct pass-through case since RAID
2504                  * offload is handled above.  Just attempt a retry.
2505                  */
2506                 cmd->result = DID_SOFT_ERROR << 16;
2507                 dev_warn(&h->pdev->dev,
2508                                 "cp %p had HP SSD Smart Path error\n", cp);
2509                 break;
2510         default:
2511                 cmd->result = DID_ERROR << 16;
2512                 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2513                                 cp, ei->CommandStatus);
2514         }
2515
2516         return hpsa_cmd_free_and_done(h, cp, cmd);
2517 }
2518
2519 static void hpsa_pci_unmap(struct pci_dev *pdev,
2520         struct CommandList *c, int sg_used, int data_direction)
2521 {
2522         int i;
2523
2524         for (i = 0; i < sg_used; i++)
2525                 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2526                                 le32_to_cpu(c->SG[i].Len),
2527                                 data_direction);
2528 }
2529
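/*
 * Map a single contiguous buffer into the command's one and only SG
 * descriptor; a zero-length or PCI_DMA_NONE request leaves the SG list empty.
 */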
2530 static int hpsa_map_one(struct pci_dev *pdev,
2531                 struct CommandList *cp,
2532                 unsigned char *buf,
2533                 size_t buflen,
2534                 int data_direction)
2535 {
2536         u64 addr64;
2537
2538         if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2539                 cp->Header.SGList = 0;
2540                 cp->Header.SGTotal = cpu_to_le16(0);
2541                 return 0;
2542         }
2543
2544         addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2545         if (dma_mapping_error(&pdev->dev, addr64)) {
2546                 /* Prevent subsequent unmap of something never mapped */
2547                 cp->Header.SGList = 0;
2548                 cp->Header.SGTotal = cpu_to_le16(0);
2549                 return -1;
2550         }
2551         cp->SG[0].Addr = cpu_to_le64(addr64);
2552         cp->SG[0].Len = cpu_to_le32(buflen);
2553         cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2554         cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
2555         cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2556         return 0;
2557 }
2558
2559 #define NO_TIMEOUT ((unsigned long) -1)
2560 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
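/*
 * Submit a command and wait for it to complete; NO_TIMEOUT waits
 * indefinitely, otherwise return -ETIMEDOUT if the wait expires.
 */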
2561 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2562         struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2563 {
2564         DECLARE_COMPLETION_ONSTACK(wait);
2565
2566         c->waiting = &wait;
2567         __enqueue_cmd_and_start_io(h, c, reply_queue);
2568         if (timeout_msecs == NO_TIMEOUT) {
2569                 /* TODO: get rid of this no-timeout thing */
2570                 wait_for_completion_io(&wait);
2571                 return IO_OK;
2572         }
2573         if (!wait_for_completion_io_timeout(&wait,
2574                                         msecs_to_jiffies(timeout_msecs))) {
2575                 dev_warn(&h->pdev->dev, "Command timed out.\n");
2576                 return -ETIMEDOUT;
2577         }
2578         return IO_OK;
2579 }
2580
2581 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2582                                    int reply_queue, unsigned long timeout_msecs)
2583 {
2584         if (unlikely(lockup_detected(h))) {
2585                 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2586                 return IO_OK;
2587         }
2588         return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2589 }
2590
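/* Read the current CPU's copy of the lockup-detected flag. */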
2591 static u32 lockup_detected(struct ctlr_info *h)
2592 {
2593         int cpu;
2594         u32 rc, *lockup_detected;
2595
2596         cpu = get_cpu();
2597         lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2598         rc = *lockup_detected;
2599         put_cpu();
2600         return rc;
2601 }
2602
2603 #define MAX_DRIVER_CMD_RETRIES 25
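/*
 * Send a command, retrying while the device reports unit attention or
 * busy status, with a backoff delay (doubling up to 1 second) after the
 * first few attempts; give up after MAX_DRIVER_CMD_RETRIES tries.
 */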
2604 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2605         struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2606 {
2607         int backoff_time = 10, retry_count = 0;
2608         int rc;
2609
2610         do {
2611                 memset(c->err_info, 0, sizeof(*c->err_info));
2612                 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2613                                                   timeout_msecs);
2614                 if (rc)
2615                         break;
2616                 retry_count++;
2617                 if (retry_count > 3) {
2618                         msleep(backoff_time);
2619                         if (backoff_time < 1000)
2620                                 backoff_time *= 2;
2621                 }
2622         } while ((check_for_unit_attention(h, c) ||
2623                         check_for_busy(h, c)) &&
2624                         retry_count <= MAX_DRIVER_CMD_RETRIES);
2625         hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2626         if (retry_count > MAX_DRIVER_CMD_RETRIES)
2627                 rc = -EIO;
2628         return rc;
2629 }
2630
2631 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2632                                 struct CommandList *c)
2633 {
2634         const u8 *cdb = c->Request.CDB;
2635         const u8 *lun = c->Header.LUN.LunAddrBytes;
2636
2637         dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2638         " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2639                 txt, lun[0], lun[1], lun[2], lun[3],
2640                 lun[4], lun[5], lun[6], lun[7],
2641                 cdb[0], cdb[1], cdb[2], cdb[3],
2642                 cdb[4], cdb[5], cdb[6], cdb[7],
2643                 cdb[8], cdb[9], cdb[10], cdb[11],
2644                 cdb[12], cdb[13], cdb[14], cdb[15]);
2645 }
2646
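     /*
      * Decode a command's error information and log a human-readable
      * explanation of its completion status.
      */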
2647 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2648                         struct CommandList *cp)
2649 {
2650         const struct ErrorInfo *ei = cp->err_info;
2651         struct device *d = &cp->h->pdev->dev;
2652         u8 sense_key, asc, ascq;
2653         int sense_len;
2654
2655         switch (ei->CommandStatus) {
2656         case CMD_TARGET_STATUS:
2657                 if (ei->SenseLen > sizeof(ei->SenseInfo))
2658                         sense_len = sizeof(ei->SenseInfo);
2659                 else
2660                         sense_len = ei->SenseLen;
2661                 decode_sense_data(ei->SenseInfo, sense_len,
2662                                         &sense_key, &asc, &ascq);
2663                 hpsa_print_cmd(h, "SCSI status", cp);
2664                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2665                         dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2666                                 sense_key, asc, ascq);
2667                 else
2668                         dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2669                 if (ei->ScsiStatus == 0)
2670                         dev_warn(d, "SCSI status is abnormally zero.  "
2671                         "(probably indicates selection timeout "
2672                         "reported incorrectly due to a known "
2673                         "firmware bug, circa July, 2001.)\n");
2674                 break;
2675         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2676                 break;
2677         case CMD_DATA_OVERRUN:
2678                 hpsa_print_cmd(h, "overrun condition", cp);
2679                 break;
2680         case CMD_INVALID: {
2681                 /* controller unfortunately reports SCSI passthru's
2682                  * to non-existent targets as invalid commands.
2683                  */
2684                 hpsa_print_cmd(h, "invalid command", cp);
2685                 dev_warn(d, "probably means device no longer present\n");
2686                 }
2687                 break;
2688         case CMD_PROTOCOL_ERR:
2689                 hpsa_print_cmd(h, "protocol error", cp);
2690                 break;
2691         case CMD_HARDWARE_ERR:
2692                 hpsa_print_cmd(h, "hardware error", cp);
2693                 break;
2694         case CMD_CONNECTION_LOST:
2695                 hpsa_print_cmd(h, "connection lost", cp);
2696                 break;
2697         case CMD_ABORTED:
2698                 hpsa_print_cmd(h, "aborted", cp);
2699                 break;
2700         case CMD_ABORT_FAILED:
2701                 hpsa_print_cmd(h, "abort failed", cp);
2702                 break;
2703         case CMD_UNSOLICITED_ABORT:
2704                 hpsa_print_cmd(h, "unsolicited abort", cp);
2705                 break;
2706         case CMD_TIMEOUT:
2707                 hpsa_print_cmd(h, "timed out", cp);
2708                 break;
2709         case CMD_UNABORTABLE:
2710                 hpsa_print_cmd(h, "unabortable", cp);
2711                 break;
2712         case CMD_CTLR_LOCKUP:
2713                 hpsa_print_cmd(h, "controller lockup detected", cp);
2714                 break;
2715         default:
2716                 hpsa_print_cmd(h, "unknown status", cp);
2717                 dev_warn(d, "Unknown command status %x\n",
2718                                 ei->CommandStatus);
2719         }
2720 }
2721
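     /*
      * Send a SCSI INQUIRY (optionally for a VPD page) to the device at
      * scsi3addr and copy up to bufsize bytes of the response into buf.
      */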
2722 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2723                         u16 page, unsigned char *buf,
2724                         unsigned char bufsize)
2725 {
2726         int rc = IO_OK;
2727         struct CommandList *c;
2728         struct ErrorInfo *ei;
2729
2730         c = cmd_alloc(h);
2731
2732         if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2733                         page, scsi3addr, TYPE_CMD)) {
2734                 rc = -1;
2735                 goto out;
2736         }
2737         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2738                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2739         if (rc)
2740                 goto out;
2741         ei = c->err_info;
2742         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2743                 hpsa_scsi_interpret_error(h, c);
2744                 rc = -1;
2745         }
2746 out:
2747         cmd_free(h, c);
2748         return rc;
2749 }
2750
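     /* Send a reset message of the given type to the device at scsi3addr. */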
2751 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2752         u8 reset_type, int reply_queue)
2753 {
2754         int rc = IO_OK;
2755         struct CommandList *c;
2756         struct ErrorInfo *ei;
2757
2758         c = cmd_alloc(h);
2759
2760
2761         /* fill_cmd can't fail here, no data buffer to map. */
2762         (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2763                         scsi3addr, TYPE_MSG);
2764         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2765         if (rc) {
2766                 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2767                 goto out;
2768         }
2769         /* no unmap needed here because no data xfer. */
2770
2771         ei = c->err_info;
2772         if (ei->CommandStatus != 0) {
2773                 hpsa_scsi_interpret_error(h, c);
2774                 rc = -1;
2775         }
2776 out:
2777         cmd_free(h, c);
2778         return rc;
2779 }
2780
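     /*
      * Decide whether an outstanding command was addressed to the given
      * device, handling plain SCSI/ioctl commands, ioaccel commands, and
      * ioaccel2 TMF requests.
      */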
2781 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2782                                struct hpsa_scsi_dev_t *dev,
2783                                unsigned char *scsi3addr)
2784 {
2785         int i;
2786         bool match = false;
2787         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2788         struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2789
2790         if (hpsa_is_cmd_idle(c))
2791                 return false;
2792
2793         switch (c->cmd_type) {
2794         case CMD_SCSI:
2795         case CMD_IOCTL_PEND:
2796                 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2797                                 sizeof(c->Header.LUN.LunAddrBytes));
2798                 break;
2799
2800         case CMD_IOACCEL1:
2801         case CMD_IOACCEL2:
2802                 if (c->phys_disk == dev) {
2803                         /* HBA mode match */
2804                         match = true;
2805                 } else {
2806                         /* Possible RAID mode -- check each phys dev. */
2807                         /* FIXME:  Do we need to take out a lock here?  If
2808                          * so, we could just call hpsa_get_pdisk_of_ioaccel2()
2809                          * instead. */
2810                         for (i = 0; i < dev->nphysical_disks && !match; i++) {
2811                                 /* FIXME: an alternate test might be
2812                                  *
2813                                  * match = dev->phys_disk[i]->ioaccel_handle
2814                                  *              == c2->scsi_nexus;      */
2815                                 match = dev->phys_disk[i] == c->phys_disk;
2816                         }
2817                 }
2818                 break;
2819
2820         case IOACCEL2_TMF:
2821                 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2822                         match = dev->phys_disk[i]->ioaccel_handle ==
2823                                         le32_to_cpu(ac->it_nexus);
2824                 }
2825                 break;
2826
2827         case 0:         /* The command is in the middle of being initialized. */
2828                 match = false;
2829                 break;
2830
2831         default:
2832                 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
2833                         c->cmd_type);
2834                 BUG();
2835         }
2836
2837         return match;
2838 }
2839
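     /*
      * Reset a device: flag every outstanding command addressed to it as
      * having a reset pending, send the reset, then wait for those commands
      * to drain (or for a controller lockup).  Only one reset runs at a time.
      */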
2840 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2841         unsigned char *scsi3addr, u8 reset_type, int reply_queue)
2842 {
2843         int i;
2844         int rc = 0;
2845
2846         /* We can really only handle one reset at a time */
2847         if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
2848                 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
2849                 return -EINTR;
2850         }
2851
2852         BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
2853
2854         for (i = 0; i < h->nr_cmds; i++) {
2855                 struct CommandList *c = h->cmd_pool + i;
2856                 int refcount = atomic_inc_return(&c->refcount);
2857
2858                 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
2859                         unsigned long flags;
2860
2861                         /*
2862                          * Mark the target command as having a reset pending,
2863                          * then take the lock so that the command cannot complete
2864                          * while we're considering it.  If the command is not
2865                          * idle then count it; otherwise revoke the event.
2866                          */
2867                         c->reset_pending = dev;
2868                         spin_lock_irqsave(&h->lock, flags);     /* Implied MB */
2869                         if (!hpsa_is_cmd_idle(c))
2870                                 atomic_inc(&dev->reset_cmds_out);
2871                         else
2872                                 c->reset_pending = NULL;
2873                         spin_unlock_irqrestore(&h->lock, flags);
2874                 }
2875
2876                 cmd_free(h, c);
2877         }
2878
2879         rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
2880         if (!rc)
2881                 wait_event(h->event_sync_wait_queue,
2882                         atomic_read(&dev->reset_cmds_out) == 0 ||
2883                         lockup_detected(h));
2884
2885         if (unlikely(lockup_detected(h))) {
2886                 dev_warn(&h->pdev->dev,
2887                          "Controller lockup detected during reset wait\n");
2888                 rc = -ENODEV;
2889         }
2890
2891         if (unlikely(rc))
2892                 atomic_set(&dev->reset_cmds_out, 0);
2893
2894         mutex_unlock(&h->reset_mutex);
2895         return rc;
2896 }
2897
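     /* Read the volume's RAID level from vendor-specific VPD page 0xC1. */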
2898 static void hpsa_get_raid_level(struct ctlr_info *h,
2899         unsigned char *scsi3addr, unsigned char *raid_level)
2900 {
2901         int rc;
2902         unsigned char *buf;
2903
2904         *raid_level = RAID_UNKNOWN;
2905         buf = kzalloc(64, GFP_KERNEL);
2906         if (!buf)
2907                 return;
2908         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2909         if (rc == 0)
2910                 *raid_level = buf[8];
2911         if (*raid_level > RAID_UNKNOWN)
2912                 *raid_level = RAID_UNKNOWN;
2913         kfree(buf);
2914         return;
2915 }
2916
2917 #define HPSA_MAP_DEBUG
2918 #ifdef HPSA_MAP_DEBUG
2919 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2920                                 struct raid_map_data *map_buff)
2921 {
2922         struct raid_map_disk_data *dd = &map_buff->data[0];
2923         int map, row, col;
2924         u16 map_cnt, row_cnt, disks_per_row;
2925
2926         if (rc != 0)
2927                 return;
2928
2929         /* Show details only if debugging has been activated. */
2930         if (h->raid_offload_debug < 2)
2931                 return;
2932
2933         dev_info(&h->pdev->dev, "structure_size = %u\n",
2934                                 le32_to_cpu(map_buff->structure_size));
2935         dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2936                         le32_to_cpu(map_buff->volume_blk_size));
2937         dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2938                         le64_to_cpu(map_buff->volume_blk_cnt));
2939         dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2940                         map_buff->phys_blk_shift);
2941         dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2942                         map_buff->parity_rotation_shift);
2943         dev_info(&h->pdev->dev, "strip_size = %u\n",
2944                         le16_to_cpu(map_buff->strip_size));
2945         dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2946                         le64_to_cpu(map_buff->disk_starting_blk));
2947         dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2948                         le64_to_cpu(map_buff->disk_blk_cnt));
2949         dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2950                         le16_to_cpu(map_buff->data_disks_per_row));
2951         dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2952                         le16_to_cpu(map_buff->metadata_disks_per_row));
2953         dev_info(&h->pdev->dev, "row_cnt = %u\n",
2954                         le16_to_cpu(map_buff->row_cnt));
2955         dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2956                         le16_to_cpu(map_buff->layout_map_count));
2957         dev_info(&h->pdev->dev, "flags = 0x%x\n",
2958                         le16_to_cpu(map_buff->flags));
2959         dev_info(&h->pdev->dev, "encryption = %s\n",
2960                         le16_to_cpu(map_buff->flags) &
2961                         RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
2962         dev_info(&h->pdev->dev, "dekindex = %u\n",
2963                         le16_to_cpu(map_buff->dekindex));
2964         map_cnt = le16_to_cpu(map_buff->layout_map_count);
2965         for (map = 0; map < map_cnt; map++) {
2966                 dev_info(&h->pdev->dev, "Map%u:\n", map);
2967                 row_cnt = le16_to_cpu(map_buff->row_cnt);
2968                 for (row = 0; row < row_cnt; row++) {
2969                         dev_info(&h->pdev->dev, "  Row%u:\n", row);
2970                         disks_per_row =
2971                                 le16_to_cpu(map_buff->data_disks_per_row);
2972                         for (col = 0; col < disks_per_row; col++, dd++)
2973                                 dev_info(&h->pdev->dev,
2974                                         "    D%02u: h=0x%04x xor=%u,%u\n",
2975                                         col, dd->ioaccel_handle,
2976                                         dd->xor_mult[0], dd->xor_mult[1]);
2977                         disks_per_row =
2978                                 le16_to_cpu(map_buff->metadata_disks_per_row);
2979                         for (col = 0; col < disks_per_row; col++, dd++)
2980                                 dev_info(&h->pdev->dev,
2981                                         "    M%02u: h=0x%04x xor=%u,%u\n",
2982                                         col, dd->ioaccel_handle,
2983                                         dd->xor_mult[0], dd->xor_mult[1]);
2984                 }
2985         }
2986 }
2987 #else
2988 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2989                         __attribute__((unused)) int rc,
2990                         __attribute__((unused)) struct raid_map_data *map_buff)
2991 {
2992 }
2993 #endif
2994
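     /*
      * Fetch the controller's RAID map for a logical volume into
      * this_device->raid_map.
      */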
2995 static int hpsa_get_raid_map(struct ctlr_info *h,
2996         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2997 {
2998         int rc = 0;
2999         struct CommandList *c;
3000         struct ErrorInfo *ei;
3001
3002         c = cmd_alloc(h);
3003
3004         if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3005                         sizeof(this_device->raid_map), 0,
3006                         scsi3addr, TYPE_CMD)) {
3007                 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3008                 cmd_free(h, c);
3009                 return -1;
3010         }
3011         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3012                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3013         if (rc)
3014                 goto out;
3015         ei = c->err_info;
3016         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3017                 hpsa_scsi_interpret_error(h, c);
3018                 rc = -1;
3019                 goto out;
3020         }
3021         cmd_free(h, c);
3022
3023         /* @todo in the future, dynamically allocate RAID map memory */
3024         if (le32_to_cpu(this_device->raid_map.structure_size) >
3025                                 sizeof(this_device->raid_map)) {
3026                 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3027                 rc = -1;
3028         }
3029         hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3030         return rc;
3031 out:
3032         cmd_free(h, c);
3033         return rc;
3034 }
3035
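     /* Issue BMIC IDENTIFY CONTROLLER and copy the result into buf. */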
3036 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3037         struct bmic_identify_controller *buf, size_t bufsize)
3038 {
3039         int rc = IO_OK;
3040         struct CommandList *c;
3041         struct ErrorInfo *ei;
3042
3043         c = cmd_alloc(h);
3044
3045         rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3046                 0, RAID_CTLR_LUNID, TYPE_CMD);
3047         if (rc)
3048                 goto out;
3049
3050         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3051                 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3052         if (rc)
3053                 goto out;
3054         ei = c->err_info;
3055         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3056                 hpsa_scsi_interpret_error(h, c);
3057                 rc = -1;
3058         }
3059 out:
3060         cmd_free(h, c);
3061         return rc;
3062 }
3063
3064
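     /*
      * Issue BMIC IDENTIFY PHYSICAL DEVICE for the drive selected by
      * bmic_device_index (low byte in CDB[2], high byte in CDB[9]).
      */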
3065 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3066                 unsigned char scsi3addr[], u16 bmic_device_index,
3067                 struct bmic_identify_physical_device *buf, size_t bufsize)
3068 {
3069         int rc = IO_OK;
3070         struct CommandList *c;
3071         struct ErrorInfo *ei;
3072
3073         c = cmd_alloc(h);
3074         rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3075                 0, RAID_CTLR_LUNID, TYPE_CMD);
3076         if (rc)
3077                 goto out;
3078
3079         c->Request.CDB[2] = bmic_device_index & 0xff;
3080         c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3081
3082         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3083                                                 NO_TIMEOUT);
3084         ei = c->err_info;
3085         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3086                 hpsa_scsi_interpret_error(h, c);
3087                 rc = -1;
3088         }
3089 out:
3090         cmd_free(h, c);
3091         return rc;
3092 }
3093
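     /*
      * Return 1 if the device lists the given VPD page in its supported
      * VPD pages page, 0 otherwise.
      */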
3094 static int hpsa_vpd_page_supported(struct ctlr_info *h,
3095         unsigned char scsi3addr[], u8 page)
3096 {
3097         int rc;
3098         int i;
3099         int pages;
3100         unsigned char *buf, bufsize;
3101
3102         buf = kzalloc(256, GFP_KERNEL);
3103         if (!buf)
3104                 return 0;
3105
3106         /* Get the size of the page list first */
3107         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3108                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3109                                 buf, HPSA_VPD_HEADER_SZ);
3110         if (rc != 0)
3111                 goto exit_unsupported;
3112         pages = buf[3];
3113         if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3114                 bufsize = pages + HPSA_VPD_HEADER_SZ;
3115         else
3116                 bufsize = 255;
3117
3118         /* Get the whole VPD page list */
3119         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3120                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3121                                 buf, bufsize);
3122         if (rc != 0)
3123                 goto exit_unsupported;
3124
3125         pages = buf[3];
3126         for (i = 1; i <= pages && 3 + i < bufsize; i++)
3127                 if (buf[3 + i] == page)
3128                         goto exit_supported;
3129 exit_unsupported:
3130         kfree(buf);
3131         return 0;
3132 exit_supported:
3133         kfree(buf);
3134         return 1;
3135 }
3136
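     /*
      * Read the ioaccel-status VPD page to learn whether I/O accelerator
      * offload is configured/enabled for this volume, and pull in the RAID
      * map when offload is configured.
      */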
3137 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3138         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3139 {
3140         int rc;
3141         unsigned char *buf;
3142         u8 ioaccel_status;
3143
3144         this_device->offload_config = 0;
3145         this_device->offload_enabled = 0;
3146         this_device->offload_to_be_enabled = 0;
3147
3148         buf = kzalloc(64, GFP_KERNEL);
3149         if (!buf)
3150                 return;
3151         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3152                 goto out;
3153         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3154                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3155         if (rc != 0)
3156                 goto out;
3157
3158 #define IOACCEL_STATUS_BYTE 4
3159 #define OFFLOAD_CONFIGURED_BIT 0x01
3160 #define OFFLOAD_ENABLED_BIT 0x02
3161         ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3162         this_device->offload_config =
3163                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3164         if (this_device->offload_config) {
3165                 this_device->offload_enabled =
3166                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3167                 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3168                         this_device->offload_enabled = 0;
3169         }
3170         this_device->offload_to_be_enabled = this_device->offload_enabled;
3171 out:
3172         kfree(buf);
3173         return;
3174 }
3175
3176 /* Get the device id from inquiry page 0x83 */
3177 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3178         unsigned char *device_id, int index, int buflen)
3179 {
3180         int rc;
3181         unsigned char *buf;
3182
3183         if (buflen > 16)
3184                 buflen = 16;
3185         buf = kzalloc(64, GFP_KERNEL);
3186         if (!buf)
3187                 return -ENOMEM;
3188         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
3189         if (rc == 0)
3190                 memcpy(device_id, &buf[index], buflen);
3191
3192         kfree(buf);
3193
3194         return rc != 0;
3195 }
3196
3197 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3198                 void *buf, int bufsize,
3199                 int extended_response)
3200 {
3201         int rc = IO_OK;
3202         struct CommandList *c;
3203         unsigned char scsi3addr[8];
3204         struct ErrorInfo *ei;
3205
3206         c = cmd_alloc(h);
3207
3208         /* address the controller */
3209         memset(scsi3addr, 0, sizeof(scsi3addr));
3210         if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3211                 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3212                 rc = -1;
3213                 goto out;
3214         }
3215         if (extended_response)
3216                 c->Request.CDB[1] = extended_response;
3217         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3218                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3219         if (rc)
3220                 goto out;
3221         ei = c->err_info;
3222         if (ei->CommandStatus != 0 &&
3223             ei->CommandStatus != CMD_DATA_UNDERRUN) {
3224                 hpsa_scsi_interpret_error(h, c);
3225                 rc = -1;
3226         } else {
3227                 struct ReportLUNdata *rld = buf;
3228
3229                 if (rld->extended_response_flag != extended_response) {
3230                         dev_err(&h->pdev->dev,
3231                                 "report luns requested format %u, got %u\n",
3232                                 extended_response,
3233                                 rld->extended_response_flag);
3234                         rc = -1;
3235                 }
3236         }
3237 out:
3238         cmd_free(h, c);
3239         return rc;
3240 }
3241
3242 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3243                 struct ReportExtendedLUNdata *buf, int bufsize)
3244 {
3245         return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3246                                                 HPSA_REPORT_PHYS_EXTENDED);
3247 }
3248
3249 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3250                 struct ReportLUNdata *buf, int bufsize)
3251 {
3252         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3253 }
3254
3255 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3256         int bus, int target, int lun)
3257 {
3258         device->bus = bus;
3259         device->target = target;
3260         device->lun = lun;
3261 }
3262
3263 /* Use VPD inquiry to get details of volume status */
3264 static int hpsa_get_volume_status(struct ctlr_info *h,
3265                                         unsigned char scsi3addr[])
3266 {
3267         int rc;
3268         int status;
3269         int size;
3270         unsigned char *buf;
3271
3272         buf = kzalloc(64, GFP_KERNEL);
3273         if (!buf)
3274                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3275
3276         /* Does controller have VPD for logical volume status? */
3277         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3278                 goto exit_failed;
3279
3280         /* Get the size of the VPD return buffer */
3281         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3282                                         buf, HPSA_VPD_HEADER_SZ);
3283         if (rc != 0)
3284                 goto exit_failed;
3285         size = buf[3];
3286
3287         /* Now get the whole VPD buffer */
3288         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3289                                         buf, size + HPSA_VPD_HEADER_SZ);
3290         if (rc != 0)
3291                 goto exit_failed;
3292         status = buf[4]; /* status byte */
3293
3294         kfree(buf);
3295         return status;
3296 exit_failed:
3297         kfree(buf);
3298         return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3299 }
3300
3301 /* Determine offline status of a volume.
3302  * Return either:
3303  *  0 (not offline)
3304  *  0xff (offline for unknown reasons)
3305  *  # (integer code indicating one of several NOT READY states
3306  *     describing why a volume is to be kept offline)
3307  */
3308 static int hpsa_volume_offline(struct ctlr_info *h,
3309                                         unsigned char scsi3addr[])
3310 {
3311         struct CommandList *c;
3312         unsigned char *sense;
3313         u8 sense_key, asc, ascq;
3314         int sense_len;
3315         int rc, ldstat = 0;
3316         u16 cmd_status;
3317         u8 scsi_status;
3318 #define ASC_LUN_NOT_READY 0x04
3319 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3320 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3321
3322         c = cmd_alloc(h);
3323
3324         (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3325         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3326         if (rc) {
3327                 cmd_free(h, c);
3328                 return 0;
3329         }
3330         sense = c->err_info->SenseInfo;
3331         if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3332                 sense_len = sizeof(c->err_info->SenseInfo);
3333         else
3334                 sense_len = c->err_info->SenseLen;
3335         decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3336         cmd_status = c->err_info->CommandStatus;
3337         scsi_status = c->err_info->ScsiStatus;
3338         cmd_free(h, c);
3339         /* Is the volume 'not ready'? */
3340         if (cmd_status != CMD_TARGET_STATUS ||
3341                 scsi_status != SAM_STAT_CHECK_CONDITION ||
3342                 sense_key != NOT_READY ||
3343                 asc != ASC_LUN_NOT_READY)  {
3344                 return 0;
3345         }
3346
3347         /* Determine the reason for not ready state */
3348         ldstat = hpsa_get_volume_status(h, scsi3addr);
3349
3350         /* Keep volume offline in certain cases: */
3351         switch (ldstat) {
3352         case HPSA_LV_UNDERGOING_ERASE:
3353         case HPSA_LV_NOT_AVAILABLE:
3354         case HPSA_LV_UNDERGOING_RPI:
3355         case HPSA_LV_PENDING_RPI:
3356         case HPSA_LV_ENCRYPTED_NO_KEY:
3357         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3358         case HPSA_LV_UNDERGOING_ENCRYPTION:
3359         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3360         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3361                 return ldstat;
3362         case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3363                 /* If VPD status page isn't available,
3364                  * use ASC/ASCQ to determine state
3365                  */
3366                 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3367                         (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3368                         return ldstat;
3369                 break;
3370         default:
3371                 break;
3372         }
3373         return 0;
3374 }
3375
3376 /*
3377  * Find out if a logical device supports aborts by simply trying one.
3378  * Smart Array may claim not to support aborts on logical drives, but
3379  * if an MSA2000 is connected, the drives on that array will be presented
3380  * by the Smart Array as logical drives, and aborts may be sent to
3381  * those devices successfully.  So the simplest way to find out is
3382  * to simply try an abort and see how the device responds.
3383  */
3384 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3385                                         unsigned char *scsi3addr)
3386 {
3387         struct CommandList *c;
3388         struct ErrorInfo *ei;
3389         int rc = 0;
3390
3391         u64 tag = (u64) -1; /* bogus tag */
3392
3393         /* Assume that physical devices support aborts */
3394         if (!is_logical_dev_addr_mode(scsi3addr))
3395                 return 1;
3396
3397         c = cmd_alloc(h);
3398
3399         (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3400         (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3401         /* no unmap needed here because no data xfer. */
3402         ei = c->err_info;
3403         switch (ei->CommandStatus) {
3404         case CMD_INVALID:
3405                 rc = 0;
3406                 break;
3407         case CMD_UNABORTABLE:
3408         case CMD_ABORT_FAILED:
3409                 rc = 1;
3410                 break;
3411         case CMD_TMF_STATUS:
3412                 rc = hpsa_evaluate_tmf_status(h, c);
3413                 break;
3414         default:
3415                 rc = 0;
3416                 break;
3417         }
3418         cmd_free(h, c);
3419         return rc;
3420 }
3421
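     /*
      * Blank out everything from the first NUL onward and replace any
      * non-printable characters with spaces.
      */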
3422 static void sanitize_inquiry_string(unsigned char *s, int len)
3423 {
3424         bool terminated = false;
3425
3426         for (; len > 0; (--len, ++s)) {
3427                 if (*s == 0)
3428                         terminated = true;
3429                 if (terminated || *s < 0x20 || *s > 0x7e)
3430                         *s = ' ';
3431         }
3432 }
3433
3434 static int hpsa_update_device_info(struct ctlr_info *h,
3435         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3436         unsigned char *is_OBDR_device)
3437 {
3438
3439 #define OBDR_SIG_OFFSET 43
3440 #define OBDR_TAPE_SIG "$DR-10"
3441 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3442 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3443
3444         unsigned char *inq_buff;
3445         unsigned char *obdr_sig;
3446         int rc = 0;
3447
3448         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3449         if (!inq_buff) {
3450                 rc = -ENOMEM;
3451                 goto bail_out;
3452         }
3453
3454         /* Do an inquiry to the device to see what it is. */
3455         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3456                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3457                 /* Inquiry failed (msg printed already) */
3458                 dev_err(&h->pdev->dev,
3459                         "hpsa_update_device_info: inquiry failed\n");
3460                 rc = -EIO;
3461                 goto bail_out;
3462         }
3463
3464         sanitize_inquiry_string(&inq_buff[8], 8);
3465         sanitize_inquiry_string(&inq_buff[16], 16);
3466
3467         this_device->devtype = (inq_buff[0] & 0x1f);
3468         memcpy(this_device->scsi3addr, scsi3addr, 8);
3469         memcpy(this_device->vendor, &inq_buff[8],
3470                 sizeof(this_device->vendor));
3471         memcpy(this_device->model, &inq_buff[16],
3472                 sizeof(this_device->model));
3473         memset(this_device->device_id, 0,
3474                 sizeof(this_device->device_id));
3475         hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3476                 sizeof(this_device->device_id));
3477
3478         if (this_device->devtype == TYPE_DISK &&
3479                 is_logical_dev_addr_mode(scsi3addr)) {
3480                 int volume_offline;
3481
3482                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3483                 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3484                         hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3485                 volume_offline = hpsa_volume_offline(h, scsi3addr);
3486                 if (volume_offline < 0 || volume_offline > 0xff)
3487                         volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3488                 this_device->volume_offline = volume_offline & 0xff;
3489         } else {
3490                 this_device->raid_level = RAID_UNKNOWN;
3491                 this_device->offload_config = 0;
3492                 this_device->offload_enabled = 0;
3493                 this_device->offload_to_be_enabled = 0;
3494                 this_device->hba_ioaccel_enabled = 0;
3495                 this_device->volume_offline = 0;
3496                 this_device->queue_depth = h->nr_cmds;
3497         }
3498
3499         if (is_OBDR_device) {
3500                 /* See if this is a One-Button-Disaster-Recovery device
3501                  * by looking for "$DR-10" at offset 43 in inquiry data.
3502                  */
3503                 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3504                 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3505                                         strncmp(obdr_sig, OBDR_TAPE_SIG,
3506                                                 OBDR_SIG_LEN) == 0);
3507         }
3508         kfree(inq_buff);
3509         return 0;
3510
3511 bail_out:
3512         kfree(inq_buff);
3513         return rc;
3514 }
3515
3516 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3517                         struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3518 {
3519         unsigned long flags;
3520         int rc, entry;
3521         /*
3522          * See if this device supports aborts.  If we already know
3523          * the device, we already know if it supports aborts, otherwise
3524          * we have to find out if it supports aborts by trying one.
3525          */
3526         spin_lock_irqsave(&h->devlock, flags);
3527         rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3528         if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3529                 entry >= 0 && entry < h->ndevices) {
3530                 dev->supports_aborts = h->dev[entry]->supports_aborts;
3531                 spin_unlock_irqrestore(&h->devlock, flags);
3532         } else {
3533                 spin_unlock_irqrestore(&h->devlock, flags);
3534                 dev->supports_aborts =
3535                                 hpsa_device_supports_aborts(h, scsi3addr);
3536                 if (dev->supports_aborts < 0)
3537                         dev->supports_aborts = 0;
3538         }
3539 }
3540
3541 /*
3542  * Helper function to assign bus, target, lun mapping of devices.
3543  * Logical drive target and lun are assigned at this time, but
3544  * physical device lun and target assignment are deferred (assigned
3545  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3546 */
3547 static void figure_bus_target_lun(struct ctlr_info *h,
3548         u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3549 {
3550         u32 lunid = get_unaligned_le32(lunaddrbytes);
3551
3552         if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3553                 /* physical device, target and lun filled in later */
3554                 if (is_hba_lunid(lunaddrbytes))
3555                         hpsa_set_bus_target_lun(device,
3556                                         HPSA_HBA_BUS, 0, lunid & 0x3fff);
3557                 else
3558                         /* defer target, lun assignment for physical devices */
3559                         hpsa_set_bus_target_lun(device,
3560                                         HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
3561                 return;
3562         }
3563         /* It's a logical device */
3564         if (device->external) {
3565                 hpsa_set_bus_target_lun(device,
3566                         HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
3567                         lunid & 0x00ff);
3568                 return;
3569         }
3570         hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
3571                                 0, lunid & 0x3fff);
3572 }
3573
3574 /*
3575  * If there is no lun 0 on a target, linux won't find any devices.
3576  * For the external targets (arrays), we have to manually detect the enclosure
3577  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
3578  * it for some reason.  *tmpdevice is the target we're adding,
3579  * this_device is a pointer into the current element of currentsd[]
3580  * that we're building up in update_scsi_devices(), below.
3581  * lunzerobits is a bitmap that tracks which targets already have a
3582  * lun 0 assigned.
3583  * Returns 1 if an enclosure was added, 0 if not.
3584  */
3585 static int add_ext_target_dev(struct ctlr_info *h,
3586         struct hpsa_scsi_dev_t *tmpdevice,
3587         struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
3588         unsigned long lunzerobits[], int *n_ext_target_devs)
3589 {
3590         unsigned char scsi3addr[8];
3591
3592         if (test_bit(tmpdevice->target, lunzerobits))
3593                 return 0; /* There is already a lun 0 on this target. */
3594
3595         if (!is_logical_dev_addr_mode(lunaddrbytes))
3596                 return 0; /* It's the logical targets that may lack lun 0. */
3597
3598         if (!tmpdevice->external)
3599                 return 0; /* Only external target devices have this problem. */
3600
3601         if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
3602                 return 0;
3603
3604         memset(scsi3addr, 0, 8);
3605         scsi3addr[3] = tmpdevice->target;
3606         if (is_hba_lunid(scsi3addr))
3607                 return 0; /* Don't add the RAID controller here. */
3608
3609         if (is_scsi_rev_5(h))
3610                 return 0; /* p1210m doesn't need to do this. */
3611
3612         if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
3613                 dev_warn(&h->pdev->dev, "Maximum number of external "
3614                         "target devices exceeded.  Check your hardware "
3615                         "configuration.\n");
3616                 return 0;
3617         }
3618
3619         if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
3620                 return 0;
3621         (*n_ext_target_devs)++;
3622         hpsa_set_bus_target_lun(this_device,
3623                                 tmpdevice->bus, tmpdevice->target, 0);
3624         hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
3625         set_bit(tmpdevice->target, lunzerobits);
3626         return 1;
3627 }
3628
3629 /*
3630  * Get address of physical disk used for an ioaccel2 mode command:
3631  *      1. Extract ioaccel2 handle from the command.
3632  *      2. Find a matching ioaccel2 handle from list of physical disks.
3633  *      3. Return:
3634  *              1 and set scsi3addr to address of matching physical
3635  *              0 if no matching physical disk was found.
3636  */
3637 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3638         struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3639 {
3640         struct io_accel2_cmd *c2 =
3641                         &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3642         unsigned long flags;
3643         int i;
3644
3645         spin_lock_irqsave(&h->devlock, flags);
3646         for (i = 0; i < h->ndevices; i++)
3647                 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3648                         memcpy(scsi3addr, h->dev[i]->scsi3addr,
3649                                 sizeof(h->dev[i]->scsi3addr));
3650                         spin_unlock_irqrestore(&h->devlock, flags);
3651                         return 1;
3652                 }
3653         spin_unlock_irqrestore(&h->devlock, flags);
3654         return 0;
3655 }
3656
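     /*
      * Return 1 if entry i of the combined physical + logical LUN list is a
      * logical volume on an external array, 0 otherwise.
      */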
3657 static int  figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
3658         int i, int nphysicals, int nlocal_logicals)
3659 {
3660         /* In report logicals, local logicals are listed first,
3661          * then any externals.
3662          */
3663         int logicals_start = nphysicals + (raid_ctlr_position == 0);
3664
3665         if (i == raid_ctlr_position)
3666                 return 0;
3667
3668         if (i < logicals_start)
3669                 return 0;
3670
3671         /* i is in logicals range, but still within local logicals */
3672         if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
3673                 return 0;
3674
3675         return 1; /* it's an external lun */
3676 }
3677
3678 /*
3679  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
3680  * logdev.  The number of luns in physdev and logdev are returned in
3681  * *nphysicals and *nlogicals, respectively.
3682  * Returns 0 on success, -1 otherwise.
3683  */
3684 static int hpsa_gather_lun_info(struct ctlr_info *h,
3685         struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3686         struct ReportLUNdata *logdev, u32 *nlogicals)
3687 {
3688         if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3689                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3690                 return -1;
3691         }
3692         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3693         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3694                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3695                         HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3696                 *nphysicals = HPSA_MAX_PHYS_LUN;
3697         }
3698         if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3699                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3700                 return -1;
3701         }
3702         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3703         /* Reject Logicals in excess of our max capability. */
3704         if (*nlogicals > HPSA_MAX_LUN) {
3705                 dev_warn(&h->pdev->dev,
3706                         "maximum logical LUNs (%d) exceeded.  "
3707                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
3708                         *nlogicals - HPSA_MAX_LUN);
3709                         *nlogicals = HPSA_MAX_LUN;
3710         }
3711         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3712                 dev_warn(&h->pdev->dev,
3713                         "maximum logical + physical LUNs (%d) exceeded. "
3714                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3715                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3716                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3717         }
3718         return 0;
3719 }
3720
3721 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3722         int i, int nphysicals, int nlogicals,
3723         struct ReportExtendedLUNdata *physdev_list,
3724         struct ReportLUNdata *logdev_list)
3725 {
3726         /* Helper function, figure out where the LUN ID info is coming from
3727          * given index i, lists of physical and logical devices, where in
3728          * the list the raid controller is supposed to appear (first or last)
3729          */
3730
3731         int logicals_start = nphysicals + (raid_ctlr_position == 0);
3732         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3733
3734         if (i == raid_ctlr_position)
3735                 return RAID_CTLR_LUNID;
3736
3737         if (i < logicals_start)
3738                 return &physdev_list->LUN[i -
3739                                 (raid_ctlr_position == 0)].lunid[0];
3740
3741         if (i < last_device)
3742                 return &logdev_list->LUN[i - nphysicals -
3743                         (raid_ctlr_position == 0)][0];
3744         BUG();
3745         return NULL;
3746 }
3747
3748 /* get physical drive ioaccel handle and queue depth */
3749 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3750                 struct hpsa_scsi_dev_t *dev,
3751                 struct ReportExtendedLUNdata *rlep, int rle_index,
3752                 struct bmic_identify_physical_device *id_phys)
3753 {
3754         int rc;
3755         struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3756
3757         dev->ioaccel_handle = rle->ioaccel_handle;
3758         if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
3759                 dev->hba_ioaccel_enabled = 1;
3760         memset(id_phys, 0, sizeof(*id_phys));
3761         rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
3762                         GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
3763                         sizeof(*id_phys));
3764 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3765 #define DRIVE_QUEUE_DEPTH 7
3766         if (!rc)
3767                 /* Reserve space for FW operations */
3768                 dev->queue_depth =
3769                         le16_to_cpu(id_phys->current_queue_depth_limit) -
3770                                 DRIVE_CMDS_RESERVED_FOR_FW;
3771         else
3772                 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3773 }
3774
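     /*
      * Copy multipath details (active path, path map, box, connector, bay)
      * from the BMIC identify-physical-device data into the device struct.
      */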
3775 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
3776         struct ReportExtendedLUNdata *rlep, int rle_index,
3777         struct bmic_identify_physical_device *id_phys)
3778 {
3779         struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3780
3781         if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
3782                 this_device->hba_ioaccel_enabled = 1;
3783
3784         memcpy(&this_device->active_path_index,
3785                 &id_phys->active_path_number,
3786                 sizeof(this_device->active_path_index));
3787         memcpy(&this_device->path_map,
3788                 &id_phys->redundant_path_present_map,
3789                 sizeof(this_device->path_map));
3790         memcpy(&this_device->box,
3791                 &id_phys->alternate_paths_phys_box_on_port,
3792                 sizeof(this_device->box));
3793         memcpy(&this_device->phys_connector,
3794                 &id_phys->alternate_paths_phys_connector,
3795                 sizeof(this_device->phys_connector));
3796         memcpy(&this_device->bay,
3797                 &id_phys->phys_bay_in_box,
3798                 sizeof(this_device->bay));
3799 }
3800
3801 /* get number of local logical disks. */
3802 static int hpsa_set_local_logical_count(struct ctlr_info *h,
3803         struct bmic_identify_controller *id_ctlr,
3804         u32 *nlocals)
3805 {
3806         int rc;
3807
3808         if (!id_ctlr) {
3809                 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
3810                         __func__);
3811                 return -ENOMEM;
3812         }
3813         memset(id_ctlr, 0, sizeof(*id_ctlr));
3814         rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
3815         if (rc) {
3816                 *nlocals = -1;
3817         } else if (id_ctlr->configured_logical_drive_count < 256) {
3818                 *nlocals = id_ctlr->configured_logical_drive_count;
3819         } else {
3820                 *nlocals = le16_to_cpu(
3821                                 id_ctlr->extended_logical_unit_count);
3822         }
3823         return rc;
3824 }
3825
3826
3827 static void hpsa_update_scsi_devices(struct ctlr_info *h)
3828 {
3829         /* the idea here is we could get notified
3830          * that some devices have changed, so we do a report
3831          * physical luns and report logical luns cmd, and adjust
3832          * our list of devices accordingly.
3833          *
3834          * The scsi3addr's of devices won't change so long as the
3835          * adapter is not reset.  That means we can rescan and
3836          * tell which devices we already know about, vs. new
3837          * devices, vs.  disappearing devices.
3838          */
3839         struct ReportExtendedLUNdata *physdev_list = NULL;
3840         struct ReportLUNdata *logdev_list = NULL;
3841         struct bmic_identify_physical_device *id_phys = NULL;
3842         struct bmic_identify_controller *id_ctlr = NULL;
3843         u32 nphysicals = 0;
3844         u32 nlogicals = 0;
3845         u32 nlocal_logicals = 0;
3846         u32 ndev_allocated = 0;
3847         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3848         int ncurrent = 0;
3849         int i, n_ext_target_devs, ndevs_to_allocate;
3850         int raid_ctlr_position;
3851         bool physical_device;
3852         DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3853
3854         currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3855         physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3856         logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3857         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3858         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3859         id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
3860
3861         if (!currentsd || !physdev_list || !logdev_list ||
3862                 !tmpdevice || !id_phys || !id_ctlr) {
3863                 dev_err(&h->pdev->dev, "out of memory\n");
3864                 goto out;
3865         }
3866         memset(lunzerobits, 0, sizeof(lunzerobits));
3867
3868         h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
3869
3870         if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3871                         logdev_list, &nlogicals)) {
3872                 h->drv_req_rescan = 1;
3873                 goto out;
3874         }
3875
3876         /* Set number of local logicals (non PTRAID) */
3877         if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
3878                 dev_warn(&h->pdev->dev,
3879                         "%s: Can't determine number of local logical devices.\n",
3880                         __func__);
3881         }
3882
3883         /* We might see up to the maximum number of logical and physical disks
3884          * plus external target devices, and a device for the local RAID
3885          * controller.
3886          */
3887         ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3888
3889         /* Allocate the per device structures */
3890         for (i = 0; i < ndevs_to_allocate; i++) {
3891                 if (i >= HPSA_MAX_DEVICES) {
3892                         dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3893                                 "  %d devices ignored.\n", HPSA_MAX_DEVICES,
3894                                 ndevs_to_allocate - HPSA_MAX_DEVICES);
3895                         break;
3896                 }
3897
3898                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3899                 if (!currentsd[i]) {
3900                         dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3901                                 __FILE__, __LINE__);
3902                         h->drv_req_rescan = 1;
3903                         goto out;
3904                 }
3905                 ndev_allocated++;
3906         }
3907
3908         if (is_scsi_rev_5(h))
3909                 raid_ctlr_position = 0;
3910         else
3911                 raid_ctlr_position = nphysicals + nlogicals;
3912
3913         /* adjust our table of devices */
3914         n_ext_target_devs = 0;
3915         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3916                 u8 *lunaddrbytes, is_OBDR = 0;
3917                 int rc = 0;
3918                 int phys_dev_index = i - (raid_ctlr_position == 0);
3919
3920                 physical_device = i < nphysicals + (raid_ctlr_position == 0);
3921
3922                 /* Figure out where the LUN ID info is coming from */
3923                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3924                         i, nphysicals, nlogicals, physdev_list, logdev_list);
3925
3926                 /* skip masked non-disk devices */
3927                 if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
3928                         (physdev_list->LUN[phys_dev_index].device_flags & 0x01))
3929                         continue;
3930
3931                 /* Get device type, vendor, model, device id */
3932                 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3933                                                         &is_OBDR);
3934                 if (rc == -ENOMEM) {
3935                         dev_warn(&h->pdev->dev,
3936                                 "Out of memory, rescan deferred.\n");
3937                         h->drv_req_rescan = 1;
3938                         goto out;
3939                 }
3940                 if (rc) {
3941                         dev_warn(&h->pdev->dev,
3942                                 "Inquiry failed, skipping device.\n");
3943                         continue;
3944                 }
3945
3946                 /* Determine if this is a lun from an external target array */
3947                 tmpdevice->external =
3948                         figure_external_status(h, raid_ctlr_position, i,
3949                                                 nphysicals, nlocal_logicals);
3950
3951                 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3952                 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
3953                 this_device = currentsd[ncurrent];
3954
3955                 /*
3956                  * For external target devices, we have to insert a LUN 0 which
3957                  * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3958                  * is nonetheless an enclosure device there.  We have to
3959                  * present it, otherwise Linux won't find anything if
3960                  * there is no LUN 0.
3961                  */
3962                 if (add_ext_target_dev(h, tmpdevice, this_device,
3963                                 lunaddrbytes, lunzerobits,
3964                                 &n_ext_target_devs)) {
3965                         ncurrent++;
3966                         this_device = currentsd[ncurrent];
3967                 }
3968
3969                 *this_device = *tmpdevice;
3970                 this_device->physical_device = physical_device;
3971
3972                 /*
3973                  * Expose all devices except for physical devices that
3974                  * are masked.
3975                  */
3976                 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
3977                         this_device->expose_device = 0;
3978                 else
3979                         this_device->expose_device = 1;
3980
3981                 switch (this_device->devtype) {
3982                 case TYPE_ROM:
3983                         /* We don't *really* support actual CD-ROM devices,
3984                          * just "One Button Disaster Recovery" tape drive
3985                          * which temporarily pretends to be a CD-ROM drive.
3986                          * So we check that the device is really an OBDR tape
3987                          * device by checking for "$DR-10" in bytes 43-48 of
3988                          * the inquiry data.
3989                          */
3990                         if (is_OBDR)
3991                                 ncurrent++;
3992                         break;
3993                 case TYPE_DISK:
3994                         if (this_device->physical_device) {
3995                                 /* The disk is in HBA mode. */
3996                                 /* Never use RAID mapper in HBA mode. */
3997                                 this_device->offload_enabled = 0;
3998                                 hpsa_get_ioaccel_drive_info(h, this_device,
3999                                         physdev_list, phys_dev_index, id_phys);
4000                                 hpsa_get_path_info(this_device,
4001                                         physdev_list, phys_dev_index, id_phys);
4002                         }
4003                         ncurrent++;
4004                         break;
4005                 case TYPE_TAPE:
4006                 case TYPE_MEDIUM_CHANGER:
4007                 case TYPE_ENCLOSURE:
4008                         ncurrent++;
4009                         break;
4010                 case TYPE_RAID:
4011                         /* Only present the Smart Array HBA as a RAID controller.
4012                          * If it's a RAID controller other than the HBA itself
4013                          * (an external RAID controller, MSA500 or similar),
4014                          * don't present it.
4015                          */
4016                         if (!is_hba_lunid(lunaddrbytes))
4017                                 break;
4018                         ncurrent++;
4019                         break;
4020                 default:
4021                         break;
4022                 }
4023                 if (ncurrent >= HPSA_MAX_DEVICES)
4024                         break;
4025         }
4026         adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4027 out:
4028         kfree(tmpdevice);
4029         for (i = 0; i < ndev_allocated; i++)
4030                 kfree(currentsd[i]);
4031         kfree(currentsd);
4032         kfree(physdev_list);
4033         kfree(logdev_list);
4034         kfree(id_ctlr);
4035         kfree(id_phys);
4036 }
4037
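/*
 * Fill in one controller SG descriptor from a kernel scatterlist entry:
 * DMA address, length, and a cleared extension field.
 */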
4038 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4039                                    struct scatterlist *sg)
4040 {
4041         u64 addr64 = (u64) sg_dma_address(sg);
4042         unsigned int len = sg_dma_len(sg);
4043
4044         desc->Addr = cpu_to_le64(addr64);
4045         desc->Len = cpu_to_le32(len);
4046         desc->Ext = 0;
4047 }
4048
4049 /*
4050  * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
4051  * dma mapping  and fills in the scatter gather entries of the
4052  * hpsa command, cp.
4053  */
4054 static int hpsa_scatter_gather(struct ctlr_info *h,
4055                 struct CommandList *cp,
4056                 struct scsi_cmnd *cmd)
4057 {
4058         struct scatterlist *sg;
4059         int use_sg, i, sg_limit, chained, last_sg;
4060         struct SGDescriptor *curr_sg;
4061
4062         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4063
4064         use_sg = scsi_dma_map(cmd);
4065         if (use_sg < 0)
4066                 return use_sg;
4067
4068         if (!use_sg)
4069                 goto sglist_finished;
4070
4071         /*
4072          * If the number of entries is greater than the max for a single list,
4073          * then we have a chained list; we will set up all but one entry in the
4074          * first list (the last entry is saved for link information);
4075                  * otherwise, we don't have a chained list and we'll set up each of
4076                  * the entries in the one list.
4077          */
4078         curr_sg = cp->SG;
4079         chained = use_sg > h->max_cmd_sg_entries;
4080         sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4081         last_sg = scsi_sg_count(cmd) - 1;
4082         scsi_for_each_sg(cmd, sg, sg_limit, i) {
4083                 hpsa_set_sg_descriptor(curr_sg, sg);
4084                 curr_sg++;
4085         }
4086
4087         if (chained) {
4088                 /*
4089                  * Continue with the chained list.  Set curr_sg to the chained
4090                  * list.  Modify the limit to the total count less the entries
4091                  * we've already set up.  Resume the scan at the list entry
4092                  * where the previous loop left off.
4093                  */
4094                 curr_sg = h->cmd_sg_list[cp->cmdindex];
4095                 sg_limit = use_sg - sg_limit;
4096                 for_each_sg(sg, sg, sg_limit, i) {
4097                         hpsa_set_sg_descriptor(curr_sg, sg);
4098                         curr_sg++;
4099                 }
4100         }
4101
4102         /* Back the pointer up to the last entry and mark it as "last". */
4103         (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4104
4105         if (use_sg + chained > h->maxSG)
4106                 h->maxSG = use_sg + chained;
4107
4108         if (chained) {
4109                 cp->Header.SGList = h->max_cmd_sg_entries;
4110                 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4111                 if (hpsa_map_sg_chain_block(h, cp)) {
4112                         scsi_dma_unmap(cmd);
4113                         return -1;
4114                 }
4115                 return 0;
4116         }
4117
4118 sglist_finished:
4119
4120         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
4121         cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4122         return 0;
4123 }
4124
4125 #define IO_ACCEL_INELIGIBLE (1)
4126 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4127 {
4128         int is_write = 0;
4129         u32 block;
4130         u32 block_cnt;
4131
4132         /* Perform CDB fixups, if needed, so that only 10-byte reads/writes are used */
4133         switch (cdb[0]) {
4134         case WRITE_6:
4135         case WRITE_12:
4136                 is_write = 1;
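                /* fall through */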
4137         case READ_6:
4138         case READ_12:
4139                 if (*cdb_len == 6) {
4140                         /* 21-bit LBA: low 5 bits of byte 1 + bytes 2-3 */
                        block = ((cdb[1] & 0x1F) << 16) |
                                get_unaligned_be16(&cdb[2]);
4141                         block_cnt = cdb[4];
4142                         if (block_cnt == 0)
4143                                 block_cnt = 256;
4144                 } else {
4145                         BUG_ON(*cdb_len != 12);
4146                         block = get_unaligned_be32(&cdb[2]);
4147                         block_cnt = get_unaligned_be32(&cdb[6]);
4148                 }
4149                 if (block_cnt > 0xffff)
4150                         return IO_ACCEL_INELIGIBLE;
4151
4152                 cdb[0] = is_write ? WRITE_10 : READ_10;
4153                 cdb[1] = 0;
4154                 cdb[2] = (u8) (block >> 24);
4155                 cdb[3] = (u8) (block >> 16);
4156                 cdb[4] = (u8) (block >> 8);
4157                 cdb[5] = (u8) (block);
4158                 cdb[6] = 0;
4159                 cdb[7] = (u8) (block_cnt >> 8);
4160                 cdb[8] = (u8) (block_cnt);
4161                 cdb[9] = 0;
4162                 *cdb_len = 10;
4163                 break;
4164         }
4165         return 0;
4166 }
4167
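/*
 * Build and submit an ioaccel1 request that bypasses the RAID stack and goes
 * straight to the physical disk identified by ioaccel_handle.
 */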
4168 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4169         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4170         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4171 {
4172         struct scsi_cmnd *cmd = c->scsi_cmd;
4173         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4174         unsigned int len;
4175         unsigned int total_len = 0;
4176         struct scatterlist *sg;
4177         u64 addr64;
4178         int use_sg, i;
4179         struct SGDescriptor *curr_sg;
4180         u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4181
4182         /* TODO: implement chaining support */
4183         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4184                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4185                 return IO_ACCEL_INELIGIBLE;
4186         }
4187
4188         BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4189
4190         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4191                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4192                 return IO_ACCEL_INELIGIBLE;
4193         }
4194
4195         c->cmd_type = CMD_IOACCEL1;
4196
4197         /* Adjust the DMA address to point to the accelerated command buffer */
4198         c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4199                                 (c->cmdindex * sizeof(*cp));
4200         BUG_ON(c->busaddr & 0x0000007F);
4201
4202         use_sg = scsi_dma_map(cmd);
4203         if (use_sg < 0) {
4204                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4205                 return use_sg;
4206         }
4207
4208         if (use_sg) {
4209                 curr_sg = cp->SG;
4210                 scsi_for_each_sg(cmd, sg, use_sg, i) {
4211                         addr64 = (u64) sg_dma_address(sg);
4212                         len  = sg_dma_len(sg);
4213                         total_len += len;
4214                         curr_sg->Addr = cpu_to_le64(addr64);
4215                         curr_sg->Len = cpu_to_le32(len);
4216                         curr_sg->Ext = cpu_to_le32(0);
4217                         curr_sg++;
4218                 }
4219                 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4220
4221                 switch (cmd->sc_data_direction) {
4222                 case DMA_TO_DEVICE:
4223                         control |= IOACCEL1_CONTROL_DATA_OUT;
4224                         break;
4225                 case DMA_FROM_DEVICE:
4226                         control |= IOACCEL1_CONTROL_DATA_IN;
4227                         break;
4228                 case DMA_NONE:
4229                         control |= IOACCEL1_CONTROL_NODATAXFER;
4230                         break;
4231                 default:
4232                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4233                         cmd->sc_data_direction);
4234                         BUG();
4235                         break;
4236                 }
4237         } else {
4238                 control |= IOACCEL1_CONTROL_NODATAXFER;
4239         }
4240
4241         c->Header.SGList = use_sg;
4242         /* Fill out the command structure to submit */
4243         cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4244         cp->transfer_len = cpu_to_le32(total_len);
4245         cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4246                         (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4247         cp->control = cpu_to_le32(control);
4248         memcpy(cp->CDB, cdb, cdb_len);
4249         memcpy(cp->CISS_LUN, scsi3addr, 8);
4250         /* Tag was already set at init time. */
4251         enqueue_cmd_and_start_io(h, c);
4252         return 0;
4253 }
4254
4255 /*
4256  * Queue a command directly to a device behind the controller using the
4257  * I/O accelerator path.
4258  */
4259 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4260         struct CommandList *c)
4261 {
4262         struct scsi_cmnd *cmd = c->scsi_cmd;
4263         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4264
4265         c->phys_disk = dev;
4266
4267         return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4268                 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4269 }
4270
4271 /*
4272  * Set encryption parameters for the ioaccel2 request
4273  */
4274 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4275         struct CommandList *c, struct io_accel2_cmd *cp)
4276 {
4277         struct scsi_cmnd *cmd = c->scsi_cmd;
4278         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4279         struct raid_map_data *map = &dev->raid_map;
4280         u64 first_block;
4281
4282         /* Are we doing encryption on this device */
4283         if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4284                 return;
4285         /* Set the data encryption key index. */
4286         cp->dekindex = map->dekindex;
4287
4288         /* Set the encryption enable flag, encoded into direction field. */
4289         cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4290
4291         /* Set encryption tweak values based on logical block address.
4292          * If block size is 512, tweak value is LBA.
4293          * For other block sizes, tweak value is (LBA * block size) / 512.
4294          */
4295         switch (cmd->cmnd[0]) {
4296         /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4297         case WRITE_6:
4298         case READ_6:
4299                 /* 21-bit LBA: low 5 bits of byte 1 + bytes 2-3 */
                first_block = ((cmd->cmnd[1] & 0x1F) << 16) |
                                get_unaligned_be16(&cmd->cmnd[2]);
4300                 break;
4301         case WRITE_10:
4302         case READ_10:
4303         /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4304         case WRITE_12:
4305         case READ_12:
4306                 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4307                 break;
4308         case WRITE_16:
4309         case READ_16:
4310                 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4311                 break;
4312         default:
4313                 dev_err(&h->pdev->dev,
4314                         "ERROR: %s: size (0x%x) not supported for encryption\n",
4315                         __func__, cmd->cmnd[0]);
4316                 BUG();
4317                 break;
4318         }
4319
4320         if (le32_to_cpu(map->volume_blk_size) != 512)
4321                 first_block = first_block *
4322                                 le32_to_cpu(map->volume_blk_size)/512;
4323
4324         cp->tweak_lower = cpu_to_le32(first_block);
4325         cp->tweak_upper = cpu_to_le32(first_block >> 32);
4326 }
4327
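/*
 * Build and submit an ioaccel2 request, chaining the SG list and setting
 * encryption parameters when required.
 */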
4328 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4329         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4330         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4331 {
4332         struct scsi_cmnd *cmd = c->scsi_cmd;
4333         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4334         struct ioaccel2_sg_element *curr_sg;
4335         int use_sg, i;
4336         struct scatterlist *sg;
4337         u64 addr64;
4338         u32 len;
4339         u32 total_len = 0;
4340
4341         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4342
4343         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4344                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4345                 return IO_ACCEL_INELIGIBLE;
4346         }
4347
4348         c->cmd_type = CMD_IOACCEL2;
4349         /* Adjust the DMA address to point to the accelerated command buffer */
4350         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4351                                 (c->cmdindex * sizeof(*cp));
4352         BUG_ON(c->busaddr & 0x0000007F);
4353
4354         memset(cp, 0, sizeof(*cp));
4355         cp->IU_type = IOACCEL2_IU_TYPE;
4356
4357         use_sg = scsi_dma_map(cmd);
4358         if (use_sg < 0) {
4359                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4360                 return use_sg;
4361         }
4362
4363         if (use_sg) {
4364                 curr_sg = cp->sg;
4365                 if (use_sg > h->ioaccel_maxsg) {
4366                         addr64 = le64_to_cpu(
4367                                 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4368                         curr_sg->address = cpu_to_le64(addr64);
4369                         curr_sg->length = 0;
4370                         curr_sg->reserved[0] = 0;
4371                         curr_sg->reserved[1] = 0;
4372                         curr_sg->reserved[2] = 0;
4373                         curr_sg->chain_indicator = 0x80;
4374
4375                         curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4376                 }
4377                 scsi_for_each_sg(cmd, sg, use_sg, i) {
4378                         addr64 = (u64) sg_dma_address(sg);
4379                         len  = sg_dma_len(sg);
4380                         total_len += len;
4381                         curr_sg->address = cpu_to_le64(addr64);
4382                         curr_sg->length = cpu_to_le32(len);
4383                         curr_sg->reserved[0] = 0;
4384                         curr_sg->reserved[1] = 0;
4385                         curr_sg->reserved[2] = 0;
4386                         curr_sg->chain_indicator = 0;
4387                         curr_sg++;
4388                 }
4389
4390                 switch (cmd->sc_data_direction) {
4391                 case DMA_TO_DEVICE:
4392                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4393                         cp->direction |= IOACCEL2_DIR_DATA_OUT;
4394                         break;
4395                 case DMA_FROM_DEVICE:
4396                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4397                         cp->direction |= IOACCEL2_DIR_DATA_IN;
4398                         break;
4399                 case DMA_NONE:
4400                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4401                         cp->direction |= IOACCEL2_DIR_NO_DATA;
4402                         break;
4403                 default:
4404                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4405                                 cmd->sc_data_direction);
4406                         BUG();
4407                         break;
4408                 }
4409         } else {
4410                 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4411                 cp->direction |= IOACCEL2_DIR_NO_DATA;
4412         }
4413
4414         /* Set encryption parameters, if necessary */
4415         set_encrypt_ioaccel2(h, c, cp);
4416
4417         cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4418         cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4419         memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4420
4421         cp->data_len = cpu_to_le32(total_len);
4422         cp->err_ptr = cpu_to_le64(c->busaddr +
4423                         offsetof(struct io_accel2_cmd, error_data));
4424         cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4425
4426         /* fill in sg elements */
4427         if (use_sg > h->ioaccel_maxsg) {
4428                 cp->sg_count = 1;
4429                 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4430                 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4431                         atomic_dec(&phys_disk->ioaccel_cmds_out);
4432                         scsi_dma_unmap(cmd);
4433                         return -1;
4434                 }
4435         } else
4436                 cp->sg_count = (u8) use_sg;
4437
4438         enqueue_cmd_and_start_io(h, c);
4439         return 0;
4440 }
4441
4442 /*
4443  * Queue a command to the correct I/O accelerator path.
4444  */
4445 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4446         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4447         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4448 {
4449         /* Try to honor the device's queue depth */
4450         if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4451                                         phys_disk->queue_depth) {
4452                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4453                 return IO_ACCEL_INELIGIBLE;
4454         }
4455         if (h->transMethod & CFGTBL_Trans_io_accel1)
4456                 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4457                                                 cdb, cdb_len, scsi3addr,
4458                                                 phys_disk);
4459         else
4460                 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4461                                                 cdb, cdb_len, scsi3addr,
4462                                                 phys_disk);
4463 }
4464
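/*
 * Helper for N-way mirrors: steer *map_index into the mirror group selected
 * by offload_to_mirror so reads are balanced across the mirror members.
 */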
4465 static void raid_map_helper(struct raid_map_data *map,
4466                 int offload_to_mirror, u32 *map_index, u32 *current_group)
4467 {
4468         if (offload_to_mirror == 0)  {
4469                 /* use physical disk in the first mirrored group. */
4470                 *map_index %= le16_to_cpu(map->data_disks_per_row);
4471                 return;
4472         }
4473         do {
4474                 /* determine mirror group that *map_index indicates */
4475                 *current_group = *map_index /
4476                         le16_to_cpu(map->data_disks_per_row);
4477                 if (offload_to_mirror == *current_group)
4478                         continue;
4479                 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4480                         /* select map index from next group */
4481                         *map_index += le16_to_cpu(map->data_disks_per_row);
4482                         (*current_group)++;
4483                 } else {
4484                         /* select map index from first group */
4485                         *map_index %= le16_to_cpu(map->data_disks_per_row);
4486                         *current_group = 0;
4487                 }
4488         } while (offload_to_mirror != *current_group);
4489 }
4490
4491 /*
4492  * Attempt to perform offload RAID mapping for a logical volume I/O.
4493  */
4494 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4495         struct CommandList *c)
4496 {
4497         struct scsi_cmnd *cmd = c->scsi_cmd;
4498         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4499         struct raid_map_data *map = &dev->raid_map;
4500         struct raid_map_disk_data *dd = &map->data[0];
4501         int is_write = 0;
4502         u32 map_index;
4503         u64 first_block, last_block;
4504         u32 block_cnt;
4505         u32 blocks_per_row;
4506         u64 first_row, last_row;
4507         u32 first_row_offset, last_row_offset;
4508         u32 first_column, last_column;
4509         u64 r0_first_row, r0_last_row;
4510         u32 r5or6_blocks_per_row;
4511         u64 r5or6_first_row, r5or6_last_row;
4512         u32 r5or6_first_row_offset, r5or6_last_row_offset;
4513         u32 r5or6_first_column, r5or6_last_column;
4514         u32 total_disks_per_row;
4515         u32 stripesize;
4516         u32 first_group, last_group, current_group;
4517         u32 map_row;
4518         u32 disk_handle;
4519         u64 disk_block;
4520         u32 disk_block_cnt;
4521         u8 cdb[16];
4522         u8 cdb_len;
4523         u16 strip_size;
4524 #if BITS_PER_LONG == 32
4525         u64 tmpdiv;
4526 #endif
4527         int offload_to_mirror;
4528
4529         /* check for valid opcode, get LBA and block count */
4530         switch (cmd->cmnd[0]) {
4531         case WRITE_6:
4532                 is_write = 1;
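                /* fall through */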
4533         case READ_6:
4534                 /* 21-bit LBA: low 5 bits of byte 1 + bytes 2-3 */
                first_block = ((cmd->cmnd[1] & 0x1F) << 16) |
                                get_unaligned_be16(&cmd->cmnd[2]);
4535                 block_cnt = cmd->cmnd[4];
4536                 if (block_cnt == 0)
4537                         block_cnt = 256;
4538                 break;
4539         case WRITE_10:
4540                 is_write = 1;
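                /* fall through */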
4541         case READ_10:
4542                 first_block =
4543                         (((u64) cmd->cmnd[2]) << 24) |
4544                         (((u64) cmd->cmnd[3]) << 16) |
4545                         (((u64) cmd->cmnd[4]) << 8) |
4546                         cmd->cmnd[5];
4547                 block_cnt =
4548                         (((u32) cmd->cmnd[7]) << 8) |
4549                         cmd->cmnd[8];
4550                 break;
4551         case WRITE_12:
4552                 is_write = 1;
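                /* fall through */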
4553         case READ_12:
4554                 first_block =
4555                         (((u64) cmd->cmnd[2]) << 24) |
4556                         (((u64) cmd->cmnd[3]) << 16) |
4557                         (((u64) cmd->cmnd[4]) << 8) |
4558                         cmd->cmnd[5];
4559                 block_cnt =
4560                         (((u32) cmd->cmnd[6]) << 24) |
4561                         (((u32) cmd->cmnd[7]) << 16) |
4562                         (((u32) cmd->cmnd[8]) << 8) |
4563                         cmd->cmnd[9];
4564                 break;
4565         case WRITE_16:
4566                 is_write = 1;
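                /* fall through */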
4567         case READ_16:
4568                 first_block =
4569                         (((u64) cmd->cmnd[2]) << 56) |
4570                         (((u64) cmd->cmnd[3]) << 48) |
4571                         (((u64) cmd->cmnd[4]) << 40) |
4572                         (((u64) cmd->cmnd[5]) << 32) |
4573                         (((u64) cmd->cmnd[6]) << 24) |
4574                         (((u64) cmd->cmnd[7]) << 16) |
4575                         (((u64) cmd->cmnd[8]) << 8) |
4576                         cmd->cmnd[9];
4577                 block_cnt =
4578                         (((u32) cmd->cmnd[10]) << 24) |
4579                         (((u32) cmd->cmnd[11]) << 16) |
4580                         (((u32) cmd->cmnd[12]) << 8) |
4581                         cmd->cmnd[13];
4582                 break;
4583         default:
4584                 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4585         }
4586         last_block = first_block + block_cnt - 1;
4587
4588         /* check for write to non-RAID-0 */
4589         if (is_write && dev->raid_level != 0)
4590                 return IO_ACCEL_INELIGIBLE;
4591
4592         /* check for invalid block or wraparound */
4593         if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4594                 last_block < first_block)
4595                 return IO_ACCEL_INELIGIBLE;
4596
4597         /* calculate stripe information for the request */
4598         blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4599                                 le16_to_cpu(map->strip_size);
4600         strip_size = le16_to_cpu(map->strip_size);
4601 #if BITS_PER_LONG == 32
4602         tmpdiv = first_block;
4603         (void) do_div(tmpdiv, blocks_per_row);
4604         first_row = tmpdiv;
4605         tmpdiv = last_block;
4606         (void) do_div(tmpdiv, blocks_per_row);
4607         last_row = tmpdiv;
4608         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4609         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4610         tmpdiv = first_row_offset;
4611         (void) do_div(tmpdiv, strip_size);
4612         first_column = tmpdiv;
4613         tmpdiv = last_row_offset;
4614         (void) do_div(tmpdiv, strip_size);
4615         last_column = tmpdiv;
4616 #else
4617         first_row = first_block / blocks_per_row;
4618         last_row = last_block / blocks_per_row;
4619         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4620         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4621         first_column = first_row_offset / strip_size;
4622         last_column = last_row_offset / strip_size;
4623 #endif
4624
4625         /* if this isn't a single row/column request, hand it to the controller */
4626         if ((first_row != last_row) || (first_column != last_column))
4627                 return IO_ACCEL_INELIGIBLE;
4628
4629         /* proceeding with driver mapping */
4630         total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4631                                 le16_to_cpu(map->metadata_disks_per_row);
4632         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4633                                 le16_to_cpu(map->row_cnt);
4634         map_index = (map_row * total_disks_per_row) + first_column;
4635
4636         switch (dev->raid_level) {
4637         case HPSA_RAID_0:
4638                 break; /* nothing special to do */
4639         case HPSA_RAID_1:
4640                 /* Handles load balance across RAID 1 members.
4641                  * (2-drive R1 and R10 with even # of drives.)
4642                  * Appropriate for SSDs, not optimal for HDDs
4643                  */
4644                 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4645                 if (dev->offload_to_mirror)
4646                         map_index += le16_to_cpu(map->data_disks_per_row);
4647                 dev->offload_to_mirror = !dev->offload_to_mirror;
4648                 break;
4649         case HPSA_RAID_ADM:
4650                 /* Handles N-way mirrors (R1-ADM)
4651                  * and R10 with # of drives divisible by 3.
4652                  */
4653                 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4654
4655                 offload_to_mirror = dev->offload_to_mirror;
4656                 raid_map_helper(map, offload_to_mirror,
4657                                 &map_index, &current_group);
4658                 /* set mirror group to use next time */
4659                 offload_to_mirror =
4660                         (offload_to_mirror >=
4661                         le16_to_cpu(map->layout_map_count) - 1)
4662                         ? 0 : offload_to_mirror + 1;
4663                 dev->offload_to_mirror = offload_to_mirror;
4664                 /* Avoid direct use of dev->offload_to_mirror within this
4665                  * function since multiple threads might simultaneously
4666                  * increment it beyond the range of map->layout_map_count - 1.
4667                  */
4668                 break;
4669         case HPSA_RAID_5:
4670         case HPSA_RAID_6:
4671                 if (le16_to_cpu(map->layout_map_count) <= 1)
4672                         break;
4673
4674                 /* Verify first and last block are in same RAID group */
4675                 r5or6_blocks_per_row =
4676                         le16_to_cpu(map->strip_size) *
4677                         le16_to_cpu(map->data_disks_per_row);
4678                 BUG_ON(r5or6_blocks_per_row == 0);
4679                 stripesize = r5or6_blocks_per_row *
4680                         le16_to_cpu(map->layout_map_count);
4681 #if BITS_PER_LONG == 32
4682                 tmpdiv = first_block;
4683                 first_group = do_div(tmpdiv, stripesize);
4684                 tmpdiv = first_group;
4685                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4686                 first_group = tmpdiv;
4687                 tmpdiv = last_block;
4688                 last_group = do_div(tmpdiv, stripesize);
4689                 tmpdiv = last_group;
4690                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4691                 last_group = tmpdiv;
4692 #else
4693                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4694                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
4695 #endif
4696                 if (first_group != last_group)
4697                         return IO_ACCEL_INELIGIBLE;
4698
4699                 /* Verify request is in a single row of RAID 5/6 */
4700 #if BITS_PER_LONG == 32
4701                 tmpdiv = first_block;
4702                 (void) do_div(tmpdiv, stripesize);
4703                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4704                 tmpdiv = last_block;
4705                 (void) do_div(tmpdiv, stripesize);
4706                 r5or6_last_row = r0_last_row = tmpdiv;
4707 #else
4708                 first_row = r5or6_first_row = r0_first_row =
4709                                                 first_block / stripesize;
4710                 r5or6_last_row = r0_last_row = last_block / stripesize;
4711 #endif
4712                 if (r5or6_first_row != r5or6_last_row)
4713                         return IO_ACCEL_INELIGIBLE;
4714
4715
4716                 /* Verify request is in a single column */
4717 #if BITS_PER_LONG == 32
4718                 tmpdiv = first_block;
4719                 first_row_offset = do_div(tmpdiv, stripesize);
4720                 tmpdiv = first_row_offset;
4721                 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4722                 r5or6_first_row_offset = first_row_offset;
4723                 tmpdiv = last_block;
4724                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4725                 tmpdiv = r5or6_last_row_offset;
4726                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4727                 tmpdiv = r5or6_first_row_offset;
4728                 (void) do_div(tmpdiv, map->strip_size);
4729                 first_column = r5or6_first_column = tmpdiv;
4730                 tmpdiv = r5or6_last_row_offset;
4731                 (void) do_div(tmpdiv, map->strip_size);
4732                 r5or6_last_column = tmpdiv;
4733 #else
4734                 first_row_offset = r5or6_first_row_offset =
4735                         (u32)((first_block % stripesize) %
4736                                                 r5or6_blocks_per_row);
4737
4738                 r5or6_last_row_offset =
4739                         (u32)((last_block % stripesize) %
4740                                                 r5or6_blocks_per_row);
4741
4742                 first_column = r5or6_first_column =
4743                         r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4744                 r5or6_last_column =
4745                         r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4746 #endif
4747                 if (r5or6_first_column != r5or6_last_column)
4748                         return IO_ACCEL_INELIGIBLE;
4749
4750                 /* Request is eligible */
4751                 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4752                         le16_to_cpu(map->row_cnt);
4753
4754                 map_index = (first_group *
4755                         (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4756                         (map_row * total_disks_per_row) + first_column;
4757                 break;
4758         default:
4759                 return IO_ACCEL_INELIGIBLE;
4760         }
4761
4762         if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4763                 return IO_ACCEL_INELIGIBLE;
4764
4765         c->phys_disk = dev->phys_disk[map_index];
4766
4767         disk_handle = dd[map_index].ioaccel_handle;
4768         disk_block = le64_to_cpu(map->disk_starting_blk) +
4769                         first_row * le16_to_cpu(map->strip_size) +
4770                         (first_row_offset - first_column *
4771                         le16_to_cpu(map->strip_size));
4772         disk_block_cnt = block_cnt;
4773
4774         /* handle differing logical/physical block sizes */
4775         if (map->phys_blk_shift) {
4776                 disk_block <<= map->phys_blk_shift;
4777                 disk_block_cnt <<= map->phys_blk_shift;
4778         }
4779         BUG_ON(disk_block_cnt > 0xffff);
4780
4781         /* build the new CDB for the physical disk I/O */
4782         if (disk_block > 0xffffffff) {
4783                 cdb[0] = is_write ? WRITE_16 : READ_16;
4784                 cdb[1] = 0;
4785                 cdb[2] = (u8) (disk_block >> 56);
4786                 cdb[3] = (u8) (disk_block >> 48);
4787                 cdb[4] = (u8) (disk_block >> 40);
4788                 cdb[5] = (u8) (disk_block >> 32);
4789                 cdb[6] = (u8) (disk_block >> 24);
4790                 cdb[7] = (u8) (disk_block >> 16);
4791                 cdb[8] = (u8) (disk_block >> 8);
4792                 cdb[9] = (u8) (disk_block);
4793                 cdb[10] = (u8) (disk_block_cnt >> 24);
4794                 cdb[11] = (u8) (disk_block_cnt >> 16);
4795                 cdb[12] = (u8) (disk_block_cnt >> 8);
4796                 cdb[13] = (u8) (disk_block_cnt);
4797                 cdb[14] = 0;
4798                 cdb[15] = 0;
4799                 cdb_len = 16;
4800         } else {
4801                 cdb[0] = is_write ? WRITE_10 : READ_10;
4802                 cdb[1] = 0;
4803                 cdb[2] = (u8) (disk_block >> 24);
4804                 cdb[3] = (u8) (disk_block >> 16);
4805                 cdb[4] = (u8) (disk_block >> 8);
4806                 cdb[5] = (u8) (disk_block);
4807                 cdb[6] = 0;
4808                 cdb[7] = (u8) (disk_block_cnt >> 8);
4809                 cdb[8] = (u8) (disk_block_cnt);
4810                 cdb[9] = 0;
4811                 cdb_len = 10;
4812         }
4813         return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
4814                                                 dev->scsi3addr,
4815                                                 dev->phys_disk[map_index]);
4816 }
4817
4818 /*
4819  * Submit commands down the "normal" RAID stack path.
4820  * All callers of hpsa_ciss_submit must check lockup_detected
4821  * beforehand: before, and optionally after, calling cmd_alloc.
4822  */
4823 static int hpsa_ciss_submit(struct ctlr_info *h,
4824         struct CommandList *c, struct scsi_cmnd *cmd,
4825         unsigned char scsi3addr[])
4826 {
4827         cmd->host_scribble = (unsigned char *) c;
4828         c->cmd_type = CMD_SCSI;
4829         c->scsi_cmd = cmd;
4830         c->Header.ReplyQueue = 0;  /* unused in simple mode */
4831         memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4832         c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4833
4834         /* Fill in the request block... */
4835
4836         c->Request.Timeout = 0;
4837         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4838         c->Request.CDBLen = cmd->cmd_len;
4839         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4840         switch (cmd->sc_data_direction) {
4841         case DMA_TO_DEVICE:
4842                 c->Request.type_attr_dir =
4843                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4844                 break;
4845         case DMA_FROM_DEVICE:
4846                 c->Request.type_attr_dir =
4847                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4848                 break;
4849         case DMA_NONE:
4850                 c->Request.type_attr_dir =
4851                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4852                 break;
4853         case DMA_BIDIRECTIONAL:
4854                 /* This can happen if a buggy application does a scsi passthru
4855                  * and sets both inlen and outlen to non-zero. ( see
4856                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4857                  */
4858
4859                 c->Request.type_attr_dir =
4860                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4861                 /* This is technically wrong, and hpsa controllers should
4862                  * reject it with CMD_INVALID, which is the most correct
4863                  * response, but non-fibre backends appear to let it
4864                  * slide by, and give the same results as if this field
4865                  * were set correctly.  Either way is acceptable for
4866                  * our purposes here.
4867                  */
4868
4869                 break;
4870
4871         default:
4872                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4873                         cmd->sc_data_direction);
4874                 BUG();
4875                 break;
4876         }
4877
4878         if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4879                 hpsa_cmd_resolve_and_free(h, c);
4880                 return SCSI_MLQUEUE_HOST_BUSY;
4881         }
4882         enqueue_cmd_and_start_io(h, c);
4883         /* the cmd'll come back via intr handler in complete_scsi_command()  */
4884         return 0;
4885 }
4886
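/*
 * Fully (re)initialize the command block at the given pool index: header tag,
 * error-info buffer, and the DMA bus addresses of both.
 */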
4887 static void hpsa_cmd_init(struct ctlr_info *h, int index,
4888                                 struct CommandList *c)
4889 {
4890         dma_addr_t cmd_dma_handle, err_dma_handle;
4891
4892         /* Zero out all of the CommandList except the last field, refcount */
4893         memset(c, 0, offsetof(struct CommandList, refcount));
4894         c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4895         cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4896         c->err_info = h->errinfo_pool + index;
4897         memset(c->err_info, 0, sizeof(*c->err_info));
4898         err_dma_handle = h->errinfo_pool_dhandle
4899             + index * sizeof(*c->err_info);
4900         c->cmdindex = index;
4901         c->busaddr = (u32) cmd_dma_handle;
4902         c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4903         c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4904         c->h = h;
4905         c->scsi_cmd = SCSI_CMD_IDLE;
4906 }
4907
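/* Initialize every command block in the pool once, up front. */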
4908 static void hpsa_preinitialize_commands(struct ctlr_info *h)
4909 {
4910         int i;
4911
4912         for (i = 0; i < h->nr_cmds; i++) {
4913                 struct CommandList *c = h->cmd_pool + i;
4914
4915                 hpsa_cmd_init(h, i, c);
4916                 atomic_set(&c->refcount, 0);
4917         }
4918 }
4919
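/*
 * Lighter-weight re-initialization for reused command blocks: only the CDB,
 * the error info, and the bus address are reset.
 */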
4920 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4921                                 struct CommandList *c)
4922 {
4923         dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4924
4925         BUG_ON(c->cmdindex != index);
4926
4927         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4928         memset(c->err_info, 0, sizeof(*c->err_info));
4929         c->busaddr = (u32) cmd_dma_handle;
4930 }
4931
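/*
 * Try to submit a command via an I/O accelerator path: RAID-mapped if offload
 * is enabled on the device, direct-mapped if the disk is in HBA mode.
 * Returns IO_ACCEL_INELIGIBLE if neither path applies.
 */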
4932 static int hpsa_ioaccel_submit(struct ctlr_info *h,
4933                 struct CommandList *c, struct scsi_cmnd *cmd,
4934                 unsigned char *scsi3addr)
4935 {
4936         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4937         int rc = IO_ACCEL_INELIGIBLE;
4938
4939         cmd->host_scribble = (unsigned char *) c;
4940
4941         if (dev->offload_enabled) {
4942                 hpsa_cmd_init(h, c->cmdindex, c);
4943                 c->cmd_type = CMD_SCSI;
4944                 c->scsi_cmd = cmd;
4945                 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4946                 if (rc < 0)     /* scsi_dma_map failed. */
4947                         rc = SCSI_MLQUEUE_HOST_BUSY;
4948         } else if (dev->hba_ioaccel_enabled) {
4949                 hpsa_cmd_init(h, c->cmdindex, c);
4950                 c->cmd_type = CMD_SCSI;
4951                 c->scsi_cmd = cmd;
4952                 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4953                 if (rc < 0)     /* scsi_dma_map failed. */
4954                         rc = SCSI_MLQUEUE_HOST_BUSY;
4955         }
4956         return rc;
4957 }
4958
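/*
 * Work function used to resubmit a command from process context, typically
 * after an ioaccel request needs to be retried down the normal RAID path.
 */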
4959 static void hpsa_command_resubmit_worker(struct work_struct *work)
4960 {
4961         struct scsi_cmnd *cmd;
4962         struct hpsa_scsi_dev_t *dev;
4963         struct CommandList *c = container_of(work, struct CommandList, work);
4964
4965         cmd = c->scsi_cmd;
4966         dev = cmd->device->hostdata;
4967         if (!dev) {
4968                 cmd->result = DID_NO_CONNECT << 16;
4969                 return hpsa_cmd_free_and_done(c->h, c, cmd);
4970         }
4971         if (c->reset_pending)
4972                 return hpsa_cmd_resolve_and_free(c->h, c);
4973         if (c->abort_pending)
4974                 return hpsa_cmd_abort_and_free(c->h, c, cmd);
4975         if (c->cmd_type == CMD_IOACCEL2) {
4976                 struct ctlr_info *h = c->h;
4977                 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4978                 int rc;
4979
4980                 if (c2->error_data.serv_response ==
4981                                 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4982                         rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4983                         if (rc == 0)
4984                                 return;
4985                         if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4986                                 /*
4987                                  * If we get here, it means dma mapping failed.
4988                                  * Try again via scsi mid layer, which will
4989                                  * then get SCSI_MLQUEUE_HOST_BUSY.
4990                                  */
4991                                 cmd->result = DID_IMM_RETRY << 16;
4992                                 return hpsa_cmd_free_and_done(h, c, cmd);
4993                         }
4994                         /* else, fall thru and resubmit down CISS path */
4995                 }
4996         }
4997         hpsa_cmd_partial_init(c->h, c->cmdindex, c);
4998         if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4999                 /*
5000                  * If we get here, it means dma mapping failed. Try
5001                  * again via scsi mid layer, which will then get
5002                  * SCSI_MLQUEUE_HOST_BUSY.
5003                  *
5004                  * hpsa_ciss_submit will have already freed c
5005                  * if it encountered a dma mapping failure.
5006                  */
5007                 cmd->result = DID_IMM_RETRY << 16;
5008                 cmd->scsi_done(cmd);
5009         }
5010 }
5011
5012 /* Runs without holding struct Scsi_Host->host_lock (host_lock-less mode) */
5013 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5014 {
5015         struct ctlr_info *h;
5016         struct hpsa_scsi_dev_t *dev;
5017         unsigned char scsi3addr[8];
5018         struct CommandList *c;
5019         int rc = 0;
5020
5021         /* Get the ptr to our adapter structure out of cmd->host. */
5022         h = sdev_to_hba(cmd->device);
5023
5024         BUG_ON(cmd->request->tag < 0);
5025
5026         dev = cmd->device->hostdata;
5027         if (!dev) {
5028                 cmd->result = DID_NO_CONNECT << 16;
5029                 cmd->scsi_done(cmd);
5030                 return 0;
5031         }
5032
5033         memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5034
5035         if (unlikely(lockup_detected(h))) {
5036                 cmd->result = DID_NO_CONNECT << 16;
5037                 cmd->scsi_done(cmd);
5038                 return 0;
5039         }
5040         c = cmd_tagged_alloc(h, cmd);
5041
5042         /*
5043          * Call alternate submit routine for I/O accelerated commands.
5044          * Retries always go down the normal I/O path.
5045          */
5046         if (likely(cmd->retries == 0 &&
5047                 cmd->request->cmd_type == REQ_TYPE_FS &&
5048                 h->acciopath_status)) {
5049                 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5050                 if (rc == 0)
5051                         return 0;
5052                 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5053                         hpsa_cmd_resolve_and_free(h, c);
5054                         return SCSI_MLQUEUE_HOST_BUSY;
5055                 }
5056         }
5057         return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5058 }
5059
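/* Mark the current scan as finished and wake up anyone waiting for it. */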
5060 static void hpsa_scan_complete(struct ctlr_info *h)
5061 {
5062         unsigned long flags;
5063
5064         spin_lock_irqsave(&h->scan_lock, flags);
5065         h->scan_finished = 1;
5066         wake_up_all(&h->scan_wait_queue);
5067         spin_unlock_irqrestore(&h->scan_lock, flags);
5068 }
5069
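/*
 * Start a rescan of this controller's devices, unless the controller is
 * known to have locked up.
 */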
5070 static void hpsa_scan_start(struct Scsi_Host *sh)
5071 {
5072         struct ctlr_info *h = shost_to_hba(sh);
5073         unsigned long flags;
5074
5075         /*
5076          * Don't let rescans be initiated on a controller known to be locked
5077          * up.  If the controller locks up *during* a rescan, that thread is
5078          * probably hosed, but at least we can prevent new rescan threads from
5079          * piling up on a locked up controller.
5080          */
5081         if (unlikely(lockup_detected(h)))
5082                 return hpsa_scan_complete(h);
5083
5084         /* wait until any scan already in progress is finished. */
5085         while (1) {
5086                 spin_lock_irqsave(&h->scan_lock, flags);
5087                 if (h->scan_finished)
5088                         break;
5089                 spin_unlock_irqrestore(&h->scan_lock, flags);
5090                 wait_event(h->scan_wait_queue, h->scan_finished);
5091                 /* Note: We don't need to worry about a race between this
5092                  * thread and driver unload because the midlayer will
5093                  * have incremented the reference count, so unload won't
5094                  * happen if we're in here.
5095                  */
5096         }
5097         h->scan_finished = 0; /* mark scan as in progress */
5098         spin_unlock_irqrestore(&h->scan_lock, flags);
5099
5100         if (unlikely(lockup_detected(h)))
5101                 return hpsa_scan_complete(h);
5102
5103         hpsa_update_scsi_devices(h);
5104
5105         hpsa_scan_complete(h);
5106 }
5107
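/* Clamp the requested queue depth to what the logical drive supports. */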
5108 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5109 {
5110         struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5111
5112         if (!logical_drive)
5113                 return -ENODEV;
5114
5115         if (qdepth < 1)
5116                 qdepth = 1;
5117         else if (qdepth > logical_drive->queue_depth)
5118                 qdepth = logical_drive->queue_depth;
5119
5120         return scsi_change_queue_depth(sdev, qdepth);
5121 }
5122
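/* Report whether the in-progress rescan has completed. */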
5123 static int hpsa_scan_finished(struct Scsi_Host *sh,
5124         unsigned long elapsed_time)
5125 {
5126         struct ctlr_info *h = shost_to_hba(sh);
5127         unsigned long flags;
5128         int finished;
5129
5130         spin_lock_irqsave(&h->scan_lock, flags);
5131         finished = h->scan_finished;
5132         spin_unlock_irqrestore(&h->scan_lock, flags);
5133         return finished;
5134 }
5135
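/* Allocate and set up the Scsi_Host structure for this controller. */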
5136 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5137 {
5138         struct Scsi_Host *sh;
5139         int error;
5140
5141         sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5142         if (sh == NULL) {
5143                 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5144                 return -ENOMEM;
5145         }
5146
5147         sh->io_port = 0;
5148         sh->n_io_port = 0;
5149         sh->this_id = -1;
5150         sh->max_channel = 3;
5151         sh->max_cmd_len = MAX_COMMAND_SIZE;
5152         sh->max_lun = HPSA_MAX_LUN;
5153         sh->max_id = HPSA_MAX_LUN;
5154         sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5155         sh->cmd_per_lun = sh->can_queue;
5156         sh->sg_tablesize = h->maxsgentries;
5157         sh->hostdata[0] = (unsigned long) h;
5158         sh->irq = h->intr[h->intr_mode];
5159         sh->unique_id = sh->irq;
5160         error = scsi_init_shared_tag_map(sh, sh->can_queue);
5161         if (error) {
5162                 dev_err(&h->pdev->dev,
5163                         "%s: scsi_init_shared_tag_map failed for controller %d\n",
5164                         __func__, h->ctlr);
5165                 scsi_host_put(sh);
5166                 return error;
5167         }
5168         h->scsi_host = sh;
5169         return 0;
5170 }
5171
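/* Register the Scsi_Host with the SCSI midlayer and start the initial scan. */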
5172 static int hpsa_scsi_add_host(struct ctlr_info *h)
5173 {
5174         int rv;
5175
5176         rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5177         if (rv) {
5178                 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5179                 return rv;
5180         }
5181         scsi_scan_host(h->scsi_host);
5182         return 0;
5183 }
5184
5185 /*
5186  * The block layer has already gone to the trouble of picking out a unique,
5187  * small-integer tag for this request.  We use an offset from that value as
5188  * an index to select our command block.  (The offset allows us to reserve the
5189  * low-numbered entries for our own uses.)
5190  */
5191 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5192 {
5193         int idx = scmd->request->tag;
5194
5195         if (idx < 0)
5196                 return idx;
5197
5198         /* Offset to leave space for internal cmds. */
5199         return idx + HPSA_NRESERVED_CMDS;
5200 }
5201
5202 /*
5203  * Send a TEST_UNIT_READY command to the specified LUN using the specified
5204  * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5205  */
5206 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5207                                 struct CommandList *c, unsigned char lunaddr[],
5208                                 int reply_queue)
5209 {
5210         int rc;
5211
5212         /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5213         (void) fill_cmd(c, TEST_UNIT_READY, h,
5214                         NULL, 0, 0, lunaddr, TYPE_CMD);
5215         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5216         if (rc)
5217                 return rc;
5218         /* no unmap needed here because no data xfer. */
5219
5220         /* Check if the unit is already ready. */
5221         if (c->err_info->CommandStatus == CMD_SUCCESS)
5222                 return 0;
5223
5224         /*
5225          * The first command sent after reset will receive "unit attention" to
5226          * indicate that the LUN has been reset...this is actually what we're
5227          * looking for (but, success is good too).
5228          */
5229         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5230                 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5231                         (c->err_info->SenseInfo[2] == NO_SENSE ||
5232                          c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5233                 return 0;
5234
5235         return 1;
5236 }
5237
5238 /*
5239  * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5240  * returns zero when the unit is ready, and non-zero when giving up.
5241  */
5242 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5243                                 struct CommandList *c,
5244                                 unsigned char lunaddr[], int reply_queue)
5245 {
5246         int rc;
5247         int count = 0;
5248         int waittime = 1; /* seconds */
5249
5250         /* Send test unit ready until device ready, or give up. */
5251         for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5252
5253                 /*
5254                  * Wait for a bit.  do this first, because if we send
5255                  * the TUR right away, the reset will just abort it.
5256                  */
5257                 msleep(1000 * waittime);
5258
5259                 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5260                 if (!rc)
5261                         break;
5262
5263                 /* Increase wait time with each try, up to a point. */
5264                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5265                         waittime *= 2;
5266
5267                 dev_warn(&h->pdev->dev,
5268                          "waiting %d secs for device to become ready.\n",
5269                          waittime);
5270         }
5271
5272         return rc;
5273 }
5274
5275 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5276                                            unsigned char lunaddr[],
5277                                            int reply_queue)
5278 {
5279         int first_queue;
5280         int last_queue;
5281         int rq;
5282         int rc = 0;
5283         struct CommandList *c;
5284
5285         c = cmd_alloc(h);
5286
5287         /*
5288          * If no specific reply queue was requested, then send the TUR
5289          * repeatedly, requesting a reply on each reply queue; otherwise execute
5290          * the loop exactly once using only the specified queue.
5291          */
5292         if (reply_queue == DEFAULT_REPLY_QUEUE) {
5293                 first_queue = 0;
5294                 last_queue = h->nreply_queues - 1;
5295         } else {
5296                 first_queue = reply_queue;
5297                 last_queue = reply_queue;
5298         }
5299
5300         for (rq = first_queue; rq <= last_queue; rq++) {
5301                 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5302                 if (rc)
5303                         break;
5304         }
5305
5306         if (rc)
5307                 dev_warn(&h->pdev->dev, "giving up on device.\n");
5308         else
5309                 dev_warn(&h->pdev->dev, "device is ready.\n");
5310
5311         cmd_free(h, c);
5312         return rc;
5313 }
5314
5315 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5316  * complaining.  Doing a host- or bus-reset can't do anything good here.
5317  */
5318 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5319 {
5320         int rc;
5321         struct ctlr_info *h;
5322         struct hpsa_scsi_dev_t *dev;
5323         u8 reset_type;
5324         char msg[48];
5325
5326         /* find the controller to which the command to be aborted was sent */
5327         h = sdev_to_hba(scsicmd->device);
5328         if (h == NULL) /* paranoia */
5329                 return FAILED;
5330
5331         if (lockup_detected(h))
5332                 return FAILED;
5333
5334         dev = scsicmd->device->hostdata;
5335         if (!dev) {
5336                 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5337                 return FAILED;
5338         }
5339
5340         /* if controller locked up, we can guarantee command won't complete */
5341         if (lockup_detected(h)) {
5342                 snprintf(msg, sizeof(msg),
5343                          "cmd %d RESET FAILED, lockup detected",
5344                          hpsa_get_cmd_index(scsicmd));
5345                 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5346                 return FAILED;
5347         }
5348
5349         /* this reset request might be the result of a lockup; check */
5350         if (detect_controller_lockup(h)) {
5351                 snprintf(msg, sizeof(msg),
5352                          "cmd %d RESET FAILED, new lockup detected",
5353                          hpsa_get_cmd_index(scsicmd));
5354                 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5355                 return FAILED;
5356         }
5357
5358         /* Do not attempt on controller */
5359         if (is_hba_lunid(dev->scsi3addr))
5360                 return SUCCESS;
5361
5362         if (is_logical_dev_addr_mode(dev->scsi3addr))
5363                 reset_type = HPSA_DEVICE_RESET_MSG;
5364         else
5365                 reset_type = HPSA_PHYS_TARGET_RESET;
5366
5367         sprintf(msg, "resetting %s",
5368                 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5369         hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5370
5371         h->reset_in_progress = 1;
5372
5373         /* send a reset to the SCSI LUN which the command was sent to */
5374         rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5375                            DEFAULT_REPLY_QUEUE);
5376         sprintf(msg, "reset %s %s",
5377                 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5378                 rc == 0 ? "completed successfully" : "failed");
5379         hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5380         h->reset_in_progress = 0;
5381         return rc == 0 ? SUCCESS : FAILED;
5382 }
5383
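/*
 * Byte-swap each 32-bit half of the 8-byte command tag.  Some controller
 * firmware expects abort tags in this swizzled byte order (see
 * h->needs_abort_tags_swizzled).
 */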
5384 static void swizzle_abort_tag(u8 *tag)
5385 {
5386         u8 original_tag[8];
5387
5388         memcpy(original_tag, tag, 8);
5389         tag[0] = original_tag[3];
5390         tag[1] = original_tag[2];
5391         tag[2] = original_tag[1];
5392         tag[3] = original_tag[0];
5393         tag[4] = original_tag[7];
5394         tag[5] = original_tag[6];
5395         tag[6] = original_tag[5];
5396         tag[7] = original_tag[4];
5397 }
5398
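/*
 * Extract the lower and upper 32 bits of a command's tag, accounting for
 * the different tag layouts used by ioaccel1, ioaccel2 and standard CISS
 * commands.
 */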
5399 static void hpsa_get_tag(struct ctlr_info *h,
5400         struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5401 {
5402         u64 tag;
5403         if (c->cmd_type == CMD_IOACCEL1) {
5404                 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5405                         &h->ioaccel_cmd_pool[c->cmdindex];
5406                 tag = le64_to_cpu(cm1->tag);
5407                 *tagupper = cpu_to_le32(tag >> 32);
5408                 *taglower = cpu_to_le32(tag);
5409                 return;
5410         }
5411         if (c->cmd_type == CMD_IOACCEL2) {
5412                 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5413                         &h->ioaccel2_cmd_pool[c->cmdindex];
5414                 /* upper tag not used in ioaccel2 mode */
5415                 memset(tagupper, 0, sizeof(*tagupper));
5416                 *taglower = cm2->Tag;
5417                 return;
5418         }
5419         tag = le64_to_cpu(c->Header.tag);
5420         *tagupper = cpu_to_le32(tag >> 32);
5421         *taglower = cpu_to_le32(tag);
5422 }
5423
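/*
 * Send a CISS abort message for the given command on the specified reply
 * queue and wait for it to complete.  Returns 0 if the controller accepted
 * the abort, non-zero if it was rejected or failed.
 */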
5424 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5425         struct CommandList *abort, int reply_queue)
5426 {
5427         int rc = IO_OK;
5428         struct CommandList *c;
5429         struct ErrorInfo *ei;
5430         __le32 tagupper, taglower;
5431
5432         c = cmd_alloc(h);
5433
5434         /* fill_cmd can't fail here, no buffer to map */
5435         (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5436                 0, 0, scsi3addr, TYPE_MSG);
5437         if (h->needs_abort_tags_swizzled)
5438                 swizzle_abort_tag(&c->Request.CDB[4]);
5439         (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5440         hpsa_get_tag(h, abort, &taglower, &tagupper);
5441         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5442                 __func__, tagupper, taglower);
5443         /* no unmap needed here because no data xfer. */
5444
5445         ei = c->err_info;
5446         switch (ei->CommandStatus) {
5447         case CMD_SUCCESS:
5448                 break;
5449         case CMD_TMF_STATUS:
5450                 rc = hpsa_evaluate_tmf_status(h, c);
5451                 break;
5452         case CMD_UNABORTABLE: /* Very common, don't make noise. */
5453                 rc = -1;
5454                 break;
5455         default:
5456                 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5457                         __func__, tagupper, taglower);
5458                 hpsa_scsi_interpret_error(h, c);
5459                 rc = -1;
5460                 break;
5461         }
5462         cmd_free(h, c);
5463         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5464                 __func__, tagupper, taglower);
5465         return rc;
5466 }
5467
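/*
 * Build an ioaccel2 TMF abort request in command block c by overlaying a
 * struct hpsa_tmf_struct on c's io_accel2_cmd slot and pointing the abort
 * at the tag of command_to_abort.
 */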
5468 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5469         struct CommandList *command_to_abort, int reply_queue)
5470 {
5471         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5472         struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5473         struct io_accel2_cmd *c2a =
5474                 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5475         struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5476         struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5477
5478         /*
5479          * We're overlaying struct hpsa_tmf_struct on top of something which
5480          * was allocated as a struct io_accel2_cmd, so we better be sure it
5481          * actually fits, and doesn't overrun the error info space.
5482          */
5483         BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5484                         sizeof(struct io_accel2_cmd));
5485         BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5486                         offsetof(struct hpsa_tmf_struct, error_len) +
5487                                 sizeof(ac->error_len));
5488
5489         c->cmd_type = IOACCEL2_TMF;
5490         c->scsi_cmd = SCSI_CMD_BUSY;
5491
5492         /* Adjust the DMA address to point to the accelerated command buffer */
5493         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5494                                 (c->cmdindex * sizeof(struct io_accel2_cmd));
5495         BUG_ON(c->busaddr & 0x0000007F);
5496
5497         memset(ac, 0, sizeof(*c2)); /* clear the full io_accel2_cmd, not just the TMF struct */
5498         ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5499         ac->reply_queue = reply_queue;
5500         ac->tmf = IOACCEL2_TMF_ABORT;
5501         ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5502         memset(ac->lun_id, 0, sizeof(ac->lun_id));
5503         ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5504         ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5505         ac->error_ptr = cpu_to_le64(c->busaddr +
5506                         offsetof(struct io_accel2_cmd, error_data));
5507         ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5508 }
5509
5510 /* ioaccel2 path firmware cannot handle abort task requests.
5511  * Change abort requests to physical target reset, and send to the
5512  * address of the physical disk used for the ioaccel 2 command.
5513  * Return 0 on success (IO_OK)
5514  *       -1 on failure
5515  */
5516
5517 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5518         unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5519 {
5520         int rc = IO_OK;
5521         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5522         struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5523         unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5524         unsigned char *psa = &phys_scsi3addr[0];
5525
5526         /* Get a pointer to the hpsa logical device. */
5527         scmd = abort->scsi_cmd;
5528         dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5529         if (dev == NULL) {
5530                 dev_warn(&h->pdev->dev,
5531                         "Cannot abort: no device pointer for command.\n");
5532                 return -1; /* not abortable */
5533         }
5534
5535         if (h->raid_offload_debug > 0)
5536                 dev_info(&h->pdev->dev,
5537                         "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5538                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
5539                         "Reset as abort",
5540                         scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5541                         scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5542
5543         if (!dev->offload_enabled) {
5544                 dev_warn(&h->pdev->dev,
5545                         "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5546                 return -1; /* not abortable */
5547         }
5548
5549         /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5550         if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5551                 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5552                 return -1; /* not abortable */
5553         }
5554
5555         /* send the reset */
5556         if (h->raid_offload_debug > 0)
5557                 dev_info(&h->pdev->dev,
5558                         "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5559                         psa[0], psa[1], psa[2], psa[3],
5560                         psa[4], psa[5], psa[6], psa[7]);
5561         rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
5562         if (rc != 0) {
5563                 dev_warn(&h->pdev->dev,
5564                         "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5565                         psa[0], psa[1], psa[2], psa[3],
5566                         psa[4], psa[5], psa[6], psa[7]);
5567                 return rc; /* failed to reset */
5568         }
5569
5570         /* wait for device to recover */
5571         if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
5572                 dev_warn(&h->pdev->dev,
5573                         "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5574                         psa[0], psa[1], psa[2], psa[3],
5575                         psa[4], psa[5], psa[6], psa[7]);
5576                 return -1;  /* failed to recover */
5577         }
5578
5579         /* device recovered */
5580         dev_info(&h->pdev->dev,
5581                 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5582                 psa[0], psa[1], psa[2], psa[3],
5583                 psa[4], psa[5], psa[6], psa[7]);
5584
5585         return rc; /* success */
5586 }
5587
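/*
 * Abort a command via the ioaccel2 path: build a TMF abort request, issue
 * it on the given reply queue, and translate the service response into
 * 0 (aborted) or -1 (rejected/failed).
 */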
5588 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5589         struct CommandList *abort, int reply_queue)
5590 {
5591         int rc = IO_OK;
5592         struct CommandList *c;
5593         __le32 taglower, tagupper;
5594         struct hpsa_scsi_dev_t *dev;
5595         struct io_accel2_cmd *c2;
5596
5597         dev = abort->scsi_cmd->device->hostdata;
5598         if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5599                 return -1;
5600
5601         c = cmd_alloc(h);
5602         setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5603         c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5604         (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5605         hpsa_get_tag(h, abort, &taglower, &tagupper);
5606         dev_dbg(&h->pdev->dev,
5607                 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5608                 __func__, tagupper, taglower);
5609         /* no unmap needed here because no data xfer. */
5610
5611         dev_dbg(&h->pdev->dev,
5612                 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5613                 __func__, tagupper, taglower, c2->error_data.serv_response);
5614         switch (c2->error_data.serv_response) {
5615         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5616         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5617                 rc = 0;
5618                 break;
5619         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5620         case IOACCEL2_SERV_RESPONSE_FAILURE:
5621         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5622                 rc = -1;
5623                 break;
5624         default:
5625                 dev_warn(&h->pdev->dev,
5626                         "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5627                         __func__, tagupper, taglower,
5628                         c2->error_data.serv_response);
5629                 rc = -1;
5630         }
5631         cmd_free(h, c);
5632         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5633                 tagupper, taglower);
5634         return rc;
5635 }
5636
5637 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
5638         unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5639 {
5640         /*
5641          * ioaccel2 commands should be aborted via the accelerated path,
5642          * since the RAID path is unaware of these commands,
5643          * but not all underlying firmware can handle abort TMF.
5644          * Change abort to physical device reset when abort TMF is unsupported.
5645          */
5646         if (abort->cmd_type == CMD_IOACCEL2) {
5647                 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5648                         return hpsa_send_abort_ioaccel2(h, abort,
5649                                                 reply_queue);
5650                 else
5651                         return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
5652                                                         abort, reply_queue);
5653         }
5654         return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
5655 }
5656
5657 /* Find out which reply queue a command was meant to return on */
5658 static int hpsa_extract_reply_queue(struct ctlr_info *h,
5659                                         struct CommandList *c)
5660 {
5661         if (c->cmd_type == CMD_IOACCEL2)
5662                 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5663         return c->Header.ReplyQueue;
5664 }
5665
5666 /*
5667  * Limit concurrency of abort commands to prevent
5668  * over-subscription of commands
5669  */
5670 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5671 {
5672 #define ABORT_CMD_WAIT_MSECS 5000
5673         return !wait_event_timeout(h->abort_cmd_wait_queue,
5674                         atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5675                         msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5676 }
5677
5678 /* Send an abort for the specified command.
5679  *      If the device and controller support it,
5680  *              send a task abort request.
5681  */
5682 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5683 {
5684
5685         int rc;
5686         struct ctlr_info *h;
5687         struct hpsa_scsi_dev_t *dev;
5688         struct CommandList *abort; /* pointer to command to be aborted */
5689         struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
5690         char msg[256];          /* For debug messaging. */
5691         int ml = 0;
5692         __le32 tagupper, taglower;
5693         int refcount, reply_queue;
5694
5695         if (sc == NULL)
5696                 return FAILED;
5697
5698         if (sc->device == NULL)
5699                 return FAILED;
5700
5701         /* Find the controller of the command to be aborted */
5702         h = sdev_to_hba(sc->device);
5703         if (h == NULL)
5704                 return FAILED;
5705
5706         /* Find the device of the command to be aborted */
5707         dev = sc->device->hostdata;
5708         if (!dev) {
5709                 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
5710                                 msg);
5711                 return FAILED;
5712         }
5713
5714         /* If controller locked up, we can guarantee command won't complete */
5715         if (lockup_detected(h)) {
5716                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5717                                         "ABORT FAILED, lockup detected");
5718                 return FAILED;
5719         }
5720
5721         /* This is a good time to check if controller lockup has occurred */
5722         if (detect_controller_lockup(h)) {
5723                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5724                                         "ABORT FAILED, new lockup detected");
5725                 return FAILED;
5726         }
5727
5728         /* Check that controller supports some kind of task abort */
5729         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5730                 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5731                 return FAILED;
5732
5733         memset(msg, 0, sizeof(msg));
5734         ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
5735                 h->scsi_host->host_no, sc->device->channel,
5736                 sc->device->id, sc->device->lun,
5737                 "Aborting command", sc);
5738
5739         /* Get SCSI command to be aborted */
5740         abort = (struct CommandList *) sc->host_scribble;
5741         if (abort == NULL) {
5742                 /* This can happen if the command already completed. */
5743                 return SUCCESS;
5744         }
5745         refcount = atomic_inc_return(&abort->refcount);
5746         if (refcount == 1) { /* Command is done already. */
5747                 cmd_free(h, abort);
5748                 return SUCCESS;
5749         }
5750
5751         /* Don't bother trying the abort if we know it won't work. */
5752         if (abort->cmd_type != CMD_IOACCEL2 &&
5753                 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5754                 cmd_free(h, abort);
5755                 return FAILED;
5756         }
5757
5758         /*
5759          * Check that we're aborting the right command.
5760          * It's possible the CommandList already completed and got re-used.
5761          */
5762         if (abort->scsi_cmd != sc) {
5763                 cmd_free(h, abort);
5764                 return SUCCESS;
5765         }
5766
5767         abort->abort_pending = true;
5768         hpsa_get_tag(h, abort, &taglower, &tagupper);
5769         reply_queue = hpsa_extract_reply_queue(h, abort);
5770         ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
5771         as  = abort->scsi_cmd;
5772         if (as != NULL)
5773                 ml += sprintf(msg+ml,
5774                         "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5775                         as->cmd_len, as->cmnd[0], as->cmnd[1],
5776                         as->serial_number);
5777         dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
5778         hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
5779
5780         /*
5781          * Command is in flight, or possibly already completed
5782          * by the firmware (but not to the scsi mid layer) but we can't
5783          * distinguish which.  Send the abort down.
5784          */
5785         if (wait_for_available_abort_cmd(h)) {
5786                 dev_warn(&h->pdev->dev,
5787                         "%s FAILED, timeout waiting for an abort command to become available.\n",
5788                         msg);
5789                 cmd_free(h, abort);
5790                 return FAILED;
5791         }
5792         rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
5793         atomic_inc(&h->abort_cmds_available);
5794         wake_up_all(&h->abort_cmd_wait_queue);
5795         if (rc != 0) {
5796                 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
5797                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5798                                 "FAILED to abort command");
5799                 cmd_free(h, abort);
5800                 return FAILED;
5801         }
5802         dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
5803         wait_event(h->event_sync_wait_queue,
5804                    abort->scsi_cmd != sc || lockup_detected(h));
5805         cmd_free(h, abort);
5806         return !lockup_detected(h) ? SUCCESS : FAILED;
5807 }
5808
5809 /*
5810  * For operations with an associated SCSI command, a command block is allocated
5811  * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
5812  * block request tag as an index into a table of entries.  cmd_tagged_free() is
5813  * the complement, although cmd_free() may be called instead.
5814  */
5815 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5816                                             struct scsi_cmnd *scmd)
5817 {
5818         int idx = hpsa_get_cmd_index(scmd);
5819         struct CommandList *c = h->cmd_pool + idx;
5820
5821         if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5822                 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5823                         idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5824                 /* The index value comes from the block layer, so if it's out of
5825                  * bounds, it's probably not our bug.
5826                  */
5827                 BUG();
5828         }
5829
5830         atomic_inc(&c->refcount);
5831         if (unlikely(!hpsa_is_cmd_idle(c))) {
5832                 /*
5833                  * We expect that the SCSI layer will hand us a unique tag
5834                  * value.  Thus, there should never be a collision here between
5835                  * two requests...because if the selected command isn't idle
5836                  * then someone is going to be very disappointed.
5837                  */
5838                 dev_err(&h->pdev->dev,
5839                         "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5840                         idx);
5841                 if (c->scsi_cmd != NULL)
5842                         scsi_print_command(c->scsi_cmd);
5843                 scsi_print_command(scmd);
5844         }
5845
5846         hpsa_cmd_partial_init(h, idx, c);
5847         return c;
5848 }
5849
5850 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5851 {
5852         /*
5853          * Release our reference to the block.  We don't need to do anything
5854          * else to free it, because it is accessed by index.  (There's no point
5855          * in checking the result of the decrement, since we cannot guarantee
5856          * that there isn't a concurrent abort which is also accessing it.)
5857          */
5858         (void)atomic_dec(&c->refcount);
5859 }
5860
5861 /*
5862  * For operations that cannot sleep, a command block is allocated at init,
5863  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5864  * which ones are free or in use.  Lock must be held when calling this.
5865  * cmd_free() is the complement.
5866  * This function never gives up and never returns NULL.  If it hangs,
5867  * another thread must call cmd_free() to free some tags.
5868  */
5869
5870 static struct CommandList *cmd_alloc(struct ctlr_info *h)
5871 {
5872         struct CommandList *c;
5873         int refcount, i;
5874         int offset = 0;
5875
5876         /*
5877          * There is some *extremely* small but non-zero chance that
5878          * multiple threads could get in here, and one thread could
5879          * be scanning through the list of bits looking for a free
5880          * one, but the free ones are always behind him, and other
5881          * threads sneak in behind him and eat them before he can
5882          * get to them, so that while there is always a free one, a
5883          * very unlucky thread might be starved anyway, never able to
5884          * beat the other threads.  In reality, this happens so
5885          * infrequently as to be indistinguishable from never.
5886          *
5887          * Note that we start allocating commands before the SCSI host structure
5888          * is initialized.  Since the search starts at bit zero, this
5889          * all works, since we have at least one command structure available;
5890          * however, it means that the structures with the low indexes have to be
5891          * reserved for driver-initiated requests, while requests from the block
5892          * layer will use the higher indexes.
5893          */
5894
5895         for (;;) {
5896                 i = find_next_zero_bit(h->cmd_pool_bits,
5897                                         HPSA_NRESERVED_CMDS,
5898                                         offset);
5899                 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
5900                         offset = 0;
5901                         continue;
5902                 }
5903                 c = h->cmd_pool + i;
5904                 refcount = atomic_inc_return(&c->refcount);
5905                 if (unlikely(refcount > 1)) {
5906                         cmd_free(h, c); /* already in use */
5907                         offset = (i + 1) % HPSA_NRESERVED_CMDS;
5908                         continue;
5909                 }
5910                 set_bit(i & (BITS_PER_LONG - 1),
5911                         h->cmd_pool_bits + (i / BITS_PER_LONG));
5912                 break; /* it's ours now. */
5913         }
5914         hpsa_cmd_partial_init(h, i, c);
5915         return c;
5916 }
5917
5918 /*
5919  * This is the complementary operation to cmd_alloc().  Note, however, in some
5920  * corner cases it may also be used to free blocks allocated by
5921  * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
5922  * the clear-bit is harmless.
5923  */
5924 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5925 {
5926         if (atomic_dec_and_test(&c->refcount)) {
5927                 int i;
5928
5929                 i = c - h->cmd_pool;
5930                 clear_bit(i & (BITS_PER_LONG - 1),
5931                           h->cmd_pool_bits + (i / BITS_PER_LONG));
5932         }
5933 }
5934
5935 #ifdef CONFIG_COMPAT
5936
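/*
 * Compat handler for CCISS_PASSTHRU32: convert the 32-bit ioctl argument
 * into the native IOCTL_Command_struct layout in compat-allocated user
 * space, forward it to hpsa_ioctl(), and copy the error info back.
 */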
5937 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5938         void __user *arg)
5939 {
5940         IOCTL32_Command_struct __user *arg32 =
5941             (IOCTL32_Command_struct __user *) arg;
5942         IOCTL_Command_struct arg64;
5943         IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5944         int err;
5945         u32 cp;
5946
5947         memset(&arg64, 0, sizeof(arg64));
5948         err = 0;
5949         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5950                            sizeof(arg64.LUN_info));
5951         err |= copy_from_user(&arg64.Request, &arg32->Request,
5952                            sizeof(arg64.Request));
5953         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5954                            sizeof(arg64.error_info));
5955         err |= get_user(arg64.buf_size, &arg32->buf_size);
5956         err |= get_user(cp, &arg32->buf);
5957         arg64.buf = compat_ptr(cp);
5958         err |= copy_to_user(p, &arg64, sizeof(arg64));
5959
5960         if (err)
5961                 return -EFAULT;
5962
5963         err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
5964         if (err)
5965                 return err;
5966         err |= copy_in_user(&arg32->error_info, &p->error_info,
5967                          sizeof(arg32->error_info));
5968         if (err)
5969                 return -EFAULT;
5970         return err;
5971 }
5972
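/*
 * Compat handler for CCISS_BIG_PASSTHRU32; performs the same conversion
 * as hpsa_ioctl32_passthru() for the big passthru variant.
 */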
5973 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
5974         int cmd, void __user *arg)
5975 {
5976         BIG_IOCTL32_Command_struct __user *arg32 =
5977             (BIG_IOCTL32_Command_struct __user *) arg;
5978         BIG_IOCTL_Command_struct arg64;
5979         BIG_IOCTL_Command_struct __user *p =
5980             compat_alloc_user_space(sizeof(arg64));
5981         int err;
5982         u32 cp;
5983
5984         memset(&arg64, 0, sizeof(arg64));
5985         err = 0;
5986         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5987                            sizeof(arg64.LUN_info));
5988         err |= copy_from_user(&arg64.Request, &arg32->Request,
5989                            sizeof(arg64.Request));
5990         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5991                            sizeof(arg64.error_info));
5992         err |= get_user(arg64.buf_size, &arg32->buf_size);
5993         err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5994         err |= get_user(cp, &arg32->buf);
5995         arg64.buf = compat_ptr(cp);
5996         err |= copy_to_user(p, &arg64, sizeof(arg64));
5997
5998         if (err)
5999                 return -EFAULT;
6000
6001         err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6002         if (err)
6003                 return err;
6004         err |= copy_in_user(&arg32->error_info, &p->error_info,
6005                          sizeof(arg32->error_info));
6006         if (err)
6007                 return -EFAULT;
6008         return err;
6009 }
6010
6011 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6012 {
6013         switch (cmd) {
6014         case CCISS_GETPCIINFO:
6015         case CCISS_GETINTINFO:
6016         case CCISS_SETINTINFO:
6017         case CCISS_GETNODENAME:
6018         case CCISS_SETNODENAME:
6019         case CCISS_GETHEARTBEAT:
6020         case CCISS_GETBUSTYPES:
6021         case CCISS_GETFIRMVER:
6022         case CCISS_GETDRIVVER:
6023         case CCISS_REVALIDVOLS:
6024         case CCISS_DEREGDISK:
6025         case CCISS_REGNEWDISK:
6026         case CCISS_REGNEWD:
6027         case CCISS_RESCANDISK:
6028         case CCISS_GETLUNINFO:
6029                 return hpsa_ioctl(dev, cmd, arg);
6030
6031         case CCISS_PASSTHRU32:
6032                 return hpsa_ioctl32_passthru(dev, cmd, arg);
6033         case CCISS_BIG_PASSTHRU32:
6034                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6035
6036         default:
6037                 return -ENOIOCTLCMD;
6038         }
6039 }
6040 #endif
6041
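/*
 * CCISS_GETPCIINFO: report the controller's PCI domain, bus, devfn and
 * board ID to user space.
 */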
6042 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6043 {
6044         struct hpsa_pci_info pciinfo;
6045
6046         if (!argp)
6047                 return -EINVAL;
6048         pciinfo.domain = pci_domain_nr(h->pdev->bus);
6049         pciinfo.bus = h->pdev->bus->number;
6050         pciinfo.dev_fn = h->pdev->devfn;
6051         pciinfo.board_id = h->board_id;
6052         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6053                 return -EFAULT;
6054         return 0;
6055 }
6056
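/*
 * CCISS_GETDRIVVER: report the driver version to user space, packed as
 * (major << 16) | (minor << 8) | subminor.
 */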
6057 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6058 {
6059         DriverVer_type DriverVer;
6060         unsigned char vmaj, vmin, vsubmin;
6061         int rc;
6062
6063         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6064                 &vmaj, &vmin, &vsubmin);
6065         if (rc != 3) {
6066                 dev_info(&h->pdev->dev, "driver version string '%s' "
6067                         "unrecognized.", HPSA_DRIVER_VERSION);
6068                 vmaj = 0;
6069                 vmin = 0;
6070                 vsubmin = 0;
6071         }
6072         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6073         if (!argp)
6074                 return -EINVAL;
6075         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6076                 return -EFAULT;
6077         return 0;
6078 }
6079
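/*
 * CCISS_PASSTHRU: copy the user's command, map at most one data buffer,
 * issue the command synchronously, then copy the error info (and any data
 * read) back to user space.
 */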
6080 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6081 {
6082         IOCTL_Command_struct iocommand;
6083         struct CommandList *c;
6084         char *buff = NULL;
6085         u64 temp64;
6086         int rc = 0;
6087
6088         if (!argp)
6089                 return -EINVAL;
6090         if (!capable(CAP_SYS_RAWIO))
6091                 return -EPERM;
6092         if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6093                 return -EFAULT;
6094         if ((iocommand.buf_size < 1) &&
6095             (iocommand.Request.Type.Direction != XFER_NONE)) {
6096                 return -EINVAL;
6097         }
6098         if (iocommand.buf_size > 0) {
6099                 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6100                 if (buff == NULL)
6101                         return -ENOMEM;
6102                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6103                         /* Copy the data into the buffer we created */
6104                         if (copy_from_user(buff, iocommand.buf,
6105                                 iocommand.buf_size)) {
6106                                 rc = -EFAULT;
6107                                 goto out_kfree;
6108                         }
6109                 } else {
6110                         memset(buff, 0, iocommand.buf_size);
6111                 }
6112         }
6113         c = cmd_alloc(h);
6114
6115         /* Fill in the command type */
6116         c->cmd_type = CMD_IOCTL_PEND;
6117         c->scsi_cmd = SCSI_CMD_BUSY;
6118         /* Fill in Command Header */
6119         c->Header.ReplyQueue = 0; /* unused in simple mode */
6120         if (iocommand.buf_size > 0) {   /* buffer to fill */
6121                 c->Header.SGList = 1;
6122                 c->Header.SGTotal = cpu_to_le16(1);
6123         } else  { /* no buffers to fill */
6124                 c->Header.SGList = 0;
6125                 c->Header.SGTotal = cpu_to_le16(0);
6126         }
6127         memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6128
6129         /* Fill in Request block */
6130         memcpy(&c->Request, &iocommand.Request,
6131                 sizeof(c->Request));
6132
6133         /* Fill in the scatter gather information */
6134         if (iocommand.buf_size > 0) {
6135                 temp64 = pci_map_single(h->pdev, buff,
6136                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6137                 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6138                         c->SG[0].Addr = cpu_to_le64(0);
6139                         c->SG[0].Len = cpu_to_le32(0);
6140                         rc = -ENOMEM;
6141                         goto out;
6142                 }
6143                 c->SG[0].Addr = cpu_to_le64(temp64);
6144                 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6145                 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6146         }
6147         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6148         if (iocommand.buf_size > 0)
6149                 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6150         check_ioctl_unit_attention(h, c);
6151         if (rc) {
6152                 rc = -EIO;
6153                 goto out;
6154         }
6155
6156         /* Copy the error information out */
6157         memcpy(&iocommand.error_info, c->err_info,
6158                 sizeof(iocommand.error_info));
6159         if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6160                 rc = -EFAULT;
6161                 goto out;
6162         }
6163         if ((iocommand.Request.Type.Direction & XFER_READ) &&
6164                 iocommand.buf_size > 0) {
6165                 /* Copy the data out of the buffer we created */
6166                 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6167                         rc = -EFAULT;
6168                         goto out;
6169                 }
6170         }
6171 out:
6172         cmd_free(h, c);
6173 out_kfree:
6174         kfree(buff);
6175         return rc;
6176 }
6177
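/*
 * CCISS_BIG_PASSTHRU: like CCISS_PASSTHRU, but the data buffer may be
 * split across up to SG_ENTRIES_IN_CMD scatter-gather segments of at most
 * ioc->malloc_size bytes each.
 */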
6178 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6179 {
6180         BIG_IOCTL_Command_struct *ioc;
6181         struct CommandList *c;
6182         unsigned char **buff = NULL;
6183         int *buff_size = NULL;
6184         u64 temp64;
6185         BYTE sg_used = 0;
6186         int status = 0;
6187         u32 left;
6188         u32 sz;
6189         BYTE __user *data_ptr;
6190
6191         if (!argp)
6192                 return -EINVAL;
6193         if (!capable(CAP_SYS_RAWIO))
6194                 return -EPERM;
6195         ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6197         if (!ioc) {
6198                 status = -ENOMEM;
6199                 goto cleanup1;
6200         }
6201         if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6202                 status = -EFAULT;
6203                 goto cleanup1;
6204         }
6205         if ((ioc->buf_size < 1) &&
6206             (ioc->Request.Type.Direction != XFER_NONE)) {
6207                 status = -EINVAL;
6208                 goto cleanup1;
6209         }
6210         /* Check kmalloc limits  using all SGs */
6211         if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6212                 status = -EINVAL;
6213                 goto cleanup1;
6214         }
6215         if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6216                 status = -EINVAL;
6217                 goto cleanup1;
6218         }
6219         buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
6220         if (!buff) {
6221                 status = -ENOMEM;
6222                 goto cleanup1;
6223         }
6224         buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
6225         if (!buff_size) {
6226                 status = -ENOMEM;
6227                 goto cleanup1;
6228         }
6229         left = ioc->buf_size;
6230         data_ptr = ioc->buf;
6231         while (left) {
6232                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6233                 buff_size[sg_used] = sz;
6234                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6235                 if (buff[sg_used] == NULL) {
6236                         status = -ENOMEM;
6237                         goto cleanup1;
6238                 }
6239                 if (ioc->Request.Type.Direction & XFER_WRITE) {
6240                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6241                                 status = -EFAULT;
6242                                 goto cleanup1;
6243                         }
6244                 } else
6245                         memset(buff[sg_used], 0, sz);
6246                 left -= sz;
6247                 data_ptr += sz;
6248                 sg_used++;
6249         }
6250         c = cmd_alloc(h);
6251
6252         c->cmd_type = CMD_IOCTL_PEND;
6253         c->scsi_cmd = SCSI_CMD_BUSY;
6254         c->Header.ReplyQueue = 0;
6255         c->Header.SGList = (u8) sg_used;
6256         c->Header.SGTotal = cpu_to_le16(sg_used);
6257         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6258         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6259         if (ioc->buf_size > 0) {
6260                 int i;
6261                 for (i = 0; i < sg_used; i++) {
6262                         temp64 = pci_map_single(h->pdev, buff[i],
6263                                     buff_size[i], PCI_DMA_BIDIRECTIONAL);
6264                         if (dma_mapping_error(&h->pdev->dev,
6265                                                         (dma_addr_t) temp64)) {
6266                                 c->SG[i].Addr = cpu_to_le64(0);
6267                                 c->SG[i].Len = cpu_to_le32(0);
6268                                 hpsa_pci_unmap(h->pdev, c, i,
6269                                         PCI_DMA_BIDIRECTIONAL);
6270                                 status = -ENOMEM;
6271                                 goto cleanup0;
6272                         }
6273                         c->SG[i].Addr = cpu_to_le64(temp64);
6274                         c->SG[i].Len = cpu_to_le32(buff_size[i]);
6275                         c->SG[i].Ext = cpu_to_le32(0);
6276                 }
6277                 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6278         }
6279         status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6280         if (sg_used)
6281                 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6282         check_ioctl_unit_attention(h, c);
6283         if (status) {
6284                 status = -EIO;
6285                 goto cleanup0;
6286         }
6287
6288         /* Copy the error information out */
6289         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6290         if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6291                 status = -EFAULT;
6292                 goto cleanup0;
6293         }
6294         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6295                 int i;
6296
6297                 /* Copy the data out of the buffer we created */
6298                 BYTE __user *ptr = ioc->buf;
6299                 for (i = 0; i < sg_used; i++) {
6300                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
6301                                 status = -EFAULT;
6302                                 goto cleanup0;
6303                         }
6304                         ptr += buff_size[i];
6305                 }
6306         }
6307         status = 0;
6308 cleanup0:
6309         cmd_free(h, c);
6310 cleanup1:
6311         if (buff) {
6312                 int i;
6313
6314                 for (i = 0; i < sg_used; i++)
6315                         kfree(buff[i]);
6316                 kfree(buff);
6317         }
6318         kfree(buff_size);
6319         kfree(ioc);
6320         return status;
6321 }
6322
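/*
 * After an ioctl-initiated command completes, hand target-status
 * completions that are not check conditions to check_for_unit_attention().
 */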
6323 static void check_ioctl_unit_attention(struct ctlr_info *h,
6324         struct CommandList *c)
6325 {
6326         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6327                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6328                 (void) check_for_unit_attention(h, c);
6329 }
6330
6331 /*
6332  * Main ioctl entry point: dispatch the CCISS ioctls for this host.
6333  */
6334 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6335 {
6336         struct ctlr_info *h;
6337         void __user *argp = (void __user *)arg;
6338         int rc;
6339
6340         h = sdev_to_hba(dev);
6341
6342         switch (cmd) {
6343         case CCISS_DEREGDISK:
6344         case CCISS_REGNEWDISK:
6345         case CCISS_REGNEWD:
6346                 hpsa_scan_start(h->scsi_host);
6347                 return 0;
6348         case CCISS_GETPCIINFO:
6349                 return hpsa_getpciinfo_ioctl(h, argp);
6350         case CCISS_GETDRIVVER:
6351                 return hpsa_getdrivver_ioctl(h, argp);
6352         case CCISS_PASSTHRU:
6353                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6354                         return -EAGAIN;
6355                 rc = hpsa_passthru_ioctl(h, argp);
6356                 atomic_inc(&h->passthru_cmds_avail);
6357                 return rc;
6358         case CCISS_BIG_PASSTHRU:
6359                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6360                         return -EAGAIN;
6361                 rc = hpsa_big_passthru_ioctl(h, argp);
6362                 atomic_inc(&h->passthru_cmds_avail);
6363                 return rc;
6364         default:
6365                 return -ENOTTY;
6366         }
6367 }
6368
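/*
 * Send a reset message to the controller itself without waiting for
 * completion; this is only used just before re-initializing the
 * controller, so the in-flight command is intentionally never freed.
 */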
6369 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6370                                 u8 reset_type)
6371 {
6372         struct CommandList *c;
6373
6374         c = cmd_alloc(h);
6375
6376         /* fill_cmd can't fail here, no data buffer to map */
6377         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6378                 RAID_CTLR_LUNID, TYPE_MSG);
6379         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6380         c->waiting = NULL;
6381         enqueue_cmd_and_start_io(h, c);
6382         /* Don't wait for completion, the reset won't complete.  Don't free
6383          * the command either.  This is the last command we will send before
6384          * re-initializing everything, so it doesn't matter and won't leak.
6385          */
6386         return;
6387 }
6388
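/*
 * Initialize command block c for an internally generated CISS command
 * (TYPE_CMD) or message (TYPE_MSG) addressed to scsi3addr, mapping buff
 * for DMA when a data transfer is involved.  Returns 0 on success, or
 * -1 if the DMA mapping fails.
 */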
6389 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6390         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6391         int cmd_type)
6392 {
6393         int pci_dir = XFER_NONE;
6394         u64 tag; /* for commands to be aborted */
6395
6396         c->cmd_type = CMD_IOCTL_PEND;
6397         c->scsi_cmd = SCSI_CMD_BUSY;
6398         c->Header.ReplyQueue = 0;
6399         if (buff != NULL && size > 0) {
6400                 c->Header.SGList = 1;
6401                 c->Header.SGTotal = cpu_to_le16(1);
6402         } else {
6403                 c->Header.SGList = 0;
6404                 c->Header.SGTotal = cpu_to_le16(0);
6405         }
6406         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6407
6408         if (cmd_type == TYPE_CMD) {
6409                 switch (cmd) {
6410                 case HPSA_INQUIRY:
6411                         /* are we trying to read a vital product page */
6412                         if (page_code & VPD_PAGE) {
6413                                 c->Request.CDB[1] = 0x01;
6414                                 c->Request.CDB[2] = (page_code & 0xff);
6415                         }
6416                         c->Request.CDBLen = 6;
6417                         c->Request.type_attr_dir =
6418                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6419                         c->Request.Timeout = 0;
6420                         c->Request.CDB[0] = HPSA_INQUIRY;
6421                         c->Request.CDB[4] = size & 0xFF;
6422                         break;
6423                 case HPSA_REPORT_LOG:
6424                 case HPSA_REPORT_PHYS:
6425                         /* Talking to the controller, so it's a physical
6426                            command: mode = 00, target = 0.  Nothing to write.
6427                          */
6428                         c->Request.CDBLen = 12;
6429                         c->Request.type_attr_dir =
6430                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6431                         c->Request.Timeout = 0;
6432                         c->Request.CDB[0] = cmd;
6433                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6434                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6435                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6436                         c->Request.CDB[9] = size & 0xFF;
6437                         break;
6438                 case HPSA_CACHE_FLUSH:
6439                         c->Request.CDBLen = 12;
6440                         c->Request.type_attr_dir =
6441                                         TYPE_ATTR_DIR(cmd_type,
6442                                                 ATTR_SIMPLE, XFER_WRITE);
6443                         c->Request.Timeout = 0;
6444                         c->Request.CDB[0] = BMIC_WRITE;
6445                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6446                         c->Request.CDB[7] = (size >> 8) & 0xFF;
6447                         c->Request.CDB[8] = size & 0xFF;
6448                         break;
6449                 case TEST_UNIT_READY:
6450                         c->Request.CDBLen = 6;
6451                         c->Request.type_attr_dir =
6452                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6453                         c->Request.Timeout = 0;
6454                         break;
6455                 case HPSA_GET_RAID_MAP:
6456                         c->Request.CDBLen = 12;
6457                         c->Request.type_attr_dir =
6458                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6459                         c->Request.Timeout = 0;
6460                         c->Request.CDB[0] = HPSA_CISS_READ;
6461                         c->Request.CDB[1] = cmd;
6462                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6463                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6464                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6465                         c->Request.CDB[9] = size & 0xFF;
6466                         break;
6467                 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6468                         c->Request.CDBLen = 10;
6469                         c->Request.type_attr_dir =
6470                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6471                         c->Request.Timeout = 0;
6472                         c->Request.CDB[0] = BMIC_READ;
6473                         c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6474                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6475                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6476                         break;
6477                 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6478                         c->Request.CDBLen = 10;
6479                         c->Request.type_attr_dir =
6480                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6481                         c->Request.Timeout = 0;
6482                         c->Request.CDB[0] = BMIC_READ;
6483                         c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6484                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6485                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6486                         break;
6487                 case BMIC_IDENTIFY_CONTROLLER:
6488                         c->Request.CDBLen = 10;
6489                         c->Request.type_attr_dir =
6490                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6491                         c->Request.Timeout = 0;
6492                         c->Request.CDB[0] = BMIC_READ;
6493                         c->Request.CDB[1] = 0;
6494                         c->Request.CDB[2] = 0;
6495                         c->Request.CDB[3] = 0;
6496                         c->Request.CDB[4] = 0;
6497                         c->Request.CDB[5] = 0;
6498                         c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6499                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6500                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6501                         c->Request.CDB[9] = 0;
6502                         break;
6503
6504                 default:
6505                         dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
6506                         BUG();
6507                         return -1;
6508                 }
6509         } else if (cmd_type == TYPE_MSG) {
6510                 switch (cmd) {
6511
6512                 case  HPSA_PHYS_TARGET_RESET:
6513                         c->Request.CDBLen = 16;
6514                         c->Request.type_attr_dir =
6515                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6516                         c->Request.Timeout = 0; /* Don't time out */
6517                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6518                         c->Request.CDB[0] = HPSA_RESET;
6519                         c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6520                         /* Physical target reset needs no control bytes 4-7*/
6521                         c->Request.CDB[4] = 0x00;
6522                         c->Request.CDB[5] = 0x00;
6523                         c->Request.CDB[6] = 0x00;
6524                         c->Request.CDB[7] = 0x00;
6525                         break;
6526                 case  HPSA_DEVICE_RESET_MSG:
6527                         c->Request.CDBLen = 16;
6528                         c->Request.type_attr_dir =
6529                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6530                         c->Request.Timeout = 0; /* Don't time out */
6531                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6532                         c->Request.CDB[0] =  cmd;
6533                         c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6534                         /* If bytes 4-7 are zero, it means reset the */
6535                         /* LunID device */
6536                         c->Request.CDB[4] = 0x00;
6537                         c->Request.CDB[5] = 0x00;
6538                         c->Request.CDB[6] = 0x00;
6539                         c->Request.CDB[7] = 0x00;
6540                         break;
6541                 case  HPSA_ABORT_MSG:
6542                         memcpy(&tag, buff, sizeof(tag));
6543                         dev_dbg(&h->pdev->dev,
6544                                 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6545                                 tag, c->Header.tag);
6546                         c->Request.CDBLen = 16;
6547                         c->Request.type_attr_dir =
6548                                         TYPE_ATTR_DIR(cmd_type,
6549                                                 ATTR_SIMPLE, XFER_WRITE);
6550                         c->Request.Timeout = 0; /* Don't time out */
6551                         c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6552                         c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6553                         c->Request.CDB[2] = 0x00; /* reserved */
6554                         c->Request.CDB[3] = 0x00; /* reserved */
6555                         /* Tag to abort goes in CDB[4]-CDB[11] */
6556                         memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
6557                         c->Request.CDB[12] = 0x00; /* reserved */
6558                         c->Request.CDB[13] = 0x00; /* reserved */
6559                         c->Request.CDB[14] = 0x00; /* reserved */
6560                         c->Request.CDB[15] = 0x00; /* reserved */
6561                         break;
6562                 default:
6563                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
6564                                 cmd);
6565                         BUG();
6566                 }
6567         } else {
6568                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6569                 BUG();
6570         }
6571
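	/*
	 * Translate the CISS transfer direction encoded above into the DMA
	 * direction used to map the single data buffer for this command.
	 */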
6572         switch (GET_DIR(c->Request.type_attr_dir)) {
6573         case XFER_READ:
6574                 pci_dir = PCI_DMA_FROMDEVICE;
6575                 break;
6576         case XFER_WRITE:
6577                 pci_dir = PCI_DMA_TODEVICE;
6578                 break;
6579         case XFER_NONE:
6580                 pci_dir = PCI_DMA_NONE;
6581                 break;
6582         default:
6583                 pci_dir = PCI_DMA_BIDIRECTIONAL;
6584         }
6585         if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6586                 return -1;
6587         return 0;
6588 }
6589
6590 /*
6591  * Map (physical) PCI mem into (virtual) kernel space
6592  */
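/*
 * ioremap_nocache() wants a page-aligned base, so round the address down
 * to a page boundary, map page_offs + size bytes, and hand back a pointer
 * adjusted by the original offset within the page.
 */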
6593 static void __iomem *remap_pci_mem(ulong base, ulong size)
6594 {
6595         ulong page_base = ((ulong) base) & PAGE_MASK;
6596         ulong page_offs = ((ulong) base) - page_base;
6597         void __iomem *page_remapped = ioremap_nocache(page_base,
6598                 page_offs + size);
6599
6600         return page_remapped ? (page_remapped + page_offs) : NULL;
6601 }
6602
6603 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6604 {
6605         return h->access.command_completed(h, q);
6606 }
6607
6608 static inline bool interrupt_pending(struct ctlr_info *h)
6609 {
6610         return h->access.intr_pending(h);
6611 }
6612
6613 static inline long interrupt_not_for_us(struct ctlr_info *h)
6614 {
6615         return (h->access.intr_pending(h) == 0) ||
6616                 (h->interrupts_enabled == 0);
6617 }
6618
6619 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6620         u32 raw_tag)
6621 {
6622         if (unlikely(tag_index >= h->nr_cmds)) {
6623                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6624                 return 1;
6625         }
6626         return 0;
6627 }
6628
6629 static inline void finish_cmd(struct CommandList *c)
6630 {
6631         dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6632         if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6633                         || c->cmd_type == CMD_IOACCEL2))
6634                 complete_scsi_command(c);
6635         else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6636                 complete(c->waiting);
6637 }
6638
6639 /* process completion of an indexed ("direct lookup") command */
6640 static inline void process_indexed_cmd(struct ctlr_info *h,
6641         u32 raw_tag)
6642 {
6643         u32 tag_index;
6644         struct CommandList *c;
6645
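	/*
	 * The hardware tag carries the cmd_pool index in its upper bits;
	 * shift off the low (flag) bits to recover the index.
	 */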
6646         tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6647         if (!bad_tag(h, tag_index, raw_tag)) {
6648                 c = h->cmd_pool + tag_index;
6649                 finish_cmd(c);
6650         }
6651 }
6652
6653 /* Some controllers, like p400, will give us one interrupt
6654  * after a soft reset, even if we turned interrupts off.
6655  * Only need to check for this in the hpsa_xxx_discard_completions
6656  * functions.
6657  */
6658 static int ignore_bogus_interrupt(struct ctlr_info *h)
6659 {
6660         if (likely(!reset_devices))
6661                 return 0;
6662
6663         if (likely(h->interrupts_enabled))
6664                 return 0;
6665
6666         dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6667                 "(known firmware bug.)  Ignoring.\n");
6668
6669         return 1;
6670 }
6671
6672 /*
6673  * Convert &h->q[x] (passed to interrupt handlers) back to h.
6674  * Relies on (h->q[x] == x) being true for x such that
6675  * 0 <= x < MAX_REPLY_QUEUES.
6676  */
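/*
 * Example: if queue == &h->q[2] then *queue == 2, so (queue - *queue)
 * is &h->q[0] and container_of() recovers the enclosing ctlr_info.
 */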
6677 static struct ctlr_info *queue_to_hba(u8 *queue)
6678 {
6679         return container_of((queue - *queue), struct ctlr_info, q[0]);
6680 }
6681
6682 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6683 {
6684         struct ctlr_info *h = queue_to_hba(queue);
6685         u8 q = *(u8 *) queue;
6686         u32 raw_tag;
6687
6688         if (ignore_bogus_interrupt(h))
6689                 return IRQ_NONE;
6690
6691         if (interrupt_not_for_us(h))
6692                 return IRQ_NONE;
6693         h->last_intr_timestamp = get_jiffies_64();
6694         while (interrupt_pending(h)) {
6695                 raw_tag = get_next_completion(h, q);
6696                 while (raw_tag != FIFO_EMPTY)
6697                         raw_tag = next_command(h, q);
6698         }
6699         return IRQ_HANDLED;
6700 }
6701
6702 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6703 {
6704         struct ctlr_info *h = queue_to_hba(queue);
6705         u32 raw_tag;
6706         u8 q = *(u8 *) queue;
6707
6708         if (ignore_bogus_interrupt(h))
6709                 return IRQ_NONE;
6710
6711         h->last_intr_timestamp = get_jiffies_64();
6712         raw_tag = get_next_completion(h, q);
6713         while (raw_tag != FIFO_EMPTY)
6714                 raw_tag = next_command(h, q);
6715         return IRQ_HANDLED;
6716 }
6717
6718 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6719 {
6720         struct ctlr_info *h = queue_to_hba((u8 *) queue);
6721         u32 raw_tag;
6722         u8 q = *(u8 *) queue;
6723
6724         if (interrupt_not_for_us(h))
6725                 return IRQ_NONE;
6726         h->last_intr_timestamp = get_jiffies_64();
6727         while (interrupt_pending(h)) {
6728                 raw_tag = get_next_completion(h, q);
6729                 while (raw_tag != FIFO_EMPTY) {
6730                         process_indexed_cmd(h, raw_tag);
6731                         raw_tag = next_command(h, q);
6732                 }
6733         }
6734         return IRQ_HANDLED;
6735 }
6736
6737 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6738 {
6739         struct ctlr_info *h = queue_to_hba(queue);
6740         u32 raw_tag;
6741         u8 q = *(u8 *) queue;
6742
6743         h->last_intr_timestamp = get_jiffies_64();
6744         raw_tag = get_next_completion(h, q);
6745         while (raw_tag != FIFO_EMPTY) {
6746                 process_indexed_cmd(h, raw_tag);
6747                 raw_tag = next_command(h, q);
6748         }
6749         return IRQ_HANDLED;
6750 }
6751
6752 /* Send a message CDB to the firmware. Careful, this only works
6753  * in simple mode, not performant mode due to the tag lookup.
6754  * We only ever use this immediately after a controller reset.
6755  */
6756 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6757                         unsigned char type)
6758 {
6759         struct Command {
6760                 struct CommandListHeader CommandHeader;
6761                 struct RequestBlock Request;
6762                 struct ErrDescriptor ErrorDescriptor;
6763         };
6764         struct Command *cmd;
6765         static const size_t cmd_sz = sizeof(*cmd) +
6766                                         sizeof(cmd->ErrorDescriptor);
6767         dma_addr_t paddr64;
6768         __le32 paddr32;
6769         u32 tag;
6770         void __iomem *vaddr;
6771         int i, err;
6772
6773         vaddr = pci_ioremap_bar(pdev, 0);
6774         if (vaddr == NULL)
6775                 return -ENOMEM;
6776
6777         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6778          * CCISS commands, so they must be allocated from the lower 4GiB of
6779          * memory.
6780          */
6781         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6782         if (err) {
6783                 iounmap(vaddr);
6784                 return err;
6785         }
6786
6787         cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6788         if (cmd == NULL) {
6789                 iounmap(vaddr);
6790                 return -ENOMEM;
6791         }
6792
6793         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
6794          * although there's no guarantee, we assume that the address is at
6795          * least 4-byte aligned (most likely, it's page-aligned).
6796          */
6797         paddr32 = cpu_to_le32(paddr64);
6798
6799         cmd->CommandHeader.ReplyQueue = 0;
6800         cmd->CommandHeader.SGList = 0;
6801         cmd->CommandHeader.SGTotal = cpu_to_le16(0);
6802         cmd->CommandHeader.tag = cpu_to_le64(paddr64);
6803         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6804
6805         cmd->Request.CDBLen = 16;
6806         cmd->Request.type_attr_dir =
6807                         TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
6808         cmd->Request.Timeout = 0; /* Don't time out */
6809         cmd->Request.CDB[0] = opcode;
6810         cmd->Request.CDB[1] = type;
6811         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
6812         cmd->ErrorDescriptor.Addr =
6813                         cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
6814         cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
6815
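	/*
	 * Post the command's bus address to the inbound request register,
	 * then poll the outbound reply register until the controller echoes
	 * a tag matching that address (or we give up).
	 */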
6816         writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
6817
6818         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6819                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
6820                 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
6821                         break;
6822                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6823         }
6824
6825         iounmap(vaddr);
6826
6827         /* we leak the DMA buffer here ... no choice since the controller could
6828          *  still complete the command.
6829          */
6830         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6831                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6832                         opcode, type);
6833                 return -ETIMEDOUT;
6834         }
6835
6836         pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6837
6838         if (tag & HPSA_ERROR_BIT) {
6839                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6840                         opcode, type);
6841                 return -EIO;
6842         }
6843
6844         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6845                 opcode, type);
6846         return 0;
6847 }
6848
6849 #define hpsa_noop(p) hpsa_message(p, 3, 0)
6850
6851 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
6852         void __iomem *vaddr, u32 use_doorbell)
6853 {
6854
6855         if (use_doorbell) {
6856                 /* For everything after the P600, the PCI power state method
6857                  * of resetting the controller doesn't work, so we have this
6858                  * other way using the doorbell register.
6859                  */
6860                 dev_info(&pdev->dev, "using doorbell to reset controller\n");
6861                 writel(use_doorbell, vaddr + SA5_DOORBELL);
6862
6863                 /* PMC hardware guys tell us we need a 10 second delay after
6864                  * doorbell reset and before any attempt to talk to the board
6865                  * at all to ensure that this actually works and doesn't fall
6866                  * over in some weird corner cases.
6867                  */
6868                 msleep(10000);
6869         } else { /* Try to do it the PCI power state way */
6870
6871                 /* Quoting from the Open CISS Specification: "The Power
6872                  * Management Control/Status Register (CSR) controls the power
6873                  * state of the device.  The normal operating state is D0,
6874                  * CSR=00h.  The software off state is D3, CSR=03h.  To reset
6875                  * the controller, place the interface device in D3 then to D0,
6876                  * this causes a secondary PCI reset which will reset the
6877                  * controller." */
6878
6879                 int rc = 0;
6880
6881                 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
6882
6883                 /* enter the D3hot power management state */
6884                 rc = pci_set_power_state(pdev, PCI_D3hot);
6885                 if (rc)
6886                         return rc;
6887
6888                 msleep(500);
6889
6890                 /* enter the D0 power management state */
6891                 rc = pci_set_power_state(pdev, PCI_D0);
6892                 if (rc)
6893                         return rc;
6894
6895                 /*
6896                  * The P600 requires a small delay when changing states.
6897                  * Otherwise we may think the board did not reset and we bail.
6898                  * This for kdump only and is particular to the P600.
6899                  * This is for kdump only and is particular to the P600.
6900                 msleep(500);
6901         }
6902         return 0;
6903 }
6904
6905 static void init_driver_version(char *driver_version, int len)
6906 {
6907         memset(driver_version, 0, len);
6908         strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
6909 }
6910
6911 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
6912 {
6913         char *driver_version;
6914         int i, size = sizeof(cfgtable->driver_version);
6915
6916         driver_version = kmalloc(size, GFP_KERNEL);
6917         if (!driver_version)
6918                 return -ENOMEM;
6919
6920         init_driver_version(driver_version, size);
6921         for (i = 0; i < size; i++)
6922                 writeb(driver_version[i], &cfgtable->driver_version[i]);
6923         kfree(driver_version);
6924         return 0;
6925 }
6926
6927 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6928                                           unsigned char *driver_ver)
6929 {
6930         int i;
6931
6932         for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6933                 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6934 }
6935
6936 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
6937 {
6938
6939         char *driver_ver, *old_driver_ver;
6940         int rc, size = sizeof(cfgtable->driver_version);
6941
6942         old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6943         if (!old_driver_ver)
6944                 return -ENOMEM;
6945         driver_ver = old_driver_ver + size;
6946
6947         /* After a reset, the 32 bytes of "driver version" in the cfgtable
6948          * should have been changed, otherwise we know the reset failed.
6949          */
6950         init_driver_version(old_driver_ver, size);
6951         read_driver_ver_from_cfgtable(cfgtable, driver_ver);
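	/*
	 * memcmp() == 0 here means the bytes still match the string we wrote
	 * before the reset, i.e. firmware never touched them, so report the
	 * reset as failed (nonzero return).
	 */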
6952         rc = !memcmp(driver_ver, old_driver_ver, size);
6953         kfree(old_driver_ver);
6954         return rc;
6955 }
6956 /* This does a hard reset of the controller using PCI power management
6957  * states or the doorbell register.
6958  */
6959 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
6960 {
6961         u64 cfg_offset;
6962         u32 cfg_base_addr;
6963         u64 cfg_base_addr_index;
6964         void __iomem *vaddr;
6965         unsigned long paddr;
6966         u32 misc_fw_support;
6967         int rc;
6968         struct CfgTable __iomem *cfgtable;
6969         u32 use_doorbell;
6970         u16 command_register;
6971
6972         /* For controllers as old as the P600, this is very nearly
6973          * the same thing as
6974          *
6975          * pci_save_state(pci_dev);
6976          * pci_set_power_state(pci_dev, PCI_D3hot);
6977          * pci_set_power_state(pci_dev, PCI_D0);
6978          * pci_restore_state(pci_dev);
6979          *
6980          * For controllers newer than the P600, the pci power state
6981          * method of resetting doesn't work so we have another way
6982          * using the doorbell register.
6983          */
6984
6985         if (!ctlr_is_resettable(board_id)) {
6986                 dev_warn(&pdev->dev, "Controller not resettable\n");
6987                 return -ENODEV;
6988         }
6989
6990         /* if controller is soft- but not hard resettable... */
6991         if (!ctlr_is_hard_resettable(board_id))
6992                 return -ENOTSUPP; /* try soft reset later. */
6993
6994         /* Save the PCI command register */
6995         pci_read_config_word(pdev, 4, &command_register);
6996         pci_save_state(pdev);
6997
6998         /* find the first memory BAR, so we can find the cfg table */
6999         rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7000         if (rc)
7001                 return rc;
7002         vaddr = remap_pci_mem(paddr, 0x250);
7003         if (!vaddr)
7004                 return -ENOMEM;
7005
7006         /* find cfgtable in order to check if reset via doorbell is supported */
7007         rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7008                                         &cfg_base_addr_index, &cfg_offset);
7009         if (rc)
7010                 goto unmap_vaddr;
7011         cfgtable = remap_pci_mem(pci_resource_start(pdev,
7012                        cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7013         if (!cfgtable) {
7014                 rc = -ENOMEM;
7015                 goto unmap_vaddr;
7016         }
7017         rc = write_driver_ver_to_cfgtable(cfgtable);
7018         if (rc)
7019                 goto unmap_cfgtable;
7020
7021         /* If reset via doorbell register is supported, use that.
7022          * There are two such methods.  Favor the newest method.
7023          */
7024         misc_fw_support = readl(&cfgtable->misc_fw_support);
7025         use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7026         if (use_doorbell) {
7027                 use_doorbell = DOORBELL_CTLR_RESET2;
7028         } else {
7029                 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7030                 if (use_doorbell) {
7031                         dev_warn(&pdev->dev,
7032                                 "Soft reset not supported. Firmware update is required.\n");
7033                         rc = -ENOTSUPP; /* try soft reset */
7034                         goto unmap_cfgtable;
7035                 }
7036         }
7037
7038         rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7039         if (rc)
7040                 goto unmap_cfgtable;
7041
7042         pci_restore_state(pdev);
7043         pci_write_config_word(pdev, 4, command_register);
7044
7045         /* Some devices (notably the HP Smart Array 5i Controller)
7046            need a little pause here */
7047         msleep(HPSA_POST_RESET_PAUSE_MSECS);
7048
7049         rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7050         if (rc) {
7051                 dev_warn(&pdev->dev,
7052                         "Failed waiting for board to become ready after hard reset\n");
7053                 goto unmap_cfgtable;
7054         }
7055
7056         rc = controller_reset_failed(cfgtable);
7057         if (rc < 0)
7058                 goto unmap_cfgtable;
7059         if (rc) {
7060                 dev_warn(&pdev->dev, "Unable to successfully reset "
7061                         "controller. Will try soft reset.\n");
7062                 rc = -ENOTSUPP;
7063         } else {
7064                 dev_info(&pdev->dev, "board ready after hard reset.\n");
7065         }
7066
7067 unmap_cfgtable:
7068         iounmap(cfgtable);
7069
7070 unmap_vaddr:
7071         iounmap(vaddr);
7072         return rc;
7073 }
7074
7075 /*
7076  *  We cannot read the structure directly, for portability we must use
7077  *   the io functions.
7078  *   This is for debug only.
7079  */
7080 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7081 {
7082 #ifdef HPSA_DEBUG
7083         int i;
7084         char temp_name[17];
7085
7086         dev_info(dev, "Controller Configuration information\n");
7087         dev_info(dev, "------------------------------------\n");
7088         for (i = 0; i < 4; i++)
7089                 temp_name[i] = readb(&(tb->Signature[i]));
7090         temp_name[4] = '\0';
7091         dev_info(dev, "   Signature = %s\n", temp_name);
7092         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
7093         dev_info(dev, "   Transport methods supported = 0x%x\n",
7094                readl(&(tb->TransportSupport)));
7095         dev_info(dev, "   Transport methods active = 0x%x\n",
7096                readl(&(tb->TransportActive)));
7097         dev_info(dev, "   Requested transport Method = 0x%x\n",
7098                readl(&(tb->HostWrite.TransportRequest)));
7099         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
7100                readl(&(tb->HostWrite.CoalIntDelay)));
7101         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
7102                readl(&(tb->HostWrite.CoalIntCount)));
7103         dev_info(dev, "   Max outstanding commands = %d\n",
7104                readl(&(tb->CmdsOutMax)));
7105         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7106         for (i = 0; i < 16; i++)
7107                 temp_name[i] = readb(&(tb->ServerName[i]));
7108         temp_name[16] = '\0';
7109         dev_info(dev, "   Server Name = %s\n", temp_name);
7110         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
7111                 readl(&(tb->HeartBeat)));
7112 #endif                          /* HPSA_DEBUG */
7113 }
7114
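/*
 * Translate a config-space base address register offset (e.g.
 * PCI_BASE_ADDRESS_2) into a pci_resource index, walking the BARs and
 * accounting for 64-bit memory BARs that occupy two 4-byte registers.
 */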
7115 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7116 {
7117         int i, offset, mem_type, bar_type;
7118
7119         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7120                 return 0;
7121         offset = 0;
7122         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7123                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7124                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7125                         offset += 4;
7126                 else {
7127                         mem_type = pci_resource_flags(pdev, i) &
7128                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7129                         switch (mem_type) {
7130                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
7131                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7132                                 offset += 4;    /* 32 bit */
7133                                 break;
7134                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
7135                                 offset += 8;
7136                                 break;
7137                         default:        /* reserved in PCI 2.2 */
7138                                 dev_warn(&pdev->dev,
7139                                        "base address is invalid\n");
7140                                 return -1;
7141                                 break;
7142                         }
7143                 }
7144                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7145                         return i + 1;
7146         }
7147         return -1;
7148 }
7149
7150 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7151 {
7152         if (h->msix_vector) {
7153                 if (h->pdev->msix_enabled)
7154                         pci_disable_msix(h->pdev);
7155                 h->msix_vector = 0;
7156         } else if (h->msi_vector) {
7157                 if (h->pdev->msi_enabled)
7158                         pci_disable_msi(h->pdev);
7159                 h->msi_vector = 0;
7160         }
7161 }
7162
7163 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7164  * controllers that are capable. If not, we use legacy INTx mode.
7165  */
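/*
 * Order of preference: MSI-X with up to MAX_REPLY_QUEUES vectors (capped
 * at the number of online CPUs), then a single MSI vector, then the
 * legacy INTx line that ends up in h->intr[h->intr_mode].
 */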
7166 static void hpsa_interrupt_mode(struct ctlr_info *h)
7167 {
7168 #ifdef CONFIG_PCI_MSI
7169         int err, i;
7170         struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7171
7172         for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7173                 hpsa_msix_entries[i].vector = 0;
7174                 hpsa_msix_entries[i].entry = i;
7175         }
7176
7177         /* Some boards advertise MSI but don't really support it */
7178         if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7179             (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
7180                 goto default_int_mode;
7181         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
7182                 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
7183                 h->msix_vector = MAX_REPLY_QUEUES;
7184                 if (h->msix_vector > num_online_cpus())
7185                         h->msix_vector = num_online_cpus();
7186                 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7187                                             1, h->msix_vector);
7188                 if (err < 0) {
7189                         dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7190                         h->msix_vector = 0;
7191                         goto single_msi_mode;
7192                 } else if (err < h->msix_vector) {
7193                         dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
7194                                "available\n", err);
7195                 }
7196                 h->msix_vector = err;
7197                 for (i = 0; i < h->msix_vector; i++)
7198                         h->intr[i] = hpsa_msix_entries[i].vector;
7199                 return;
7200         }
7201 single_msi_mode:
7202         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
7203                 dev_info(&h->pdev->dev, "MSI capable controller\n");
7204                 if (!pci_enable_msi(h->pdev))
7205                         h->msi_vector = 1;
7206                 else
7207                         dev_warn(&h->pdev->dev, "MSI init failed\n");
7208         }
7209 default_int_mode:
7210 #endif                          /* CONFIG_PCI_MSI */
7211         /* if we get here we're going to use the default interrupt mode */
7212         h->intr[h->intr_mode] = h->pdev->irq;
7213 }
7214
7215 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
7216 {
7217         int i;
7218         u32 subsystem_vendor_id, subsystem_device_id;
7219
7220         subsystem_vendor_id = pdev->subsystem_vendor;
7221         subsystem_device_id = pdev->subsystem_device;
7222         *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7223                     subsystem_vendor_id;
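	/* e.g. subsystem vendor 0x103C, device 0x3241 yields board_id 0x3241103C */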
7224
7225         for (i = 0; i < ARRAY_SIZE(products); i++)
7226                 if (*board_id == products[i].board_id)
7227                         return i;
7228
7229         if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7230                 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7231                 !hpsa_allow_any) {
7232                 dev_warn(&pdev->dev, "unrecognized board ID: "
7233                         "0x%08x, ignoring.\n", *board_id);
7234                 return -ENODEV;
7235         }
7236         return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7237 }
7238
7239 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7240                                     unsigned long *memory_bar)
7241 {
7242         int i;
7243
7244         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7245                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7246                         /* addressing mode bits already removed */
7247                         *memory_bar = pci_resource_start(pdev, i);
7248                         dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7249                                 *memory_bar);
7250                         return 0;
7251                 }
7252         dev_warn(&pdev->dev, "no memory BAR found\n");
7253         return -ENODEV;
7254 }
7255
7256 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7257                                      int wait_for_ready)
7258 {
7259         int i, iterations;
7260         u32 scratchpad;
7261         if (wait_for_ready)
7262                 iterations = HPSA_BOARD_READY_ITERATIONS;
7263         else
7264                 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7265
7266         for (i = 0; i < iterations; i++) {
7267                 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7268                 if (wait_for_ready) {
7269                         if (scratchpad == HPSA_FIRMWARE_READY)
7270                                 return 0;
7271                 } else {
7272                         if (scratchpad != HPSA_FIRMWARE_READY)
7273                                 return 0;
7274                 }
7275                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7276         }
7277         dev_warn(&pdev->dev, "board not ready, timed out.\n");
7278         return -ENODEV;
7279 }
7280
7281 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7282                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7283                                u64 *cfg_offset)
7284 {
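	/*
	 * The controller publishes the config table location in two
	 * registers: SA5_CTCFG_OFFSET holds the config-space BAR register
	 * (converted to a resource index below) and SA5_CTMEM_OFFSET holds
	 * the byte offset of the table within that BAR.
	 */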
7285         *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7286         *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7287         *cfg_base_addr &= (u32) 0x0000ffff;
7288         *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7289         if (*cfg_base_addr_index == -1) {
7290                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7291                 return -ENODEV;
7292         }
7293         return 0;
7294 }
7295
7296 static void hpsa_free_cfgtables(struct ctlr_info *h)
7297 {
7298         if (h->transtable) {
7299                 iounmap(h->transtable);
7300                 h->transtable = NULL;
7301         }
7302         if (h->cfgtable) {
7303                 iounmap(h->cfgtable);
7304                 h->cfgtable = NULL;
7305         }
7306 }
7307
7308 /* Find and map CISS config table and transfer table
7309  * several items must be unmapped (freed) later
7310  */
7311 static int hpsa_find_cfgtables(struct ctlr_info *h)
7312 {
7313         u64 cfg_offset;
7314         u32 cfg_base_addr;
7315         u64 cfg_base_addr_index;
7316         u32 trans_offset;
7317         int rc;
7318
7319         rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7320                 &cfg_base_addr_index, &cfg_offset);
7321         if (rc)
7322                 return rc;
7323         h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7324                        cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7325         if (!h->cfgtable) {
7326                 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7327                 return -ENOMEM;
7328         }
7329         rc = write_driver_ver_to_cfgtable(h->cfgtable);
7330         if (rc)
7331                 return rc;
7332         /* Find performant mode table. */
7333         trans_offset = readl(&h->cfgtable->TransMethodOffset);
7334         h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7335                                 cfg_base_addr_index)+cfg_offset+trans_offset,
7336                                 sizeof(*h->transtable));
7337         if (!h->transtable) {
7338                 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7339                 hpsa_free_cfgtables(h);
7340                 return -ENOMEM;
7341         }
7342         return 0;
7343 }
7344
7345 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7346 {
7347 #define MIN_MAX_COMMANDS 16
7348         BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7349
7350         h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7351
7352         /* Limit commands in memory limited kdump scenario. */
7353         if (reset_devices && h->max_commands > 32)
7354                 h->max_commands = 32;
7355
7356         if (h->max_commands < MIN_MAX_COMMANDS) {
7357                 dev_warn(&h->pdev->dev,
7358                         "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
7359                         h->max_commands,
7360                         MIN_MAX_COMMANDS);
7361                 h->max_commands = MIN_MAX_COMMANDS;
7362         }
7363 }
7364
7365 /* If the controller reports that the total max sg entries is greater than 512,
7366  * then we know that chained SG blocks work.  (Original smart arrays did not
7367  * support chained SG blocks and would return zero for max sg entries.)
7368  */
7369 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7370 {
7371         return h->maxsgentries > 512;
7372 }
7373
7374 /* Interrogate the hardware for some limits:
7375  * max commands, max SG elements without chaining, and with chaining,
7376  * SG chain block size, etc.
7377  */
7378 static void hpsa_find_board_params(struct ctlr_info *h)
7379 {
7380         hpsa_get_max_perf_mode_cmds(h);
7381         h->nr_cmds = h->max_commands;
7382         h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7383         h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7384         if (hpsa_supports_chained_sg_blocks(h)) {
7385                 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
7386                 h->max_cmd_sg_entries = 32;
7387                 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
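		/*
		 * Requests needing more than max_cmd_sg_entries elements
		 * spill the remainder into an external chain block of up
		 * to chainsize entries, reached via the reserved pointer.
		 */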
7388                 h->maxsgentries--; /* save one for chain pointer */
7389         } else {
7390                 /*
7391                  * Original smart arrays supported at most 31 s/g entries
7392                  * embedded inline in the command (trying to use more
7393                  * would lock up the controller)
7394                  */
7395                 h->max_cmd_sg_entries = 31;
7396                 h->maxsgentries = 31; /* default to traditional values */
7397                 h->chainsize = 0;
7398         }
7399
7400         /* Find out what task management functions are supported and cache */
7401         h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7402         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7403                 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7404         if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7405                 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7406         if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7407                 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7408 }
7409
7410 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7411 {
7412         if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7413                 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7414                 return false;
7415         }
7416         return true;
7417 }
7418
7419 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7420 {
7421         u32 driver_support;
7422
7423         driver_support = readl(&(h->cfgtable->driver_support));
7424         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7425 #ifdef CONFIG_X86
7426         driver_support |= ENABLE_SCSI_PREFETCH;
7427 #endif
7428         driver_support |= ENABLE_UNIT_ATTN;
7429         writel(driver_support, &(h->cfgtable->driver_support));
7430 }
7431
7432 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
7433  * in a prefetch beyond physical memory.
7434  */
7435 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7436 {
7437         u32 dma_prefetch;
7438
7439         if (h->board_id != 0x3225103C)
7440                 return;
7441         dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7442         dma_prefetch |= 0x8000;
7443         writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7444 }
7445
7446 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7447 {
7448         int i;
7449         u32 doorbell_value;
7450         unsigned long flags;
7451         /* wait until the clear_event_notify bit 6 is cleared by controller. */
7452         for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7453                 spin_lock_irqsave(&h->lock, flags);
7454                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7455                 spin_unlock_irqrestore(&h->lock, flags);
7456                 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7457                         goto done;
7458                 /* delay and try again */
7459                 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7460         }
7461         return -ENODEV;
7462 done:
7463         return 0;
7464 }
7465
7466 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7467 {
7468         int i;
7469         u32 doorbell_value;
7470         unsigned long flags;
7471
7472         /* under certain very rare conditions, this can take a while.
7473          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7474          * as we enter this code.)
7475          */
7476         for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7477                 if (h->remove_in_progress)
7478                         goto done;
7479                 spin_lock_irqsave(&h->lock, flags);
7480                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7481                 spin_unlock_irqrestore(&h->lock, flags);
7482                 if (!(doorbell_value & CFGTBL_ChangeReq))
7483                         goto done;
7484                 /* delay and try again */
7485                 msleep(MODE_CHANGE_WAIT_INTERVAL);
7486         }
7487         return -ENODEV;
7488 done:
7489         return 0;
7490 }
7491
7492 /* return -ENODEV or other reason on error, 0 on success */
7493 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7494 {
7495         u32 trans_support;
7496
7497         trans_support = readl(&(h->cfgtable->TransportSupport));
7498         if (!(trans_support & SIMPLE_MODE))
7499                 return -ENOTSUPP;
7500
7501         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7502
7503         /* Update the field, and then ring the doorbell */
7504         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7505         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7506         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7507         if (hpsa_wait_for_mode_change_ack(h))
7508                 goto error;
7509         print_cfg_table(&h->pdev->dev, h->cfgtable);
7510         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7511                 goto error;
7512         h->transMethod = CFGTBL_Trans_Simple;
7513         return 0;
7514 error:
7515         dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7516         return -ENODEV;
7517 }
7518
7519 /* free items allocated or mapped by hpsa_pci_init */
7520 static void hpsa_free_pci_init(struct ctlr_info *h)
7521 {
7522         hpsa_free_cfgtables(h);                 /* pci_init 4 */
7523         iounmap(h->vaddr);                      /* pci_init 3 */
7524         h->vaddr = NULL;
7525         hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
7526         /*
7527          * call pci_disable_device before pci_release_regions per
7528          * Documentation/PCI/pci.txt
7529          */
7530         pci_disable_device(h->pdev);            /* pci_init 1 */
7531         pci_release_regions(h->pdev);           /* pci_init 2 */
7532 }
7533
7534 /* several items must be freed later */
7535 static int hpsa_pci_init(struct ctlr_info *h)
7536 {
7537         int prod_index, err;
7538
7539         prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7540         if (prod_index < 0)
7541                 return prod_index;
7542         h->product_name = products[prod_index].product_name;
7543         h->access = *(products[prod_index].access);
7544
7545         h->needs_abort_tags_swizzled =
7546                 ctlr_needs_abort_tags_swizzled(h->board_id);
7547
7548         pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7549                                PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7550
7551         err = pci_enable_device(h->pdev);
7552         if (err) {
7553                 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7554                 pci_disable_device(h->pdev);
7555                 return err;
7556         }
7557
7558         err = pci_request_regions(h->pdev, HPSA);
7559         if (err) {
7560                 dev_err(&h->pdev->dev,
7561                         "failed to obtain PCI resources\n");
7562                 pci_disable_device(h->pdev);
7563                 return err;
7564         }
7565
7566         pci_set_master(h->pdev);
7567
7568         hpsa_interrupt_mode(h);
7569         err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7570         if (err)
7571                 goto clean2;    /* intmode+region, pci */
7572         h->vaddr = remap_pci_mem(h->paddr, 0x250);
7573         if (!h->vaddr) {
7574                 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7575                 err = -ENOMEM;
7576                 goto clean2;    /* intmode+region, pci */
7577         }
7578         err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7579         if (err)
7580                 goto clean3;    /* vaddr, intmode+region, pci */
7581         err = hpsa_find_cfgtables(h);
7582         if (err)
7583                 goto clean3;    /* vaddr, intmode+region, pci */
7584         hpsa_find_board_params(h);
7585
7586         if (!hpsa_CISS_signature_present(h)) {
7587                 err = -ENODEV;
7588                 goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7589         }
7590         hpsa_set_driver_support_bits(h);
7591         hpsa_p600_dma_prefetch_quirk(h);
7592         err = hpsa_enter_simple_mode(h);
7593         if (err)
7594                 goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7595         return 0;
7596
7597 clean4: /* cfgtables, vaddr, intmode+region, pci */
7598         hpsa_free_cfgtables(h);
7599 clean3: /* vaddr, intmode+region, pci */
7600         iounmap(h->vaddr);
7601         h->vaddr = NULL;
7602 clean2: /* intmode+region, pci */
7603         hpsa_disable_interrupt_mode(h);
7604         /*
7605          * call pci_disable_device before pci_release_regions per
7606          * Documentation/PCI/pci.txt
7607          */
7608         pci_disable_device(h->pdev);
7609         pci_release_regions(h->pdev);
7610         return err;
7611 }
7612
7613 static void hpsa_hba_inquiry(struct ctlr_info *h)
7614 {
7615         int rc;
7616
7617 #define HBA_INQUIRY_BYTE_COUNT 64
7618         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7619         if (!h->hba_inquiry_data)
7620                 return;
7621         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7622                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7623         if (rc != 0) {
7624                 kfree(h->hba_inquiry_data);
7625                 h->hba_inquiry_data = NULL;
7626         }
7627 }
7628
7629 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7630 {
7631         int rc, i;
7632         void __iomem *vaddr;
7633
7634         if (!reset_devices)
7635                 return 0;
7636
7637         /* The kdump kernel is loading and we don't know what state the
7638          * PCI interface is in.  dev->enable_cnt is zero, so enable and
7639          * disable the device, wait a while, then switch it back on.
7640          */
7641         rc = pci_enable_device(pdev);
7642         if (rc) {
7643                 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7644                 return -ENODEV;
7645         }
7646         pci_disable_device(pdev);
7647         msleep(260);                    /* a randomly chosen number */
7648         rc = pci_enable_device(pdev);
7649         if (rc) {
7650                 dev_warn(&pdev->dev, "failed to enable device.\n");
7651                 return -ENODEV;
7652         }
7653
7654         pci_set_master(pdev);
7655
7656         vaddr = pci_ioremap_bar(pdev, 0);
7657         if (vaddr == NULL) {
7658                 rc = -ENOMEM;
7659                 goto out_disable;
7660         }
7661         writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7662         iounmap(vaddr);
7663
7664         /* Reset the controller with a PCI power-cycle or via doorbell */
7665         rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7666
7667         /* -ENOTSUPP here means we cannot reset the controller
7668          * but it's already (and still) up and running in
7669          * "performant mode".  Or, it might be 640x, which can't reset
7670          * due to concerns about shared bbwc between 6402/6404 pair.
7671          */
7672         if (rc)
7673                 goto out_disable;
7674
7675         /* Now try to get the controller to respond to a no-op */
7676         dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7677         for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7678                 if (hpsa_noop(pdev) == 0)
7679                         break;
7680                 else
7681                         dev_warn(&pdev->dev, "no-op failed%s\n",
7682                                         (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
7683         }
7684
7685 out_disable:
7686
7687         pci_disable_device(pdev);
7688         return rc;
7689 }
7690
7691 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7692 {
7693         kfree(h->cmd_pool_bits);
7694         h->cmd_pool_bits = NULL;
7695         if (h->cmd_pool) {
7696                 pci_free_consistent(h->pdev,
7697                                 h->nr_cmds * sizeof(struct CommandList),
7698                                 h->cmd_pool,
7699                                 h->cmd_pool_dhandle);
7700                 h->cmd_pool = NULL;
7701                 h->cmd_pool_dhandle = 0;
7702         }
7703         if (h->errinfo_pool) {
7704                 pci_free_consistent(h->pdev,
7705                                 h->nr_cmds * sizeof(struct ErrorInfo),
7706                                 h->errinfo_pool,
7707                                 h->errinfo_pool_dhandle);
7708                 h->errinfo_pool = NULL;
7709                 h->errinfo_pool_dhandle = 0;
7710         }
7711 }
7712
7713 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7714 {
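	/*
	 * cmd_pool_bits is a bitmap with one bit per command slot;
	 * cmd_alloc() scans it to pick a free CommandList from the
	 * DMA-coherent cmd_pool/errinfo_pool pair allocated below.
	 */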
7715         h->cmd_pool_bits = kzalloc(
7716                 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7717                 sizeof(unsigned long), GFP_KERNEL);
7718         h->cmd_pool = pci_alloc_consistent(h->pdev,
7719                     h->nr_cmds * sizeof(*h->cmd_pool),
7720                     &(h->cmd_pool_dhandle));
7721         h->errinfo_pool = pci_alloc_consistent(h->pdev,
7722                     h->nr_cmds * sizeof(*h->errinfo_pool),
7723                     &(h->errinfo_pool_dhandle));
7724         if ((h->cmd_pool_bits == NULL)
7725             || (h->cmd_pool == NULL)
7726             || (h->errinfo_pool == NULL)) {
7727                 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
7728                 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
7729         }
7730         hpsa_preinitialize_commands(h);
7731         return 0;
7732 clean_up:
7733         hpsa_free_cmd_pool(h);
7734         return -ENOMEM;
7735 }
7736
7737 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7738 {
7739         int i, cpu;
7740
7741         cpu = cpumask_first(cpu_online_mask);
7742         for (i = 0; i < h->msix_vector; i++) {
7743                 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
7744                 cpu = cpumask_next(cpu, cpu_online_mask);
7745         }
7746 }
7747
7748 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7749 static void hpsa_free_irqs(struct ctlr_info *h)
7750 {
7751         int i;
7752
7753         if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7754                 /* Single reply queue, only one irq to free */
7755                 i = h->intr_mode;
7756                 irq_set_affinity_hint(h->intr[i], NULL);
7757                 free_irq(h->intr[i], &h->q[i]);
7758                 h->q[i] = 0;
7759                 return;
7760         }
7761
7762         for (i = 0; i < h->msix_vector; i++) {
7763                 irq_set_affinity_hint(h->intr[i], NULL);
7764                 free_irq(h->intr[i], &h->q[i]);
7765                 h->q[i] = 0;
7766         }
7767         for (; i < MAX_REPLY_QUEUES; i++)
7768                 h->q[i] = 0;
7769 }
7770
7771 /* returns 0 on success; cleans up and returns -Enn on error */
7772 static int hpsa_request_irqs(struct ctlr_info *h,
7773         irqreturn_t (*msixhandler)(int, void *),
7774         irqreturn_t (*intxhandler)(int, void *))
7775 {
7776         int rc, i;
7777
7778         /*
7779          * initialize h->q[x] = x so that interrupt handlers know which
7780          * queue to process.
7781          */
7782         for (i = 0; i < MAX_REPLY_QUEUES; i++)
7783                 h->q[i] = (u8) i;
7784
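	/*
	 * &h->q[i] is handed to request_irq() as the dev_id cookie, so an
	 * interrupt handler can recover both the hba (via queue_to_hba())
	 * and the reply queue number from the single pointer it receives.
	 */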
7785         if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
7786                 /* If performant mode and MSI-X, use multiple reply queues */
7787                 for (i = 0; i < h->msix_vector; i++) {
7788                         sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
7789                         rc = request_irq(h->intr[i], msixhandler,
7790                                         0, h->intrname[i],
7791                                         &h->q[i]);
7792                         if (rc) {
7793                                 int j;
7794
7795                                 dev_err(&h->pdev->dev,
7796                                         "failed to get irq %d for %s\n",
7797                                        h->intr[i], h->devname);
7798                                 for (j = 0; j < i; j++) {
7799                                         free_irq(h->intr[j], &h->q[j]);
7800                                         h->q[j] = 0;
7801                                 }
7802                                 for (; j < MAX_REPLY_QUEUES; j++)
7803                                         h->q[j] = 0;
7804                                 return rc;
7805                         }
7806                 }
7807                 hpsa_irq_affinity_hints(h);
7808         } else {
7809                 /* Use single reply pool */
7810                 if (h->msix_vector > 0 || h->msi_vector) {
7811                         if (h->msix_vector)
7812                                 sprintf(h->intrname[h->intr_mode],
7813                                         "%s-msix", h->devname);
7814                         else
7815                                 sprintf(h->intrname[h->intr_mode],
7816                                         "%s-msi", h->devname);
7817                         rc = request_irq(h->intr[h->intr_mode],
7818                                 msixhandler, 0,
7819                                 h->intrname[h->intr_mode],
7820                                 &h->q[h->intr_mode]);
7821                 } else {
7822                         sprintf(h->intrname[h->intr_mode],
7823                                 "%s-intx", h->devname);
7824                         rc = request_irq(h->intr[h->intr_mode],
7825                                 intxhandler, IRQF_SHARED,
7826                                 h->intrname[h->intr_mode],
7827                                 &h->q[h->intr_mode]);
7828                 }
7829                 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
7830         }
7831         if (rc) {
7832                 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
7833                        h->intr[h->intr_mode], h->devname);
7834                 hpsa_free_irqs(h);
7835                 return -ENODEV;
7836         }
7837         return 0;
7838 }
7839
7840 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
7841 {
7842         int rc;
7843         hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
7844
7845         dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
7846         rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7847         if (rc) {
7848                 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
7849                 return rc;
7850         }
7851
7852         dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
7853         rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7854         if (rc) {
7855                 dev_warn(&h->pdev->dev, "Board failed to become ready "
7856                         "after soft reset.\n");
7857                 return rc;
7858         }
7859
7860         return 0;
7861 }
7862
7863 static void hpsa_free_reply_queues(struct ctlr_info *h)
7864 {
7865         int i;
7866
7867         for (i = 0; i < h->nreply_queues; i++) {
7868                 if (!h->reply_queue[i].head)
7869                         continue;
7870                 pci_free_consistent(h->pdev,
7871                                         h->reply_queue_size,
7872                                         h->reply_queue[i].head,
7873                                         h->reply_queue[i].busaddr);
7874                 h->reply_queue[i].head = NULL;
7875                 h->reply_queue[i].busaddr = 0;
7876         }
7877         h->reply_queue_size = 0;
7878 }
7879
7880 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7881 {
7882         hpsa_free_performant_mode(h);           /* init_one 7 */
7883         hpsa_free_sg_chain_blocks(h);           /* init_one 6 */
7884         hpsa_free_cmd_pool(h);                  /* init_one 5 */
7885         hpsa_free_irqs(h);                      /* init_one 4 */
7886         scsi_host_put(h->scsi_host);            /* init_one 3 */
7887         h->scsi_host = NULL;                    /* init_one 3 */
7888         hpsa_free_pci_init(h);                  /* init_one 2_5 */
7889         free_percpu(h->lockup_detected);        /* init_one 2 */
7890         h->lockup_detected = NULL;              /* init_one 2 */
7891         if (h->resubmit_wq) {
7892                 destroy_workqueue(h->resubmit_wq);      /* init_one 1 */
7893                 h->resubmit_wq = NULL;
7894         }
7895         if (h->rescan_ctlr_wq) {
7896                 destroy_workqueue(h->rescan_ctlr_wq);
7897                 h->rescan_ctlr_wq = NULL;
7898         }
7899         kfree(h);                               /* init_one 1 */
7900 }
7901
7902 /* Called when controller lockup detected. */
7903 static void fail_all_outstanding_cmds(struct ctlr_info *h)
7904 {
7905         int i, refcount;
7906         struct CommandList *c;
7907         int failcount = 0;
7908
7909         flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
7910         for (i = 0; i < h->nr_cmds; i++) {
7911                 c = h->cmd_pool + i;
7912                 refcount = atomic_inc_return(&c->refcount);
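		/*
		 * refcount > 1 means someone besides us holds a reference,
		 * i.e. the command is outstanding; complete it as a lockup
		 * failure.  Otherwise only our temporary reference exists.
		 */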
7913                 if (refcount > 1) {
7914                         c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
7915                         finish_cmd(c);
7916                         atomic_dec(&h->commands_outstanding);
7917                         failcount++;
7918                 }
7919                 cmd_free(h, c);
7920         }
7921         dev_warn(&h->pdev->dev,
7922                 "failed %d commands in fail_all\n", failcount);
7923 }
7924
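/*
 * lockup_detected is kept per-cpu so the I/O path can test it locklessly
 * without bouncing a shared cache line; on lockup every CPU's copy is set
 * to the same nonzero value.
 */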
7925 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7926 {
7927         int cpu;
7928
7929         for_each_online_cpu(cpu) {
7930                 u32 *lockup_detected;
7931                 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7932                 *lockup_detected = value;
7933         }
7934         wmb(); /* be sure the per-cpu variables are out to memory */
7935 }
7936
7937 static void controller_lockup_detected(struct ctlr_info *h)
7938 {
7939         unsigned long flags;
7940         u32 lockup_detected;
7941
7942         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7943         spin_lock_irqsave(&h->lock, flags);
7944         lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7945         if (!lockup_detected) {
7946                 /* no heartbeat, but controller gave us a zero. */
7947                 dev_warn(&h->pdev->dev,
7948                         "lockup detected after %d seconds but scratchpad register is zero\n",
7949                         h->heartbeat_sample_interval / HZ);
7950                 lockup_detected = 0xffffffff;
7951         }
7952         set_lockup_detected_for_all_cpus(h, lockup_detected);
7953         spin_unlock_irqrestore(&h->lock, flags);
7954         dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
7955                         lockup_detected, h->heartbeat_sample_interval / HZ);
7956         pci_disable_device(h->pdev);
7957         fail_all_outstanding_cmds(h);
7958 }
7959
7960 static int detect_controller_lockup(struct ctlr_info *h)
7961 {
7962         u64 now;
7963         u32 heartbeat;
7964         unsigned long flags;
7965
7966         now = get_jiffies_64();
7967         /* If we've received an interrupt recently, we're ok. */
7968         if (time_after64(h->last_intr_timestamp +
7969                                 (h->heartbeat_sample_interval), now))
7970                 return false;
7971
7972         /*
7973          * If we've already checked the heartbeat recently, we're ok.
7974          * This could happen if someone sends us a signal. We
7975          * otherwise don't care about signals in this thread.
7976          */
7977         if (time_after64(h->last_heartbeat_timestamp +
7978                                 (h->heartbeat_sample_interval), now))
7979                 return false;
7980
7981         /* If heartbeat has not changed since we last looked, we're not ok. */
7982         spin_lock_irqsave(&h->lock, flags);
7983         heartbeat = readl(&h->cfgtable->HeartBeat);
7984         spin_unlock_irqrestore(&h->lock, flags);
7985         if (h->last_heartbeat == heartbeat) {
7986                 controller_lockup_detected(h);
7987                 return true;
7988         }
7989
7990         /* We're ok. */
7991         h->last_heartbeat = heartbeat;
7992         h->last_heartbeat_timestamp = now;
7993         return false;
7994 }
7995
7996 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
7997 {
7998         int i;
7999         char *event_type;
8000
8001         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8002                 return;
8003
8004         /* Ask the controller to clear the events we're handling. */
8005         if ((h->transMethod & (CFGTBL_Trans_io_accel1
8006                         | CFGTBL_Trans_io_accel2)) &&
8007                 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8008                  h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8009
8010                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8011                         event_type = "state change";
8012                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8013                         event_type = "configuration change";
8014                 /* Stop sending new RAID offload reqs via the IO accelerator */
8015                 scsi_block_requests(h->scsi_host);
8016                 for (i = 0; i < h->ndevices; i++)
8017                         h->dev[i]->offload_enabled = 0;
8018                 hpsa_drain_accel_commands(h);
8019                 /* Log the event and tell the controller to clear it */
8020                 dev_warn(&h->pdev->dev,
8021                         "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8022                         h->events, event_type);
8023                 writel(h->events, &(h->cfgtable->clear_event_notify));
8024                 /* Set the "clear event notify field update" bit 6 */
8025                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8026                 /* Wait until ctlr clears 'clear event notify field', bit 6 */
8027                 hpsa_wait_for_clear_event_notify_ack(h);
8028                 scsi_unblock_requests(h->scsi_host);
8029         } else {
8030                 /* Acknowledge controller notification events. */
8031                 writel(h->events, &(h->cfgtable->clear_event_notify));
8032                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8033                 hpsa_wait_for_clear_event_notify_ack(h);
8034 #if 0
8035                 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8036                 hpsa_wait_for_mode_change_ack(h);
8037 #endif
8038         }
8039         return;
8040 }
8041
8042 /* Check a register on the controller to see if there are configuration
8043  * changes (added/changed/removed logical drives, etc.) which mean that
8044  * we should rescan the controller for devices.
8045  * Also check flag for driver-initiated rescan.
8046  */
8047 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8048 {
8049         if (h->drv_req_rescan) {
8050                 h->drv_req_rescan = 0;
8051                 return 1;
8052         }
8053
8054         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8055                 return 0;
8056
8057         h->events = readl(&(h->cfgtable->event_notify));
8058         return h->events & RESCAN_REQUIRED_EVENT_BITS;
8059 }
8060
8061 /*
8062  * Check if any of the offline devices have become ready
8063  */
8064 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8065 {
8066         unsigned long flags;
8067         struct offline_device_entry *d;
8068         struct list_head *this, *tmp;
8069
8070         spin_lock_irqsave(&h->offline_device_lock, flags);
8071         list_for_each_safe(this, tmp, &h->offline_device_list) {
8072                 d = list_entry(this, struct offline_device_entry,
8073                                 offline_list);
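                /*
                 * Drop the spinlock while hpsa_volume_offline() talks to the
                 * controller; re-acquire it before walking the list again.
                 */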
8074                 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8075                 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8076                         spin_lock_irqsave(&h->offline_device_lock, flags);
8077                         list_del(&d->offline_list);
8078                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
8079                         return 1;
8080                 }
8081                 spin_lock_irqsave(&h->offline_device_lock, flags);
8082         }
8083         spin_unlock_irqrestore(&h->offline_device_lock, flags);
8084         return 0;
8085 }
8086
8087 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8088 {
8089         unsigned long flags;
8090         struct ctlr_info *h = container_of(to_delayed_work(work),
8091                                         struct ctlr_info, rescan_ctlr_work);
8092
8093
8094         if (h->remove_in_progress)
8095                 return;
8096
8097         if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
8098                 scsi_host_get(h->scsi_host);
8099                 hpsa_ack_ctlr_events(h);
8100                 hpsa_scan_start(h->scsi_host);
8101                 scsi_host_put(h->scsi_host);
8102         }
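        /*
         * Re-arm the rescan worker unless the controller is being removed;
         * remove_in_progress is checked under h->lock so we do not race
         * with hpsa_remove_one().
         */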
8103         spin_lock_irqsave(&h->lock, flags);
8104         if (!h->remove_in_progress)
8105                 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8106                                 h->heartbeat_sample_interval);
8107         spin_unlock_irqrestore(&h->lock, flags);
8108 }
8109
8110 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8111 {
8112         unsigned long flags;
8113         struct ctlr_info *h = container_of(to_delayed_work(work),
8114                                         struct ctlr_info, monitor_ctlr_work);
8115
8116         detect_controller_lockup(h);
8117         if (lockup_detected(h))
8118                 return;
8119
8120         spin_lock_irqsave(&h->lock, flags);
8121         if (!h->remove_in_progress)
8122                 schedule_delayed_work(&h->monitor_ctlr_work,
8123                                 h->heartbeat_sample_interval);
8124         spin_unlock_irqrestore(&h->lock, flags);
8125 }
8126
8127 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8128                                                 char *name)
8129 {
8130         struct workqueue_struct *wq = NULL;
8131
8132         wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8133         if (!wq)
8134                 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8135
8136         return wq;
8137 }
8138
8139 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8140 {
8141         int dac, rc;
8142         struct ctlr_info *h;
8143         int try_soft_reset = 0;
8144         unsigned long flags;
8145         u32 board_id;
8146
8147         if (number_of_controllers == 0)
8148                 printk(KERN_INFO DRIVER_NAME "\n");
8149
8150         rc = hpsa_lookup_board_id(pdev, &board_id);
8151         if (rc < 0) {
8152                 dev_warn(&pdev->dev, "Board ID not found\n");
8153                 return rc;
8154         }
8155
8156         rc = hpsa_init_reset_devices(pdev, board_id);
8157         if (rc) {
8158                 if (rc != -ENOTSUPP)
8159                         return rc;
8160                 /* If the reset fails in a particular way (it has no way to do
8161                  * a proper hard reset, so returns -ENOTSUPP) we can try to do
8162                  * a soft reset once we get the controller configured up to the
8163                  * point that it can accept a command.
8164                  */
8165                 try_soft_reset = 1;
8166                 rc = 0;
8167         }
8168
8169 reinit_after_soft_reset:
8170
8171         /* Command structures must be aligned on a 32-byte boundary because
8172          * the 5 lower bits of the address are used by the hardware and by
8173          * the driver.  See comments in hpsa.h for more info.
8174          */
8175         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8176         h = kzalloc(sizeof(*h), GFP_KERNEL);
8177         if (!h) {
8178                 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8179                 return -ENOMEM;
8180         }
8181
8182         h->pdev = pdev;
8183
8184         h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8185         INIT_LIST_HEAD(&h->offline_device_list);
8186         spin_lock_init(&h->lock);
8187         spin_lock_init(&h->offline_device_lock);
8188         spin_lock_init(&h->scan_lock);
8189         atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8190         atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
8191
8192         /* Allocate and clear per-cpu variable lockup_detected */
8193         h->lockup_detected = alloc_percpu(u32);
8194         if (!h->lockup_detected) {
8195                 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8196                 rc = -ENOMEM;
8197                 goto clean1;    /* aer/h */
8198         }
8199         set_lockup_detected_for_all_cpus(h, 0);
8200
8201         rc = hpsa_pci_init(h);
8202         if (rc)
8203                 goto clean2;    /* lu, aer/h */
8204
8205         /* Relies on h-> settings made by hpsa_pci_init, including
8206          * the interrupt mode and h->intr. */
8207         rc = hpsa_scsi_host_alloc(h);
8208         if (rc)
8209                 goto clean2_5;  /* pci, lu, aer/h */
8210
8211         sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8212         h->ctlr = number_of_controllers;
8213         number_of_controllers++;
8214
8215         /* configure PCI DMA stuff */
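        /* dac == 1 means the 64-bit DMA mask was accepted; dac == 0 means
         * we fell back to 32-bit DMA addressing.
         */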
8216         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8217         if (rc == 0) {
8218                 dac = 1;
8219         } else {
8220                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8221                 if (rc == 0) {
8222                         dac = 0;
8223                 } else {
8224                         dev_err(&pdev->dev, "no suitable DMA available\n");
8225                         goto clean3;    /* shost, pci, lu, aer/h */
8226                 }
8227         }
8228
8229         /* make sure the board interrupts are off */
8230         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8231
8232         rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8233         if (rc)
8234                 goto clean3;    /* shost, pci, lu, aer/h */
8235         rc = hpsa_alloc_cmd_pool(h);
8236         if (rc)
8237                 goto clean4;    /* irq, shost, pci, lu, aer/h */
8238         rc = hpsa_alloc_sg_chain_blocks(h);
8239         if (rc)
8240                 goto clean5;    /* cmd, irq, shost, pci, lu, aer/h */
8241         init_waitqueue_head(&h->scan_wait_queue);
8242         init_waitqueue_head(&h->abort_cmd_wait_queue);
8243         init_waitqueue_head(&h->event_sync_wait_queue);
8244         mutex_init(&h->reset_mutex);
8245         h->scan_finished = 1; /* no scan currently in progress */
8246
8247         pci_set_drvdata(pdev, h);
8248         h->ndevices = 0;
8249
8250         spin_lock_init(&h->devlock);
8251         rc = hpsa_put_ctlr_into_performant_mode(h);
8252         if (rc)
8253                 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8254
8255         /* hook into SCSI subsystem */
8256         rc = hpsa_scsi_add_host(h);
8257         if (rc)
8258                 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8259
8260         /* create the rescan and resubmit workqueues */
8261         h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8262         if (!h->rescan_ctlr_wq) {
8263                 rc = -ENOMEM;
8264                 goto clean7;
8265         }
8266
8267         h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8268         if (!h->resubmit_wq) {
8269                 rc = -ENOMEM;
8270                 goto clean7;
8271         }
8272
8273         /*
8274          * At this point, the controller is ready to take commands.
8275          * Now, if reset_devices and the hard reset didn't work, try
8276          * the soft reset and see if that works.
8277          */
8278         if (try_soft_reset) {
8279
8280                 /* This is kind of gross.  We may or may not get a completion
8281                  * from the soft reset command, and if we do, then the value
8282                  * from the fifo may or may not be valid.  So, we wait 10 secs
8283                  * after the reset throwing away any completions we get during
8284                  * that time.  Unregister the interrupt handler and register
8285                  * fake ones to scoop up any residual completions.
8286                  */
8287                 spin_lock_irqsave(&h->lock, flags);
8288                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8289                 spin_unlock_irqrestore(&h->lock, flags);
8290                 hpsa_free_irqs(h);
8291                 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8292                                         hpsa_intx_discard_completions);
8293                 if (rc) {
8294                         dev_warn(&h->pdev->dev,
8295                                 "Failed to request_irq after soft reset.\n");
8296                         /*
8297                          * cannot goto clean7 or free_irqs will be called
8298                          * again. Instead, do its work
8299                          */
8300                         hpsa_free_performant_mode(h);   /* clean7 */
8301                         hpsa_free_sg_chain_blocks(h);   /* clean6 */
8302                         hpsa_free_cmd_pool(h);          /* clean5 */
8303                         /*
8304                          * skip hpsa_free_irqs(h) clean4 since that
8305                          * was just called before request_irqs failed
8306                          */
8307                         goto clean3;
8308                 }
8309
8310                 rc = hpsa_kdump_soft_reset(h);
8311                 if (rc)
8312                         /* Neither hard nor soft reset worked, we're hosed. */
8313                         goto clean7;
8314
8315                 dev_info(&h->pdev->dev, "Board READY.\n");
8316                 dev_info(&h->pdev->dev,
8317                         "Waiting for stale completions to drain.\n");
8318                 h->access.set_intr_mask(h, HPSA_INTR_ON);
8319                 msleep(10000);
8320                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8321
8322                 rc = controller_reset_failed(h->cfgtable);
8323                 if (rc)
8324                         dev_info(&h->pdev->dev,
8325                                 "Soft reset appears to have failed.\n");
8326
8327                 /* since the controller's reset, we have to go back and re-init
8328                  * everything.  Easiest to just forget what we've done and do it
8329                  * all over again.
8330                  */
8331                 hpsa_undo_allocations_after_kdump_soft_reset(h);
8332                 try_soft_reset = 0;
8333                 if (rc)
8334                         /* don't goto clean, we already unallocated */
8335                         return -ENODEV;
8336
8337                 goto reinit_after_soft_reset;
8338         }
8339
8340         /* Enable Accelerated IO path at driver layer */
8341         h->acciopath_status = 1;
8342
8343
8344         /* Turn the interrupts on so we can service requests */
8345         h->access.set_intr_mask(h, HPSA_INTR_ON);
8346
8347         hpsa_hba_inquiry(h);
8348
8349         /* Monitor the controller for firmware lockups */
8350         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8351         INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8352         schedule_delayed_work(&h->monitor_ctlr_work,
8353                                 h->heartbeat_sample_interval);
8354         INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8355         queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8356                                 h->heartbeat_sample_interval);
8357         return 0;
8358
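/*
 * Error cleanup: each label below undoes one init stage and falls through
 * to the earlier ones; the trailing comments list what is still allocated
 * at that point.
 */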
8359 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8360         hpsa_free_performant_mode(h);
8361         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8362 clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
8363         hpsa_free_sg_chain_blocks(h);
8364 clean5: /* cmd, irq, shost, pci, lu, aer/h */
8365         hpsa_free_cmd_pool(h);
8366 clean4: /* irq, shost, pci, lu, aer/h */
8367         hpsa_free_irqs(h);
8368 clean3: /* shost, pci, lu, aer/h */
8369         scsi_host_put(h->scsi_host);
8370         h->scsi_host = NULL;
8371 clean2_5: /* pci, lu, aer/h */
8372         hpsa_free_pci_init(h);
8373 clean2: /* lu, aer/h */
8374         if (h->lockup_detected) {
8375                 free_percpu(h->lockup_detected);
8376                 h->lockup_detected = NULL;
8377         }
8378 clean1: /* wq/aer/h */
8379         if (h->resubmit_wq) {
8380                 destroy_workqueue(h->resubmit_wq);
8381                 h->resubmit_wq = NULL;
8382         }
8383         if (h->rescan_ctlr_wq) {
8384                 destroy_workqueue(h->rescan_ctlr_wq);
8385                 h->rescan_ctlr_wq = NULL;
8386         }
8387         kfree(h);
8388         return rc;
8389 }
8390
8391 static void hpsa_flush_cache(struct ctlr_info *h)
8392 {
8393         char *flush_buf;
8394         struct CommandList *c;
8395         int rc;
8396
8397         if (unlikely(lockup_detected(h)))
8398                 return;
8399         flush_buf = kzalloc(4, GFP_KERNEL);
8400         if (!flush_buf)
8401                 return;
8402
8403         c = cmd_alloc(h);
8404
8405         if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8406                 RAID_CTLR_LUNID, TYPE_CMD)) {
8407                 goto out;
8408         }
8409         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8410                                         PCI_DMA_TODEVICE, NO_TIMEOUT);
8411         if (rc)
8412                 goto out;
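        /*
         * Note: the "out:" label below sits inside the if-statement body, so
         * every error path above falls through to the same warning before
         * the command and buffer are freed.
         */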
8413         if (c->err_info->CommandStatus != 0)
8414 out:
8415                 dev_warn(&h->pdev->dev,
8416                         "error flushing cache on controller\n");
8417         cmd_free(h, c);
8418         kfree(flush_buf);
8419 }
8420
8421 static void hpsa_shutdown(struct pci_dev *pdev)
8422 {
8423         struct ctlr_info *h;
8424
8425         h = pci_get_drvdata(pdev);
8426         /* Flush the write cache so that all data in the battery-backed
8427          * cache reaches the disks, then turn board interrupts off and
8428          * free the interrupt resources.
8429          */
8430         hpsa_flush_cache(h);
8431         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8432         hpsa_free_irqs(h);                      /* init_one 4 */
8433         hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
8434 }
8435
8436 static void hpsa_free_device_info(struct ctlr_info *h)
8437 {
8438         int i;
8439
8440         for (i = 0; i < h->ndevices; i++) {
8441                 kfree(h->dev[i]);
8442                 h->dev[i] = NULL;
8443         }
8444 }
8445
8446 static void hpsa_remove_one(struct pci_dev *pdev)
8447 {
8448         struct ctlr_info *h;
8449         unsigned long flags;
8450
8451         if (pci_get_drvdata(pdev) == NULL) {
8452                 dev_err(&pdev->dev, "unable to remove device\n");
8453                 return;
8454         }
8455         h = pci_get_drvdata(pdev);
8456
8457         /* Get rid of any controller monitoring work items */
8458         spin_lock_irqsave(&h->lock, flags);
8459         h->remove_in_progress = 1;
8460         spin_unlock_irqrestore(&h->lock, flags);
8461         cancel_delayed_work_sync(&h->monitor_ctlr_work);
8462         cancel_delayed_work_sync(&h->rescan_ctlr_work);
8463         destroy_workqueue(h->rescan_ctlr_wq);
8464         destroy_workqueue(h->resubmit_wq);
8465
8466         /*
8467          * Call before disabling interrupts.
8468          * scsi_remove_host can trigger I/O operations especially
8469          * when multipath is enabled. There can be SYNCHRONIZE CACHE
8470          * operations which cannot complete and will hang the system.
8471          */
8472         if (h->scsi_host)
8473                 scsi_remove_host(h->scsi_host);         /* init_one 8 */
8474         /* includes hpsa_free_irqs - init_one 4 */
8475         /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8476         hpsa_shutdown(pdev);
8477
8478         hpsa_free_device_info(h);               /* scan */
8479
8480         kfree(h->hba_inquiry_data);                     /* init_one 10 */
8481         h->hba_inquiry_data = NULL;                     /* init_one 10 */
8482         hpsa_free_ioaccel2_sg_chain_blocks(h);
8483         hpsa_free_performant_mode(h);                   /* init_one 7 */
8484         hpsa_free_sg_chain_blocks(h);                   /* init_one 6 */
8485         hpsa_free_cmd_pool(h);                          /* init_one 5 */
8486
8487         /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
8488
8489         scsi_host_put(h->scsi_host);                    /* init_one 3 */
8490         h->scsi_host = NULL;                            /* init_one 3 */
8491
8492         /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8493         hpsa_free_pci_init(h);                          /* init_one 2.5 */
8494
8495         free_percpu(h->lockup_detected);                /* init_one 2 */
8496         h->lockup_detected = NULL;                      /* init_one 2 */
8497         /* (void) pci_disable_pcie_error_reporting(pdev); */    /* init_one 1 */
8498         kfree(h);                                       /* init_one 1 */
8499 }
8500
8501 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8502         __attribute__((unused)) pm_message_t state)
8503 {
8504         return -ENOSYS;
8505 }
8506
8507 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8508 {
8509         return -ENOSYS;
8510 }
8511
8512 static struct pci_driver hpsa_pci_driver = {
8513         .name = HPSA,
8514         .probe = hpsa_init_one,
8515         .remove = hpsa_remove_one,
8516         .id_table = hpsa_pci_device_id, /* id_table */
8517         .shutdown = hpsa_shutdown,
8518         .suspend = hpsa_suspend,
8519         .resume = hpsa_resume,
8520 };
8521
8522 /* Fill in bucket_map[], given nsgs (the max number of
8523  * scatter gather elements supported) and bucket[],
8524  * which is an array of 8 integers.  The bucket[] array
8525  * contains 8 different DMA transfer sizes (in 16
8526  * byte increments) which the controller uses to fetch
8527  * commands.  This function fills in bucket_map[], which
8528  * maps a given number of scatter gather elements to one of
8529  * the 8 DMA transfer sizes.  The point of it is to allow the
8530  * controller to only do as much DMA as needed to fetch the
8531  * command, with the DMA transfer size encoded in the lower
8532  * bits of the command address.
8533  */
8534 static void  calc_bucket_map(int bucket[], int num_buckets,
8535         int nsgs, int min_blocks, u32 *bucket_map)
8536 {
8537         int i, j, b, size;
8538
8539         /* Note, bucket_map must have nsgs+1 entries. */
8540         for (i = 0; i <= nsgs; i++) {
8541                 /* Compute size of a command with i SG entries */
8542                 size = i + min_blocks;
8543                 b = num_buckets; /* Assume the biggest bucket */
8544                 /* Find the bucket that is just big enough */
8545                 for (j = 0; j < num_buckets; j++) {
8546                         if (bucket[j] >= size) {
8547                                 b = j;
8548                                 break;
8549                         }
8550                 }
8551                 /* for a command with i SG entries, use bucket b. */
8552                 bucket_map[i] = b;
8553         }
8554 }
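
/*
 * Worked example (using the bft[] values set up below with min_blocks = 4):
 * a command with 3 SG entries needs 4 + 3 = 7 sixteen-byte blocks.  The
 * smallest bucket that fits is bft[2] = 8, so bucket_map[3] = 2 and the
 * controller fetches 8 * 16 = 128 bytes for that command.
 */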
8555
8556 /*
8557  * Return -ENODEV on error, 0 on success (or no action taken).
8558  * Allocates numerous items that must be freed later.
8559  */
8560 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
8561 {
8562         int i;
8563         unsigned long register_value;
8564         unsigned long transMethod = CFGTBL_Trans_Performant |
8565                         (trans_support & CFGTBL_Trans_use_short_tags) |
8566                                 CFGTBL_Trans_enable_directed_msix |
8567                         (trans_support & (CFGTBL_Trans_io_accel1 |
8568                                 CFGTBL_Trans_io_accel2));
8569         struct access_method access = SA5_performant_access;
8570
8571         /* This is a bit complicated.  There are 8 registers on
8572          * the controller which we write to in order to tell it the 8
8573          * different command sizes there may be.  It's a way of
8574          * reducing the DMA done to fetch each command.  Encoded into
8575          * each command's tag are 3 bits which communicate to the controller
8576          * which of the eight sizes that command fits within.  The size of
8577          * each command depends on how many scatter gather entries there are.
8578          * Each SG entry requires 16 bytes.  The eight registers are programmed
8579          * with the number of 16-byte blocks a command of that size requires.
8580          * The smallest command possible requires 5 such 16-byte blocks;
8581          * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
8582          * blocks.  Note, this only extends to the SG entries contained
8583          * within the command block, and does not extend to chained blocks
8584          * of SG elements.   bft[] contains the eight values we write to
8585          * the registers.  They are not evenly distributed, but have more
8586          * sizes for small commands, and fewer sizes for larger commands.
8587          */
8588         int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
8589 #define MIN_IOACCEL2_BFT_ENTRY 5
8590 #define HPSA_IOACCEL2_HEADER_SZ 4
8591         int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8592                         13, 14, 15, 16, 17, 18, 19,
8593                         HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8594         BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8595         BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8596         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8597                                  16 * MIN_IOACCEL2_BFT_ENTRY);
8598         BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
8599         BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
8600         /*  5 = 1 s/g entry or 4k
8601          *  6 = 2 s/g entry or 8k
8602          *  8 = 4 s/g entry or 16k
8603          * 10 = 6 s/g entry or 24k
8604          */
8605
8606         /* If the controller supports either ioaccel method then
8607          * we can also use the RAID stack submit path that does not
8608          * perform the superfluous readl() after each command submission.
8609          */
8610         if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8611                 access = SA5_performant_access_no_read;
8612
8613         /* Controller spec: zero out this buffer. */
8614         for (i = 0; i < h->nreply_queues; i++)
8615                 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
8616
8617         bft[7] = SG_ENTRIES_IN_CMD + 4;
8618         calc_bucket_map(bft, ARRAY_SIZE(bft),
8619                                 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
8620         for (i = 0; i < 8; i++)
8621                 writel(bft[i], &h->transtable->BlockFetch[i]);
8622
8623         /* size of controller ring buffer */
8624         writel(h->max_commands, &h->transtable->RepQSize);
8625         writel(h->nreply_queues, &h->transtable->RepQCount);
8626         writel(0, &h->transtable->RepQCtrAddrLow32);
8627         writel(0, &h->transtable->RepQCtrAddrHigh32);
8628
8629         for (i = 0; i < h->nreply_queues; i++) {
8630                 writel(0, &h->transtable->RepQAddr[i].upper);
8631                 writel(h->reply_queue[i].busaddr,
8632                         &h->transtable->RepQAddr[i].lower);
8633         }
8634
8635         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
8636         writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
8637         /*
8638          * Enable outbound interrupt coalescing in accelerator mode.
8639          */
8640         if (trans_support & CFGTBL_Trans_io_accel1) {
8641                 access = SA5_ioaccel_mode1_access;
8642                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8643                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
8644         } else {
8645                 if (trans_support & CFGTBL_Trans_io_accel2) {
8646                         access = SA5_ioaccel_mode2_access;
8647                         writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8648                         writel(4, &h->cfgtable->HostWrite.CoalIntCount);
8649                 }
8650         }
8651         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8652         if (hpsa_wait_for_mode_change_ack(h)) {
8653                 dev_err(&h->pdev->dev,
8654                         "performant mode problem - doorbell timeout\n");
8655                 return -ENODEV;
8656         }
8657         register_value = readl(&(h->cfgtable->TransportActive));
8658         if (!(register_value & CFGTBL_Trans_Performant)) {
8659                 dev_err(&h->pdev->dev,
8660                         "performant mode problem - transport not active\n");
8661                 return -ENODEV;
8662         }
8663         /* Change the access methods to the performant access methods */
8664         h->access = access;
8665         h->transMethod = transMethod;
8666
8667         if (!((trans_support & CFGTBL_Trans_io_accel1) ||
8668                 (trans_support & CFGTBL_Trans_io_accel2)))
8669                 return 0;
8670
8671         if (trans_support & CFGTBL_Trans_io_accel1) {
8672                 /* Set up I/O accelerator mode */
8673                 for (i = 0; i < h->nreply_queues; i++) {
8674                         writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
8675                         h->reply_queue[i].current_entry =
8676                                 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
8677                 }
8678                 bft[7] = h->ioaccel_maxsg + 8;
8679                 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
8680                                 h->ioaccel1_blockFetchTable);
8681
8682                 /* initialize all reply queue entries to unused */
8683                 for (i = 0; i < h->nreply_queues; i++)
8684                         memset(h->reply_queue[i].head,
8685                                 (u8) IOACCEL_MODE1_REPLY_UNUSED,
8686                                 h->reply_queue_size);
8687
8688                 /* set all the constant fields in the accelerator command
8689                  * frames once at init time to save CPU cycles later.
8690                  */
8691                 for (i = 0; i < h->nr_cmds; i++) {
8692                         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
8693
8694                         cp->function = IOACCEL1_FUNCTION_SCSIIO;
8695                         cp->err_info = (u32) (h->errinfo_pool_dhandle +
8696                                         (i * sizeof(struct ErrorInfo)));
8697                         cp->err_info_len = sizeof(struct ErrorInfo);
8698                         cp->sgl_offset = IOACCEL1_SGLOFFSET;
8699                         cp->host_context_flags =
8700                                 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
8701                         cp->timeout_sec = 0;
8702                         cp->ReplyQueue = 0;
8703                         cp->tag =
8704                                 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
8705                         cp->host_addr =
8706                                 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
8707                                         (i * sizeof(struct io_accel1_cmd)));
8708                 }
8709         } else if (trans_support & CFGTBL_Trans_io_accel2) {
8710                 u64 cfg_offset, cfg_base_addr_index;
8711                 u32 bft2_offset, cfg_base_addr;
8712                 int rc;
8713
8714                 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
8715                         &cfg_base_addr_index, &cfg_offset);
8716                 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
8717                 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
8718                 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
8719                                 4, h->ioaccel2_blockFetchTable);
8720                 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
8721                 BUILD_BUG_ON(offsetof(struct CfgTable,
8722                                 io_accel_request_size_offset) != 0xb8);
8723                 h->ioaccel2_bft2_regs =
8724                         remap_pci_mem(pci_resource_start(h->pdev,
8725                                         cfg_base_addr_index) +
8726                                         cfg_offset + bft2_offset,
8727                                         ARRAY_SIZE(bft2) *
8728                                         sizeof(*h->ioaccel2_bft2_regs));
8729                 for (i = 0; i < ARRAY_SIZE(bft2); i++)
8730                         writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
8731         }
8732         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8733         if (hpsa_wait_for_mode_change_ack(h)) {
8734                 dev_err(&h->pdev->dev,
8735                         "performant mode problem - enabling ioaccel mode\n");
8736                 return -ENODEV;
8737         }
8738         return 0;
8739 }
8740
8741 /* Free ioaccel1 mode command blocks and block fetch table */
8742 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8743 {
8744         if (h->ioaccel_cmd_pool) {
8745                 pci_free_consistent(h->pdev,
8746                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8747                         h->ioaccel_cmd_pool,
8748                         h->ioaccel_cmd_pool_dhandle);
8749                 h->ioaccel_cmd_pool = NULL;
8750                 h->ioaccel_cmd_pool_dhandle = 0;
8751         }
8752         kfree(h->ioaccel1_blockFetchTable);
8753         h->ioaccel1_blockFetchTable = NULL;
8754 }
8755
8756 /* Allocate ioaccel1 mode command blocks and block fetch table */
8757 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8758 {
8759         h->ioaccel_maxsg =
8760                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8761         if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
8762                 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
8763
8764         /* Command structures must be aligned on a 128-byte boundary
8765          * because the 7 lower bits of the address are used by the
8766          * hardware.
8767          */
8768         BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
8769                         IOACCEL1_COMMANDLIST_ALIGNMENT);
8770         h->ioaccel_cmd_pool =
8771                 pci_alloc_consistent(h->pdev,
8772                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8773                         &(h->ioaccel_cmd_pool_dhandle));
8774
8775         h->ioaccel1_blockFetchTable =
8776                 kmalloc(((h->ioaccel_maxsg + 1) *
8777                                 sizeof(u32)), GFP_KERNEL);
8778
8779         if ((h->ioaccel_cmd_pool == NULL) ||
8780                 (h->ioaccel1_blockFetchTable == NULL))
8781                 goto clean_up;
8782
8783         memset(h->ioaccel_cmd_pool, 0,
8784                 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
8785         return 0;
8786
8787 clean_up:
8788         hpsa_free_ioaccel1_cmd_and_bft(h);
8789         return -ENOMEM;
8790 }
8791
8792 /* Free ioaccel2 mode command blocks and block fetch table */
8793 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8794 {
8795         hpsa_free_ioaccel2_sg_chain_blocks(h);
8796
8797         if (h->ioaccel2_cmd_pool) {
8798                 pci_free_consistent(h->pdev,
8799                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8800                         h->ioaccel2_cmd_pool,
8801                         h->ioaccel2_cmd_pool_dhandle);
8802                 h->ioaccel2_cmd_pool = NULL;
8803                 h->ioaccel2_cmd_pool_dhandle = 0;
8804         }
8805         kfree(h->ioaccel2_blockFetchTable);
8806         h->ioaccel2_blockFetchTable = NULL;
8807 }
8808
8809 /* Allocate ioaccel2 mode command blocks and block fetch table */
8810 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8811 {
8812         int rc;
8813
8814         /* Allocate ioaccel2 mode command blocks and block fetch table */
8815
8816         h->ioaccel_maxsg =
8817                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8818         if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
8819                 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
8820
8821         BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
8822                         IOACCEL2_COMMANDLIST_ALIGNMENT);
8823         h->ioaccel2_cmd_pool =
8824                 pci_alloc_consistent(h->pdev,
8825                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8826                         &(h->ioaccel2_cmd_pool_dhandle));
8827
8828         h->ioaccel2_blockFetchTable =
8829                 kmalloc(((h->ioaccel_maxsg + 1) *
8830                                 sizeof(u32)), GFP_KERNEL);
8831
8832         if ((h->ioaccel2_cmd_pool == NULL) ||
8833                 (h->ioaccel2_blockFetchTable == NULL)) {
8834                 rc = -ENOMEM;
8835                 goto clean_up;
8836         }
8837
8838         rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8839         if (rc)
8840                 goto clean_up;
8841
8842         memset(h->ioaccel2_cmd_pool, 0,
8843                 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
8844         return 0;
8845
8846 clean_up:
8847         hpsa_free_ioaccel2_cmd_and_bft(h);
8848         return rc;
8849 }
8850
8851 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
8852 static void hpsa_free_performant_mode(struct ctlr_info *h)
8853 {
8854         kfree(h->blockFetchTable);
8855         h->blockFetchTable = NULL;
8856         hpsa_free_reply_queues(h);
8857         hpsa_free_ioaccel1_cmd_and_bft(h);
8858         hpsa_free_ioaccel2_cmd_and_bft(h);
8859 }
8860
8861 /* Return -ENODEV on error, 0 on success (or no action taken).
8862  * Allocates numerous items that must be freed later.
8863  */
8864 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
8865 {
8866         u32 trans_support;
8867         unsigned long transMethod = CFGTBL_Trans_Performant |
8868                                         CFGTBL_Trans_use_short_tags;
8869         int i, rc;
8870
8871         if (hpsa_simple_mode)
8872                 return 0;
8873
8874         trans_support = readl(&(h->cfgtable->TransportSupport));
8875         if (!(trans_support & PERFORMANT_MODE))
8876                 return 0;
8877
8878         /* Check for I/O accelerator mode support */
8879         if (trans_support & CFGTBL_Trans_io_accel1) {
8880                 transMethod |= CFGTBL_Trans_io_accel1 |
8881                                 CFGTBL_Trans_enable_directed_msix;
8882                 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
8883                 if (rc)
8884                         return rc;
8885         } else if (trans_support & CFGTBL_Trans_io_accel2) {
8886                 transMethod |= CFGTBL_Trans_io_accel2 |
8887                                 CFGTBL_Trans_enable_directed_msix;
8888                 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
8889                 if (rc)
8890                         return rc;
8891         }
8892
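        /* One reply queue per MSI-X vector; a single queue otherwise. */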
8893         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
8894         hpsa_get_max_perf_mode_cmds(h);
8895         /* Performant mode ring buffer and supporting data structures */
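        /* Each reply queue element is a single 64-bit tag. */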
8896         h->reply_queue_size = h->max_commands * sizeof(u64);
8897
8898         for (i = 0; i < h->nreply_queues; i++) {
8899                 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
8900                                                 h->reply_queue_size,
8901                                                 &(h->reply_queue[i].busaddr));
8902                 if (!h->reply_queue[i].head) {
8903                         rc = -ENOMEM;
8904                         goto clean1;    /* rq, ioaccel */
8905                 }
8906                 h->reply_queue[i].size = h->max_commands;
8907                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
8908                 h->reply_queue[i].current_entry = 0;
8909         }
8910
8911         /* Need a block fetch table for performant mode */
8912         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
8913                                 sizeof(u32)), GFP_KERNEL);
8914         if (!h->blockFetchTable) {
8915                 rc = -ENOMEM;
8916                 goto clean1;    /* rq, ioaccel */
8917         }
8918
8919         rc = hpsa_enter_performant_mode(h, trans_support);
8920         if (rc)
8921                 goto clean2;    /* bft, rq, ioaccel */
8922         return 0;
8923
8924 clean2: /* bft, rq, ioaccel */
8925         kfree(h->blockFetchTable);
8926         h->blockFetchTable = NULL;
8927 clean1: /* rq, ioaccel */
8928         hpsa_free_reply_queues(h);
8929         hpsa_free_ioaccel1_cmd_and_bft(h);
8930         hpsa_free_ioaccel2_cmd_and_bft(h);
8931         return rc;
8932 }
8933
8934 static int is_accelerated_cmd(struct CommandList *c)
8935 {
8936         return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
8937 }
8938
8939 static void hpsa_drain_accel_commands(struct ctlr_info *h)
8940 {
8941         struct CommandList *c = NULL;
8942         int i, accel_cmds_out;
8943         int refcount;
8944
8945         do { /* wait for all outstanding ioaccel commands to drain out */
8946                 accel_cmds_out = 0;
8947                 for (i = 0; i < h->nr_cmds; i++) {
8948                         c = h->cmd_pool + i;
8949                         refcount = atomic_inc_return(&c->refcount);
8950                         if (refcount > 1) /* Command is allocated */
8951                                 accel_cmds_out += is_accelerated_cmd(c);
8952                         cmd_free(h, c);
8953                 }
8954                 if (accel_cmds_out <= 0)
8955                         break;
8956                 msleep(100);
8957         } while (1);
8958 }
8959
8960 /*
8961  *  This is it.  Register the PCI driver information for the cards we control;
8962  *  the OS will call our registered routines when it finds one of our cards.
8963  */
8964 static int __init hpsa_init(void)
8965 {
8966         return pci_register_driver(&hpsa_pci_driver);
8967 }
8968
8969 static void __exit hpsa_cleanup(void)
8970 {
8971         pci_unregister_driver(&hpsa_pci_driver);
8972 }
8973
8974 static void __attribute__((unused)) verify_offsets(void)
8975 {
8976 #define VERIFY_OFFSET(member, offset) \
8977         BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
8978
8979         VERIFY_OFFSET(structure_size, 0);
8980         VERIFY_OFFSET(volume_blk_size, 4);
8981         VERIFY_OFFSET(volume_blk_cnt, 8);
8982         VERIFY_OFFSET(phys_blk_shift, 16);
8983         VERIFY_OFFSET(parity_rotation_shift, 17);
8984         VERIFY_OFFSET(strip_size, 18);
8985         VERIFY_OFFSET(disk_starting_blk, 20);
8986         VERIFY_OFFSET(disk_blk_cnt, 28);
8987         VERIFY_OFFSET(data_disks_per_row, 36);
8988         VERIFY_OFFSET(metadata_disks_per_row, 38);
8989         VERIFY_OFFSET(row_cnt, 40);
8990         VERIFY_OFFSET(layout_map_count, 42);
8991         VERIFY_OFFSET(flags, 44);
8992         VERIFY_OFFSET(dekindex, 46);
8993         /* VERIFY_OFFSET(reserved, 48 */
8994         VERIFY_OFFSET(data, 64);
8995
8996 #undef VERIFY_OFFSET
8997
8998 #define VERIFY_OFFSET(member, offset) \
8999         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9000
9001         VERIFY_OFFSET(IU_type, 0);
9002         VERIFY_OFFSET(direction, 1);
9003         VERIFY_OFFSET(reply_queue, 2);
9004         /* VERIFY_OFFSET(reserved1, 3);  */
9005         VERIFY_OFFSET(scsi_nexus, 4);
9006         VERIFY_OFFSET(Tag, 8);
9007         VERIFY_OFFSET(cdb, 16);
9008         VERIFY_OFFSET(cciss_lun, 32);
9009         VERIFY_OFFSET(data_len, 40);
9010         VERIFY_OFFSET(cmd_priority_task_attr, 44);
9011         VERIFY_OFFSET(sg_count, 45);
9012         /* VERIFY_OFFSET(reserved3 */
9013         VERIFY_OFFSET(err_ptr, 48);
9014         VERIFY_OFFSET(err_len, 56);
9015         /* VERIFY_OFFSET(reserved4  */
9016         VERIFY_OFFSET(sg, 64);
9017
9018 #undef VERIFY_OFFSET
9019
9020 #define VERIFY_OFFSET(member, offset) \
9021         BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9022
9023         VERIFY_OFFSET(dev_handle, 0x00);
9024         VERIFY_OFFSET(reserved1, 0x02);
9025         VERIFY_OFFSET(function, 0x03);
9026         VERIFY_OFFSET(reserved2, 0x04);
9027         VERIFY_OFFSET(err_info, 0x0C);
9028         VERIFY_OFFSET(reserved3, 0x10);
9029         VERIFY_OFFSET(err_info_len, 0x12);
9030         VERIFY_OFFSET(reserved4, 0x13);
9031         VERIFY_OFFSET(sgl_offset, 0x14);
9032         VERIFY_OFFSET(reserved5, 0x15);
9033         VERIFY_OFFSET(transfer_len, 0x1C);
9034         VERIFY_OFFSET(reserved6, 0x20);
9035         VERIFY_OFFSET(io_flags, 0x24);
9036         VERIFY_OFFSET(reserved7, 0x26);
9037         VERIFY_OFFSET(LUN, 0x34);
9038         VERIFY_OFFSET(control, 0x3C);
9039         VERIFY_OFFSET(CDB, 0x40);
9040         VERIFY_OFFSET(reserved8, 0x50);
9041         VERIFY_OFFSET(host_context_flags, 0x60);
9042         VERIFY_OFFSET(timeout_sec, 0x62);
9043         VERIFY_OFFSET(ReplyQueue, 0x64);
9044         VERIFY_OFFSET(reserved9, 0x65);
9045         VERIFY_OFFSET(tag, 0x68);
9046         VERIFY_OFFSET(host_addr, 0x70);
9047         VERIFY_OFFSET(CISS_LUN, 0x78);
9048         VERIFY_OFFSET(SG, 0x78 + 8);
9049 #undef VERIFY_OFFSET
9050 }
9051
9052 module_init(hpsa_init);
9053 module_exit(hpsa_cleanup);