/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to storagedev@pmcs.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.10-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
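/*
 * Both parameters are declared S_IRUGO|S_IWUSR, so they can be set at
 * load time or toggled afterwards through sysfs.  Illustrative usage
 * (not part of this file):
 *
 *	modprobe hpsa hpsa_simple_mode=1
 *	echo 1 > /sys/module/hpsa/parameters/hpsa_allow_any
 */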
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
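/*
 * Example of the board_id encoding (illustrative): the PCI subsystem
 * device ID occupies the upper 16 bits and the subsystem vendor ID the
 * lower 16, so subdevice 0x3241 from vendor 0x103C (HP) yields the
 * 0x3241103C entry below, the Smart Array P212.
 */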
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static int hpsa_luns_changed(struct ctlr_info *h);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
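/*
 * Worked example (illustrative): for fixed-format sense data such as
 * 70 00 06 00 00 00 00 0a 00 00 00 00 29 00 ..., scsi_normalize_sense()
 * reports sense_key 0x06 (UNIT ATTENTION), asc 0x29, ascq 0x00
 * (power on, reset, or bus device reset occurred) -- the same triple
 * that check_for_unit_attention() below examines.
 */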
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (ascq) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
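/*
 * The HPSA_RAID_* values double as indices into raid_label[]: e.g. a
 * reported raid_level of 3 (HPSA_RAID_5) prints as "RAID 5" in
 * raid_level_show() below, and any value past the table is clamped to
 * RAID_UNKNOWN.
 */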
static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
#define MAX_PATHS 8

static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->external ||
			hdev->devtype == TYPE_RAID ||
			is_logical_device(hdev)) {
			output_len += snprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		if (hdev->phys_connector[i] > 0)
			output_len += snprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += snprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += snprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += snprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += snprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_lockup_detected,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};
#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
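/*
 * HPSA_NRESERVED_CMDS carves a slice out of the command pool for
 * aborts, driver-internal commands, and pass-through ioctls (the three
 * addends are defined in the driver headers), so internal operations
 * cannot be starved by the queued I/O path.
 */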
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
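/*
 * Completion-queue walk in brief: each reply-queue entry carries a
 * toggle bit in bit 0 that the controller flips on every pass through
 * the ring.  An entry whose toggle bit matches rq->wraparound is new;
 * a mismatch means the consumer has caught up and the queue is empty,
 * which is why wraparound is XOR-ed whenever current_entry wraps back
 * to 0.
 */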
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * table entry.
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}
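/*
 * Illustrative tag encoding: with a block fetch table entry of 3 for
 * this command's SG count, the tag becomes busaddr | 1 | (3 << 1),
 * i.e. low bits 0b0111 -- performant-mode bit set, fetch entry 3,
 * command type 0 -- matching the bit layout described above.
 */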
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
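/*
 * BMIC commands encode the opcode in CDB byte 0 and the BMIC
 * subcommand in CDB byte 6, so a firmware flash is recognized as a
 * BMIC_WRITE whose byte 6 is BMIC_FLASH_FIRMWARE.
 */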
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	else
		return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  If raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
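/*
 * Note the contrast with hpsa_scsi_add_entry() above: SCSI3ADDR_EQ
 * demands that all eight address bytes match, whereas the multi-lun
 * matching in hpsa_scsi_add_entry() masks bytes 4 and 5 before
 * comparing.
 */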
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first.
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded.  In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present.  And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case.
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes.
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
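/*
 * Example of the queue-depth aggregation above (illustrative): a
 * RAID 5 volume whose map spans four physical disks, each reporting a
 * queue_depth of 16, ends up with a logical queue_depth of
 * min(h->nr_cmds, 64); a degraded volume with a missing member is
 * instead pinned to 8 with ioaccel offload turned off.
 */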
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}
static void hpsa_remove_device(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else /* HBA */
		hpsa_remove_sas_device(device);
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;

	/*
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
	 */
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		return;
	}

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device)
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc = 0;

		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		rc = hpsa_add_device(h, added[i]);
		if (!rc)
			continue;
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
1909 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1910 * Assumes h->devlock is held.
1912 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1913 int bus, int target, int lun)
1916 struct hpsa_scsi_dev_t *sd;
1918 for (i = 0; i < h->ndevices; i++) {
1920 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1926 static int hpsa_slave_alloc(struct scsi_device *sdev)
1928 struct hpsa_scsi_dev_t *sd;
1929 unsigned long flags;
1930 struct ctlr_info *h;
1932 h = sdev_to_hba(sdev);
1933 spin_lock_irqsave(&h->devlock, flags);
1934 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
1935 struct scsi_target *starget;
1936 struct sas_rphy *rphy;
1938 starget = scsi_target(sdev);
1939 rphy = target_to_rphy(starget);
1940 sd = hpsa_find_device_by_sas_rphy(h, rphy);
1942 sd->target = sdev_id(sdev);
1943 sd->lun = sdev->lun;
1946 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1947 sdev_id(sdev), sdev->lun);
1949 if (sd && sd->expose_device) {
1950 atomic_set(&sd->ioaccel_cmds_out, 0);
1951 sdev->hostdata = sd;
1953 sdev->hostdata = NULL;
1954 spin_unlock_irqrestore(&h->devlock, flags);
1958 /* configure scsi device based on internal per-device structure */
1959 static int hpsa_slave_configure(struct scsi_device *sdev)
1961 struct hpsa_scsi_dev_t *sd;
1964 sd = sdev->hostdata;
1965 sdev->no_uld_attach = !sd || !sd->expose_device;
1968 queue_depth = sd->queue_depth != 0 ?
1969 sd->queue_depth : sdev->host->can_queue;
1971 queue_depth = sdev->host->can_queue;
1973 scsi_change_queue_depth(sdev, queue_depth);
1978 static void hpsa_slave_destroy(struct scsi_device *sdev)
1980 /* nothing to do. */
1983 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1987 if (!h->ioaccel2_cmd_sg_list)
1989 for (i = 0; i < h->nr_cmds; i++) {
1990 kfree(h->ioaccel2_cmd_sg_list[i]);
1991 h->ioaccel2_cmd_sg_list[i] = NULL;
1993 kfree(h->ioaccel2_cmd_sg_list);
1994 h->ioaccel2_cmd_sg_list = NULL;
1997 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2001 if (h->chainsize <= 0)
2004 h->ioaccel2_cmd_sg_list =
2005 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
2007 if (!h->ioaccel2_cmd_sg_list)
2009 for (i = 0; i < h->nr_cmds; i++) {
2010 h->ioaccel2_cmd_sg_list[i] =
2011 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
2012 h->maxsgentries, GFP_KERNEL);
2013 if (!h->ioaccel2_cmd_sg_list[i])
2019 hpsa_free_ioaccel2_sg_chain_blocks(h);
2023 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2027 if (!h->cmd_sg_list)
2029 for (i = 0; i < h->nr_cmds; i++) {
2030 kfree(h->cmd_sg_list[i]);
2031 h->cmd_sg_list[i] = NULL;
2033 kfree(h->cmd_sg_list);
2034 h->cmd_sg_list = NULL;
2037 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2041 if (h->chainsize <= 0)
2044 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2046 if (!h->cmd_sg_list) {
2047 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
2050 for (i = 0; i < h->nr_cmds; i++) {
2051 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2052 h->chainsize, GFP_KERNEL);
2053 if (!h->cmd_sg_list[i]) {
2054 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
2061 hpsa_free_sg_chain_blocks(h);
2065 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2066 struct io_accel2_cmd *cp, struct CommandList *c)
2068 struct ioaccel2_sg_element *chain_block;
2072 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2073 chain_size = le32_to_cpu(cp->sg[0].length);
2074 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2076 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2077 /* prevent subsequent unmapping */
2078 cp->sg->address = 0;
2081 cp->sg->address = cpu_to_le64(temp64);
2085 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2086 struct io_accel2_cmd *cp)
2088 struct ioaccel2_sg_element *chain_sg;
2093 temp64 = le64_to_cpu(chain_sg->address);
2094 chain_size = le32_to_cpu(cp->sg[0].length);
2095 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2098 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2099 struct CommandList *c)
2101 struct SGDescriptor *chain_sg, *chain_block;
2105 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2106 chain_block = h->cmd_sg_list[c->cmdindex];
2107 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2108 chain_len = sizeof(*chain_sg) *
2109 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2110 chain_sg->Len = cpu_to_le32(chain_len);
2111 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2113 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2114 /* prevent subsequent unmapping */
2115 chain_sg->Addr = cpu_to_le64(0);
2118 chain_sg->Addr = cpu_to_le64(temp64);
2122 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2123 struct CommandList *c)
2125 struct SGDescriptor *chain_sg;
2127 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2130 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2131 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2132 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2136 /* Decode the various types of errors on ioaccel2 path.
2137 * Return 1 for any error that should generate a RAID path retry.
2138 * Return 0 for errors that don't require a RAID path retry.
2140 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2141 struct CommandList *c,
2142 struct scsi_cmnd *cmd,
2143 struct io_accel2_cmd *c2)
2147 u32 ioaccel2_resid = 0;
2149 switch (c2->error_data.serv_response) {
2150 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2151 switch (c2->error_data.status) {
2152 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2154 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2155 cmd->result |= SAM_STAT_CHECK_CONDITION;
2156 if (c2->error_data.data_present !=
2157 IOACCEL2_SENSE_DATA_PRESENT) {
2158 memset(cmd->sense_buffer, 0,
2159 SCSI_SENSE_BUFFERSIZE);
2162 /* copy the sense data */
2163 data_len = c2->error_data.sense_data_len;
2164 if (data_len > SCSI_SENSE_BUFFERSIZE)
2165 data_len = SCSI_SENSE_BUFFERSIZE;
2166 if (data_len > sizeof(c2->error_data.sense_data_buff))
2168 sizeof(c2->error_data.sense_data_buff);
2169 memcpy(cmd->sense_buffer,
2170 c2->error_data.sense_data_buff, data_len);
2173 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2176 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2179 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2182 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2190 case IOACCEL2_SERV_RESPONSE_FAILURE:
2191 switch (c2->error_data.status) {
2192 case IOACCEL2_STATUS_SR_IO_ERROR:
2193 case IOACCEL2_STATUS_SR_IO_ABORTED:
2194 case IOACCEL2_STATUS_SR_OVERRUN:
2197 case IOACCEL2_STATUS_SR_UNDERRUN:
2198 cmd->result = (DID_OK << 16); /* host byte */
2199 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2200 ioaccel2_resid = get_unaligned_le32(
2201 &c2->error_data.resid_cnt[0]);
2202 scsi_set_resid(cmd, ioaccel2_resid);
2204 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2205 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2206 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2207 /* We will get an event from ctlr to trigger rescan */
2214 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2216 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2218 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2221 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2228 return retry; /* retry on raid path? */
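/*
 * A non-zero return from handle_ioaccel_mode2_error() means "resubmit
 * down the normal RAID path"; the caller pattern (a sketch matching
 * process_ioaccel2_completion() below) is:
 *
 *	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
 *		return hpsa_retry_cmd(h, c);
 *	return hpsa_cmd_free_and_done(h, c, cmd);
 */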
2231 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2232 struct CommandList *c)
2234 bool do_wake = false;
2237 * Prevent the following race in the abort handler:
2239 * 1. LLD is requested to abort a SCSI command
2240 * 2. The SCSI command completes
2241 * 3. The struct CommandList associated with step 2 is made available
2242 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2243 * 5. Abort handler follows scsi_cmnd->host_scribble and
2244 * finds the struct CommandList and tries to abort it
2245 * Now we have aborted the wrong command.
2247 * Reset c->scsi_cmd here so that the abort or reset handler will know
2248 * this command has completed. Then, check to see if the handler is
2249 * waiting for this command, and, if so, wake it.
2251 c->scsi_cmd = SCSI_CMD_IDLE;
2252 mb(); /* Declare command idle before checking for pending events. */
2253 if (c->abort_pending) {
2255 c->abort_pending = false;
2257 if (c->reset_pending) {
2258 unsigned long flags;
2259 struct hpsa_scsi_dev_t *dev;
2262 * There appears to be a reset pending; take the lock and
2263 * reconfirm. If the reset is still pending, decrement the count of
2264 * outstanding commands and wake the reset command if this is the last one.
2266 spin_lock_irqsave(&h->lock, flags);
2267 dev = c->reset_pending; /* Re-fetch under the lock. */
2268 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2270 c->reset_pending = NULL;
2271 spin_unlock_irqrestore(&h->lock, flags);
2275 wake_up_all(&h->event_sync_wait_queue);
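/*
 * The wake_up_all() above pairs with the reset path in hpsa_do_reset(),
 * which sleeps until every command it tagged with reset_pending has
 * resolved -- roughly (see hpsa_do_reset() below):
 *
 *	wait_event(h->event_sync_wait_queue,
 *		   atomic_read(&dev->reset_cmds_out) == 0 ||
 *		   lockup_detected(h));
 */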
2278 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2279 struct CommandList *c)
2281 hpsa_cmd_resolve_events(h, c);
2282 cmd_tagged_free(h, c);
2285 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2286 struct CommandList *c, struct scsi_cmnd *cmd)
2288 hpsa_cmd_resolve_and_free(h, c);
2289 cmd->scsi_done(cmd);
2292 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2294 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2295 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2298 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2300 cmd->result = DID_ABORT << 16;
2303 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2304 struct scsi_cmnd *cmd)
2306 hpsa_set_scsi_cmd_aborted(cmd);
2307 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2308 c->Request.CDB, c->err_info->ScsiStatus);
2309 hpsa_cmd_resolve_and_free(h, c);
2312 static void process_ioaccel2_completion(struct ctlr_info *h,
2313 struct CommandList *c, struct scsi_cmnd *cmd,
2314 struct hpsa_scsi_dev_t *dev)
2316 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2318 /* check for good status */
2319 if (likely(c2->error_data.serv_response == 0 &&
2320 c2->error_data.status == 0))
2321 return hpsa_cmd_free_and_done(h, c, cmd);
2324 * Any RAID offload error results in a retry which will use
2325 * the normal I/O path so the controller can handle whatever is wrong.
2328 if (is_logical_device(dev) &&
2329 c2->error_data.serv_response ==
2330 IOACCEL2_SERV_RESPONSE_FAILURE) {
2331 if (c2->error_data.status ==
2332 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2333 dev->offload_enabled = 0;
2335 return hpsa_retry_cmd(h, c);
2338 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
2339 return hpsa_retry_cmd(h, c);
2341 return hpsa_cmd_free_and_done(h, c, cmd);
2344 /* Returns 0 on success, < 0 otherwise. */
2345 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2346 struct CommandList *cp)
2348 u8 tmf_status = cp->err_info->ScsiStatus;
2350 switch (tmf_status) {
2351 case CISS_TMF_COMPLETE:
2353 * CISS_TMF_COMPLETE never happens; instead,
2354 * ei->CommandStatus == 0 for this case.
2356 case CISS_TMF_SUCCESS:
2358 case CISS_TMF_INVALID_FRAME:
2359 case CISS_TMF_NOT_SUPPORTED:
2360 case CISS_TMF_FAILED:
2361 case CISS_TMF_WRONG_LUN:
2362 case CISS_TMF_OVERLAPPED_TAG:
2365 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2372 static void complete_scsi_command(struct CommandList *cp)
2374 struct scsi_cmnd *cmd;
2375 struct ctlr_info *h;
2376 struct ErrorInfo *ei;
2377 struct hpsa_scsi_dev_t *dev;
2378 struct io_accel2_cmd *c2;
2381 u8 asc; /* additional sense code */
2382 u8 ascq; /* additional sense code qualifier */
2383 unsigned long sense_data_size;
2388 dev = cmd->device->hostdata;
2389 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2391 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2392 if ((cp->cmd_type == CMD_SCSI) &&
2393 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2394 hpsa_unmap_sg_chain_block(h, cp);
2396 if ((cp->cmd_type == CMD_IOACCEL2) &&
2397 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2398 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2400 cmd->result = (DID_OK << 16); /* host byte */
2401 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2403 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2404 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2407 * We check for lockup status here as it may be set for
2408 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2409 * fail_all_outstanding_cmds()
2411 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2412 /* DID_NO_CONNECT will prevent a retry */
2413 cmd->result = DID_NO_CONNECT << 16;
2414 return hpsa_cmd_free_and_done(h, cp, cmd);
2417 if ((unlikely(hpsa_is_pending_event(cp)))) {
2418 if (cp->reset_pending)
2419 return hpsa_cmd_resolve_and_free(h, cp);
2420 if (cp->abort_pending)
2421 return hpsa_cmd_abort_and_free(h, cp, cmd);
2424 if (cp->cmd_type == CMD_IOACCEL2)
2425 return process_ioaccel2_completion(h, cp, cmd, dev);
2427 scsi_set_resid(cmd, ei->ResidualCnt);
2428 if (ei->CommandStatus == 0)
2429 return hpsa_cmd_free_and_done(h, cp, cmd);
2431 /* For I/O accelerator commands, copy over some fields to the normal
2432 * CISS header used below for error handling.
2434 if (cp->cmd_type == CMD_IOACCEL1) {
2435 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2436 cp->Header.SGList = scsi_sg_count(cmd);
2437 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2438 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2439 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2440 cp->Header.tag = c->tag;
2441 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2442 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2444 /* Any RAID offload error results in a retry which will use
2445 * the normal I/O path so the controller can handle whatever is wrong.
2448 if (is_logical_device(dev)) {
2449 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2450 dev->offload_enabled = 0;
2451 return hpsa_retry_cmd(h, cp);
2455 /* an error has occurred */
2456 switch (ei->CommandStatus) {
2458 case CMD_TARGET_STATUS:
2459 cmd->result |= ei->ScsiStatus;
2460 /* copy the sense data */
2461 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2462 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2464 sense_data_size = sizeof(ei->SenseInfo);
2465 if (ei->SenseLen < sense_data_size)
2466 sense_data_size = ei->SenseLen;
2467 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2469 decode_sense_data(ei->SenseInfo, sense_data_size,
2470 &sense_key, &asc, &ascq);
2471 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2472 if (sense_key == ABORTED_COMMAND) {
2473 cmd->result |= DID_SOFT_ERROR << 16;
2478 /* Problem was not a check condition;
2479 * pass it up to the upper layers...
2481 if (ei->ScsiStatus) {
2482 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2483 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2484 "Returning result: 0x%x\n",
2486 sense_key, asc, ascq,
2488 } else { /* scsi status is zero??? How??? */
2489 dev_warn(&h->pdev->dev,
2490 "cp %p SCSI status was 0. Returning no connection.\n", cp);
2492 /* Ordinarily, this case should never happen,
2493 * but there is a bug in some released firmware
2494 * revisions that allows it to happen if, for
2495 * example, a 4100 backplane loses power and
2496 * the tape drive is in it. We assume that
2497 * it's a fatal error of some kind because we
2498 * can't show that it wasn't. We will make it
2499 * look like selection timeout since that is
2500 * the most common reason for this to occur,
2501 * and it's severe enough.
2504 cmd->result = DID_NO_CONNECT << 16;
2508 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2510 case CMD_DATA_OVERRUN:
2511 dev_warn(&h->pdev->dev,
2512 "CDB %16phN data overrun\n", cp->Request.CDB);
2515 /* print_bytes(cp, sizeof(*cp), 1, 0); */
2517 /* We get CMD_INVALID if you address a non-existent device
2518 * instead of a selection timeout (no response). You will
2519 * see this if you yank out a drive, then try to access it.
2520 * This is kind of a shame because it means that any other
2521 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2522 * missing target. */
2523 cmd->result = DID_NO_CONNECT << 16;
2526 case CMD_PROTOCOL_ERR:
2527 cmd->result = DID_ERROR << 16;
2528 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2531 case CMD_HARDWARE_ERR:
2532 cmd->result = DID_ERROR << 16;
2533 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2536 case CMD_CONNECTION_LOST:
2537 cmd->result = DID_ERROR << 16;
2538 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2542 /* Return now to avoid calling scsi_done(). */
2543 return hpsa_cmd_abort_and_free(h, cp, cmd);
2544 case CMD_ABORT_FAILED:
2545 cmd->result = DID_ERROR << 16;
2546 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2549 case CMD_UNSOLICITED_ABORT:
2550 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2551 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2555 cmd->result = DID_TIME_OUT << 16;
2556 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2559 case CMD_UNABORTABLE:
2560 cmd->result = DID_ERROR << 16;
2561 dev_warn(&h->pdev->dev, "Command unabortable\n");
2563 case CMD_TMF_STATUS:
2564 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2565 cmd->result = DID_ERROR << 16;
2567 case CMD_IOACCEL_DISABLED:
2568 /* This only handles the direct pass-through case since RAID
2569 * offload is handled above. Just attempt a retry.
2571 cmd->result = DID_SOFT_ERROR << 16;
2572 dev_warn(&h->pdev->dev,
2573 "cp %p had HP SSD Smart Path error\n", cp);
2576 cmd->result = DID_ERROR << 16;
2577 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2578 cp, ei->CommandStatus);
2581 return hpsa_cmd_free_and_done(h, cp, cmd);
2584 static void hpsa_pci_unmap(struct pci_dev *pdev,
2585 struct CommandList *c, int sg_used, int data_direction)
2589 for (i = 0; i < sg_used; i++)
2590 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2591 le32_to_cpu(c->SG[i].Len),
2595 static int hpsa_map_one(struct pci_dev *pdev,
2596 struct CommandList *cp,
2603 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2604 cp->Header.SGList = 0;
2605 cp->Header.SGTotal = cpu_to_le16(0);
2609 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2610 if (dma_mapping_error(&pdev->dev, addr64)) {
2611 /* Prevent subsequent unmap of something never mapped */
2612 cp->Header.SGList = 0;
2613 cp->Header.SGTotal = cpu_to_le16(0);
2616 cp->SG[0].Addr = cpu_to_le64(addr64);
2617 cp->SG[0].Len = cpu_to_le32(buflen);
2618 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2619 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2620 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
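/*
 * Illustrative use of hpsa_map_one() for a driver-initiated command,
 * mirroring the fill_cmd()/simple-command pattern used elsewhere in
 * this file (a sketch only; buf/bufsize are whatever the caller
 * prepared, and error handling is abbreviated):
 *
 *	if (hpsa_map_one(h->pdev, c, buf, bufsize, PCI_DMA_FROMDEVICE))
 *		return -1;
 *	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
 *			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
 *
 * On mapping failure SGList/SGTotal are already zeroed, so nothing is
 * ever unmapped (via hpsa_pci_unmap() in the retry helper) that was
 * not first mapped.
 */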
2624 #define NO_TIMEOUT ((unsigned long) -1)
2625 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2626 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2627 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2629 DECLARE_COMPLETION_ONSTACK(wait);
2632 __enqueue_cmd_and_start_io(h, c, reply_queue);
2633 if (timeout_msecs == NO_TIMEOUT) {
2634 /* TODO: get rid of this no-timeout thing */
2635 wait_for_completion_io(&wait);
2638 if (!wait_for_completion_io_timeout(&wait,
2639 msecs_to_jiffies(timeout_msecs))) {
2640 dev_warn(&h->pdev->dev, "Command timed out.\n");
2646 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2647 int reply_queue, unsigned long timeout_msecs)
2649 if (unlikely(lockup_detected(h))) {
2650 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2653 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
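/*
 * Note the lockup short-circuit above: once lockup_detected() fires,
 * commands are stamped CMD_CTLR_LOCKUP rather than being issued to
 * dead hardware. complete_scsi_command() maps the same status to
 * DID_NO_CONNECT (see the handling earlier in this file), which keeps
 * the midlayer from retrying against a locked-up controller.
 */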
2656 static u32 lockup_detected(struct ctlr_info *h)
2659 u32 rc, *lockup_detected;
2662 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2663 rc = *lockup_detected;
2668 #define MAX_DRIVER_CMD_RETRIES 25
2669 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2670 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2672 int backoff_time = 10, retry_count = 0;
2676 memset(c->err_info, 0, sizeof(*c->err_info));
2677 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2682 if (retry_count > 3) {
2683 msleep(backoff_time);
2684 if (backoff_time < 1000)
2687 } while ((check_for_unit_attention(h, c) ||
2688 check_for_busy(h, c)) &&
2689 retry_count <= MAX_DRIVER_CMD_RETRIES);
2690 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2691 if (retry_count > MAX_DRIVER_CMD_RETRIES)
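/*
 * Backoff schedule for the retry loop above (illustrative; sleeping
 * starts once retry_count exceeds 3 and the interval appears to
 * double, capped at one second):
 *
 *	attempt:  1-4    5     6     7     8    ...
 *	sleep:    none   10ms  20ms  40ms  80ms ... capped at 1000ms
 */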
2696 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2697 struct CommandList *c)
2699 const u8 *cdb = c->Request.CDB;
2700 const u8 *lun = c->Header.LUN.LunAddrBytes;
2702 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2703 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2704 txt, lun[0], lun[1], lun[2], lun[3],
2705 lun[4], lun[5], lun[6], lun[7],
2706 cdb[0], cdb[1], cdb[2], cdb[3],
2707 cdb[4], cdb[5], cdb[6], cdb[7],
2708 cdb[8], cdb[9], cdb[10], cdb[11],
2709 cdb[12], cdb[13], cdb[14], cdb[15]);
2712 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2713 struct CommandList *cp)
2715 const struct ErrorInfo *ei = cp->err_info;
2716 struct device *d = &cp->h->pdev->dev;
2717 u8 sense_key, asc, ascq;
2720 switch (ei->CommandStatus) {
2721 case CMD_TARGET_STATUS:
2722 if (ei->SenseLen > sizeof(ei->SenseInfo))
2723 sense_len = sizeof(ei->SenseInfo);
2725 sense_len = ei->SenseLen;
2726 decode_sense_data(ei->SenseInfo, sense_len,
2727 &sense_key, &asc, &ascq);
2728 hpsa_print_cmd(h, "SCSI status", cp);
2729 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2730 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2731 sense_key, asc, ascq);
2733 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2734 if (ei->ScsiStatus == 0)
2735 dev_warn(d,
2736 "SCSI status is abnormally zero. (probably indicates selection timeout reported incorrectly due to a known firmware bug, circa July, 2001.)\n");
2740 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2742 case CMD_DATA_OVERRUN:
2743 hpsa_print_cmd(h, "overrun condition", cp);
2746 /* controller unfortunately reports SCSI passthrus
2747 * to non-existent targets as invalid commands.
2749 hpsa_print_cmd(h, "invalid command", cp);
2750 dev_warn(d, "probably means device no longer present\n");
2753 case CMD_PROTOCOL_ERR:
2754 hpsa_print_cmd(h, "protocol error", cp);
2756 case CMD_HARDWARE_ERR:
2757 hpsa_print_cmd(h, "hardware error", cp);
2759 case CMD_CONNECTION_LOST:
2760 hpsa_print_cmd(h, "connection lost", cp);
2763 hpsa_print_cmd(h, "aborted", cp);
2765 case CMD_ABORT_FAILED:
2766 hpsa_print_cmd(h, "abort failed", cp);
2768 case CMD_UNSOLICITED_ABORT:
2769 hpsa_print_cmd(h, "unsolicited abort", cp);
2772 hpsa_print_cmd(h, "timed out", cp);
2774 case CMD_UNABORTABLE:
2775 hpsa_print_cmd(h, "unabortable", cp);
2777 case CMD_CTLR_LOCKUP:
2778 hpsa_print_cmd(h, "controller lockup detected", cp);
2781 hpsa_print_cmd(h, "unknown status", cp);
2782 dev_warn(d, "Unknown command status %x\n",
2787 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2788 u16 page, unsigned char *buf,
2789 unsigned char bufsize)
2792 struct CommandList *c;
2793 struct ErrorInfo *ei;
2797 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2798 page, scsi3addr, TYPE_CMD)) {
2802 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2803 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2807 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2808 hpsa_scsi_interpret_error(h, c);
2816 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2817 u8 reset_type, int reply_queue)
2820 struct CommandList *c;
2821 struct ErrorInfo *ei;
2826 /* fill_cmd can't fail here, no data buffer to map. */
2827 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2828 scsi3addr, TYPE_MSG);
2829 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2831 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2834 /* no unmap needed here because no data xfer. */
2837 if (ei->CommandStatus != 0) {
2838 hpsa_scsi_interpret_error(h, c);
2846 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2847 struct hpsa_scsi_dev_t *dev,
2848 unsigned char *scsi3addr)
2852 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2853 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2855 if (hpsa_is_cmd_idle(c))
2858 switch (c->cmd_type) {
2860 case CMD_IOCTL_PEND:
2861 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2862 sizeof(c->Header.LUN.LunAddrBytes));
2867 if (c->phys_disk == dev) {
2868 /* HBA mode match */
2871 /* Possible RAID mode -- check each phys dev. */
2872 /* FIXME: Do we need to take out a lock here? If
2873 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
2875 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2876 /* FIXME: an alternate test might be
2878 * match = dev->phys_disk[i]->ioaccel_handle
2879 * == c2->scsi_nexus; */
2880 match = dev->phys_disk[i] == c->phys_disk;
2886 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2887 match = dev->phys_disk[i]->ioaccel_handle ==
2888 le32_to_cpu(ac->it_nexus);
2892 case 0: /* The command is in the middle of being initialized. */
2897 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
2905 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2906 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
2911 /* We can really only handle one reset at a time */
2912 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
2913 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
2917 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
2919 for (i = 0; i < h->nr_cmds; i++) {
2920 struct CommandList *c = h->cmd_pool + i;
2921 int refcount = atomic_inc_return(&c->refcount);
2923 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
2924 unsigned long flags;
2927 * Mark the target command as having a reset pending,
2928 * then take the lock so that the command cannot complete
2929 * while we're considering it. If the command is not
2930 * idle then count it; otherwise revoke the event.
2932 c->reset_pending = dev;
2933 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
2934 if (!hpsa_is_cmd_idle(c))
2935 atomic_inc(&dev->reset_cmds_out);
2937 c->reset_pending = NULL;
2938 spin_unlock_irqrestore(&h->lock, flags);
2944 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
2946 wait_event(h->event_sync_wait_queue,
2947 atomic_read(&dev->reset_cmds_out) == 0 ||
2948 lockup_detected(h));
2950 if (unlikely(lockup_detected(h))) {
2951 dev_warn(&h->pdev->dev,
2952 "Controller lockup detected during reset wait\n");
2957 atomic_set(&dev->reset_cmds_out, 0);
2959 mutex_unlock(&h->reset_mutex);
2963 static void hpsa_get_raid_level(struct ctlr_info *h,
2964 unsigned char *scsi3addr, unsigned char *raid_level)
2969 *raid_level = RAID_UNKNOWN;
2970 buf = kzalloc(64, GFP_KERNEL);
2973 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2975 *raid_level = buf[8];
2976 if (*raid_level > RAID_UNKNOWN)
2977 *raid_level = RAID_UNKNOWN;
2982 #define HPSA_MAP_DEBUG
2983 #ifdef HPSA_MAP_DEBUG
2984 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2985 struct raid_map_data *map_buff)
2987 struct raid_map_disk_data *dd = &map_buff->data[0];
2989 u16 map_cnt, row_cnt, disks_per_row;
2994 /* Show details only if debugging has been activated. */
2995 if (h->raid_offload_debug < 2)
2998 dev_info(&h->pdev->dev, "structure_size = %u\n",
2999 le32_to_cpu(map_buff->structure_size));
3000 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3001 le32_to_cpu(map_buff->volume_blk_size));
3002 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3003 le64_to_cpu(map_buff->volume_blk_cnt));
3004 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3005 map_buff->phys_blk_shift);
3006 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3007 map_buff->parity_rotation_shift);
3008 dev_info(&h->pdev->dev, "strip_size = %u\n",
3009 le16_to_cpu(map_buff->strip_size));
3010 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3011 le64_to_cpu(map_buff->disk_starting_blk));
3012 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3013 le64_to_cpu(map_buff->disk_blk_cnt));
3014 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3015 le16_to_cpu(map_buff->data_disks_per_row));
3016 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3017 le16_to_cpu(map_buff->metadata_disks_per_row));
3018 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3019 le16_to_cpu(map_buff->row_cnt));
3020 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3021 le16_to_cpu(map_buff->layout_map_count));
3022 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3023 le16_to_cpu(map_buff->flags));
3024 dev_info(&h->pdev->dev, "encryption = %s\n",
3025 le16_to_cpu(map_buff->flags) &
3026 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3027 dev_info(&h->pdev->dev, "dekindex = %u\n",
3028 le16_to_cpu(map_buff->dekindex));
3029 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3030 for (map = 0; map < map_cnt; map++) {
3031 dev_info(&h->pdev->dev, "Map%u:\n", map);
3032 row_cnt = le16_to_cpu(map_buff->row_cnt);
3033 for (row = 0; row < row_cnt; row++) {
3034 dev_info(&h->pdev->dev, " Row%u:\n", row);
3036 le16_to_cpu(map_buff->data_disks_per_row);
3037 for (col = 0; col < disks_per_row; col++, dd++)
3038 dev_info(&h->pdev->dev,
3039 " D%02u: h=0x%04x xor=%u,%u\n",
3040 col, dd->ioaccel_handle,
3041 dd->xor_mult[0], dd->xor_mult[1]);
3043 le16_to_cpu(map_buff->metadata_disks_per_row);
3044 for (col = 0; col < disks_per_row; col++, dd++)
3045 dev_info(&h->pdev->dev,
3046 " M%02u: h=0x%04x xor=%u,%u\n",
3047 col, dd->ioaccel_handle,
3048 dd->xor_mult[0], dd->xor_mult[1]);
3053 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3054 __attribute__((unused)) int rc,
3055 __attribute__((unused)) struct raid_map_data *map_buff)
3060 static int hpsa_get_raid_map(struct ctlr_info *h,
3061 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3064 struct CommandList *c;
3065 struct ErrorInfo *ei;
3069 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3070 sizeof(this_device->raid_map), 0,
3071 scsi3addr, TYPE_CMD)) {
3072 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3076 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3077 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3081 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3082 hpsa_scsi_interpret_error(h, c);
3088 /* @todo in the future, dynamically allocate RAID map memory */
3089 if (le32_to_cpu(this_device->raid_map.structure_size) >
3090 sizeof(this_device->raid_map)) {
3091 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3094 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3101 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3102 unsigned char scsi3addr[], u16 bmic_device_index,
3103 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3106 struct CommandList *c;
3107 struct ErrorInfo *ei;
3111 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3112 0, RAID_CTLR_LUNID, TYPE_CMD);
3116 c->Request.CDB[2] = bmic_device_index & 0xff;
3117 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3119 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3120 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3124 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3125 hpsa_scsi_interpret_error(h, c);
3133 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3134 struct bmic_identify_controller *buf, size_t bufsize)
3137 struct CommandList *c;
3138 struct ErrorInfo *ei;
3142 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3143 0, RAID_CTLR_LUNID, TYPE_CMD);
3147 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3148 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3152 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3153 hpsa_scsi_interpret_error(h, c);
3161 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3162 unsigned char scsi3addr[], u16 bmic_device_index,
3163 struct bmic_identify_physical_device *buf, size_t bufsize)
3166 struct CommandList *c;
3167 struct ErrorInfo *ei;
3170 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3171 0, RAID_CTLR_LUNID, TYPE_CMD);
3175 c->Request.CDB[2] = bmic_device_index & 0xff;
3176 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3178 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3181 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3182 hpsa_scsi_interpret_error(h, c);
3191 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3192 unsigned char *scsi3addr)
3194 struct ReportExtendedLUNdata *physdev;
3199 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3203 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3204 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3208 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3210 for (i = 0; i < nphysicals; i++)
3211 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3212 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3221 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3222 struct hpsa_scsi_dev_t *dev)
3227 if (is_hba_lunid(scsi3addr)) {
3228 struct bmic_sense_subsystem_info *ssi;
3230 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3232 dev_warn(&h->pdev->dev,
3233 "%s: out of memory\n", __func__);
3237 rc = hpsa_bmic_sense_subsystem_information(h,
3238 scsi3addr, 0, ssi, sizeof(*ssi));
3240 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3241 h->sas_address = sa;
3246 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3248 dev->sas_address = sa;
3251 /* Determine whether the device supports a given VPD page */
3252 static int hpsa_vpd_page_supported(struct ctlr_info *h,
3253 unsigned char scsi3addr[], u8 page)
3258 unsigned char *buf, bufsize;
3260 buf = kzalloc(256, GFP_KERNEL);
3264 /* Get the size of the page list first */
3265 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3266 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3267 buf, HPSA_VPD_HEADER_SZ);
3269 goto exit_unsupported;
3271 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3272 bufsize = pages + HPSA_VPD_HEADER_SZ;
3276 /* Get the whole VPD page list */
3277 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3278 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3281 goto exit_unsupported;
3284 for (i = 1; i <= pages; i++)
3285 if (buf[3 + i] == page)
3286 goto exit_supported;
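/*
 * Layout of the Supported VPD Pages response (SPC INQUIRY page 0x00)
 * as consumed above -- a sketch of the bytes this function relies on:
 *
 *	buf[0]   peripheral qualifier / device type
 *	buf[1]   page code (0x00)
 *	buf[3]   length of the page-code list ("pages")
 *	buf[4].. supported page codes, one per byte
 *
 * hence the probe of buf[3 + i] for i in 1..pages.
 */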
3295 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3296 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3302 this_device->offload_config = 0;
3303 this_device->offload_enabled = 0;
3304 this_device->offload_to_be_enabled = 0;
3306 buf = kzalloc(64, GFP_KERNEL);
3309 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3311 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3312 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3316 #define IOACCEL_STATUS_BYTE 4
3317 #define OFFLOAD_CONFIGURED_BIT 0x01
3318 #define OFFLOAD_ENABLED_BIT 0x02
3319 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3320 this_device->offload_config =
3321 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3322 if (this_device->offload_config) {
3323 this_device->offload_enabled =
3324 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3325 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3326 this_device->offload_enabled = 0;
3328 this_device->offload_to_be_enabled = this_device->offload_enabled;
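/*
 * Example decode of the ioaccel status byte fetched above (value is
 * illustrative): ioaccel_status == 0x03 has both OFFLOAD_CONFIGURED_BIT
 * and OFFLOAD_ENABLED_BIT set, so offload_config and offload_enabled
 * both become 1 -- unless hpsa_get_raid_map() fails, which forces
 * offload_enabled back to 0 before offload_to_be_enabled is latched.
 */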
3334 /* Get the device id from inquiry page 0x83 */
3335 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3336 unsigned char *device_id, int index, int buflen)
3343 buf = kzalloc(64, GFP_KERNEL);
3346 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
3348 memcpy(device_id, &buf[index], buflen);
3355 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3356 void *buf, int bufsize,
3357 int extended_response)
3360 struct CommandList *c;
3361 unsigned char scsi3addr[8];
3362 struct ErrorInfo *ei;
3366 /* address the controller */
3367 memset(scsi3addr, 0, sizeof(scsi3addr));
3368 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3369 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3373 if (extended_response)
3374 c->Request.CDB[1] = extended_response;
3375 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3376 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3380 if (ei->CommandStatus != 0 &&
3381 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3382 hpsa_scsi_interpret_error(h, c);
3385 struct ReportLUNdata *rld = buf;
3387 if (rld->extended_response_flag != extended_response) {
3388 dev_err(&h->pdev->dev,
3389 "report luns requested format %u, got %u\n",
3391 rld->extended_response_flag);
3400 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3401 struct ReportExtendedLUNdata *buf, int bufsize)
3403 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3404 HPSA_REPORT_PHYS_EXTENDED);
3407 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3408 struct ReportLUNdata *buf, int bufsize)
3410 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3413 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3414 int bus, int target, int lun)
3417 device->target = target;
3421 /* Use VPD inquiry to get details of volume status */
3422 static int hpsa_get_volume_status(struct ctlr_info *h,
3423 unsigned char scsi3addr[])
3430 buf = kzalloc(64, GFP_KERNEL);
3432 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3434 /* Does controller have VPD for logical volume status? */
3435 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3438 /* Get the size of the VPD return buffer */
3439 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3440 buf, HPSA_VPD_HEADER_SZ);
3445 /* Now get the whole VPD buffer */
3446 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3447 buf, size + HPSA_VPD_HEADER_SZ);
3450 status = buf[4]; /* status byte */
3456 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3459 /* Determine offline status of a volume. Return either:
3462 * 0 (not offline), 0xff (offline for unknown reasons), or
3463 * # (integer code indicating one of several NOT READY states
3464 * describing why a volume is to be kept offline)
3466 static int hpsa_volume_offline(struct ctlr_info *h,
3467 unsigned char scsi3addr[])
3469 struct CommandList *c;
3470 unsigned char *sense;
3471 u8 sense_key, asc, ascq;
3476 #define ASC_LUN_NOT_READY 0x04
3477 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3478 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3482 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3483 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3488 sense = c->err_info->SenseInfo;
3489 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3490 sense_len = sizeof(c->err_info->SenseInfo);
3492 sense_len = c->err_info->SenseLen;
3493 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3494 cmd_status = c->err_info->CommandStatus;
3495 scsi_status = c->err_info->ScsiStatus;
3497 /* Is the volume 'not ready'? */
3498 if (cmd_status != CMD_TARGET_STATUS ||
3499 scsi_status != SAM_STAT_CHECK_CONDITION ||
3500 sense_key != NOT_READY ||
3501 asc != ASC_LUN_NOT_READY) {
3505 /* Determine the reason for not ready state */
3506 ldstat = hpsa_get_volume_status(h, scsi3addr);
3508 /* Keep volume offline in certain cases: */
3510 case HPSA_LV_UNDERGOING_ERASE:
3511 case HPSA_LV_NOT_AVAILABLE:
3512 case HPSA_LV_UNDERGOING_RPI:
3513 case HPSA_LV_PENDING_RPI:
3514 case HPSA_LV_ENCRYPTED_NO_KEY:
3515 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3516 case HPSA_LV_UNDERGOING_ENCRYPTION:
3517 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3518 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3520 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3521 /* If VPD status page isn't available,
3522 * use ASC/ASCQ to determine state
3524 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3525 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3535 * Find out if a logical device supports aborts by simply trying one.
3536 * Smart Array may claim not to support aborts on logical drives, but
3537 * if an MSA2000 is connected, the drives on that will be presented
3538 * by the Smart Array as logical drives, and aborts may be sent to
3539 * those devices successfully. So the simplest way to find out is
3540 * to simply try an abort and see how the device responds.
3542 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3543 unsigned char *scsi3addr)
3545 struct CommandList *c;
3546 struct ErrorInfo *ei;
3549 u64 tag = (u64) -1; /* bogus tag */
3551 /* Assume that physical devices support aborts */
3552 if (!is_logical_dev_addr_mode(scsi3addr))
3557 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3558 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3559 /* no unmap needed here because no data xfer. */
3561 switch (ei->CommandStatus) {
3565 case CMD_UNABORTABLE:
3566 case CMD_ABORT_FAILED:
3569 case CMD_TMF_STATUS:
3570 rc = hpsa_evaluate_tmf_status(h, c);
3580 static void sanitize_inquiry_string(unsigned char *s, int len)
3582 bool terminated = false;
3584 for (; len > 0; (--len, ++s)) {
3587 if (terminated || *s < 0x20 || *s > 0x7e)
3592 static int hpsa_update_device_info(struct ctlr_info *h,
3593 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3594 unsigned char *is_OBDR_device)
3597 #define OBDR_SIG_OFFSET 43
3598 #define OBDR_TAPE_SIG "$DR-10"
3599 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3600 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3602 unsigned char *inq_buff;
3603 unsigned char *obdr_sig;
3606 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3612 /* Do an inquiry to the device to see what it is. */
3613 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3614 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3615 /* Inquiry failed (msg printed already) */
3616 dev_err(&h->pdev->dev,
3617 "hpsa_update_device_info: inquiry failed\n");
3622 sanitize_inquiry_string(&inq_buff[8], 8);
3623 sanitize_inquiry_string(&inq_buff[16], 16);
3625 this_device->devtype = (inq_buff[0] & 0x1f);
3626 memcpy(this_device->scsi3addr, scsi3addr, 8);
3627 memcpy(this_device->vendor, &inq_buff[8],
3628 sizeof(this_device->vendor));
3629 memcpy(this_device->model, &inq_buff[16],
3630 sizeof(this_device->model));
3631 memset(this_device->device_id, 0,
3632 sizeof(this_device->device_id));
3633 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3634 sizeof(this_device->device_id));
3636 if (this_device->devtype == TYPE_DISK &&
3637 is_logical_dev_addr_mode(scsi3addr)) {
3640 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3641 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3642 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3643 volume_offline = hpsa_volume_offline(h, scsi3addr);
3644 if (volume_offline < 0 || volume_offline > 0xff)
3645 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3646 this_device->volume_offline = volume_offline & 0xff;
3648 this_device->raid_level = RAID_UNKNOWN;
3649 this_device->offload_config = 0;
3650 this_device->offload_enabled = 0;
3651 this_device->offload_to_be_enabled = 0;
3652 this_device->hba_ioaccel_enabled = 0;
3653 this_device->volume_offline = 0;
3654 this_device->queue_depth = h->nr_cmds;
3657 if (is_OBDR_device) {
3658 /* See if this is a One-Button-Disaster-Recovery device
3659 * by looking for "$DR-10" at offset 43 in inquiry data.
3661 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3662 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3663 strncmp(obdr_sig, OBDR_TAPE_SIG,
3664 OBDR_SIG_LEN) == 0);
3674 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3675 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3677 unsigned long flags;
3680 * See if this device supports aborts. If we already know
3681 * the device, we already know whether it supports aborts;
3682 * otherwise we have to find out by trying one.
3684 spin_lock_irqsave(&h->devlock, flags);
3685 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3686 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3687 entry >= 0 && entry < h->ndevices) {
3688 dev->supports_aborts = h->dev[entry]->supports_aborts;
3689 spin_unlock_irqrestore(&h->devlock, flags);
3691 spin_unlock_irqrestore(&h->devlock, flags);
3692 dev->supports_aborts =
3693 hpsa_device_supports_aborts(h, scsi3addr);
3694 if (dev->supports_aborts < 0)
3695 dev->supports_aborts = 0;
3700 * Helper function to assign bus, target, lun mapping of devices.
3701 * Logical drive target and lun are assigned at this time, but
3702 * physical device lun and target assignment are deferred (assigned
3703 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3705 static void figure_bus_target_lun(struct ctlr_info *h,
3706 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3708 u32 lunid = get_unaligned_le32(lunaddrbytes);
3710 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3711 /* physical device, target and lun filled in later */
3712 if (is_hba_lunid(lunaddrbytes))
3713 hpsa_set_bus_target_lun(device,
3714 HPSA_HBA_BUS, 0, lunid & 0x3fff);
3716 /* defer target, lun assignment for physical devices */
3717 hpsa_set_bus_target_lun(device,
3718 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
3721 /* It's a logical device */
3722 if (device->external) {
3723 hpsa_set_bus_target_lun(device,
3724 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
3728 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
3734 * Get address of physical disk used for an ioaccel2 mode command:
3735 * 1. Extract ioaccel2 handle from the command.
3736 * 2. Find a matching ioaccel2 handle from list of physical disks.
3738 * Returns 1 and sets scsi3addr to the address of the matching physical
3739 * disk, or 0 if no matching physical disk was found.
3741 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3742 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3744 struct io_accel2_cmd *c2 =
3745 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3746 unsigned long flags;
3749 spin_lock_irqsave(&h->devlock, flags);
3750 for (i = 0; i < h->ndevices; i++)
3751 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3752 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3753 sizeof(h->dev[i]->scsi3addr));
3754 spin_unlock_irqrestore(&h->devlock, flags);
3757 spin_unlock_irqrestore(&h->devlock, flags);
3761 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
3762 int i, int nphysicals, int nlocal_logicals)
3764 /* In report logicals, local logicals are listed first,
3765 * then any externals.
3767 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3769 if (i == raid_ctlr_position)
3772 if (i < logicals_start)
3775 /* i is in logicals range, but still within local logicals */
3776 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
3779 return 1; /* it's an external lun */
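/*
 * Worked example (illustrative counts): with raid_ctlr_position == 0,
 * nphysicals == 3 and nlocal_logicals == 2, the flat index space is:
 *
 *	i == 0     the RAID controller itself       (returns 0)
 *	i == 1..3  physical devices                 (returns 0)
 *	i == 4..5  local logicals: i - 3 - 1 < 2    (returns 0)
 *	i >= 6     external luns                    (returns 1)
 */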
3783 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
3784 * logdev. The number of luns in physdev and logdev are returned in
3785 * *nphysicals and *nlogicals, respectively.
3786 * Returns 0 on success, -1 otherwise.
3788 static int hpsa_gather_lun_info(struct ctlr_info *h,
3789 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3790 struct ReportLUNdata *logdev, u32 *nlogicals)
3792 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3793 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3796 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3797 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3798 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3799 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3800 *nphysicals = HPSA_MAX_PHYS_LUN;
3802 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3803 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3806 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3807 /* Reject Logicals in excess of our max capability. */
3808 if (*nlogicals > HPSA_MAX_LUN) {
3809 dev_warn(&h->pdev->dev,
3810 "maximum logical LUNs (%d) exceeded. %d LUNs ignored.\n",
3811 HPSA_MAX_LUN, *nlogicals - HPSA_MAX_LUN);
3813 *nlogicals = HPSA_MAX_LUN;
3815 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3816 dev_warn(&h->pdev->dev,
3817 "maximum logical + physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3818 HPSA_MAX_PHYS_LUN,
3819 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3820 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
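/*
 * The divisors above come from the CISS report formats: an extended
 * physical LUN entry (struct ext_report_lun_entry) is 24 bytes -- an
 * 8-byte lunid, an 8-byte wwid, and 8 bytes of device attributes
 * including the ioaccel handle -- while a plain logical LUN entry is
 * just the 8-byte lunid; hence LUNListLength / 24 and / 8.
 */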
3825 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3826 int i, int nphysicals, int nlogicals,
3827 struct ReportExtendedLUNdata *physdev_list,
3828 struct ReportLUNdata *logdev_list)
3830 /* Helper function: figure out where the LUN ID info is coming from,
3831 * given index i, the lists of physical and logical devices, and where
3832 * in the list the raid controller is supposed to appear (first or last).
3835 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3836 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3838 if (i == raid_ctlr_position)
3839 return RAID_CTLR_LUNID;
3841 if (i < logicals_start)
3842 return &physdev_list->LUN[i -
3843 (raid_ctlr_position == 0)].lunid[0];
3845 if (i < last_device)
3846 return &logdev_list->LUN[i - nphysicals -
3847 (raid_ctlr_position == 0)][0];
3852 /* get physical drive ioaccel handle and queue depth */
3853 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3854 struct hpsa_scsi_dev_t *dev,
3855 struct ReportExtendedLUNdata *rlep, int rle_index,
3856 struct bmic_identify_physical_device *id_phys)
3859 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3861 dev->ioaccel_handle = rle->ioaccel_handle;
3862 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
3863 dev->hba_ioaccel_enabled = 1;
3864 memset(id_phys, 0, sizeof(*id_phys));
3865 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
3866 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
3869 /* Reserve space for FW operations */
3870 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3871 #define DRIVE_QUEUE_DEPTH 7
3873 le16_to_cpu(id_phys->current_queue_depth_limit) -
3874 DRIVE_CMDS_RESERVED_FOR_FW;
3876 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
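/*
 * Queue depth sketch with illustrative numbers: if BMIC reports a
 * current_queue_depth_limit of 32, the drive gets 32 - 2 = 30 (two
 * commands held back for firmware use); if the BMIC inquiry fails,
 * the conservative DRIVE_QUEUE_DEPTH of 7 is used instead. Either
 * way, hpsa_slave_configure() later feeds this value to
 * scsi_change_queue_depth().
 */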
3879 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
3880 struct ReportExtendedLUNdata *rlep, int rle_index,
3881 struct bmic_identify_physical_device *id_phys)
3883 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3885 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
3886 this_device->hba_ioaccel_enabled = 1;
3888 memcpy(&this_device->active_path_index,
3889 &id_phys->active_path_number,
3890 sizeof(this_device->active_path_index));
3891 memcpy(&this_device->path_map,
3892 &id_phys->redundant_path_present_map,
3893 sizeof(this_device->path_map));
3894 memcpy(&this_device->box,
3895 &id_phys->alternate_paths_phys_box_on_port,
3896 sizeof(this_device->box));
3897 memcpy(&this_device->phys_connector,
3898 &id_phys->alternate_paths_phys_connector,
3899 sizeof(this_device->phys_connector));
3900 memcpy(&this_device->bay,
3901 &id_phys->phys_bay_in_box,
3902 sizeof(this_device->bay));
3905 /* get number of local logical disks. */
3906 static int hpsa_set_local_logical_count(struct ctlr_info *h,
3907 struct bmic_identify_controller *id_ctlr,
3913 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
3917 memset(id_ctlr, 0, sizeof(*id_ctlr));
3918 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
3920 if (id_ctlr->configured_logical_drive_count < 256)
3921 *nlocals = id_ctlr->configured_logical_drive_count;
3923 *nlocals = le16_to_cpu(
3924 id_ctlr->extended_logical_unit_count);
3931 static void hpsa_update_scsi_devices(struct ctlr_info *h)
3933 /* the idea here is we could get notified
3934 * that some devices have changed, so we do a report
3935 * physical luns and report logical luns cmd, and adjust
3936 * our list of devices accordingly.
3938 * The scsi3addrs of devices won't change so long as the
3939 * adapter is not reset. That means we can rescan and
3940 * tell which devices we already know about, vs. new
3941 * devices, vs. disappearing devices.
3943 struct ReportExtendedLUNdata *physdev_list = NULL;
3944 struct ReportLUNdata *logdev_list = NULL;
3945 struct bmic_identify_physical_device *id_phys = NULL;
3946 struct bmic_identify_controller *id_ctlr = NULL;
3949 u32 nlocal_logicals = 0;
3950 u32 ndev_allocated = 0;
3951 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3953 int i, n_ext_target_devs, ndevs_to_allocate;
3954 int raid_ctlr_position;
3955 bool physical_device;
3956 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3958 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3959 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3960 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3961 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3962 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3963 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
3965 if (!currentsd || !physdev_list || !logdev_list ||
3966 !tmpdevice || !id_phys || !id_ctlr) {
3967 dev_err(&h->pdev->dev, "out of memory\n");
3970 memset(lunzerobits, 0, sizeof(lunzerobits));
3972 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
3974 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3975 logdev_list, &nlogicals)) {
3976 h->drv_req_rescan = 1;
3980 /* Set number of local logicals (non PTRAID) */
3981 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
3982 dev_warn(&h->pdev->dev,
3983 "%s: Can't determine number of local logical devices.\n",
3987 /* We might see up to the maximum number of logical and physical disks
3988 * plus external target devices, and a device for the local RAID controller.
3991 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3993 /* Allocate the per device structures */
3994 for (i = 0; i < ndevs_to_allocate; i++) {
3995 if (i >= HPSA_MAX_DEVICES) {
3996 dev_warn(&h->pdev->dev,
3997 "maximum devices (%d) exceeded. %d devices ignored.\n",
3998 HPSA_MAX_DEVICES, ndevs_to_allocate - HPSA_MAX_DEVICES);
4002 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4003 if (!currentsd[i]) {
4004 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
4005 __FILE__, __LINE__);
4006 h->drv_req_rescan = 1;
4012 if (is_scsi_rev_5(h))
4013 raid_ctlr_position = 0;
4015 raid_ctlr_position = nphysicals + nlogicals;
4017 /* adjust our table of devices */
4018 n_ext_target_devs = 0;
4019 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4020 u8 *lunaddrbytes, is_OBDR = 0;
4022 int phys_dev_index = i - (raid_ctlr_position == 0);
4024 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4026 /* Figure out where the LUN ID info is coming from */
4027 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4028 i, nphysicals, nlogicals, physdev_list, logdev_list);
4030 /* skip masked non-disk devices */
4031 if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
4032 (physdev_list->LUN[phys_dev_index].device_flags & 0x01))
4035 /* Get device type, vendor, model, device id */
4036 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4038 if (rc == -ENOMEM) {
4039 dev_warn(&h->pdev->dev,
4040 "Out of memory, rescan deferred.\n");
4041 h->drv_req_rescan = 1;
4045 dev_warn(&h->pdev->dev,
4046 "Inquiry failed, skipping device.\n");
4050 /* Determine if this is a lun from an external target array */
4051 tmpdevice->external =
4052 figure_external_status(h, raid_ctlr_position, i,
4053 nphysicals, nlocal_logicals);
4055 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4056 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
4057 this_device = currentsd[ncurrent];
4059 /* Turn on discovery_polling if there are ext target devices.
4060 * Event-based change notification is unreliable for those.
4062 if (!h->discovery_polling) {
4063 if (tmpdevice->external) {
4064 h->discovery_polling = 1;
4065 dev_info(&h->pdev->dev,
4066 "External target, activate discovery polling.\n");
4071 *this_device = *tmpdevice;
4072 this_device->physical_device = physical_device;
4075 * Expose all devices except for physical devices that are masked.
4078 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4079 this_device->expose_device = 0;
4081 this_device->expose_device = 1;
4085 * Get the SAS address for physical devices that are exposed.
4087 if (this_device->physical_device && this_device->expose_device)
4088 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4090 switch (this_device->devtype) {
4092 /* We don't *really* support actual CD-ROM devices,
4093 * just "One Button Disaster Recovery" tape drive
4094 * which temporarily pretends to be a CD-ROM drive.
4095 * So we check that the device is really an OBDR tape
4096 * device by checking for "$DR-10" in bytes 43-48 of
4097 * the inquiry data.
4098 */
4099 if (is_OBDR)
4100 ncurrent++;
4101 break;
4102 case TYPE_DISK:
4103 if (this_device->physical_device) {
4104 /* The disk is in HBA mode. */
4105 /* Never use RAID mapper in HBA mode. */
4106 this_device->offload_enabled = 0;
4107 hpsa_get_ioaccel_drive_info(h, this_device,
4108 physdev_list, phys_dev_index, id_phys);
4109 hpsa_get_path_info(this_device,
4110 physdev_list, phys_dev_index, id_phys);
4111 }
4112 ncurrent++;
4113 break;
4114 case TYPE_TAPE:
4115 case TYPE_MEDIUM_CHANGER:
4116 case TYPE_ENCLOSURE:
4117 ncurrent++;
4118 break;
4119 case TYPE_RAID:
4120 /* Only present the Smartarray HBA as a RAID controller.
4121 * If it's a RAID controller other than the HBA itself
4122 * (an external RAID controller, MSA500 or similar)
4123 * don't present it.
4124 */
4125 if (!is_hba_lunid(lunaddrbytes))
4126 break;
4127 ncurrent++;
4128 break;
4129 default:
4130 break;
4131 }
4132 if (ncurrent >= HPSA_MAX_DEVICES)
4133 break;
4134 }
4136 if (h->sas_host == NULL) {
4139 rc = hpsa_add_sas_host(h);
4140 if (rc) {
4141 dev_warn(&h->pdev->dev,
4142 "Could not add sas host %d\n", rc);
4143 goto out;
4144 }
4145 }
4147 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4148 out:
4149 kfree(tmpdevice);
4150 for (i = 0; i < ndev_allocated; i++)
4151 kfree(currentsd[i]);
4152 kfree(currentsd);
4153 kfree(physdev_list);
4154 kfree(logdev_list);
4155 kfree(id_ctlr);
4156 kfree(id_phys);
4157 }
4159 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4160 struct scatterlist *sg)
4162 u64 addr64 = (u64) sg_dma_address(sg);
4163 unsigned int len = sg_dma_len(sg);
4165 desc->Addr = cpu_to_le64(addr64);
4166 desc->Len = cpu_to_le32(len);
4167 desc->Ext = 0;
4168 }
4171 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
4172 * dma mapping and fills in the scatter gather entries of the
4173 * hpsa command, cp.
4174 */
4175 static int hpsa_scatter_gather(struct ctlr_info *h,
4176 struct CommandList *cp,
4177 struct scsi_cmnd *cmd)
4179 struct scatterlist *sg;
4180 int use_sg, i, sg_limit, chained, last_sg;
4181 struct SGDescriptor *curr_sg;
4183 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4185 use_sg = scsi_dma_map(cmd);
4186 if (use_sg < 0)
4187 return use_sg;
4189 if (!use_sg)
4190 goto sglist_finished;
4193 * If the number of entries is greater than the max for a single list,
4194 * then we have a chained list; we will set up all but one entry in the
4195 * first list (the last entry is saved for link information);
4196 * otherwise, we don't have a chained list and we'll set up each of
4197 * the entries in the one list.
4198 */
4199 curr_sg = cp->SG;
4200 chained = use_sg > h->max_cmd_sg_entries;
4201 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4202 last_sg = scsi_sg_count(cmd) - 1;
4203 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4204 hpsa_set_sg_descriptor(curr_sg, sg);
4205 curr_sg++;
4206 }
4208 if (chained) {
4209 /*
4210 * Continue with the chained list. Set curr_sg to the chained
4211 * list. Modify the limit to the total count less the entries
4212 * we've already set up. Resume the scan at the list entry
4213 * where the previous loop left off.
4214 */
4215 curr_sg = h->cmd_sg_list[cp->cmdindex];
4216 sg_limit = use_sg - sg_limit;
4217 for_each_sg(sg, sg, sg_limit, i) {
4218 hpsa_set_sg_descriptor(curr_sg, sg);
4219 curr_sg++;
4220 }
4221 }
4223 /* Back the pointer up to the last entry and mark it as "last". */
4224 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4226 if (use_sg + chained > h->maxSG)
4227 h->maxSG = use_sg + chained;
4229 if (chained) {
4230 cp->Header.SGList = h->max_cmd_sg_entries;
4231 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4232 if (hpsa_map_sg_chain_block(h, cp)) {
4233 scsi_dma_unmap(cmd);
4234 return -1;
4235 }
4236 return 0;
4237 }
4239 sglist_finished:
4241 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
4242 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4243 return 0;
4244 }
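/*
 * Worked example (editor's illustration, not part of the original
 * driver): with h->max_cmd_sg_entries == 32 and a command that maps
 * to use_sg == 40 segments, "chained" is true, so the first list
 * holds 31 data entries plus the chain entry, and the remaining
 * 9 entries (40 - 31) go into the chained block; the controller then
 * sees SGList == 32 and SGTotal == 41 (use_sg + 1 for the chain
 * entry).
 */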
4246 #define IO_ACCEL_INELIGIBLE (1)
4247 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4248 {
4249 int is_write = 0;
4250 u32 block;
4251 u32 block_cnt;
4253 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
4254 switch (cdb[0]) {
4255 case WRITE_6:
4256 case WRITE_12:
4257 is_write = 1;
4258 case READ_6:
4259 case READ_12:
4260 if (*cdb_len == 6) {
4261 block = get_unaligned_be16(&cdb[2]);
4262 block_cnt = cdb[4];
4263 if (block_cnt == 0)
4264 block_cnt = 256;
4265 } else {
4266 BUG_ON(*cdb_len != 12);
4267 block = get_unaligned_be32(&cdb[2]);
4268 block_cnt = get_unaligned_be32(&cdb[6]);
4269 }
4270 if (block_cnt > 0xffff)
4271 return IO_ACCEL_INELIGIBLE;
4273 cdb[0] = is_write ? WRITE_10 : READ_10;
4274 cdb[1] = 0;
4275 cdb[2] = (u8) (block >> 24);
4276 cdb[3] = (u8) (block >> 16);
4277 cdb[4] = (u8) (block >> 8);
4278 cdb[5] = (u8) (block);
4279 cdb[6] = 0;
4280 cdb[7] = (u8) (block_cnt >> 8);
4281 cdb[8] = (u8) (block_cnt);
4282 cdb[9] = 0;
4283 *cdb_len = 10;
4284 break;
4285 }
4286 return 0;
4287 }
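/*
 * Editor's illustrative sketch (not part of the original driver):
 * demonstrates the 6-byte to 10-byte CDB rewrite performed by
 * fixup_ioaccel_cdb() above; the byte values are worked out by hand
 * from the SCSI CDB layouts.
 */
static void __maybe_unused example_fixup_ioaccel_cdb(void)
{
	u8 cdb[16] = { READ_6, 0, 0x12, 0x34, 0x08, 0 };
	int cdb_len = 6;

	if (fixup_ioaccel_cdb(cdb, &cdb_len) == 0) {
		/* cdb[0] is now READ_10 and cdb_len is 10, with
		 * LBA 0x00001234 in bytes 2-5 and a transfer count of
		 * 0x0008 in bytes 7-8.
		 */
	}
}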
4289 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4290 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4291 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4293 struct scsi_cmnd *cmd = c->scsi_cmd;
4294 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4296 unsigned int total_len = 0;
4297 struct scatterlist *sg;
4300 struct SGDescriptor *curr_sg;
4301 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4303 /* TODO: implement chaining support */
4304 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4305 atomic_dec(&phys_disk->ioaccel_cmds_out);
4306 return IO_ACCEL_INELIGIBLE;
4309 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4311 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4312 atomic_dec(&phys_disk->ioaccel_cmds_out);
4313 return IO_ACCEL_INELIGIBLE;
4316 c->cmd_type = CMD_IOACCEL1;
4318 /* Adjust the DMA address to point to the accelerated command buffer */
4319 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4320 (c->cmdindex * sizeof(*cp));
4321 BUG_ON(c->busaddr & 0x0000007F);
4323 use_sg = scsi_dma_map(cmd);
4324 if (use_sg < 0) {
4325 atomic_dec(&phys_disk->ioaccel_cmds_out);
4326 return use_sg;
4327 }
4329 if (use_sg) {
4330 curr_sg = cp->SG;
4331 scsi_for_each_sg(cmd, sg, use_sg, i) {
4332 addr64 = (u64) sg_dma_address(sg);
4333 len = sg_dma_len(sg);
4334 total_len += len;
4335 curr_sg->Addr = cpu_to_le64(addr64);
4336 curr_sg->Len = cpu_to_le32(len);
4337 curr_sg->Ext = cpu_to_le32(0);
4338 curr_sg++;
4339 }
4340 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4342 switch (cmd->sc_data_direction) {
4343 case DMA_TO_DEVICE:
4344 control |= IOACCEL1_CONTROL_DATA_OUT;
4345 break;
4346 case DMA_FROM_DEVICE:
4347 control |= IOACCEL1_CONTROL_DATA_IN;
4348 break;
4349 case DMA_NONE:
4350 control |= IOACCEL1_CONTROL_NODATAXFER;
4351 break;
4352 default:
4353 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4354 cmd->sc_data_direction);
4355 BUG();
4356 break;
4357 }
4358 } else {
4359 control |= IOACCEL1_CONTROL_NODATAXFER;
4360 }
4362 c->Header.SGList = use_sg;
4363 /* Fill out the command structure to submit */
4364 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4365 cp->transfer_len = cpu_to_le32(total_len);
4366 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4367 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4368 cp->control = cpu_to_le32(control);
4369 memcpy(cp->CDB, cdb, cdb_len);
4370 memcpy(cp->CISS_LUN, scsi3addr, 8);
4371 /* Tag was already set at init time. */
4372 enqueue_cmd_and_start_io(h, c);
4373 return 0;
4374 }
4377 * Queue a command directly to a device behind the controller using the
4378 * I/O accelerator path.
4380 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4381 struct CommandList *c)
4383 struct scsi_cmnd *cmd = c->scsi_cmd;
4384 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4386 c->phys_disk = dev;
4388 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4389 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4390 }
4393 * Set encryption parameters for the ioaccel2 request
4395 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4396 struct CommandList *c, struct io_accel2_cmd *cp)
4398 struct scsi_cmnd *cmd = c->scsi_cmd;
4399 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4400 struct raid_map_data *map = &dev->raid_map;
4403 /* Are we doing encryption on this device */
4404 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4405 return;
4406 /* Set the data encryption key index. */
4407 cp->dekindex = map->dekindex;
4409 /* Set the encryption enable flag, encoded into direction field. */
4410 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4412 /* Set encryption tweak values based on logical block address
4413 * If block size is 512, tweak value is LBA.
4414 * For other block sizes, tweak value is (LBA * block size) / 512.
4415 */
4416 switch (cmd->cmnd[0]) {
4417 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4418 case WRITE_6:
4419 case READ_6:
4420 first_block = get_unaligned_be16(&cmd->cmnd[2]);
4421 break;
4422 case WRITE_10:
4423 case READ_10:
4424 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4425 case WRITE_12:
4426 case READ_12:
4427 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4428 break;
4429 case WRITE_16:
4430 case READ_16:
4431 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4432 break;
4433 default:
4434 dev_err(&h->pdev->dev,
4435 "ERROR: %s: size (0x%x) not supported for encryption\n",
4436 __func__, cmd->cmnd[0]);
4437 BUG();
4438 break;
4439 }
4441 if (le32_to_cpu(map->volume_blk_size) != 512)
4442 first_block = first_block *
4443 le32_to_cpu(map->volume_blk_size)/512;
4445 cp->tweak_lower = cpu_to_le32(first_block);
4446 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4447 }
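/*
 * Worked tweak example (editor's illustration, not part of the
 * original driver): for volume_blk_size == 4096 and a request
 * starting at LBA 100, the tweak becomes 100 * 4096 / 512 == 800,
 * i.e. the equivalent 512-byte-sector LBA; tweak_lower then holds
 * 800 and tweak_upper holds 0.
 */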
4449 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4450 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4451 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4453 struct scsi_cmnd *cmd = c->scsi_cmd;
4454 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4455 struct ioaccel2_sg_element *curr_sg;
4457 struct scatterlist *sg;
4462 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4464 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4465 atomic_dec(&phys_disk->ioaccel_cmds_out);
4466 return IO_ACCEL_INELIGIBLE;
4469 c->cmd_type = CMD_IOACCEL2;
4470 /* Adjust the DMA address to point to the accelerated command buffer */
4471 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4472 (c->cmdindex * sizeof(*cp));
4473 BUG_ON(c->busaddr & 0x0000007F);
4475 memset(cp, 0, sizeof(*cp));
4476 cp->IU_type = IOACCEL2_IU_TYPE;
4478 use_sg = scsi_dma_map(cmd);
4479 if (use_sg < 0) {
4480 atomic_dec(&phys_disk->ioaccel_cmds_out);
4481 return use_sg;
4482 }
4484 if (use_sg) {
4485 curr_sg = cp->sg;
4486 if (use_sg > h->ioaccel_maxsg) {
4487 addr64 = le64_to_cpu(
4488 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4489 curr_sg->address = cpu_to_le64(addr64);
4490 curr_sg->length = 0;
4491 curr_sg->reserved[0] = 0;
4492 curr_sg->reserved[1] = 0;
4493 curr_sg->reserved[2] = 0;
4494 curr_sg->chain_indicator = 0x80;
4496 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4497 }
4498 scsi_for_each_sg(cmd, sg, use_sg, i) {
4499 addr64 = (u64) sg_dma_address(sg);
4500 len = sg_dma_len(sg);
4501 total_len += len;
4502 curr_sg->address = cpu_to_le64(addr64);
4503 curr_sg->length = cpu_to_le32(len);
4504 curr_sg->reserved[0] = 0;
4505 curr_sg->reserved[1] = 0;
4506 curr_sg->reserved[2] = 0;
4507 curr_sg->chain_indicator = 0;
4508 curr_sg++;
4509 }
4510 }
4511 switch (cmd->sc_data_direction) {
4512 case DMA_TO_DEVICE:
4513 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4514 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4515 break;
4516 case DMA_FROM_DEVICE:
4517 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4518 cp->direction |= IOACCEL2_DIR_DATA_IN;
4519 break;
4520 case DMA_NONE:
4521 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4522 cp->direction |= IOACCEL2_DIR_NO_DATA;
4523 break;
4524 default:
4525 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4526 cmd->sc_data_direction);
4527 BUG();
4528 break;
4529 }
4530 } else {
4531 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4532 cp->direction |= IOACCEL2_DIR_NO_DATA;
4533 }
4535 /* Set encryption parameters, if necessary */
4536 set_encrypt_ioaccel2(h, c, cp);
4538 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4539 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4540 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4542 cp->data_len = cpu_to_le32(total_len);
4543 cp->err_ptr = cpu_to_le64(c->busaddr +
4544 offsetof(struct io_accel2_cmd, error_data));
4545 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4547 /* fill in sg elements */
4548 if (use_sg > h->ioaccel_maxsg) {
4549 cp->sg_count = 1;
4550 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4551 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4552 atomic_dec(&phys_disk->ioaccel_cmds_out);
4553 scsi_dma_unmap(cmd);
4554 return -1;
4555 }
4556 } else
4557 cp->sg_count = (u8) use_sg;
4559 enqueue_cmd_and_start_io(h, c);
4560 return 0;
4561 }
4564 * Queue a command to the correct I/O accelerator path.
4566 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4567 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4568 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4570 /* Try to honor the device's queue depth */
4571 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4572 phys_disk->queue_depth) {
4573 atomic_dec(&phys_disk->ioaccel_cmds_out);
4574 return IO_ACCEL_INELIGIBLE;
4575 }
4576 if (h->transMethod & CFGTBL_Trans_io_accel1)
4577 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4578 cdb, cdb_len, scsi3addr,
4579 phys_disk);
4580 else
4581 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4582 cdb, cdb_len, scsi3addr,
4583 phys_disk);
4584 }
4586 static void raid_map_helper(struct raid_map_data *map,
4587 int offload_to_mirror, u32 *map_index, u32 *current_group)
4589 if (offload_to_mirror == 0) {
4590 /* use physical disk in the first mirrored group. */
4591 *map_index %= le16_to_cpu(map->data_disks_per_row);
4592 return;
4593 }
4594 do {
4595 /* determine mirror group that *map_index indicates */
4596 *current_group = *map_index /
4597 le16_to_cpu(map->data_disks_per_row);
4598 if (offload_to_mirror == *current_group)
4599 continue;
4600 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4601 /* select map index from next group */
4602 *map_index += le16_to_cpu(map->data_disks_per_row);
4603 (*current_group)++;
4604 } else {
4605 /* select map index from first group */
4606 *map_index %= le16_to_cpu(map->data_disks_per_row);
4607 *current_group = 0;
4608 }
4609 } while (offload_to_mirror != *current_group);
4610 }
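/*
 * Example of the mirror walk above (editor's illustration, not part
 * of the original driver): with data_disks_per_row == 2,
 * layout_map_count == 3 (triple mirror) and *map_index == 1, a
 * request with offload_to_mirror == 2 advances the index 1 -> 3 -> 5,
 * stopping once *current_group reaches 2.
 */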
4613 * Attempt to perform offload RAID mapping for a logical volume I/O.
4615 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4616 struct CommandList *c)
4618 struct scsi_cmnd *cmd = c->scsi_cmd;
4619 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4620 struct raid_map_data *map = &dev->raid_map;
4621 struct raid_map_disk_data *dd = &map->data[0];
4624 u64 first_block, last_block;
4627 u64 first_row, last_row;
4628 u32 first_row_offset, last_row_offset;
4629 u32 first_column, last_column;
4630 u64 r0_first_row, r0_last_row;
4631 u32 r5or6_blocks_per_row;
4632 u64 r5or6_first_row, r5or6_last_row;
4633 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4634 u32 r5or6_first_column, r5or6_last_column;
4635 u32 total_disks_per_row;
4637 u32 first_group, last_group, current_group;
4645 #if BITS_PER_LONG == 32
4648 int offload_to_mirror;
4650 /* check for valid opcode, get LBA and block count */
4651 switch (cmd->cmnd[0]) {
4652 case WRITE_6:
4653 is_write = 1;
4654 case READ_6:
4655 first_block = get_unaligned_be16(&cmd->cmnd[2]);
4656 block_cnt = cmd->cmnd[4];
4657 if (block_cnt == 0)
4658 block_cnt = 256;
4659 break;
4660 case WRITE_10:
4661 is_write = 1;
4662 case READ_10:
4663 first_block =
4664 (((u64) cmd->cmnd[2]) << 24) |
4665 (((u64) cmd->cmnd[3]) << 16) |
4666 (((u64) cmd->cmnd[4]) << 8) |
4667 cmd->cmnd[5];
4668 block_cnt =
4669 (((u32) cmd->cmnd[7]) << 8) |
4670 cmd->cmnd[8];
4671 break;
4672 case WRITE_12:
4673 is_write = 1;
4674 case READ_12:
4675 first_block =
4676 (((u64) cmd->cmnd[2]) << 24) |
4677 (((u64) cmd->cmnd[3]) << 16) |
4678 (((u64) cmd->cmnd[4]) << 8) |
4679 cmd->cmnd[5];
4680 block_cnt =
4681 (((u32) cmd->cmnd[6]) << 24) |
4682 (((u32) cmd->cmnd[7]) << 16) |
4683 (((u32) cmd->cmnd[8]) << 8) |
4684 cmd->cmnd[9];
4685 break;
4686 case WRITE_16:
4687 is_write = 1;
4688 case READ_16:
4689 first_block =
4690 (((u64) cmd->cmnd[2]) << 56) |
4691 (((u64) cmd->cmnd[3]) << 48) |
4692 (((u64) cmd->cmnd[4]) << 40) |
4693 (((u64) cmd->cmnd[5]) << 32) |
4694 (((u64) cmd->cmnd[6]) << 24) |
4695 (((u64) cmd->cmnd[7]) << 16) |
4696 (((u64) cmd->cmnd[8]) << 8) |
4697 cmd->cmnd[9];
4698 block_cnt =
4699 (((u32) cmd->cmnd[10]) << 24) |
4700 (((u32) cmd->cmnd[11]) << 16) |
4701 (((u32) cmd->cmnd[12]) << 8) |
4702 cmd->cmnd[13];
4703 break;
4704 default:
4705 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4706 }
4707 last_block = first_block + block_cnt - 1;
4709 /* check for write to non-RAID-0 */
4710 if (is_write && dev->raid_level != 0)
4711 return IO_ACCEL_INELIGIBLE;
4713 /* check for invalid block or wraparound */
4714 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4715 last_block < first_block)
4716 return IO_ACCEL_INELIGIBLE;
4718 /* calculate stripe information for the request */
4719 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4720 le16_to_cpu(map->strip_size);
4721 strip_size = le16_to_cpu(map->strip_size);
4722 #if BITS_PER_LONG == 32
4723 tmpdiv = first_block;
4724 (void) do_div(tmpdiv, blocks_per_row);
4725 first_row = tmpdiv;
4726 tmpdiv = last_block;
4727 (void) do_div(tmpdiv, blocks_per_row);
4728 last_row = tmpdiv;
4729 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4730 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4731 tmpdiv = first_row_offset;
4732 (void) do_div(tmpdiv, strip_size);
4733 first_column = tmpdiv;
4734 tmpdiv = last_row_offset;
4735 (void) do_div(tmpdiv, strip_size);
4736 last_column = tmpdiv;
4737 #else
4738 first_row = first_block / blocks_per_row;
4739 last_row = last_block / blocks_per_row;
4740 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4741 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4742 first_column = first_row_offset / strip_size;
4743 last_column = last_row_offset / strip_size;
4744 #endif
4746 /* if this isn't a single row/column then give to the controller */
4747 if ((first_row != last_row) || (first_column != last_column))
4748 return IO_ACCEL_INELIGIBLE;
4750 /* proceeding with driver mapping */
4751 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4752 le16_to_cpu(map->metadata_disks_per_row);
4753 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4754 le16_to_cpu(map->row_cnt);
4755 map_index = (map_row * total_disks_per_row) + first_column;
4757 switch (dev->raid_level) {
4758 case HPSA_RAID_0:
4759 break; /* nothing special to do */
4760 case HPSA_RAID_1:
4761 /* Handles load balance across RAID 1 members.
4762 * (2-drive R1 and R10 with even # of drives.)
4763 * Appropriate for SSDs, not optimal for HDDs
4764 */
4765 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4766 if (dev->offload_to_mirror)
4767 map_index += le16_to_cpu(map->data_disks_per_row);
4768 dev->offload_to_mirror = !dev->offload_to_mirror;
4769 break;
4770 case HPSA_RAID_ADM:
4771 /* Handles N-way mirrors (R1-ADM)
4772 * and R10 with # of drives divisible by 3.)
4773 */
4774 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4776 offload_to_mirror = dev->offload_to_mirror;
4777 raid_map_helper(map, offload_to_mirror,
4778 &map_index, &current_group);
4779 /* set mirror group to use next time */
4780 offload_to_mirror =
4781 (offload_to_mirror >=
4782 le16_to_cpu(map->layout_map_count) - 1)
4783 ? 0 : offload_to_mirror + 1;
4784 dev->offload_to_mirror = offload_to_mirror;
4785 /* Avoid direct use of dev->offload_to_mirror within this
4786 * function since multiple threads might simultaneously
4787 * increment it beyond the range of dev->layout_map_count - 1.
4788 */
4789 break;
4790 case HPSA_RAID_5:
4791 case HPSA_RAID_6:
4792 if (le16_to_cpu(map->layout_map_count) <= 1)
4793 break;
4795 /* Verify first and last block are in same RAID group */
4796 r5or6_blocks_per_row =
4797 le16_to_cpu(map->strip_size) *
4798 le16_to_cpu(map->data_disks_per_row);
4799 BUG_ON(r5or6_blocks_per_row == 0);
4800 stripesize = r5or6_blocks_per_row *
4801 le16_to_cpu(map->layout_map_count);
4802 #if BITS_PER_LONG == 32
4803 tmpdiv = first_block;
4804 first_group = do_div(tmpdiv, stripesize);
4805 tmpdiv = first_group;
4806 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4807 first_group = tmpdiv;
4808 tmpdiv = last_block;
4809 last_group = do_div(tmpdiv, stripesize);
4810 tmpdiv = last_group;
4811 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4812 last_group = tmpdiv;
4813 #else
4814 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4815 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
4816 #endif
4817 if (first_group != last_group)
4818 return IO_ACCEL_INELIGIBLE;
4820 /* Verify request is in a single row of RAID 5/6 */
4821 #if BITS_PER_LONG == 32
4822 tmpdiv = first_block;
4823 (void) do_div(tmpdiv, stripesize);
4824 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4825 tmpdiv = last_block;
4826 (void) do_div(tmpdiv, stripesize);
4827 r5or6_last_row = r0_last_row = tmpdiv;
4828 #else
4829 first_row = r5or6_first_row = r0_first_row =
4830 first_block / stripesize;
4831 r5or6_last_row = r0_last_row = last_block / stripesize;
4832 #endif
4833 if (r5or6_first_row != r5or6_last_row)
4834 return IO_ACCEL_INELIGIBLE;
4837 /* Verify request is in a single column */
4838 #if BITS_PER_LONG == 32
4839 tmpdiv = first_block;
4840 first_row_offset = do_div(tmpdiv, stripesize);
4841 tmpdiv = first_row_offset;
4842 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4843 r5or6_first_row_offset = first_row_offset;
4844 tmpdiv = last_block;
4845 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4846 tmpdiv = r5or6_last_row_offset;
4847 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4848 tmpdiv = r5or6_first_row_offset;
4849 (void) do_div(tmpdiv, map->strip_size);
4850 first_column = r5or6_first_column = tmpdiv;
4851 tmpdiv = r5or6_last_row_offset;
4852 (void) do_div(tmpdiv, map->strip_size);
4853 r5or6_last_column = tmpdiv;
4854 #else
4855 first_row_offset = r5or6_first_row_offset =
4856 (u32)((first_block % stripesize) %
4857 r5or6_blocks_per_row);
4859 r5or6_last_row_offset =
4860 (u32)((last_block % stripesize) %
4861 r5or6_blocks_per_row);
4863 first_column = r5or6_first_column =
4864 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4865 r5or6_last_column =
4866 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4867 #endif
4868 if (r5or6_first_column != r5or6_last_column)
4869 return IO_ACCEL_INELIGIBLE;
4871 /* Request is eligible */
4872 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4873 le16_to_cpu(map->row_cnt);
4875 map_index = (first_group *
4876 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4877 (map_row * total_disks_per_row) + first_column;
4878 break;
4879 default:
4880 return IO_ACCEL_INELIGIBLE;
4881 }
4883 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4884 return IO_ACCEL_INELIGIBLE;
4886 c->phys_disk = dev->phys_disk[map_index];
4888 disk_handle = dd[map_index].ioaccel_handle;
4889 disk_block = le64_to_cpu(map->disk_starting_blk) +
4890 first_row * le16_to_cpu(map->strip_size) +
4891 (first_row_offset - first_column *
4892 le16_to_cpu(map->strip_size));
4893 disk_block_cnt = block_cnt;
4895 /* handle differing logical/physical block sizes */
4896 if (map->phys_blk_shift) {
4897 disk_block <<= map->phys_blk_shift;
4898 disk_block_cnt <<= map->phys_blk_shift;
4899 }
4900 BUG_ON(disk_block_cnt > 0xffff);
4902 /* build the new CDB for the physical disk I/O */
4903 if (disk_block > 0xffffffff) {
4904 cdb[0] = is_write ? WRITE_16 : READ_16;
4905 cdb[1] = 0;
4906 cdb[2] = (u8) (disk_block >> 56);
4907 cdb[3] = (u8) (disk_block >> 48);
4908 cdb[4] = (u8) (disk_block >> 40);
4909 cdb[5] = (u8) (disk_block >> 32);
4910 cdb[6] = (u8) (disk_block >> 24);
4911 cdb[7] = (u8) (disk_block >> 16);
4912 cdb[8] = (u8) (disk_block >> 8);
4913 cdb[9] = (u8) (disk_block);
4914 cdb[10] = (u8) (disk_block_cnt >> 24);
4915 cdb[11] = (u8) (disk_block_cnt >> 16);
4916 cdb[12] = (u8) (disk_block_cnt >> 8);
4917 cdb[13] = (u8) (disk_block_cnt);
4918 cdb[14] = 0;
4919 cdb[15] = 0;
4920 cdb_len = 16;
4921 } else {
4922 cdb[0] = is_write ? WRITE_10 : READ_10;
4923 cdb[1] = 0;
4924 cdb[2] = (u8) (disk_block >> 24);
4925 cdb[3] = (u8) (disk_block >> 16);
4926 cdb[4] = (u8) (disk_block >> 8);
4927 cdb[5] = (u8) (disk_block);
4928 cdb[6] = 0;
4929 cdb[7] = (u8) (disk_block_cnt >> 8);
4930 cdb[8] = (u8) (disk_block_cnt);
4931 cdb[9] = 0;
4932 cdb_len = 10;
4933 }
4934 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
4935 dev->scsi3addr,
4936 dev->phys_disk[map_index]);
4937 }
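/*
 * Worked mapping example (editor's illustration, not part of the
 * original driver): strip_size == 128 and data_disks_per_row == 3
 * give blocks_per_row == 384. A 16-block read at first_block == 1000
 * yields first_row == last_row == 2 (1000 / 384), a row offset of
 * 232 (1000 - 2 * 384) and first_column == last_column == 1
 * (232 / 128), so the whole request targets one physical disk and
 * stays eligible for the accelerated path.
 */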
4940 * Submit commands down the "normal" RAID stack path
4941 * All callers to hpsa_ciss_submit must check lockup_detected
4942 * beforehand: optionally before, and always after, calling cmd_alloc.
4943 */
4944 static int hpsa_ciss_submit(struct ctlr_info *h,
4945 struct CommandList *c, struct scsi_cmnd *cmd,
4946 unsigned char scsi3addr[])
4948 cmd->host_scribble = (unsigned char *) c;
4949 c->cmd_type = CMD_SCSI;
4950 c->scsi_cmd = cmd;
4951 c->Header.ReplyQueue = 0; /* unused in simple mode */
4952 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4953 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4955 /* Fill in the request block... */
4957 c->Request.Timeout = 0;
4958 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4959 c->Request.CDBLen = cmd->cmd_len;
4960 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4961 switch (cmd->sc_data_direction) {
4962 case DMA_TO_DEVICE:
4963 c->Request.type_attr_dir =
4964 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4965 break;
4966 case DMA_FROM_DEVICE:
4967 c->Request.type_attr_dir =
4968 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4969 break;
4970 case DMA_NONE:
4971 c->Request.type_attr_dir =
4972 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4973 break;
4974 case DMA_BIDIRECTIONAL:
4975 /* This can happen if a buggy application does a scsi passthru
4976 * and sets both inlen and outlen to non-zero. ( see
4977 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4978 */
4980 c->Request.type_attr_dir =
4981 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4982 /* This is technically wrong, and hpsa controllers should
4983 * reject it with CMD_INVALID, which is the most correct
4984 * response, but non-fibre backends appear to let it
4985 * slide by, and give the same results as if this field
4986 * were set correctly. Either way is acceptable for
4987 * our purposes here.
4988 */
4989 break;
4990 default:
4993 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4994 cmd->sc_data_direction);
4995 BUG();
4996 break;
4997 }
4999 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5000 hpsa_cmd_resolve_and_free(h, c);
5001 return SCSI_MLQUEUE_HOST_BUSY;
5003 enqueue_cmd_and_start_io(h, c);
5004 /* the cmd'll come back via intr handler in complete_scsi_command() */
5005 return 0;
5006 }
5008 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5009 struct CommandList *c)
5011 dma_addr_t cmd_dma_handle, err_dma_handle;
5013 /* Zero out all of commandlist except the last field, refcount */
5014 memset(c, 0, offsetof(struct CommandList, refcount));
5015 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5016 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5017 c->err_info = h->errinfo_pool + index;
5018 memset(c->err_info, 0, sizeof(*c->err_info));
5019 err_dma_handle = h->errinfo_pool_dhandle
5020 + index * sizeof(*c->err_info);
5021 c->cmdindex = index;
5022 c->busaddr = (u32) cmd_dma_handle;
5023 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5024 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5026 c->scsi_cmd = SCSI_CMD_IDLE;
5027 }
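/*
 * Editor's note (not part of the original driver): for index 5, for
 * example, the command's bus address is cmd_pool_dhandle +
 * 5 * sizeof(struct CommandList) and its tag is 5 <<
 * DIRECT_LOOKUP_SHIFT, so the completion path can recover the pool
 * index directly from the tag without searching.
 */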
5029 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5033 for (i = 0; i < h->nr_cmds; i++) {
5034 struct CommandList *c = h->cmd_pool + i;
5036 hpsa_cmd_init(h, i, c);
5037 atomic_set(&c->refcount, 0);
5038 }
5039 }
5041 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5042 struct CommandList *c)
5044 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5046 BUG_ON(c->cmdindex != index);
5048 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5049 memset(c->err_info, 0, sizeof(*c->err_info));
5050 c->busaddr = (u32) cmd_dma_handle;
5051 }
5053 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5054 struct CommandList *c, struct scsi_cmnd *cmd,
5055 unsigned char *scsi3addr)
5057 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5058 int rc = IO_ACCEL_INELIGIBLE;
5060 cmd->host_scribble = (unsigned char *) c;
5062 if (dev->offload_enabled) {
5063 hpsa_cmd_init(h, c->cmdindex, c);
5064 c->cmd_type = CMD_SCSI;
5065 c->scsi_cmd = cmd;
5066 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5067 if (rc < 0) /* scsi_dma_map failed. */
5068 rc = SCSI_MLQUEUE_HOST_BUSY;
5069 } else if (dev->hba_ioaccel_enabled) {
5070 hpsa_cmd_init(h, c->cmdindex, c);
5071 c->cmd_type = CMD_SCSI;
5072 c->scsi_cmd = cmd;
5073 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5074 if (rc < 0) /* scsi_dma_map failed. */
5075 rc = SCSI_MLQUEUE_HOST_BUSY;
5076 }
5077 return rc;
5078 }
5080 static void hpsa_command_resubmit_worker(struct work_struct *work)
5082 struct scsi_cmnd *cmd;
5083 struct hpsa_scsi_dev_t *dev;
5084 struct CommandList *c = container_of(work, struct CommandList, work);
5086 cmd = c->scsi_cmd;
5087 dev = cmd->device->hostdata;
5088 if (!dev) {
5089 cmd->result = DID_NO_CONNECT << 16;
5090 return hpsa_cmd_free_and_done(c->h, c, cmd);
5091 }
5092 if (c->reset_pending)
5093 return hpsa_cmd_resolve_and_free(c->h, c);
5094 if (c->abort_pending)
5095 return hpsa_cmd_abort_and_free(c->h, c, cmd);
5096 if (c->cmd_type == CMD_IOACCEL2) {
5097 struct ctlr_info *h = c->h;
5098 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5101 if (c2->error_data.serv_response ==
5102 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5103 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5104 if (rc == 0)
5105 return;
5106 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5108 * If we get here, it means dma mapping failed.
5109 * Try again via scsi mid layer, which will
5110 * then get SCSI_MLQUEUE_HOST_BUSY.
5112 cmd->result = DID_IMM_RETRY << 16;
5113 return hpsa_cmd_free_and_done(h, c, cmd);
5114 }
5115 /* else, fall thru and resubmit down CISS path */
5116 }
5117 }
5118 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5119 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5121 * If we get here, it means dma mapping failed. Try
5122 * again via scsi mid layer, which will then get
5123 * SCSI_MLQUEUE_HOST_BUSY.
5125 * hpsa_ciss_submit will have already freed c
5126 * if it encountered a dma mapping failure.
5128 cmd->result = DID_IMM_RETRY << 16;
5129 cmd->scsi_done(cmd);
5130 }
5131 }
5133 /* Running in struct Scsi_Host->host_lock less mode */
5134 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5136 struct ctlr_info *h;
5137 struct hpsa_scsi_dev_t *dev;
5138 unsigned char scsi3addr[8];
5139 struct CommandList *c;
5142 /* Get the ptr to our adapter structure out of cmd->host. */
5143 h = sdev_to_hba(cmd->device);
5145 BUG_ON(cmd->request->tag < 0);
5147 dev = cmd->device->hostdata;
5148 if (!dev) {
5149 cmd->result = DID_NO_CONNECT << 16;
5150 cmd->scsi_done(cmd);
5151 return 0;
5152 }
5154 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5156 if (unlikely(lockup_detected(h))) {
5157 cmd->result = DID_NO_CONNECT << 16;
5158 cmd->scsi_done(cmd);
5159 return 0;
5160 }
5161 c = cmd_tagged_alloc(h, cmd);
5163 /*
5164 * Call alternate submit routine for I/O accelerated commands.
5165 * Retries always go down the normal I/O path.
5167 if (likely(cmd->retries == 0 &&
5168 cmd->request->cmd_type == REQ_TYPE_FS &&
5169 h->acciopath_status)) {
5170 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5171 if (rc == 0)
5172 return 0;
5173 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5174 hpsa_cmd_resolve_and_free(h, c);
5175 return SCSI_MLQUEUE_HOST_BUSY;
5176 }
5177 }
5178 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5179 }
5181 static void hpsa_scan_complete(struct ctlr_info *h)
5183 unsigned long flags;
5185 spin_lock_irqsave(&h->scan_lock, flags);
5186 h->scan_finished = 1;
5187 wake_up_all(&h->scan_wait_queue);
5188 spin_unlock_irqrestore(&h->scan_lock, flags);
5189 }
5191 static void hpsa_scan_start(struct Scsi_Host *sh)
5193 struct ctlr_info *h = shost_to_hba(sh);
5194 unsigned long flags;
5197 * Don't let rescans be initiated on a controller known to be locked
5198 * up. If the controller locks up *during* a rescan, that thread is
5199 * probably hosed, but at least we can prevent new rescan threads from
5200 * piling up on a locked up controller.
5202 if (unlikely(lockup_detected(h)))
5203 return hpsa_scan_complete(h);
5205 /* wait until any scan already in progress is finished. */
5206 while (1) {
5207 spin_lock_irqsave(&h->scan_lock, flags);
5208 if (h->scan_finished)
5209 break;
5210 spin_unlock_irqrestore(&h->scan_lock, flags);
5211 wait_event(h->scan_wait_queue, h->scan_finished);
5212 /* Note: We don't need to worry about a race between this
5213 * thread and driver unload because the midlayer will
5214 * have incremented the reference count, so unload won't
5215 * happen if we're in here.
5216 */
5217 }
5218 h->scan_finished = 0; /* mark scan as in progress */
5219 spin_unlock_irqrestore(&h->scan_lock, flags);
5221 if (unlikely(lockup_detected(h)))
5222 return hpsa_scan_complete(h);
5224 hpsa_update_scsi_devices(h);
5226 hpsa_scan_complete(h);
5227 }
5229 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5231 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5233 if (!logical_drive)
5234 return -ENODEV;
5236 if (qdepth < 1)
5237 qdepth = 1;
5238 else if (qdepth > logical_drive->queue_depth)
5239 qdepth = logical_drive->queue_depth;
5241 return scsi_change_queue_depth(sdev, qdepth);
5242 }
5244 static int hpsa_scan_finished(struct Scsi_Host *sh,
5245 unsigned long elapsed_time)
5247 struct ctlr_info *h = shost_to_hba(sh);
5248 unsigned long flags;
5249 int finished;
5251 spin_lock_irqsave(&h->scan_lock, flags);
5252 finished = h->scan_finished;
5253 spin_unlock_irqrestore(&h->scan_lock, flags);
5255 return finished;
5256 }
5257 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5259 struct Scsi_Host *sh;
5262 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5264 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5271 sh->max_channel = 3;
5272 sh->max_cmd_len = MAX_COMMAND_SIZE;
5273 sh->max_lun = HPSA_MAX_LUN;
5274 sh->max_id = HPSA_MAX_LUN;
5275 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5276 sh->cmd_per_lun = sh->can_queue;
5277 sh->sg_tablesize = h->maxsgentries;
5278 sh->transportt = hpsa_sas_transport_template;
5279 sh->hostdata[0] = (unsigned long) h;
5280 sh->irq = h->intr[h->intr_mode];
5281 sh->unique_id = sh->irq;
5282 error = scsi_init_shared_tag_map(sh, sh->can_queue);
5283 if (error) {
5284 dev_err(&h->pdev->dev,
5285 "%s: scsi_init_shared_tag_map failed for controller %d\n",
5286 __func__, h->ctlr);
5287 scsi_host_put(sh);
5288 return error;
5289 }
5291 h->scsi_host = sh;
5292 return 0;
5293 }
5294 static int hpsa_scsi_add_host(struct ctlr_info *h)
5298 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5300 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5303 scsi_scan_host(h->scsi_host);
5304 return 0;
5305 }
5307 /*
5308 * The block layer has already gone to the trouble of picking out a unique,
5309 * small-integer tag for this request. We use an offset from that value as
5310 * an index to select our command block. (The offset allows us to reserve the
5311 * low-numbered entries for our own uses.)
5312 */
5313 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5315 int idx = scmd->request->tag;
5317 if (idx < 0)
5318 return idx;
5320 /* Offset to leave space for internal cmds. */
5321 return idx += HPSA_NRESERVED_CMDS;
5322 }
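/*
 * Example (editor's illustration, not part of the original driver):
 * assuming HPSA_NRESERVED_CMDS == 16, block-layer tag 0 maps to pool
 * index 16, leaving indexes 0-15 for driver-internal commands
 * obtained through cmd_alloc().
 */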
5324 /*
5325 * Send a TEST_UNIT_READY command to the specified LUN using the specified
5326 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5327 */
5328 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5329 struct CommandList *c, unsigned char lunaddr[],
5330 int reply_queue)
5331 {
5332 int rc;
5334 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5335 (void) fill_cmd(c, TEST_UNIT_READY, h,
5336 NULL, 0, 0, lunaddr, TYPE_CMD);
5337 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5338 if (rc)
5339 return rc;
5340 /* no unmap needed here because no data xfer. */
5342 /* Check if the unit is already ready. */
5343 if (c->err_info->CommandStatus == CMD_SUCCESS)
5344 return 0;
5346 /*
5347 * The first command sent after reset will receive "unit attention" to
5348 * indicate that the LUN has been reset...this is actually what we're
5349 * looking for (but, success is good too).
5350 */
5351 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5352 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5353 (c->err_info->SenseInfo[2] == NO_SENSE ||
5354 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5355 return 0;
5357 return 1;
5358 }
5360 /*
5361 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5362 * returns zero when the unit is ready, and non-zero when giving up.
5363 */
5364 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5365 struct CommandList *c,
5366 unsigned char lunaddr[], int reply_queue)
5370 int waittime = 1; /* seconds */
5372 /* Send test unit ready until device ready, or give up. */
5373 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5375 /*
5376 * Wait for a bit. Do this first, because if we send
5377 * the TUR right away, the reset will just abort it.
5378 */
5379 msleep(1000 * waittime);
5381 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5382 if (!rc)
5383 break;
5385 /* Increase wait time with each try, up to a point. */
5386 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5387 waittime *= 2;
5389 dev_warn(&h->pdev->dev,
5390 "waiting %d secs for device to become ready.\n",
5397 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5398 unsigned char lunaddr[],
5405 struct CommandList *c;
5407 c = cmd_alloc(h);
5409 /*
5410 * If no specific reply queue was requested, then send the TUR
5411 * repeatedly, requesting a reply on each reply queue; otherwise execute
5412 * the loop exactly once using only the specified queue.
5413 */
5414 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5415 first_queue = 0;
5416 last_queue = h->nreply_queues - 1;
5417 } else {
5418 first_queue = reply_queue;
5419 last_queue = reply_queue;
5420 }
5422 for (rq = first_queue; rq <= last_queue; rq++) {
5423 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5424 if (rc)
5425 break;
5426 }
5428 if (rc)
5429 dev_warn(&h->pdev->dev, "giving up on device.\n");
5431 dev_warn(&h->pdev->dev, "device is ready.\n");
5437 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5438 * complaining. Doing a host- or bus-reset can't do anything good here.
5439 */
5440 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5443 struct ctlr_info *h;
5444 struct hpsa_scsi_dev_t *dev;
5448 /* find the controller to which the command to be aborted was sent */
5449 h = sdev_to_hba(scsicmd->device);
5450 if (h == NULL) /* paranoia */
5451 return FAILED;
5453 if (lockup_detected(h))
5454 return FAILED;
5456 dev = scsicmd->device->hostdata;
5458 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5462 /* if controller locked up, we can guarantee command won't complete */
5463 if (lockup_detected(h)) {
5464 snprintf(msg, sizeof(msg),
5465 "cmd %d RESET FAILED, lockup detected",
5466 hpsa_get_cmd_index(scsicmd));
5467 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5468 return FAILED;
5469 }
5471 /* this reset request might be the result of a lockup; check */
5472 if (detect_controller_lockup(h)) {
5473 snprintf(msg, sizeof(msg),
5474 "cmd %d RESET FAILED, new lockup detected",
5475 hpsa_get_cmd_index(scsicmd));
5476 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5477 return FAILED;
5478 }
5480 /* Do not attempt on controller */
5481 if (is_hba_lunid(dev->scsi3addr))
5482 return SUCCESS;
5484 if (is_logical_dev_addr_mode(dev->scsi3addr))
5485 reset_type = HPSA_DEVICE_RESET_MSG;
5487 reset_type = HPSA_PHYS_TARGET_RESET;
5489 sprintf(msg, "resetting %s",
5490 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5491 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5493 h->reset_in_progress = 1;
5495 /* send a reset to the SCSI LUN which the command was sent to */
5496 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5497 DEFAULT_REPLY_QUEUE);
5498 sprintf(msg, "reset %s %s",
5499 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5500 rc == 0 ? "completed successfully" : "failed");
5501 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5502 h->reset_in_progress = 0;
5503 return rc == 0 ? SUCCESS : FAILED;
5504 }
5506 static void swizzle_abort_tag(u8 *tag)
5507 {
5508 u8 original_tag[8];
5510 memcpy(original_tag, tag, 8);
5511 tag[0] = original_tag[3];
5512 tag[1] = original_tag[2];
5513 tag[2] = original_tag[1];
5514 tag[3] = original_tag[0];
5515 tag[4] = original_tag[7];
5516 tag[5] = original_tag[6];
5517 tag[6] = original_tag[5];
5518 tag[7] = original_tag[4];
5519 }
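/*
 * Worked example (editor's illustration, not part of the original
 * driver): swizzling the tag bytes 01 02 03 04 05 06 07 08 produces
 * 04 03 02 01 08 07 06 05, i.e. each 4-byte half is byte-reversed
 * for controllers that expect the abort tag in the opposite byte
 * order.
 */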
5521 static void hpsa_get_tag(struct ctlr_info *h,
5522 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5523 {
5524 u64 tag;
5525 if (c->cmd_type == CMD_IOACCEL1) {
5526 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5527 &h->ioaccel_cmd_pool[c->cmdindex];
5528 tag = le64_to_cpu(cm1->tag);
5529 *tagupper = cpu_to_le32(tag >> 32);
5530 *taglower = cpu_to_le32(tag);
5531 return;
5532 }
5533 if (c->cmd_type == CMD_IOACCEL2) {
5534 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5535 &h->ioaccel2_cmd_pool[c->cmdindex];
5536 /* upper tag not used in ioaccel2 mode */
5537 memset(tagupper, 0, sizeof(*tagupper));
5538 *taglower = cm2->Tag;
5539 return;
5540 }
5541 tag = le64_to_cpu(c->Header.tag);
5542 *tagupper = cpu_to_le32(tag >> 32);
5543 *taglower = cpu_to_le32(tag);
5544 }
5546 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5547 struct CommandList *abort, int reply_queue)
5550 struct CommandList *c;
5551 struct ErrorInfo *ei;
5552 __le32 tagupper, taglower;
5554 c = cmd_alloc(h);
5556 /* fill_cmd can't fail here, no buffer to map */
5557 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5558 0, 0, scsi3addr, TYPE_MSG);
5559 if (h->needs_abort_tags_swizzled)
5560 swizzle_abort_tag(&c->Request.CDB[4]);
5561 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5562 hpsa_get_tag(h, abort, &taglower, &tagupper);
5563 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5564 __func__, tagupper, taglower);
5565 /* no unmap needed here because no data xfer. */
5567 ei = c->err_info;
5568 switch (ei->CommandStatus) {
5569 case CMD_SUCCESS:
5570 break;
5571 case CMD_TMF_STATUS:
5572 rc = hpsa_evaluate_tmf_status(h, c);
5573 break;
5574 case CMD_UNABORTABLE: /* Very common, don't make noise. */
5575 rc = -1;
5576 break;
5577 default:
5578 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5579 __func__, tagupper, taglower);
5580 hpsa_scsi_interpret_error(h, c);
5581 rc = -1;
5582 break;
5583 }
5584 cmd_free(h, c);
5585 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5586 __func__, tagupper, taglower);
5587 return rc;
5588 }
5590 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5591 struct CommandList *command_to_abort, int reply_queue)
5593 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5594 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5595 struct io_accel2_cmd *c2a =
5596 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5597 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5598 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5600 /*
5601 * We're overlaying struct hpsa_tmf_struct on top of something which
5602 * was allocated as a struct io_accel2_cmd, so we better be sure it
5603 * actually fits, and doesn't overrun the error info space.
5604 */
5605 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5606 sizeof(struct io_accel2_cmd));
5607 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5608 offsetof(struct hpsa_tmf_struct, error_len) +
5609 sizeof(ac->error_len));
5611 c->cmd_type = IOACCEL2_TMF;
5612 c->scsi_cmd = SCSI_CMD_BUSY;
5614 /* Adjust the DMA address to point to the accelerated command buffer */
5615 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5616 (c->cmdindex * sizeof(struct io_accel2_cmd));
5617 BUG_ON(c->busaddr & 0x0000007F);
5619 memset(ac, 0, sizeof(*c2)); /* yes this is correct */
5620 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5621 ac->reply_queue = reply_queue;
5622 ac->tmf = IOACCEL2_TMF_ABORT;
5623 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5624 memset(ac->lun_id, 0, sizeof(ac->lun_id));
5625 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5626 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5627 ac->error_ptr = cpu_to_le64(c->busaddr +
5628 offsetof(struct io_accel2_cmd, error_data));
5629 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5630 }
5632 /* ioaccel2 path firmware cannot handle abort task requests.
5633 * Change abort requests to physical target reset, and send to the
5634 * address of the physical disk used for the ioaccel 2 command.
5635 * Return 0 on success (IO_OK),
5636 * -1 on failure.
5637 */
5639 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5640 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5643 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5644 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5645 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5646 unsigned char *psa = &phys_scsi3addr[0];
5648 /* Get a pointer to the hpsa logical device. */
5649 scmd = abort->scsi_cmd;
5650 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5651 if (dev == NULL) {
5652 dev_warn(&h->pdev->dev,
5653 "Cannot abort: no device pointer for command.\n");
5654 return -1; /* not abortable */
5655 }
5657 if (h->raid_offload_debug > 0)
5658 dev_info(&h->pdev->dev,
5659 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5660 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
5661 "Reset as abort",
5662 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5663 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5665 if (!dev->offload_enabled) {
5666 dev_warn(&h->pdev->dev,
5667 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5668 return -1; /* not abortable */
5669 }
5671 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5672 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5673 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5674 return -1; /* not abortable */
5675 }
5677 /* send the reset */
5678 if (h->raid_offload_debug > 0)
5679 dev_info(&h->pdev->dev,
5680 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5681 psa[0], psa[1], psa[2], psa[3],
5682 psa[4], psa[5], psa[6], psa[7]);
5683 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
5684 if (rc != 0) {
5685 dev_warn(&h->pdev->dev,
5686 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5687 psa[0], psa[1], psa[2], psa[3],
5688 psa[4], psa[5], psa[6], psa[7]);
5689 return rc; /* failed to reset */
5690 }
5692 /* wait for device to recover */
5693 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
5694 dev_warn(&h->pdev->dev,
5695 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5696 psa[0], psa[1], psa[2], psa[3],
5697 psa[4], psa[5], psa[6], psa[7]);
5698 return -1; /* failed to recover */
5699 }
5701 /* device recovered */
5702 dev_info(&h->pdev->dev,
5703 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5704 psa[0], psa[1], psa[2], psa[3],
5705 psa[4], psa[5], psa[6], psa[7]);
5707 return rc; /* success */
5708 }
5710 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5711 struct CommandList *abort, int reply_queue)
5714 struct CommandList *c;
5715 __le32 taglower, tagupper;
5716 struct hpsa_scsi_dev_t *dev;
5717 struct io_accel2_cmd *c2;
5719 dev = abort->scsi_cmd->device->hostdata;
5720 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5721 return -1; /* not abortable */
5723 c = cmd_alloc(h);
5724 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5725 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5726 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5727 hpsa_get_tag(h, abort, &taglower, &tagupper);
5728 dev_dbg(&h->pdev->dev,
5729 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5730 __func__, tagupper, taglower);
5731 /* no unmap needed here because no data xfer. */
5733 dev_dbg(&h->pdev->dev,
5734 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5735 __func__, tagupper, taglower, c2->error_data.serv_response);
5736 switch (c2->error_data.serv_response) {
5737 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5738 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5739 rc = 0;
5740 break;
5741 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5742 case IOACCEL2_SERV_RESPONSE_FAILURE:
5743 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5744 rc = -1;
5745 break;
5746 default:
5747 dev_warn(&h->pdev->dev,
5748 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5749 __func__, tagupper, taglower,
5750 c2->error_data.serv_response);
5751 rc = -1;
5752 break;
5753 }
5754 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5755 tagupper, taglower);
5756 return rc;
5757 }
5759 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
5760 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5761 {
5762 /*
5763 * I/O accelerator mode 2 commands should be aborted via the
5764 * accelerated path, since RAID path is unaware of these commands,
5765 * but not all underlying firmware can handle abort TMF.
5766 * Change abort to physical device reset when abort TMF is unsupported.
5767 */
5768 if (abort->cmd_type == CMD_IOACCEL2) {
5769 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5770 return hpsa_send_abort_ioaccel2(h, abort,
5771 reply_queue);
5772 else
5773 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
5774 abort, reply_queue);
5775 }
5776 return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
5777 }
5779 /* Find out which reply queue a command was meant to return on */
5780 static int hpsa_extract_reply_queue(struct ctlr_info *h,
5781 struct CommandList *c)
5782 {
5783 if (c->cmd_type == CMD_IOACCEL2)
5784 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5785 return c->Header.ReplyQueue;
5786 }
5788 /*
5789 * Limit concurrency of abort commands to prevent
5790 * over-subscription of commands.
5791 */
5792 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5793 {
5794 #define ABORT_CMD_WAIT_MSECS 5000
5795 return !wait_event_timeout(h->abort_cmd_wait_queue,
5796 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5797 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5798 }
5800 /* Send an abort for the specified command.
5801 * If the device and controller support it,
5802 * send a task abort request.
5803 */
5804 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5808 struct ctlr_info *h;
5809 struct hpsa_scsi_dev_t *dev;
5810 struct CommandList *abort; /* pointer to command to be aborted */
5811 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
5812 char msg[256]; /* For debug messaging. */
5814 __le32 tagupper, taglower;
5815 int refcount, reply_queue;
5820 if (sc->device == NULL)
5821 return FAILED;
5823 /* Find the controller of the command to be aborted */
5824 h = sdev_to_hba(sc->device);
5825 if (h == NULL)
5826 return FAILED;
5828 /* Find the device of the command to be aborted */
5829 dev = sc->device->hostdata;
5831 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
5836 /* If controller locked up, we can guarantee command won't complete */
5837 if (lockup_detected(h)) {
5838 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5839 "ABORT FAILED, lockup detected");
5843 /* This is a good time to check if controller lockup has occurred */
5844 if (detect_controller_lockup(h)) {
5845 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5846 "ABORT FAILED, new lockup detected");
5850 /* Check that controller supports some kind of task abort */
5851 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5852 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5853 return FAILED;
5855 memset(msg, 0, sizeof(msg));
5856 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
5857 h->scsi_host->host_no, sc->device->channel,
5858 sc->device->id, sc->device->lun,
5859 "Aborting command", sc);
5861 /* Get SCSI command to be aborted */
5862 abort = (struct CommandList *) sc->host_scribble;
5863 if (abort == NULL) {
5864 /* This can happen if the command already completed. */
5865 return SUCCESS;
5866 }
5867 refcount = atomic_inc_return(&abort->refcount);
5868 if (refcount == 1) { /* Command is done already. */
5869 cmd_free(h, abort);
5870 return SUCCESS;
5871 }
5873 /* Don't bother trying the abort if we know it won't work. */
5874 if (abort->cmd_type != CMD_IOACCEL2 &&
5875 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5876 cmd_free(h, abort);
5877 return FAILED;
5878 }
5880 /*
5881 * Check that we're aborting the right command.
5882 * It's possible the CommandList already completed and got re-used.
5883 */
5884 if (abort->scsi_cmd != sc) {
5885 cmd_free(h, abort);
5886 return SUCCESS;
5887 }
5889 abort->abort_pending = true;
5890 hpsa_get_tag(h, abort, &taglower, &tagupper);
5891 reply_queue = hpsa_extract_reply_queue(h, abort);
5892 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
5893 as = abort->scsi_cmd;
5894 if (as != NULL)
5895 ml += sprintf(msg+ml,
5896 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5897 as->cmd_len, as->cmnd[0], as->cmnd[1],
5899 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
5900 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
5902 /*
5903 * Command is in flight, or possibly already completed
5904 * by the firmware (but not to the scsi mid layer) but we can't
5905 * distinguish which. Send the abort down.
5906 */
5907 if (wait_for_available_abort_cmd(h)) {
5908 dev_warn(&h->pdev->dev,
5909 "%s FAILED, timeout waiting for an abort command to become available.\n",
5914 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
5915 atomic_inc(&h->abort_cmds_available);
5916 wake_up_all(&h->abort_cmd_wait_queue);
5918 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
5919 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5920 "FAILED to abort command");
5924 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
5925 wait_event(h->event_sync_wait_queue,
5926 abort->scsi_cmd != sc || lockup_detected(h));
5927 cmd_free(h, abort);
5928 return !lockup_detected(h) ? SUCCESS : FAILED;
5929 }
5931 /*
5932 * For operations with an associated SCSI command, a command block is allocated
5933 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
5934 * block request tag as an index into a table of entries. cmd_tagged_free() is
5935 * the complement, although cmd_free() may be called instead.
5936 */
5937 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5938 struct scsi_cmnd *scmd)
5939 {
5940 int idx = hpsa_get_cmd_index(scmd);
5941 struct CommandList *c = h->cmd_pool + idx;
5943 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5944 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5945 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5946 /* The index value comes from the block layer, so if it's out of
5947 * bounds, it's probably not our bug.
5948 */
5949 BUG();
5950 }
5952 atomic_inc(&c->refcount);
5953 if (unlikely(!hpsa_is_cmd_idle(c))) {
5954 /*
5955 * We expect that the SCSI layer will hand us a unique tag
5956 * value. Thus, there should never be a collision here between
5957 * two requests...because if the selected command isn't idle
5958 * then someone is going to be very disappointed.
5959 */
5960 dev_err(&h->pdev->dev,
5961 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5962 idx);
5963 if (c->scsi_cmd != NULL)
5964 scsi_print_command(c->scsi_cmd);
5965 scsi_print_command(scmd);
5966 BUG();
5967 }
5968 hpsa_cmd_partial_init(h, idx, c);
5969 return c;
5970 }
5972 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5973 {
5974 /*
5975 * Release our reference to the block. We don't need to do anything
5976 * else to free it, because it is accessed by index. (There's no point
5977 * in checking the result of the decrement, since we cannot guarantee
5978 * that there isn't a concurrent abort which is also accessing it.)
5979 */
5980 (void)atomic_dec(&c->refcount);
5981 }
5983 /*
5984 * For operations that cannot sleep, a command block is allocated at init,
5985 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5986 * which ones are free or in use. Lock must be held when calling this.
5987 * cmd_free() is the complement.
5988 * This function never gives up and returns NULL. If it hangs,
5989 * another thread must call cmd_free() to free some tags.
5990 */
5992 static struct CommandList *cmd_alloc(struct ctlr_info *h)
5994 struct CommandList *c;
5998 /*
5999 * There is some *extremely* small but non-zero chance that
6000 * multiple threads could get in here, and one thread could
6001 * be scanning through the list of bits looking for a free
6002 * one, but the free ones are always behind him, and other
6003 * threads sneak in behind him and eat them before he can
6004 * get to them, so that while there is always a free one, a
6005 * very unlucky thread might be starved anyway, never able to
6006 * beat the other threads. In reality, this happens so
6007 * infrequently as to be indistinguishable from never.
6009 * Note that we start allocating commands before the SCSI host structure
6010 * is initialized. Since the search starts at bit zero, this
6011 * all works, since we have at least one command structure available;
6012 * however, it means that the structures with the low indexes have to be
6013 * reserved for driver-initiated requests, while requests from the block
6014 * layer will use the higher indexes.
6015 */
6017 for (;;) {
6018 i = find_next_zero_bit(h->cmd_pool_bits,
6019 HPSA_NRESERVED_CMDS,
6020 offset);
6021 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6022 offset = 0;
6023 continue;
6024 }
6025 c = h->cmd_pool + i;
6026 refcount = atomic_inc_return(&c->refcount);
6027 if (unlikely(refcount > 1)) {
6028 cmd_free(h, c); /* already in use */
6029 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6030 continue;
6031 }
6032 set_bit(i & (BITS_PER_LONG - 1),
6033 h->cmd_pool_bits + (i / BITS_PER_LONG));
6034 break; /* it's ours now. */
6036 hpsa_cmd_partial_init(h, i, c);
6037 return c;
6038 }
6040 /*
6041 * This is the complementary operation to cmd_alloc(). Note, however, in some
6042 * corner cases it may also be used to free blocks allocated by
6043 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6044 * the clear-bit is harmless.
6045 */
6046 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6048 if (atomic_dec_and_test(&c->refcount)) {
6051 i = c - h->cmd_pool;
6052 clear_bit(i & (BITS_PER_LONG - 1),
6053 h->cmd_pool_bits + (i / BITS_PER_LONG));
6054 }
6055 }
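/*
 * Example of the bitmap arithmetic above (editor's illustration, not
 * part of the original driver): on a 64-bit build, command index
 * i == 70 clears bit 70 & 63 == 6 in the word at cmd_pool_bits +
 * 70 / 64, i.e. bit 6 of the second bitmap word.
 */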
6057 #ifdef CONFIG_COMPAT
6059 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
6060 void __user *arg)
6061 {
6062 IOCTL32_Command_struct __user *arg32 =
6063 (IOCTL32_Command_struct __user *) arg;
6064 IOCTL_Command_struct arg64;
6065 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6069 memset(&arg64, 0, sizeof(arg64));
6071 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6072 sizeof(arg64.LUN_info));
6073 err |= copy_from_user(&arg64.Request, &arg32->Request,
6074 sizeof(arg64.Request));
6075 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6076 sizeof(arg64.error_info));
6077 err |= get_user(arg64.buf_size, &arg32->buf_size);
6078 err |= get_user(cp, &arg32->buf);
6079 arg64.buf = compat_ptr(cp);
6080 err |= copy_to_user(p, &arg64, sizeof(arg64));
6082 if (err)
6083 return -EFAULT;
6085 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6086 if (err)
6087 return err;
6088 err |= copy_in_user(&arg32->error_info, &p->error_info,
6089 sizeof(arg32->error_info));
6090 if (err)
6091 return -EFAULT;
6092 return err;
6093 }
6095 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6096 int cmd, void __user *arg)
6098 BIG_IOCTL32_Command_struct __user *arg32 =
6099 (BIG_IOCTL32_Command_struct __user *) arg;
6100 BIG_IOCTL_Command_struct arg64;
6101 BIG_IOCTL_Command_struct __user *p =
6102 compat_alloc_user_space(sizeof(arg64));
6106 memset(&arg64, 0, sizeof(arg64));
6108 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6109 sizeof(arg64.LUN_info));
6110 err |= copy_from_user(&arg64.Request, &arg32->Request,
6111 sizeof(arg64.Request));
6112 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6113 sizeof(arg64.error_info));
6114 err |= get_user(arg64.buf_size, &arg32->buf_size);
6115 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6116 err |= get_user(cp, &arg32->buf);
6117 arg64.buf = compat_ptr(cp);
6118 err |= copy_to_user(p, &arg64, sizeof(arg64));
6120 if (err)
6121 return -EFAULT;
6123 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6124 if (err)
6125 return err;
6126 err |= copy_in_user(&arg32->error_info, &p->error_info,
6127 sizeof(arg32->error_info));
6128 if (err)
6129 return -EFAULT;
6130 return err;
6131 }
6133 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6134 {
6135 switch (cmd) {
6136 case CCISS_GETPCIINFO:
6137 case CCISS_GETINTINFO:
6138 case CCISS_SETINTINFO:
6139 case CCISS_GETNODENAME:
6140 case CCISS_SETNODENAME:
6141 case CCISS_GETHEARTBEAT:
6142 case CCISS_GETBUSTYPES:
6143 case CCISS_GETFIRMVER:
6144 case CCISS_GETDRIVVER:
6145 case CCISS_REVALIDVOLS:
6146 case CCISS_DEREGDISK:
6147 case CCISS_REGNEWDISK:
6148 case CCISS_REGNEWD:
6149 case CCISS_RESCANDISK:
6150 case CCISS_GETLUNINFO:
6151 return hpsa_ioctl(dev, cmd, arg);
6153 case CCISS_PASSTHRU32:
6154 return hpsa_ioctl32_passthru(dev, cmd, arg);
6155 case CCISS_BIG_PASSTHRU32:
6156 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6158 default:
6159 return -ENOIOCTLCMD;
6160 }
6161 }
6162 #endif
6164 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6166 struct hpsa_pci_info pciinfo;
6170 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6171 pciinfo.bus = h->pdev->bus->number;
6172 pciinfo.dev_fn = h->pdev->devfn;
6173 pciinfo.board_id = h->board_id;
6174 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
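/*
 * Report the driver version to userspace packed as a 24-bit value:
 * (major << 16) | (minor << 8) | subminor, parsed out of
 * HPSA_DRIVER_VERSION; e.g. "3.4.10" encodes as 0x03040a.
 */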
6179 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6181 DriverVer_type DriverVer;
6182 unsigned char vmaj, vmin, vsubmin;
6185 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6186 &vmaj, &vmin, &vsubmin);
6188 dev_info(&h->pdev->dev, "driver version string '%s' "
6189 "unrecognized.", HPSA_DRIVER_VERSION);
6194 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6197 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
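/*
 * Single-buffer passthru: requires CAP_SYS_RAWIO, copies the request
 * header in, copies user data in for writes, maps at most one
 * scatter/gather element, runs the command synchronously, and copies
 * the error info (and, for reads, the data) back out.
 */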
6202 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6204 IOCTL_Command_struct iocommand;
6205 struct CommandList *c;
6212 if (!capable(CAP_SYS_RAWIO))
6214 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6216 if ((iocommand.buf_size < 1) &&
6217 (iocommand.Request.Type.Direction != XFER_NONE)) {
6220 if (iocommand.buf_size > 0) {
6221 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6224 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6225 /* Copy the data into the buffer we created */
6226 if (copy_from_user(buff, iocommand.buf,
6227 iocommand.buf_size)) {
6232 memset(buff, 0, iocommand.buf_size);
6237 /* Fill in the command type */
6238 c->cmd_type = CMD_IOCTL_PEND;
6239 c->scsi_cmd = SCSI_CMD_BUSY;
6240 /* Fill in Command Header */
6241 c->Header.ReplyQueue = 0; /* unused in simple mode */
6242 if (iocommand.buf_size > 0) { /* buffer to fill */
6243 c->Header.SGList = 1;
6244 c->Header.SGTotal = cpu_to_le16(1);
6245 } else { /* no buffers to fill */
6246 c->Header.SGList = 0;
6247 c->Header.SGTotal = cpu_to_le16(0);
6249 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6251 /* Fill in Request block */
6252 memcpy(&c->Request, &iocommand.Request,
6253 sizeof(c->Request));
6255 /* Fill in the scatter gather information */
6256 if (iocommand.buf_size > 0) {
6257 temp64 = pci_map_single(h->pdev, buff,
6258 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6259 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6260 c->SG[0].Addr = cpu_to_le64(0);
6261 c->SG[0].Len = cpu_to_le32(0);
6265 c->SG[0].Addr = cpu_to_le64(temp64);
6266 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6267 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6269 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6270 if (iocommand.buf_size > 0)
6271 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6272 check_ioctl_unit_attention(h, c);
6278 /* Copy the error information out */
6279 memcpy(&iocommand.error_info, c->err_info,
6280 sizeof(iocommand.error_info));
6281 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6285 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6286 iocommand.buf_size > 0) {
6287 /* Copy the data out of the buffer we created */
6288 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
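/*
 * "Big" passthru: the user buffer may exceed a single kmalloc
 * allocation, so it is split into chunks of at most ioc->malloc_size
 * bytes, one chunk per scatter/gather entry, capped at
 * SG_ENTRIES_IN_CMD entries.
 */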
6300 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6302 BIG_IOCTL_Command_struct *ioc;
6303 struct CommandList *c;
6304 unsigned char **buff = NULL;
6305 int *buff_size = NULL;
6311 BYTE __user *data_ptr;
6315 if (!capable(CAP_SYS_RAWIO))
6317 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6323 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6327 if ((ioc->buf_size < 1) &&
6328 (ioc->Request.Type.Direction != XFER_NONE)) {
6332 /* Check kmalloc limits using all SGs */
6333 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6337 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6341 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
6346 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
6351 left = ioc->buf_size;
6352 data_ptr = ioc->buf;
6354 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6355 buff_size[sg_used] = sz;
6356 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6357 if (buff[sg_used] == NULL) {
6361 if (ioc->Request.Type.Direction & XFER_WRITE) {
6362 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6367 memset(buff[sg_used], 0, sz);
6374 c->cmd_type = CMD_IOCTL_PEND;
6375 c->scsi_cmd = SCSI_CMD_BUSY;
6376 c->Header.ReplyQueue = 0;
6377 c->Header.SGList = (u8) sg_used;
6378 c->Header.SGTotal = cpu_to_le16(sg_used);
6379 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6380 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6381 if (ioc->buf_size > 0) {
6383 for (i = 0; i < sg_used; i++) {
6384 temp64 = pci_map_single(h->pdev, buff[i],
6385 buff_size[i], PCI_DMA_BIDIRECTIONAL);
6386 if (dma_mapping_error(&h->pdev->dev,
6387 (dma_addr_t) temp64)) {
6388 c->SG[i].Addr = cpu_to_le64(0);
6389 c->SG[i].Len = cpu_to_le32(0);
6390 hpsa_pci_unmap(h->pdev, c, i,
6391 PCI_DMA_BIDIRECTIONAL);
6395 c->SG[i].Addr = cpu_to_le64(temp64);
6396 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6397 c->SG[i].Ext = cpu_to_le32(0);
6399 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6401 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6403 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6404 check_ioctl_unit_attention(h, c);
6410 /* Copy the error information out */
6411 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6412 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6416 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6419 /* Copy the data out of the buffer we created */
6420 BYTE __user *ptr = ioc->buf;
6421 for (i = 0; i < sg_used; i++) {
6422 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6426 ptr += buff_size[i];
6436 for (i = 0; i < sg_used; i++)
6445 static void check_ioctl_unit_attention(struct ctlr_info *h,
6446 struct CommandList *c)
6448 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6449 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6450 (void) check_for_unit_attention(h, c);
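/*
 * Top-level ioctl dispatcher.  Passthru commands are throttled through
 * the h->passthru_cmds_avail counter so userspace cannot exhaust the
 * command pool; the remaining CCISS ioctls are handled directly.
 */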
6456 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6458 struct ctlr_info *h;
6459 void __user *argp = (void __user *)arg;
6462 h = sdev_to_hba(dev);
6465 case CCISS_DEREGDISK:
6466 case CCISS_REGNEWDISK:
6468 hpsa_scan_start(h->scsi_host);
6470 case CCISS_GETPCIINFO:
6471 return hpsa_getpciinfo_ioctl(h, argp);
6472 case CCISS_GETDRIVVER:
6473 return hpsa_getdrivver_ioctl(h, argp);
6474 case CCISS_PASSTHRU:
6475 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6477 rc = hpsa_passthru_ioctl(h, argp);
6478 atomic_inc(&h->passthru_cmds_avail);
6480 case CCISS_BIG_PASSTHRU:
6481 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6483 rc = hpsa_big_passthru_ioctl(h, argp);
6484 atomic_inc(&h->passthru_cmds_avail);
6491 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6494 struct CommandList *c;
6498 /* fill_cmd can't fail here, no data buffer to map */
6499 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6500 RAID_CTLR_LUNID, TYPE_MSG);
6501 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6503 enqueue_cmd_and_start_io(h, c);
6504 /* Don't wait for completion, the reset won't complete. Don't free
6505 * the command either. This is the last command we will send before
6506 * re-initializing everything, so it doesn't matter and won't leak.
6507 */
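/*
 * Build the header and CDB for an internally generated command.  For
 * TYPE_CMD the opcode selects an INQUIRY/report-LUNs/BMIC-style CDB;
 * for TYPE_MSG it builds reset or task-management messages.  On success
 * the data buffer, if any, is DMA-mapped with the direction taken from
 * the encoded type_attr_dir field.
 */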
6511 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6512 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6515 int pci_dir = XFER_NONE;
6516 u64 tag; /* for commands to be aborted */
6518 c->cmd_type = CMD_IOCTL_PEND;
6519 c->scsi_cmd = SCSI_CMD_BUSY;
6520 c->Header.ReplyQueue = 0;
6521 if (buff != NULL && size > 0) {
6522 c->Header.SGList = 1;
6523 c->Header.SGTotal = cpu_to_le16(1);
6525 c->Header.SGList = 0;
6526 c->Header.SGTotal = cpu_to_le16(0);
6528 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6530 if (cmd_type == TYPE_CMD) {
6533 /* are we trying to read a vital product page */
6534 if (page_code & VPD_PAGE) {
6535 c->Request.CDB[1] = 0x01;
6536 c->Request.CDB[2] = (page_code & 0xff);
6538 c->Request.CDBLen = 6;
6539 c->Request.type_attr_dir =
6540 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6541 c->Request.Timeout = 0;
6542 c->Request.CDB[0] = HPSA_INQUIRY;
6543 c->Request.CDB[4] = size & 0xFF;
6545 case HPSA_REPORT_LOG:
6546 case HPSA_REPORT_PHYS:
6547 /* Talking to controller so it's a physical command
6548 mode = 00 target = 0. Nothing to write.
6549 */
6550 c->Request.CDBLen = 12;
6551 c->Request.type_attr_dir =
6552 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6553 c->Request.Timeout = 0;
6554 c->Request.CDB[0] = cmd;
6555 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6556 c->Request.CDB[7] = (size >> 16) & 0xFF;
6557 c->Request.CDB[8] = (size >> 8) & 0xFF;
6558 c->Request.CDB[9] = size & 0xFF;
6560 case BMIC_SENSE_DIAG_OPTIONS:
6561 c->Request.CDBLen = 16;
6562 c->Request.type_attr_dir =
6563 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6564 c->Request.Timeout = 0;
6565 /* Spec says this should be BMIC_WRITE */
6566 c->Request.CDB[0] = BMIC_READ;
6567 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6569 case BMIC_SET_DIAG_OPTIONS:
6570 c->Request.CDBLen = 16;
6571 c->Request.type_attr_dir =
6572 TYPE_ATTR_DIR(cmd_type,
6573 ATTR_SIMPLE, XFER_WRITE);
6574 c->Request.Timeout = 0;
6575 c->Request.CDB[0] = BMIC_WRITE;
6576 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6578 case HPSA_CACHE_FLUSH:
6579 c->Request.CDBLen = 12;
6580 c->Request.type_attr_dir =
6581 TYPE_ATTR_DIR(cmd_type,
6582 ATTR_SIMPLE, XFER_WRITE);
6583 c->Request.Timeout = 0;
6584 c->Request.CDB[0] = BMIC_WRITE;
6585 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6586 c->Request.CDB[7] = (size >> 8) & 0xFF;
6587 c->Request.CDB[8] = size & 0xFF;
6589 case TEST_UNIT_READY:
6590 c->Request.CDBLen = 6;
6591 c->Request.type_attr_dir =
6592 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6593 c->Request.Timeout = 0;
6595 case HPSA_GET_RAID_MAP:
6596 c->Request.CDBLen = 12;
6597 c->Request.type_attr_dir =
6598 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6599 c->Request.Timeout = 0;
6600 c->Request.CDB[0] = HPSA_CISS_READ;
6601 c->Request.CDB[1] = cmd;
6602 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6603 c->Request.CDB[7] = (size >> 16) & 0xFF;
6604 c->Request.CDB[8] = (size >> 8) & 0xFF;
6605 c->Request.CDB[9] = size & 0xFF;
6607 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6608 c->Request.CDBLen = 10;
6609 c->Request.type_attr_dir =
6610 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6611 c->Request.Timeout = 0;
6612 c->Request.CDB[0] = BMIC_READ;
6613 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6614 c->Request.CDB[7] = (size >> 16) & 0xFF;
6615 c->Request.CDB[8] = (size >> 8) & 0xFF;
6617 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6618 c->Request.CDBLen = 10;
6619 c->Request.type_attr_dir =
6620 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6621 c->Request.Timeout = 0;
6622 c->Request.CDB[0] = BMIC_READ;
6623 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6624 c->Request.CDB[7] = (size >> 16) & 0xFF;
6625 c->Request.CDB[8] = (size >> 8) & 0xFF;
6627 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6628 c->Request.CDBLen = 10;
6629 c->Request.type_attr_dir =
6630 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6631 c->Request.Timeout = 0;
6632 c->Request.CDB[0] = BMIC_READ;
6633 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6634 c->Request.CDB[7] = (size >> 16) & 0xFF;
6635 c->Request.CDB[8] = (size >> 8) & 0xFF;
6637 case BMIC_IDENTIFY_CONTROLLER:
6638 c->Request.CDBLen = 10;
6639 c->Request.type_attr_dir =
6640 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6641 c->Request.Timeout = 0;
6642 c->Request.CDB[0] = BMIC_READ;
6643 c->Request.CDB[1] = 0;
6644 c->Request.CDB[2] = 0;
6645 c->Request.CDB[3] = 0;
6646 c->Request.CDB[4] = 0;
6647 c->Request.CDB[5] = 0;
6648 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6649 c->Request.CDB[7] = (size >> 16) & 0xFF;
6650 c->Request.CDB[8] = (size >> 8) & 0xFF;
6651 c->Request.CDB[9] = 0;
6654 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6658 } else if (cmd_type == TYPE_MSG) {
6661 case HPSA_PHYS_TARGET_RESET:
6662 c->Request.CDBLen = 16;
6663 c->Request.type_attr_dir =
6664 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6665 c->Request.Timeout = 0; /* Don't time out */
6666 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6667 c->Request.CDB[0] = HPSA_RESET;
6668 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6669 /* Physical target reset needs no control bytes 4-7*/
6670 c->Request.CDB[4] = 0x00;
6671 c->Request.CDB[5] = 0x00;
6672 c->Request.CDB[6] = 0x00;
6673 c->Request.CDB[7] = 0x00;
6675 case HPSA_DEVICE_RESET_MSG:
6676 c->Request.CDBLen = 16;
6677 c->Request.type_attr_dir =
6678 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6679 c->Request.Timeout = 0; /* Don't time out */
6680 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6681 c->Request.CDB[0] = cmd;
6682 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6683 /* If bytes 4-7 are zero, it means reset the */
6684 /* LunID device */
6685 c->Request.CDB[4] = 0x00;
6686 c->Request.CDB[5] = 0x00;
6687 c->Request.CDB[6] = 0x00;
6688 c->Request.CDB[7] = 0x00;
6690 case HPSA_ABORT_MSG:
6691 memcpy(&tag, buff, sizeof(tag));
6692 dev_dbg(&h->pdev->dev,
6693 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6694 tag, c->Header.tag);
6695 c->Request.CDBLen = 16;
6696 c->Request.type_attr_dir =
6697 TYPE_ATTR_DIR(cmd_type,
6698 ATTR_SIMPLE, XFER_WRITE);
6699 c->Request.Timeout = 0; /* Don't time out */
6700 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6701 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6702 c->Request.CDB[2] = 0x00; /* reserved */
6703 c->Request.CDB[3] = 0x00; /* reserved */
6704 /* Tag to abort goes in CDB[4]-CDB[11] */
6705 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
6706 c->Request.CDB[12] = 0x00; /* reserved */
6707 c->Request.CDB[13] = 0x00; /* reserved */
6708 c->Request.CDB[14] = 0x00; /* reserved */
6709 c->Request.CDB[15] = 0x00; /* reserved */
6712 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6717 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6721 switch (GET_DIR(c->Request.type_attr_dir)) {
6723 pci_dir = PCI_DMA_FROMDEVICE;
6726 pci_dir = PCI_DMA_TODEVICE;
6729 pci_dir = PCI_DMA_NONE;
6732 pci_dir = PCI_DMA_BIDIRECTIONAL;
6734 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6740 * Map (physical) PCI mem into (virtual) kernel space
6742 static void __iomem *remap_pci_mem(ulong base, ulong size)
6744 ulong page_base = ((ulong) base) & PAGE_MASK;
6745 ulong page_offs = ((ulong) base) - page_base;
6746 void __iomem *page_remapped = ioremap_nocache(page_base,
6749 return page_remapped ? (page_remapped + page_offs) : NULL;
6752 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6754 return h->access.command_completed(h, q);
6757 static inline bool interrupt_pending(struct ctlr_info *h)
6759 return h->access.intr_pending(h);
6762 static inline long interrupt_not_for_us(struct ctlr_info *h)
6764 return (h->access.intr_pending(h) == 0) ||
6765 (h->interrupts_enabled == 0);
6768 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6771 if (unlikely(tag_index >= h->nr_cmds)) {
6772 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6778 static inline void finish_cmd(struct CommandList *c)
6780 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6781 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6782 || c->cmd_type == CMD_IOACCEL2))
6783 complete_scsi_command(c);
6784 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6785 complete(c->waiting);
6788 /* process completion of an indexed ("direct lookup") command */
6789 static inline void process_indexed_cmd(struct ctlr_info *h,
6793 struct CommandList *c;
6795 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6796 if (!bad_tag(h, tag_index, raw_tag)) {
6797 c = h->cmd_pool + tag_index;
6802 /* Some controllers, like p400, will give us one interrupt
6803 * after a soft reset, even if we turned interrupts off.
6804 * Only need to check for this in the hpsa_xxx_discard_completions
6805 * functions.
6806 */
6807 static int ignore_bogus_interrupt(struct ctlr_info *h)
6809 if (likely(!reset_devices))
6812 if (likely(h->interrupts_enabled))
6815 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6816 "(known firmware bug). Ignoring.\n");
6822 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6823 * Relies on (q[x] == x) being true for x such that
6824 * 0 <= x < MAX_REPLY_QUEUES: q[x] stores its own index, so
 * (queue - *queue) recovers &h->q[0], from which container_of()
 * yields the enclosing ctlr_info.
 */
6826 static struct ctlr_info *queue_to_hba(u8 *queue)
6828 return container_of((queue - *queue), struct ctlr_info, q[0]);
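/*
 * Four interrupt handler flavors follow: the *_discard_completions
 * variants are installed around a reset and merely drain the reply
 * queue, while do_hpsa_intr_intx()/do_hpsa_intr_msi() process
 * completions normally.  Only the INTx variants must first check
 * whether the interrupt was actually meant for us.
 */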
6831 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6833 struct ctlr_info *h = queue_to_hba(queue);
6834 u8 q = *(u8 *) queue;
6837 if (ignore_bogus_interrupt(h))
6840 if (interrupt_not_for_us(h))
6842 h->last_intr_timestamp = get_jiffies_64();
6843 while (interrupt_pending(h)) {
6844 raw_tag = get_next_completion(h, q);
6845 while (raw_tag != FIFO_EMPTY)
6846 raw_tag = next_command(h, q);
6851 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6853 struct ctlr_info *h = queue_to_hba(queue);
6855 u8 q = *(u8 *) queue;
6857 if (ignore_bogus_interrupt(h))
6860 h->last_intr_timestamp = get_jiffies_64();
6861 raw_tag = get_next_completion(h, q);
6862 while (raw_tag != FIFO_EMPTY)
6863 raw_tag = next_command(h, q);
6867 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6869 struct ctlr_info *h = queue_to_hba((u8 *) queue);
6871 u8 q = *(u8 *) queue;
6873 if (interrupt_not_for_us(h))
6875 h->last_intr_timestamp = get_jiffies_64();
6876 while (interrupt_pending(h)) {
6877 raw_tag = get_next_completion(h, q);
6878 while (raw_tag != FIFO_EMPTY) {
6879 process_indexed_cmd(h, raw_tag);
6880 raw_tag = next_command(h, q);
6886 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6888 struct ctlr_info *h = queue_to_hba(queue);
6890 u8 q = *(u8 *) queue;
6892 h->last_intr_timestamp = get_jiffies_64();
6893 raw_tag = get_next_completion(h, q);
6894 while (raw_tag != FIFO_EMPTY) {
6895 process_indexed_cmd(h, raw_tag);
6896 raw_tag = next_command(h, q);
6901 /* Send a message CDB to the firmware. Careful, this only works
6902 * in simple mode, not performant mode due to the tag lookup.
6903 * We only ever use this immediately after a controller reset.
6905 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6909 struct CommandListHeader CommandHeader;
6910 struct RequestBlock Request;
6911 struct ErrDescriptor ErrorDescriptor;
6913 struct Command *cmd;
6914 static const size_t cmd_sz = sizeof(*cmd) +
6915 sizeof(cmd->ErrorDescriptor);
6919 void __iomem *vaddr;
6922 vaddr = pci_ioremap_bar(pdev, 0);
6926 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6927 * CCISS commands, so they must be allocated from the lower 4GiB of
6928 * memory.
6929 */
6930 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6936 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6942 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6943 * although there's no guarantee, we assume that the address is at
6944 * least 4-byte aligned (most likely, it's page-aligned).
6946 paddr32 = cpu_to_le32(paddr64);
6948 cmd->CommandHeader.ReplyQueue = 0;
6949 cmd->CommandHeader.SGList = 0;
6950 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
6951 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
6952 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6954 cmd->Request.CDBLen = 16;
6955 cmd->Request.type_attr_dir =
6956 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
6957 cmd->Request.Timeout = 0; /* Don't time out */
6958 cmd->Request.CDB[0] = opcode;
6959 cmd->Request.CDB[1] = type;
6960 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
6961 cmd->ErrorDescriptor.Addr =
6962 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
6963 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
6965 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
6967 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6968 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
6969 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
6971 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6976 /* we leak the DMA buffer here ... no choice since the controller could
6977 * still complete the command.
6979 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6980 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6985 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6987 if (tag & HPSA_ERROR_BIT) {
6988 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6993 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6998 #define hpsa_noop(p) hpsa_message(p, 3, 0)
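/*
 * Hard-reset the controller either through the doorbell register
 * (newer boards) or by bouncing the PCI power state through D3hot and
 * back to D0 (boards up to the P600), as selected by use_doorbell.
 */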
7000 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7001 void __iomem *vaddr, u32 use_doorbell)
7005 /* For everything after the P600, the PCI power state method
7006 * of resetting the controller doesn't work, so we have this
7007 * other way using the doorbell register.
7009 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7010 writel(use_doorbell, vaddr + SA5_DOORBELL);
7012 /* PMC hardware guys tell us we need a 10 second delay after
7013 * doorbell reset and before any attempt to talk to the board
7014 * at all to ensure that this actually works and doesn't fall
7015 * over in some weird corner cases.
7018 } else { /* Try to do it the PCI power state way */
7020 /* Quoting from the Open CISS Specification: "The Power
7021 * Management Control/Status Register (CSR) controls the power
7022 * state of the device. The normal operating state is D0,
7023 * CSR=00h. The software off state is D3, CSR=03h. To reset
7024 * the controller, place the interface device in D3 then to D0,
7025 * this causes a secondary PCI reset which will reset the
7026 * controller." */
7030 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7032 /* enter the D3hot power management state */
7033 rc = pci_set_power_state(pdev, PCI_D3hot);
7039 /* enter the D0 power management state */
7040 rc = pci_set_power_state(pdev, PCI_D0);
7045 * The P600 requires a small delay when changing states.
7046 * Otherwise we may think the board did not reset and we bail.
7047 * This is for kdump only and is particular to the P600.
7048 */
7054 static void init_driver_version(char *driver_version, int len)
7056 memset(driver_version, 0, len);
7057 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7060 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7062 char *driver_version;
7063 int i, size = sizeof(cfgtable->driver_version);
7065 driver_version = kmalloc(size, GFP_KERNEL);
7066 if (!driver_version)
7069 init_driver_version(driver_version, size);
7070 for (i = 0; i < size; i++)
7071 writeb(driver_version[i], &cfgtable->driver_version[i]);
7072 kfree(driver_version);
7076 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7077 unsigned char *driver_ver)
7081 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7082 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7085 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7088 char *driver_ver, *old_driver_ver;
7089 int rc, size = sizeof(cfgtable->driver_version);
7091 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
7092 if (!old_driver_ver)
7094 driver_ver = old_driver_ver + size;
7096 /* After a reset, the 32 bytes of "driver version" in the cfgtable
7097 * should have been changed, otherwise we know the reset failed.
7099 init_driver_version(old_driver_ver, size);
7100 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7101 rc = !memcmp(driver_ver, old_driver_ver, size);
7102 kfree(old_driver_ver);
7105 /* This does a hard reset of the controller using PCI power management
7106 * states or the doorbell register.
7107 */
7108 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7112 u64 cfg_base_addr_index;
7113 void __iomem *vaddr;
7114 unsigned long paddr;
7115 u32 misc_fw_support;
7117 struct CfgTable __iomem *cfgtable;
7119 u16 command_register;
7121 /* For controllers as old as the P600, this is very nearly
7122 * the same thing as:
7123 *
7124 * pci_save_state(pci_dev);
7125 * pci_set_power_state(pci_dev, PCI_D3hot);
7126 * pci_set_power_state(pci_dev, PCI_D0);
7127 * pci_restore_state(pci_dev);
7129 * For controllers newer than the P600, the pci power state
7130 * method of resetting doesn't work so we have another way
7131 * using the doorbell register.
7134 if (!ctlr_is_resettable(board_id)) {
7135 dev_warn(&pdev->dev, "Controller not resettable\n");
7139 /* if controller is soft- but not hard resettable... */
7140 if (!ctlr_is_hard_resettable(board_id))
7141 return -ENOTSUPP; /* try soft reset later. */
7143 /* Save the PCI command register */
7144 pci_read_config_word(pdev, 4, &command_register);
7145 pci_save_state(pdev);
7147 /* find the first memory BAR, so we can find the cfg table */
7148 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7151 vaddr = remap_pci_mem(paddr, 0x250);
7155 /* find cfgtable in order to check if reset via doorbell is supported */
7156 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7157 &cfg_base_addr_index, &cfg_offset);
7160 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7161 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7166 rc = write_driver_ver_to_cfgtable(cfgtable);
7168 goto unmap_cfgtable;
7170 /* If reset via doorbell register is supported, use that.
7171 * There are two such methods. Favor the newest method.
7173 misc_fw_support = readl(&cfgtable->misc_fw_support);
7174 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7176 use_doorbell = DOORBELL_CTLR_RESET2;
7178 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7180 dev_warn(&pdev->dev,
7181 "Soft reset not supported. Firmware update is required.\n");
7182 rc = -ENOTSUPP; /* try soft reset */
7183 goto unmap_cfgtable;
7187 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7189 goto unmap_cfgtable;
7191 pci_restore_state(pdev);
7192 pci_write_config_word(pdev, 4, command_register);
7194 /* Some devices (notably the HP Smart Array 5i Controller)
7195 need a little pause here */
7196 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7198 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7200 dev_warn(&pdev->dev,
7201 "Failed waiting for board to become ready after hard reset\n");
7202 goto unmap_cfgtable;
7205 rc = controller_reset_failed(vaddr);
7207 goto unmap_cfgtable;
7209 dev_warn(&pdev->dev, "Unable to successfully reset "
7210 "controller. Will try soft reset.\n");
7213 dev_info(&pdev->dev, "board ready after hard reset.\n");
7225 * We cannot read the structure directly; for portability we must use
7226 * the io functions.
7227 * This is for debug only.
7228 */
7229 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7235 dev_info(dev, "Controller Configuration information\n");
7236 dev_info(dev, "------------------------------------\n");
7237 for (i = 0; i < 4; i++)
7238 temp_name[i] = readb(&(tb->Signature[i]));
7239 temp_name[4] = '\0';
7240 dev_info(dev, " Signature = %s\n", temp_name);
7241 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7242 dev_info(dev, " Transport methods supported = 0x%x\n",
7243 readl(&(tb->TransportSupport)));
7244 dev_info(dev, " Transport methods active = 0x%x\n",
7245 readl(&(tb->TransportActive)));
7246 dev_info(dev, " Requested transport Method = 0x%x\n",
7247 readl(&(tb->HostWrite.TransportRequest)));
7248 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7249 readl(&(tb->HostWrite.CoalIntDelay)));
7250 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7251 readl(&(tb->HostWrite.CoalIntCount)));
7252 dev_info(dev, " Max outstanding commands = %d\n",
7253 readl(&(tb->CmdsOutMax)));
7254 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7255 for (i = 0; i < 16; i++)
7256 temp_name[i] = readb(&(tb->ServerName[i]));
7257 temp_name[16] = '\0';
7258 dev_info(dev, " Server Name = %s\n", temp_name);
7259 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7260 readl(&(tb->HeartBeat)));
7261 #endif /* HPSA_DEBUG */
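/*
 * Translate a legacy PCI_BASE_ADDRESS_x config-space offset into a
 * resource index by walking the BARs: each I/O or 32-bit memory BAR
 * occupies 4 bytes of config space, each 64-bit memory BAR 8 bytes.
 */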
7264 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7266 int i, offset, mem_type, bar_type;
7268 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7271 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7272 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7273 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7276 mem_type = pci_resource_flags(pdev, i) &
7277 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7279 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7280 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7281 offset += 4; /* 32 bit */
7283 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7286 default: /* reserved in PCI 2.2 */
7287 dev_warn(&pdev->dev,
7288 "base address is invalid\n");
7293 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7299 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7301 if (h->msix_vector) {
7302 if (h->pdev->msix_enabled)
7303 pci_disable_msix(h->pdev);
7305 } else if (h->msi_vector) {
7306 if (h->pdev->msi_enabled)
7307 pci_disable_msi(h->pdev);
7312 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7313 * controllers that are capable. If not, we use legacy INTx mode.
7315 static void hpsa_interrupt_mode(struct ctlr_info *h)
7317 #ifdef CONFIG_PCI_MSI
7319 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7321 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7322 hpsa_msix_entries[i].vector = 0;
7323 hpsa_msix_entries[i].entry = i;
7326 /* Some boards advertise MSI but don't really support it */
7327 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7328 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
7329 goto default_int_mode;
7330 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
7331 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
7332 h->msix_vector = MAX_REPLY_QUEUES;
7333 if (h->msix_vector > num_online_cpus())
7334 h->msix_vector = num_online_cpus();
7335 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7338 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7340 goto single_msi_mode;
7341 } else if (err < h->msix_vector) {
7342 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
7343 "available\n", err);
7345 h->msix_vector = err;
7346 for (i = 0; i < h->msix_vector; i++)
7347 h->intr[i] = hpsa_msix_entries[i].vector;
7351 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
7352 dev_info(&h->pdev->dev, "MSI capable controller\n");
7353 if (!pci_enable_msi(h->pdev))
7356 dev_warn(&h->pdev->dev, "MSI init failed\n");
7359 #endif /* CONFIG_PCI_MSI */
7360 /* if we get here we're going to use the default interrupt mode */
7361 h->intr[h->intr_mode] = h->pdev->irq;
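/*
 * The board id packs the PCI subsystem device id into the upper 16 bits
 * and the subsystem vendor id into the lower 16; e.g. subsystem
 * 0x103C:0x3241 yields board_id 0x3241103C.
 */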
7364 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
7367 u32 subsystem_vendor_id, subsystem_device_id;
7369 subsystem_vendor_id = pdev->subsystem_vendor;
7370 subsystem_device_id = pdev->subsystem_device;
7371 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7372 subsystem_vendor_id;
7374 for (i = 0; i < ARRAY_SIZE(products); i++)
7375 if (*board_id == products[i].board_id)
7378 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7379 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7381 dev_warn(&pdev->dev, "unrecognized board ID: "
7382 "0x%08x, ignoring.\n", *board_id);
7385 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7388 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7389 unsigned long *memory_bar)
7393 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7394 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7395 /* addressing mode bits already removed */
7396 *memory_bar = pci_resource_start(pdev, i);
7397 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7401 dev_warn(&pdev->dev, "no memory BAR found\n");
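/*
 * Poll the scratchpad register until the firmware reports the requested
 * state (ready or not ready), sleeping between reads and giving up
 * after a bounded number of iterations.
 */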
7405 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7411 iterations = HPSA_BOARD_READY_ITERATIONS;
7413 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7415 for (i = 0; i < iterations; i++) {
7416 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7417 if (wait_for_ready) {
7418 if (scratchpad == HPSA_FIRMWARE_READY)
7421 if (scratchpad != HPSA_FIRMWARE_READY)
7424 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7426 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7430 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7431 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7434 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7435 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7436 *cfg_base_addr &= (u32) 0x0000ffff;
7437 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7438 if (*cfg_base_addr_index == -1) {
7439 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7445 static void hpsa_free_cfgtables(struct ctlr_info *h)
7447 if (h->transtable) {
7448 iounmap(h->transtable);
7449 h->transtable = NULL;
7452 iounmap(h->cfgtable);
7457 /* Find and map CISS config table and transfer table;
7458 * several items must be unmapped (freed) later.
7459 */
7460 static int hpsa_find_cfgtables(struct ctlr_info *h)
7464 u64 cfg_base_addr_index;
7468 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7469 &cfg_base_addr_index, &cfg_offset);
7472 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7473 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7475 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7478 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7481 /* Find performant mode table. */
7482 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7483 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7484 cfg_base_addr_index)+cfg_offset+trans_offset,
7485 sizeof(*h->transtable));
7486 if (!h->transtable) {
7487 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7488 hpsa_free_cfgtables(h);
7494 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7496 #define MIN_MAX_COMMANDS 16
7497 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7499 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7501 /* Limit commands in memory limited kdump scenario. */
7502 if (reset_devices && h->max_commands > 32)
7503 h->max_commands = 32;
7505 if (h->max_commands < MIN_MAX_COMMANDS) {
7506 dev_warn(&h->pdev->dev,
7507 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7510 h->max_commands = MIN_MAX_COMMANDS;
7514 /* If the controller reports that the total max sg entries is greater than 512,
7515 * then we know that chained SG blocks work. (Original smart arrays did not
7516 * support chained SG blocks and would return zero for max sg entries.)
7518 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7520 return h->maxsgentries > 512;
7523 /* Interrogate the hardware for some limits:
7524 * max commands, max SG elements without chaining, and with chaining,
7525 * SG chain block size, etc.
7527 static void hpsa_find_board_params(struct ctlr_info *h)
7529 hpsa_get_max_perf_mode_cmds(h);
7530 h->nr_cmds = h->max_commands;
7531 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7532 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7533 if (hpsa_supports_chained_sg_blocks(h)) {
7534 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
7535 h->max_cmd_sg_entries = 32;
7536 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7537 h->maxsgentries--; /* save one for chain pointer */
7540 * Original smart arrays supported at most 31 s/g entries
7541 * embedded inline in the command (trying to use more
7542 * would lock up the controller)
7544 h->max_cmd_sg_entries = 31;
7545 h->maxsgentries = 31; /* default to traditional values */
7549 /* Find out what task management functions are supported and cache */
7550 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7551 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7552 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7553 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7554 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7555 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7556 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7559 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7561 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7562 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7568 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7572 driver_support = readl(&(h->cfgtable->driver_support));
7573 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7575 driver_support |= ENABLE_SCSI_PREFETCH;
7577 driver_support |= ENABLE_UNIT_ATTN;
7578 writel(driver_support, &(h->cfgtable->driver_support));
7581 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7582 * in a prefetch beyond physical memory.
7584 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7588 if (h->board_id != 0x3225103C)
7590 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7591 dma_prefetch |= 0x8000;
7592 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7595 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7599 unsigned long flags;
7600 /* wait until the clear_event_notify bit 6 is cleared by controller. */
7601 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7602 spin_lock_irqsave(&h->lock, flags);
7603 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7604 spin_unlock_irqrestore(&h->lock, flags);
7605 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7607 /* delay and try again */
7608 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7615 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7619 unsigned long flags;
7621 /* under certain very rare conditions, this can take a while.
7622 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7623 * as we enter this code.)
7625 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7626 if (h->remove_in_progress)
7628 spin_lock_irqsave(&h->lock, flags);
7629 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7630 spin_unlock_irqrestore(&h->lock, flags);
7631 if (!(doorbell_value & CFGTBL_ChangeReq))
7633 /* delay and try again */
7634 msleep(MODE_CHANGE_WAIT_INTERVAL);
7641 /* return -ENODEV or other reason on error, 0 on success */
7642 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7646 trans_support = readl(&(h->cfgtable->TransportSupport));
7647 if (!(trans_support & SIMPLE_MODE))
7650 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7652 /* Update the field, and then ring the doorbell */
7653 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7654 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7655 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7656 if (hpsa_wait_for_mode_change_ack(h))
7658 print_cfg_table(&h->pdev->dev, h->cfgtable);
7659 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7661 h->transMethod = CFGTBL_Trans_Simple;
7664 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7668 /* free items allocated or mapped by hpsa_pci_init */
7669 static void hpsa_free_pci_init(struct ctlr_info *h)
7671 hpsa_free_cfgtables(h); /* pci_init 4 */
7672 iounmap(h->vaddr); /* pci_init 3 */
7674 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7676 * call pci_disable_device before pci_release_regions per
7677 * Documentation/PCI/pci.txt
7679 pci_disable_device(h->pdev); /* pci_init 1 */
7680 pci_release_regions(h->pdev); /* pci_init 2 */
7683 /* several items must be freed later */
7684 static int hpsa_pci_init(struct ctlr_info *h)
7686 int prod_index, err;
7688 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7691 h->product_name = products[prod_index].product_name;
7692 h->access = *(products[prod_index].access);
7694 h->needs_abort_tags_swizzled =
7695 ctlr_needs_abort_tags_swizzled(h->board_id);
7697 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7698 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7700 err = pci_enable_device(h->pdev);
7702 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7703 pci_disable_device(h->pdev);
7707 err = pci_request_regions(h->pdev, HPSA);
7709 dev_err(&h->pdev->dev,
7710 "failed to obtain PCI resources\n");
7711 pci_disable_device(h->pdev);
7715 pci_set_master(h->pdev);
7717 hpsa_interrupt_mode(h);
7718 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7720 goto clean2; /* intmode+region, pci */
7721 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7723 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7725 goto clean2; /* intmode+region, pci */
7727 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7729 goto clean3; /* vaddr, intmode+region, pci */
7730 err = hpsa_find_cfgtables(h);
7732 goto clean3; /* vaddr, intmode+region, pci */
7733 hpsa_find_board_params(h);
7735 if (!hpsa_CISS_signature_present(h)) {
7737 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7739 hpsa_set_driver_support_bits(h);
7740 hpsa_p600_dma_prefetch_quirk(h);
7741 err = hpsa_enter_simple_mode(h);
7743 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7746 clean4: /* cfgtables, vaddr, intmode+region, pci */
7747 hpsa_free_cfgtables(h);
7748 clean3: /* vaddr, intmode+region, pci */
7751 clean2: /* intmode+region, pci */
7752 hpsa_disable_interrupt_mode(h);
7754 * call pci_disable_device before pci_release_regions per
7755 * Documentation/PCI/pci.txt
7757 pci_disable_device(h->pdev);
7758 pci_release_regions(h->pdev);
7762 static void hpsa_hba_inquiry(struct ctlr_info *h)
7766 #define HBA_INQUIRY_BYTE_COUNT 64
7767 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7768 if (!h->hba_inquiry_data)
7770 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7771 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7773 kfree(h->hba_inquiry_data);
7774 h->hba_inquiry_data = NULL;
7778 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7781 void __iomem *vaddr;
7786 /* kdump kernel is loading, we don't know in which state the
7787 * PCI interface is. The dev->enable_cnt is equal to zero,
7788 * so we call enable+disable, wait a while and switch it on.
7789 */
7790 rc = pci_enable_device(pdev);
7792 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7795 pci_disable_device(pdev);
7796 msleep(260); /* a randomly chosen number */
7797 rc = pci_enable_device(pdev);
7799 dev_warn(&pdev->dev, "failed to enable device.\n");
7803 pci_set_master(pdev);
7805 vaddr = pci_ioremap_bar(pdev, 0);
7806 if (vaddr == NULL) {
7810 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7813 /* Reset the controller with a PCI power-cycle or via doorbell */
7814 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7816 /* -ENOTSUPP here means we cannot reset the controller
7817 * but it's already (and still) up and running in
7818 * "performant mode". Or, it might be 640x, which can't reset
7819 * due to concerns about shared bbwc between 6402/6404 pair.
7824 /* Now try to get the controller to respond to a no-op */
7825 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7826 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7827 if (hpsa_noop(pdev) == 0)
7830 dev_warn(&pdev->dev, "no-op failed%s\n",
7831 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
7836 pci_disable_device(pdev);
7840 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7842 kfree(h->cmd_pool_bits);
7843 h->cmd_pool_bits = NULL;
7845 pci_free_consistent(h->pdev,
7846 h->nr_cmds * sizeof(struct CommandList),
7848 h->cmd_pool_dhandle);
7850 h->cmd_pool_dhandle = 0;
7852 if (h->errinfo_pool) {
7853 pci_free_consistent(h->pdev,
7854 h->nr_cmds * sizeof(struct ErrorInfo),
7856 h->errinfo_pool_dhandle);
7857 h->errinfo_pool = NULL;
7858 h->errinfo_pool_dhandle = 0;
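/*
 * Allocate the command pool: an allocation bitmap plus DMA-coherent
 * arrays of CommandList and ErrorInfo entries, one of each per
 * supported command.  A failure of any piece tears down the whole pool.
 */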
7862 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7864 h->cmd_pool_bits = kzalloc(
7865 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7866 sizeof(unsigned long), GFP_KERNEL);
7867 h->cmd_pool = pci_alloc_consistent(h->pdev,
7868 h->nr_cmds * sizeof(*h->cmd_pool),
7869 &(h->cmd_pool_dhandle));
7870 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7871 h->nr_cmds * sizeof(*h->errinfo_pool),
7872 &(h->errinfo_pool_dhandle));
7873 if ((h->cmd_pool_bits == NULL)
7874 || (h->cmd_pool == NULL)
7875 || (h->errinfo_pool == NULL)) {
7876 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
7879 hpsa_preinitialize_commands(h);
7882 hpsa_free_cmd_pool(h);
7886 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7890 cpu = cpumask_first(cpu_online_mask);
7891 for (i = 0; i < h->msix_vector; i++) {
7892 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
7893 cpu = cpumask_next(cpu, cpu_online_mask);
7897 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7898 static void hpsa_free_irqs(struct ctlr_info *h)
7902 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7903 /* Single reply queue, only one irq to free */
7905 irq_set_affinity_hint(h->intr[i], NULL);
7906 free_irq(h->intr[i], &h->q[i]);
7911 for (i = 0; i < h->msix_vector; i++) {
7912 irq_set_affinity_hint(h->intr[i], NULL);
7913 free_irq(h->intr[i], &h->q[i]);
7916 for (; i < MAX_REPLY_QUEUES; i++)
7920 /* returns 0 on success; cleans up and returns -Enn on error */
7921 static int hpsa_request_irqs(struct ctlr_info *h,
7922 irqreturn_t (*msixhandler)(int, void *),
7923 irqreturn_t (*intxhandler)(int, void *))
7928 * initialize h->q[x] = x so that interrupt handlers know which
7931 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7934 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
7935 /* If performant mode and MSI-X, use multiple reply queues */
7936 for (i = 0; i < h->msix_vector; i++) {
7937 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
7938 rc = request_irq(h->intr[i], msixhandler,
7944 dev_err(&h->pdev->dev,
7945 "failed to get irq %d for %s\n",
7946 h->intr[i], h->devname);
7947 for (j = 0; j < i; j++) {
7948 free_irq(h->intr[j], &h->q[j]);
7951 for (; j < MAX_REPLY_QUEUES; j++)
7956 hpsa_irq_affinity_hints(h);
7958 /* Use single reply pool */
7959 if (h->msix_vector > 0 || h->msi_vector) {
7961 sprintf(h->intrname[h->intr_mode],
7962 "%s-msix", h->devname);
7964 sprintf(h->intrname[h->intr_mode],
7965 "%s-msi", h->devname);
7966 rc = request_irq(h->intr[h->intr_mode],
7968 h->intrname[h->intr_mode],
7969 &h->q[h->intr_mode]);
7971 sprintf(h->intrname[h->intr_mode],
7972 "%s-intx", h->devname);
7973 rc = request_irq(h->intr[h->intr_mode],
7974 intxhandler, IRQF_SHARED,
7975 h->intrname[h->intr_mode],
7976 &h->q[h->intr_mode]);
7978 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
7981 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
7982 h->intr[h->intr_mode], h->devname);
7989 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
7992 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
7994 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
7995 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7997 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8001 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8002 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8004 dev_warn(&h->pdev->dev, "Board failed to become ready "
8005 "after soft reset.\n");
8012 static void hpsa_free_reply_queues(struct ctlr_info *h)
8016 for (i = 0; i < h->nreply_queues; i++) {
8017 if (!h->reply_queue[i].head)
8019 pci_free_consistent(h->pdev,
8020 h->reply_queue_size,
8021 h->reply_queue[i].head,
8022 h->reply_queue[i].busaddr);
8023 h->reply_queue[i].head = NULL;
8024 h->reply_queue[i].busaddr = 0;
8026 h->reply_queue_size = 0;
8029 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8031 hpsa_free_performant_mode(h); /* init_one 7 */
8032 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8033 hpsa_free_cmd_pool(h); /* init_one 5 */
8034 hpsa_free_irqs(h); /* init_one 4 */
8035 scsi_host_put(h->scsi_host); /* init_one 3 */
8036 h->scsi_host = NULL; /* init_one 3 */
8037 hpsa_free_pci_init(h); /* init_one 2_5 */
8038 free_percpu(h->lockup_detected); /* init_one 2 */
8039 h->lockup_detected = NULL; /* init_one 2 */
8040 if (h->resubmit_wq) {
8041 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
8042 h->resubmit_wq = NULL;
8044 if (h->rescan_ctlr_wq) {
8045 destroy_workqueue(h->rescan_ctlr_wq);
8046 h->rescan_ctlr_wq = NULL;
8048 kfree(h); /* init_one 1 */
8051 /* Called when controller lockup detected. */
8052 static void fail_all_outstanding_cmds(struct ctlr_info *h)
8055 struct CommandList *c;
8058 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8059 for (i = 0; i < h->nr_cmds; i++) {
8060 c = h->cmd_pool + i;
8061 refcount = atomic_inc_return(&c->refcount);
8063 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8065 atomic_dec(&h->commands_outstanding);
8070 dev_warn(&h->pdev->dev,
8071 "failed %d commands in fail_all\n", failcount);
8074 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8078 for_each_online_cpu(cpu) {
8079 u32 *lockup_detected;
8080 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8081 *lockup_detected = value;
8083 wmb(); /* be sure the per-cpu variables are out to memory */
8086 static void controller_lockup_detected(struct ctlr_info *h)
8088 unsigned long flags;
8089 u32 lockup_detected;
8091 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8092 spin_lock_irqsave(&h->lock, flags);
8093 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8094 if (!lockup_detected) {
8095 /* no heartbeat, but controller gave us a zero. */
8096 dev_warn(&h->pdev->dev,
8097 "lockup detected after %d but scratchpad register is zero\n",
8098 h->heartbeat_sample_interval / HZ);
8099 lockup_detected = 0xffffffff;
8101 set_lockup_detected_for_all_cpus(h, lockup_detected);
8102 spin_unlock_irqrestore(&h->lock, flags);
8103 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8104 lockup_detected, h->heartbeat_sample_interval / HZ);
8105 pci_disable_device(h->pdev);
8106 fail_all_outstanding_cmds(h);
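/*
 * Lockup heuristic: the controller is considered healthy if it has
 * interrupted us recently, or if the heartbeat was already sampled
 * within the last interval; otherwise, a heartbeat counter that has not
 * advanced since the previous sample is treated as a lockup.
 */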
8109 static int detect_controller_lockup(struct ctlr_info *h)
8113 unsigned long flags;
8115 now = get_jiffies_64();
8116 /* If we've received an interrupt recently, we're ok. */
8117 if (time_after64(h->last_intr_timestamp +
8118 (h->heartbeat_sample_interval), now))
8122 * If we've already checked the heartbeat recently, we're ok.
8123 * This could happen if someone sends us a signal. We
8124 * otherwise don't care about signals in this thread.
8126 if (time_after64(h->last_heartbeat_timestamp +
8127 (h->heartbeat_sample_interval), now))
8130 /* If heartbeat has not changed since we last looked, we're not ok. */
8131 spin_lock_irqsave(&h->lock, flags);
8132 heartbeat = readl(&h->cfgtable->HeartBeat);
8133 spin_unlock_irqrestore(&h->lock, flags);
8134 if (h->last_heartbeat == heartbeat) {
8135 controller_lockup_detected(h);
8140 h->last_heartbeat = heartbeat;
8141 h->last_heartbeat_timestamp = now;
8145 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8150 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8153 /* Ask the controller to clear the events we're handling. */
8154 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8155 | CFGTBL_Trans_io_accel2)) &&
8156 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8157 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8159 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8160 event_type = "state change";
8161 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8162 event_type = "configuration change";
8163 /* Stop sending new RAID offload reqs via the IO accelerator */
8164 scsi_block_requests(h->scsi_host);
8165 for (i = 0; i < h->ndevices; i++)
8166 h->dev[i]->offload_enabled = 0;
8167 hpsa_drain_accel_commands(h);
8168 /* Set 'accelerator path config change' bit */
8169 dev_warn(&h->pdev->dev,
8170 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8171 h->events, event_type);
8172 writel(h->events, &(h->cfgtable->clear_event_notify));
8173 /* Set the "clear event notify field update" bit 6 */
8174 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8175 /* Wait until ctlr clears 'clear event notify field', bit 6 */
8176 hpsa_wait_for_clear_event_notify_ack(h);
8177 scsi_unblock_requests(h->scsi_host);
8179 /* Acknowledge controller notification events. */
8180 writel(h->events, &(h->cfgtable->clear_event_notify));
8181 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8182 hpsa_wait_for_clear_event_notify_ack(h);
8184 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8185 hpsa_wait_for_mode_change_ack(h);
8191 /* Check a register on the controller to see if there are configuration
8192 * changes (added/changed/removed logical drives, etc.) which mean that
8193 * we should rescan the controller for devices.
8194 * Also check flag for driver-initiated rescan.
8196 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8198 if (h->drv_req_rescan) {
8199 h->drv_req_rescan = 0;
8203 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8206 h->events = readl(&(h->cfgtable->event_notify));
8207 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8211 * Check if any of the offline devices have become ready
8213 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8215 unsigned long flags;
8216 struct offline_device_entry *d;
8217 struct list_head *this, *tmp;
8219 spin_lock_irqsave(&h->offline_device_lock, flags);
8220 list_for_each_safe(this, tmp, &h->offline_device_list) {
8221 d = list_entry(this, struct offline_device_entry,
8223 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8224 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8225 spin_lock_irqsave(&h->offline_device_lock, flags);
8226 list_del(&d->offline_list);
8227 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8230 spin_lock_irqsave(&h->offline_device_lock, flags);
8232 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8236 static int hpsa_luns_changed(struct ctlr_info *h)
8238 int rc = 1; /* assume there are changes */
8239 struct ReportLUNdata *logdev = NULL;
8241 /* if we can't find out if lun data has changed,
8242 * assume that it has.
8245 if (!h->lastlogicals)
8248 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8250 dev_warn(&h->pdev->dev,
8251 "Out of memory, can't track lun changes.\n");
8254 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8255 dev_warn(&h->pdev->dev,
8256 "report luns failed, can't track lun changes.\n");
8259 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8260 dev_info(&h->pdev->dev,
8261 "Lun changes detected.\n");
8262 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8265 rc = 0; /* no changes detected. */
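/*
 * Periodic rescan worker: acknowledges controller events and starts a
 * SCSI scan when the controller flags changes or an offline volume
 * becomes ready, falls back to report-LUNs polling when discovery
 * polling is enabled, then requeues itself unless the controller is
 * being removed.
 */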
8271 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8273 unsigned long flags;
8274 struct ctlr_info *h = container_of(to_delayed_work(work),
8275 struct ctlr_info, rescan_ctlr_work);
8278 if (h->remove_in_progress)
8281 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
8282 scsi_host_get(h->scsi_host);
8283 hpsa_ack_ctlr_events(h);
8284 hpsa_scan_start(h->scsi_host);
8285 scsi_host_put(h->scsi_host);
8286 } else if (h->discovery_polling) {
8287 hpsa_disable_rld_caching(h);
8288 if (hpsa_luns_changed(h)) {
8289 struct Scsi_Host *sh = NULL;
8291 dev_info(&h->pdev->dev,
8292 "driver discovery polling rescan.\n");
8293 sh = scsi_host_get(h->scsi_host);
8295 hpsa_scan_start(sh);
8300 spin_lock_irqsave(&h->lock, flags);
8301 if (!h->remove_in_progress)
8302 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8303 h->heartbeat_sample_interval);
8304 spin_unlock_irqrestore(&h->lock, flags);
8307 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8309 unsigned long flags;
8310 struct ctlr_info *h = container_of(to_delayed_work(work),
8311 struct ctlr_info, monitor_ctlr_work);
8313 detect_controller_lockup(h);
8314 if (lockup_detected(h))
8317 spin_lock_irqsave(&h->lock, flags);
8318 if (!h->remove_in_progress)
8319 schedule_delayed_work(&h->monitor_ctlr_work,
8320 h->heartbeat_sample_interval);
8321 spin_unlock_irqrestore(&h->lock, flags);
8324 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8327 struct workqueue_struct *wq = NULL;
8329 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8331 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
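/*
 * Main probe entry point.  Resources are acquired in numbered stages
 * that mirror the clean1..clean7 error labels below, so a failure at
 * any stage unwinds exactly what has been set up so far.
 */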
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff: prefer a 64-bit mask, fall back to 32 */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc != 0) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc != 0) {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->abort_cmd_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6;	/* sg, cmd, irq, shost, pci, lu, aer/h */

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean7;	/* perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the rescan and resubmit workqueues */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean7;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;
	/* Disable discovery polling. */
	h->discovery_polling = 0;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);

	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
	if (!h->lastlogicals)
		dev_info(&h->pdev->dev,
			"Can't track change to report lun data\n");

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4: /* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3: /* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5: /* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2: /* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto out;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, NO_TIMEOUT);
	if (rc == 0 && c->err_info->CommandStatus == 0)
		goto done;	/* flush succeeded */
out:
	dev_warn(&h->pdev->dev,
		"error flushing cache on controller\n");
done:
	cmd_free(h, c);
	kfree(flush_buf);
}

/* Make controller gather fresh report lun data each time we
 * send down a report luns request
 */
static void hpsa_disable_rld_caching(struct ctlr_info *h)
{
	u32 *options;
	struct CommandList *c;
	int rc;

	/* Don't bother trying to set diag options if locked up */
	if (unlikely(lockup_detected(h)))
		return;

	options = kzalloc(sizeof(*options), GFP_KERNEL);
	if (!options) {
		dev_err(&h->pdev->dev,
			"Error: failed to disable rld caching, during alloc.\n");
		return;
	}

	c = cmd_alloc(h);

	/* first, get the current diag options settings */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now, set the bit for disabling the RLD caching */
	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;

	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_TODEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now verify that it got set: */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* bitwise "&", not logical "&&": test the bit itself */
	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
		goto out;

errout:
	dev_err(&h->pdev->dev,
			"Error: failed to disable report lun data caching.\n");
out:
	cmd_free(h, c);
	kfree(options);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Flush the cache and then turn board interrupts off, so that
	 * all data in the battery-backed write cache is committed to
	 * disk before the controller loses power.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	scsi_remove_host(h->scsi_host);		/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_shutdown(pdev);

	hpsa_free_device_info(h);		/* scan */

	kfree(h->hba_inquiry_data);			/* init_one 10 */
	h->hba_inquiry_data = NULL;			/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);			/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
	hpsa_free_cmd_pool(h);				/* init_one 5 */
	kfree(h->lastlogicals);

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);			/* init_one 3 */
	h->scsi_host = NULL;				/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);				/* init_one 2.5 */

	free_percpu(h->lockup_detected);		/* init_one 2 */
	h->lockup_detected = NULL;			/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */

	hpsa_delete_sas_host(h);

	kfree(h);					/* init_one 1 */
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}

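/*
 * Worked example (illustrative only, using the bft[] values passed in by
 * hpsa_enter_performant_mode() below): with bucket[] = {5, 6, 8, 10, 12,
 * 20, 28, SG_ENTRIES_IN_CMD + 4} and min_blocks = 4, a command with
 * i = 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks.  The first bucket
 * that is >= 7 is bucket[2] (8 blocks, i.e. a 128-byte fetch), so
 * bucket_map[3] = 2.
 */
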
/* return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		access = SA5_ioaccel_mode2_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		if (rc)	/* bail out if the config table can't be located */
			return rc;
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}

/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	sas_phy_free(phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	kfree(hpsa_sas_phy);
}

static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}

static int
	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}

static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}

static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}

static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_gendev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

/* SMP = Serial Management Protocol */
static int
hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
			struct request *req)
{
	return -EINVAL;
}

static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
	.smp_handler = hpsa_sas_smp_handler,
};

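/*
 * Note: the hpsa_sas_* callbacks above are largely stubs.  The Smart Array
 * firmware manages the physical SAS topology itself, so the driver registers
 * just enough of the SAS transport class for device enumeration and sysfs
 * wiring; operations it does not implement return success or a suitable
 * error code.
 */
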
/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);
	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);