/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 */
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
109 .cache_line_size = 0x20,
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
115 .clr_interrupt_mask_reg32 = 0x00230,
116 .sense_interrupt_mask_reg = 0x0022C,
117 .sense_interrupt_mask_reg32 = 0x0022C,
118 .clr_interrupt_reg = 0x00228,
119 .clr_interrupt_reg32 = 0x00228,
120 .sense_interrupt_reg = 0x00224,
121 .sense_interrupt_reg32 = 0x00224,
122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
124 .sense_uproc_interrupt_reg32 = 0x00214,
125 .set_uproc_interrupt_reg = 0x00214,
126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
131 { /* Snipe and Scamp */
134 .cache_line_size = 0x20,
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
140 .clr_interrupt_mask_reg32 = 0x0028C,
141 .sense_interrupt_mask_reg = 0x00288,
142 .sense_interrupt_mask_reg32 = 0x00288,
143 .clr_interrupt_reg = 0x00284,
144 .clr_interrupt_reg32 = 0x00284,
145 .sense_interrupt_reg = 0x00280,
146 .sense_interrupt_reg32 = 0x00280,
147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
149 .sense_uproc_interrupt_reg32 = 0x00290,
150 .set_uproc_interrupt_reg = 0x00290,
151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
159 .cache_line_size = 0x20,
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
165 .clr_interrupt_mask_reg32 = 0x0001C,
166 .sense_interrupt_mask_reg = 0x00010,
167 .sense_interrupt_mask_reg32 = 0x00014,
168 .clr_interrupt_reg = 0x00008,
169 .clr_interrupt_reg32 = 0x0000C,
170 .sense_interrupt_reg = 0x00000,
171 .sense_interrupt_reg32 = 0x00004,
172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
174 .sense_uproc_interrupt_reg32 = 0x00024,
175 .set_uproc_interrupt_reg = 0x00020,
176 .set_uproc_interrupt_reg32 = 0x00024,
177 .clr_uproc_interrupt_reg = 0x00028,
178 .clr_uproc_interrupt_reg32 = 0x0002C,
179 .init_feedback_reg = 0x0005C,
180 .dump_addr_reg = 0x00064,
181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
187 static const struct ipr_chip_t ipr_chip[] = {
188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
199 static int ipr_max_bus_speeds[] = {
200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
227 /* A constant array of IOASCs/URCs/Error Messages */
229 struct ipr_error_table_t ipr_error_table[] = {
230 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 "8155: An unknown error was received"},
233 "Soft underlength error"},
235 "Command to be cancelled not found"},
237 "Qualified success"},
238 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 "FFFE: Soft device bus error recovered by the IOA"},
240 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4101: Soft device bus fabric error"},
242 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFC: Logical block guard error recovered by the device"},
244 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFC: Logical block reference tag error recovered by the device"},
246 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 "4171: Recovered scatter list tag / sequence number error"},
248 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFFD: Recovered logical block reference tag error detected by the IOA"},
254 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFFD: Logical block guard error recovered by the IOA"},
256 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFF9: Device sector reassign successful"},
258 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFF7: Media error recovered by device rewrite procedures"},
260 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 "7001: IOA sector reassignment successful"},
262 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF9: Soft media error. Sector reassignment recommended"},
264 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 "FFF7: Media error recovered by IOA rewrite procedures"},
266 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FF3D: Soft PCI bus error recovered by the IOA"},
268 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 "FFF6: Device hardware error recovered by the IOA"},
270 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FFF6: Device hardware error recovered by the device"},
272 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FF3D: Soft IOA error recovered by the IOA"},
274 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFFA: Undefined device response recovered by the IOA"},
276 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FFF6: Device bus error, message or command phase"},
278 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFE: Task Management Function failed"},
280 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Failure prediction threshold exceeded"},
282 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 "8009: Impending cache battery pack failure"},
285 "34FF: Disk device format in progress"},
286 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
287 "9070: IOA requested reset"},
289 "Synchronization required"},
291 "No ready, IOA shutdown"},
293 "Not ready, IOA has been shutdown"},
294 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
295 "3020: Storage subsystem configuration error"},
297 "FFF5: Medium error, data unreadable, recommend reassign"},
299 "7000: Medium error, data unreadable, do not reassign"},
300 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
301 "FFF3: Disk media format bad"},
302 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
303 "3002: Addressed device failed to respond to selection"},
304 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
305 "3100: Device bus error"},
306 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
307 "3109: IOA timed out a device command"},
309 "3120: SCSI bus is not operational"},
310 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
311 "4100: Hard device bus fabric error"},
312 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
313 "310C: Logical block guard error detected by the device"},
314 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
315 "310C: Logical block reference tag error detected by the device"},
316 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
317 "4170: Scatter list tag / sequence number error"},
318 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
319 "8150: Logical block CRC error on IOA to Host transfer"},
320 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
321 "4170: Logical block sequence number error on IOA to Host transfer"},
322 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310D: Logical block reference tag error detected by the IOA"},
324 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310D: Logical block guard error detected by the IOA"},
326 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
327 "9000: IOA reserved area data check"},
328 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
329 "9001: IOA reserved area invalid data pattern"},
330 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
331 "9002: IOA reserved area LRC error"},
332 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
333 "Hardware Error, IOA metadata access error"},
334 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
335 "102E: Out of alternate sectors for disk storage"},
336 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
337 "FFF4: Data transfer underlength error"},
338 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
339 "FFF4: Data transfer overlength error"},
340 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
341 "3400: Logical unit failure"},
342 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
343 "FFF4: Device microcode is corrupt"},
344 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
345 "8150: PCI bus error"},
347 "Unsupported device bus message received"},
348 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
349 "FFF4: Disk device problem"},
350 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
351 "8150: Permanent IOA failure"},
352 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
353 "3010: Disk device returned wrong response to IOA"},
354 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
355 "8151: IOA microcode error"},
357 "Device bus status error"},
358 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
359 "8157: IOA error requiring IOA reset to recover"},
361 "ATA device status error"},
363 "Message reject received from the device"},
364 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
365 "8008: A permanent cache battery pack failure occurred"},
366 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
367 "9090: Disk unit has been modified after the last known status"},
368 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
369 "9081: IOA detected device error"},
370 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
371 "9082: IOA detected device error"},
372 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
373 "3110: Device bus error, message or command phase"},
374 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
375 "3110: SAS Command / Task Management Function failed"},
376 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9091: Incorrect hardware configuration change has been detected"},
378 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9073: Invalid multi-adapter configuration"},
380 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
381 "4010: Incorrect connection between cascaded expanders"},
382 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
383 "4020: Connections exceed IOA design limits"},
384 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
385 "4030: Incorrect multipath connection"},
386 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
387 "4110: Unsupported enclosure function"},
388 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
389 "FFF4: Command to logical unit failed"},
391 "Illegal request, invalid request type or request packet"},
393 "Illegal request, invalid resource handle"},
395 "Illegal request, commands not allowed to this device"},
397 "Illegal request, command not allowed to a secondary adapter"},
399 "Illegal request, command not allowed to a non-optimized resource"},
401 "Illegal request, invalid field in parameter list"},
403 "Illegal request, parameter not supported"},
405 "Illegal request, parameter value invalid"},
407 "Illegal request, command sequence error"},
409 "Illegal request, dual adapter support not enabled"},
410 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
411 "9031: Array protection temporarily suspended, protection resuming"},
412 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
413 "9040: Array protection temporarily suspended, protection resuming"},
414 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
415 "3140: Device bus not ready to ready transition"},
416 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
417 "FFFB: SCSI bus was reset"},
419 "FFFE: SCSI bus transition to single ended"},
421 "FFFE: SCSI bus transition to LVD"},
422 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
423 "FFFB: SCSI bus was reset by another initiator"},
424 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
425 "3029: A device replacement has occurred"},
426 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
427 "9051: IOA cache data exists for a missing or failed device"},
428 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
429 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
430 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
431 "9025: Disk unit is not supported at its physical location"},
432 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
433 "3020: IOA detected a SCSI bus configuration error"},
434 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "3150: SCSI bus configuration error"},
436 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
437 "9074: Asymmetric advanced function disk configuration"},
438 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
439 "4040: Incomplete multipath connection between IOA and enclosure"},
440 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
441 "4041: Incomplete multipath connection between enclosure and device"},
442 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
443 "9075: Incomplete multipath connection between IOA and remote IOA"},
444 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
445 "9076: Configuration error, missing remote IOA"},
446 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
447 "4050: Enclosure does not support a required multipath function"},
448 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
449 "4070: Logically bad block written on device"},
450 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
451 "9041: Array protection temporarily suspended"},
452 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9042: Corrupt array parity detected on specified device"},
454 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
455 "9030: Array no longer protected due to missing or failed disk unit"},
456 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
457 "9071: Link operational transition"},
458 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
459 "9072: Link not operational transition"},
460 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
461 "9032: Array exposed but still protected"},
462 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
463 "70DD: Device forced failed by disrupt device command"},
464 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
465 "4061: Multipath redundancy level got better"},
466 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
467 "4060: Multipath redundancy level got worse"},
469 "Failure due to other device"},
470 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
471 "9008: IOA does not support functions expected by devices"},
472 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
473 "9010: Cache data associated with attached devices cannot be found"},
474 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
475 "9011: Cache data belongs to devices other than those attached"},
476 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
477 "9020: Array missing 2 or more devices with only 1 device present"},
478 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
479 "9021: Array missing 2 or more devices with 2 or more devices present"},
480 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
481 "9022: Exposed array is missing a required device"},
482 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
483 "9023: Array member(s) not at required physical locations"},
484 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9024: Array not functional due to present hardware configuration"},
486 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9026: Array not functional due to present hardware configuration"},
488 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9027: Array is missing a device and parity is out of sync"},
490 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9028: Maximum number of arrays already exist"},
492 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
493 "9050: Required cache data cannot be located for a disk unit"},
494 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
495 "9052: Cache data exists for a device that has been modified"},
496 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
497 "9054: IOA resources not available due to previous problems"},
498 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
499 "9092: Disk unit requires initialization before use"},
500 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
501 "9029: Incorrect hardware configuration change has been detected"},
502 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
503 "9060: One or more disk pairs are missing from an array"},
504 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
505 "9061: One or more disks are missing from an array"},
506 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
507 "9062: One or more disks are missing from an array"},
508 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9063: Maximum number of functional arrays has been exceeded"},
511 "Aborted command, invalid descriptor"},
513 "Command terminated by host"}
516 static const struct ipr_ses_table_entry ipr_ses_table[] = {
517 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
518 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
519 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
520 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
521 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
522 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
523 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
524 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
525 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
526 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
527 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
528 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
529 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	/* Atomically claim the next slot; the AND makes the buffer circular */
	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	/* Ensure the entry is globally visible before the slot can be reused */
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
578 * ipr_lock_and_done - Acquire lock and complete command
579 * @ipr_cmd: ipr command struct
584 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
586 unsigned long lock_flags;
587 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
589 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
590 ipr_cmd->done(ipr_cmd);
591 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
595 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
596 * @ipr_cmd: ipr command struct
601 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
603 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
604 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
605 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
606 dma_addr_t dma_addr = ipr_cmd->dma_addr;
609 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
610 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
611 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
612 ioarcb->data_transfer_length = 0;
613 ioarcb->read_data_transfer_length = 0;
614 ioarcb->ioadl_len = 0;
615 ioarcb->read_ioadl_len = 0;
617 if (ipr_cmd->ioa_cfg->sis64) {
618 ioarcb->u.sis64_addr_data.data_ioadl_addr =
619 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
620 ioasa64->u.gata.status = 0;
622 ioarcb->write_ioadl_addr =
623 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
624 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
625 ioasa->u.gata.status = 0;
628 ioasa->hdr.ioasc = 0;
629 ioasa->hdr.residual_data_len = 0;
630 ipr_cmd->scsi_cmd = NULL;
632 ipr_cmd->sense_buffer[0] = 0;
633 ipr_cmd->dma_use_sg = 0;
637 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
638 * @ipr_cmd: ipr command struct
643 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
644 void (*fast_done) (struct ipr_cmnd *))
646 ipr_reinit_ipr_cmnd(ipr_cmd);
647 ipr_cmd->u.scratch = 0;
648 ipr_cmd->sibling = NULL;
649 ipr_cmd->eh_comp = NULL;
650 ipr_cmd->fast_done = fast_done;
651 init_timer(&ipr_cmd->timer);
655 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
656 * @ioa_cfg: ioa config struct
659 * pointer to ipr command struct
662 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
664 struct ipr_cmnd *ipr_cmd = NULL;
666 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
667 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
668 struct ipr_cmnd, queue);
669 list_del(&ipr_cmd->queue);
677 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
678 * @ioa_cfg: ioa config struct
681 * pointer to ipr command struct
684 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
686 struct ipr_cmnd *ipr_cmd =
687 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
688 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
693 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
694 * @ioa_cfg: ioa config struct
695 * @clr_ints: interrupts to clear
697 * This function masks all interrupts on the adapter, then clears the
698 * interrupts specified in the mask
703 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
706 volatile u32 int_reg;
709 /* Stop new interrupts */
710 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
711 spin_lock(&ioa_cfg->hrrq[i]._lock);
712 ioa_cfg->hrrq[i].allow_interrupts = 0;
713 spin_unlock(&ioa_cfg->hrrq[i]._lock);
717 /* Set interrupt mask to stop all new interrupts */
719 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
721 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
723 /* Clear any pending interrupts */
725 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
726 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
727 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
731 * ipr_save_pcix_cmd_reg - Save PCI-X command register
732 * @ioa_cfg: ioa config struct
735 * 0 on success / -EIO on failure
737 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
739 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
741 if (pcix_cmd_reg == 0)
744 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
745 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
746 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
750 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
755 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
756 * @ioa_cfg: ioa config struct
759 * 0 on success / -EIO on failure
761 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
763 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
766 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
767 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
768 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
777 * ipr_sata_eh_done - done function for aborted SATA commands
778 * @ipr_cmd: ipr command struct
780 * This function is invoked for ops generated to SATA
781 * devices which are being aborted.
786 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
788 struct ata_queued_cmd *qc = ipr_cmd->qc;
789 struct ipr_sata_port *sata_port = qc->ap->private_data;
791 qc->err_mask |= AC_ERR_OTHER;
792 sata_port->ioasa.status |= ATA_BUSY;
793 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
798 * ipr_scsi_eh_done - mid-layer done function for aborted ops
799 * @ipr_cmd: ipr command struct
801 * This function is invoked by the interrupt handler for
802 * ops generated by the SCSI mid-layer which are being aborted.
807 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
809 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
811 scsi_cmd->result |= (DID_ERROR << 16);
813 scsi_dma_unmap(ipr_cmd->scsi_cmd);
814 scsi_cmd->scsi_done(scsi_cmd);
815 if (ipr_cmd->eh_comp)
816 complete(ipr_cmd->eh_comp);
817 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
821 * ipr_fail_all_ops - Fails all outstanding ops.
822 * @ioa_cfg: ioa config struct
824 * This function fails all outstanding ops.
829 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
831 struct ipr_cmnd *ipr_cmd, *temp;
832 struct ipr_hrr_queue *hrrq;
835 for_each_hrrq(hrrq, ioa_cfg) {
836 spin_lock(&hrrq->_lock);
837 list_for_each_entry_safe(ipr_cmd,
838 temp, &hrrq->hrrq_pending_q, queue) {
839 list_del(&ipr_cmd->queue);
841 ipr_cmd->s.ioasa.hdr.ioasc =
842 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
843 ipr_cmd->s.ioasa.hdr.ilid =
844 cpu_to_be32(IPR_DRIVER_ILID);
846 if (ipr_cmd->scsi_cmd)
847 ipr_cmd->done = ipr_scsi_eh_done;
848 else if (ipr_cmd->qc)
849 ipr_cmd->done = ipr_sata_eh_done;
851 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
852 IPR_IOASC_IOA_WAS_RESET);
853 del_timer(&ipr_cmd->timer);
854 ipr_cmd->done(ipr_cmd);
856 spin_unlock(&hrrq->_lock);
862 * ipr_send_command - Send driver initiated requests.
863 * @ipr_cmd: ipr command struct
865 * This function sends a command to the adapter using the correct write call.
866 * In the case of sis64, calculate the ioarcb size required. Then or in the
872 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
874 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
875 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
877 if (ioa_cfg->sis64) {
878 /* The default size is 256 bytes */
879 send_dma_addr |= 0x1;
881 /* If the number of ioadls * size of ioadl > 128 bytes,
882 then use a 512 byte ioarcb */
883 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
884 send_dma_addr |= 0x4;
885 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
887 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
891 * ipr_do_req - Send driver initiated requests.
892 * @ipr_cmd: ipr command struct
893 * @done: done function
894 * @timeout_func: timeout function
895 * @timeout: timeout value
897 * This function sends the specified command to the adapter with the
898 * timeout given. The done function is invoked on command completion.
903 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
904 void (*done) (struct ipr_cmnd *),
905 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
907 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
909 ipr_cmd->done = done;
911 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
912 ipr_cmd->timer.expires = jiffies + timeout;
913 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
915 add_timer(&ipr_cmd->timer);
917 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
919 ipr_send_command(ipr_cmd);
923 * ipr_internal_cmd_done - Op done function for an internally generated op.
924 * @ipr_cmd: ipr command struct
926 * This function is the op done function for an internally generated,
927 * blocking op. It simply wakes the sleeping thread.
932 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
934 if (ipr_cmd->sibling)
935 ipr_cmd->sibling = NULL;
937 complete(&ipr_cmd->completion);
941 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
942 * @ipr_cmd: ipr command struct
943 * @dma_addr: dma address
944 * @len: transfer length
945 * @flags: ioadl flag value
947 * This function initializes an ioadl in the case where there is only a single
953 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
956 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
957 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
959 ipr_cmd->dma_use_sg = 1;
961 if (ipr_cmd->ioa_cfg->sis64) {
962 ioadl64->flags = cpu_to_be32(flags);
963 ioadl64->data_len = cpu_to_be32(len);
964 ioadl64->address = cpu_to_be64(dma_addr);
966 ipr_cmd->ioarcb.ioadl_len =
967 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
968 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
970 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
971 ioadl->address = cpu_to_be32(dma_addr);
973 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
974 ipr_cmd->ioarcb.read_ioadl_len =
975 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
976 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
978 ipr_cmd->ioarcb.ioadl_len =
979 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
980 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
986 * ipr_send_blocking_cmd - Send command and sleep on its completion.
987 * @ipr_cmd: ipr command struct
988 * @timeout_func: function to invoke if command times out
994 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
995 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
998 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1000 init_completion(&ipr_cmd->completion);
1001 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1003 spin_unlock_irq(ioa_cfg->host->host_lock);
1004 wait_for_completion(&ipr_cmd->completion);
1005 spin_lock_irq(ioa_cfg->host->host_lock);
1008 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1012 if (ioa_cfg->hrrq_num == 1)
1015 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1016 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1022 * ipr_send_hcam - Send an HCAM to the adapter.
1023 * @ioa_cfg: ioa config struct
1025 * @hostrcb: hostrcb struct
1027 * This function will send a Host Controlled Async command to the adapter.
1028 * If HCAMs are currently not allowed to be issued to the adapter, it will
1029 * place the hostrcb on the free queue.
1034 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1035 struct ipr_hostrcb *hostrcb)
1037 struct ipr_cmnd *ipr_cmd;
1038 struct ipr_ioarcb *ioarcb;
1040 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1041 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1042 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1043 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1045 ipr_cmd->u.hostrcb = hostrcb;
1046 ioarcb = &ipr_cmd->ioarcb;
1048 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1049 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1050 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1051 ioarcb->cmd_pkt.cdb[1] = type;
1052 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1053 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1055 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1056 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1058 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1059 ipr_cmd->done = ipr_process_ccn;
1061 ipr_cmd->done = ipr_process_error;
1063 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1065 ipr_send_command(ipr_cmd);
1067 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1072 * ipr_update_ata_class - Update the ata class in the resource entry
1073 * @res: resource entry struct
1074 * @proto: cfgte device bus protocol value
1079 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1082 case IPR_PROTO_SATA:
1083 case IPR_PROTO_SAS_STP:
1084 res->ata_class = ATA_DEV_ATA;
1086 case IPR_PROTO_SATA_ATAPI:
1087 case IPR_PROTO_SAS_STP_ATAPI:
1088 res->ata_class = ATA_DEV_ATAPI;
1091 res->ata_class = ATA_DEV_UNKNOWN;
1097 * ipr_init_res_entry - Initialize a resource entry struct.
1098 * @res: resource entry struct
1099 * @cfgtew: config table entry wrapper struct
1104 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1105 struct ipr_config_table_entry_wrapper *cfgtew)
1109 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1110 struct ipr_resource_entry *gscsi_res = NULL;
1112 res->needs_sync_complete = 0;
1115 res->del_from_ml = 0;
1116 res->resetting_device = 0;
1118 res->sata_port = NULL;
1120 if (ioa_cfg->sis64) {
1121 proto = cfgtew->u.cfgte64->proto;
1122 res->res_flags = cfgtew->u.cfgte64->res_flags;
1123 res->qmodel = IPR_QUEUEING_MODEL64(res);
1124 res->type = cfgtew->u.cfgte64->res_type;
1126 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1127 sizeof(res->res_path));
1130 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1131 sizeof(res->dev_lun.scsi_lun));
1132 res->lun = scsilun_to_int(&res->dev_lun);
1134 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1135 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1136 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1138 res->target = gscsi_res->target;
1143 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1144 ioa_cfg->max_devs_supported);
1145 set_bit(res->target, ioa_cfg->target_ids);
1147 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1148 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1150 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1151 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1152 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1153 ioa_cfg->max_devs_supported);
1154 set_bit(res->target, ioa_cfg->array_ids);
1155 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1156 res->bus = IPR_VSET_VIRTUAL_BUS;
1157 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1158 ioa_cfg->max_devs_supported);
1159 set_bit(res->target, ioa_cfg->vset_ids);
1161 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1162 ioa_cfg->max_devs_supported);
1163 set_bit(res->target, ioa_cfg->target_ids);
1166 proto = cfgtew->u.cfgte->proto;
1167 res->qmodel = IPR_QUEUEING_MODEL(res);
1168 res->flags = cfgtew->u.cfgte->flags;
1169 if (res->flags & IPR_IS_IOA_RESOURCE)
1170 res->type = IPR_RES_TYPE_IOAFP;
1172 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1174 res->bus = cfgtew->u.cfgte->res_addr.bus;
1175 res->target = cfgtew->u.cfgte->res_addr.target;
1176 res->lun = cfgtew->u.cfgte->res_addr.lun;
1177 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1180 ipr_update_ata_class(res, proto);
1184 * ipr_is_same_device - Determine if two devices are the same.
1185 * @res: resource entry struct
1186 * @cfgtew: config table entry wrapper struct
1189 * 1 if the devices are the same / 0 otherwise
1191 static int ipr_is_same_device(struct ipr_resource_entry *res,
1192 struct ipr_config_table_entry_wrapper *cfgtew)
1194 if (res->ioa_cfg->sis64) {
1195 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1196 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1197 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1198 sizeof(cfgtew->u.cfgte64->lun))) {
1202 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1203 res->target == cfgtew->u.cfgte->res_addr.target &&
1204 res->lun == cfgtew->u.cfgte->res_addr.lun)
1212 * __ipr_format_res_path - Format the resource path for printing.
1213 * @res_path: resource path
1215 * @len: length of buffer provided
1220 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1226 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1227 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1228 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1234 * ipr_format_res_path - Format the resource path for printing.
1235 * @ioa_cfg: ioa config struct
1236 * @res_path: resource path
1238 * @len: length of buffer provided
1243 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1244 u8 *res_path, char *buffer, int len)
1249 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1250 __ipr_format_res_path(res_path, p, len - (buffer - p));
1255 * ipr_update_res_entry - Update the resource entry.
1256 * @res: resource entry struct
1257 * @cfgtew: config table entry wrapper struct
1262 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1263 struct ipr_config_table_entry_wrapper *cfgtew)
1265 char buffer[IPR_MAX_RES_PATH_LENGTH];
1269 if (res->ioa_cfg->sis64) {
1270 res->flags = cfgtew->u.cfgte64->flags;
1271 res->res_flags = cfgtew->u.cfgte64->res_flags;
1272 res->type = cfgtew->u.cfgte64->res_type;
1274 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1275 sizeof(struct ipr_std_inq_data));
1277 res->qmodel = IPR_QUEUEING_MODEL64(res);
1278 proto = cfgtew->u.cfgte64->proto;
1279 res->res_handle = cfgtew->u.cfgte64->res_handle;
1280 res->dev_id = cfgtew->u.cfgte64->dev_id;
1282 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1283 sizeof(res->dev_lun.scsi_lun));
1285 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1286 sizeof(res->res_path))) {
1287 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1288 sizeof(res->res_path));
1292 if (res->sdev && new_path)
1293 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1294 ipr_format_res_path(res->ioa_cfg,
1295 res->res_path, buffer, sizeof(buffer)));
1297 res->flags = cfgtew->u.cfgte->flags;
1298 if (res->flags & IPR_IS_IOA_RESOURCE)
1299 res->type = IPR_RES_TYPE_IOAFP;
1301 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1303 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1304 sizeof(struct ipr_std_inq_data));
1306 res->qmodel = IPR_QUEUEING_MODEL(res);
1307 proto = cfgtew->u.cfgte->proto;
1308 res->res_handle = cfgtew->u.cfgte->res_handle;
1311 ipr_update_ata_class(res, proto);
1315 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1317 * @res: resource entry struct
1318 * @cfgtew: config table entry wrapper struct
1323 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1325 struct ipr_resource_entry *gscsi_res = NULL;
1326 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1328 if (!ioa_cfg->sis64)
1331 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1332 clear_bit(res->target, ioa_cfg->array_ids);
1333 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1334 clear_bit(res->target, ioa_cfg->vset_ids);
1335 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1336 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1337 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1339 clear_bit(res->target, ioa_cfg->target_ids);
1341 } else if (res->bus == 0)
1342 clear_bit(res->target, ioa_cfg->target_ids);
1346 * ipr_handle_config_change - Handle a config change from the adapter
1347 * @ioa_cfg: ioa config struct
1353 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1354 struct ipr_hostrcb *hostrcb)
1356 struct ipr_resource_entry *res = NULL;
1357 struct ipr_config_table_entry_wrapper cfgtew;
1358 __be32 cc_res_handle;
1362 if (ioa_cfg->sis64) {
1363 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1364 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1366 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1367 cc_res_handle = cfgtew.u.cfgte->res_handle;
1370 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1371 if (res->res_handle == cc_res_handle) {
1378 if (list_empty(&ioa_cfg->free_res_q)) {
1379 ipr_send_hcam(ioa_cfg,
1380 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1385 res = list_entry(ioa_cfg->free_res_q.next,
1386 struct ipr_resource_entry, queue);
1388 list_del(&res->queue);
1389 ipr_init_res_entry(res, &cfgtew);
1390 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1393 ipr_update_res_entry(res, &cfgtew);
1395 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1397 res->del_from_ml = 1;
1398 res->res_handle = IPR_INVALID_RES_HANDLE;
1399 if (ioa_cfg->allow_ml_add_del)
1400 schedule_work(&ioa_cfg->work_q);
1402 ipr_clear_res_target(res);
1403 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1405 } else if (!res->sdev || res->del_from_ml) {
1407 if (ioa_cfg->allow_ml_add_del)
1408 schedule_work(&ioa_cfg->work_q);
1411 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1415 * ipr_process_ccn - Op done function for a CCN.
1416 * @ipr_cmd: ipr command struct
1418 * This function is the op done function for a configuration
1419 * change notification host controlled async from the adapter.
1424 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1426 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1427 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1428 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1430 list_del(&hostrcb->queue);
1431 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1434 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1435 dev_err(&ioa_cfg->pdev->dev,
1436 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1438 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1440 ipr_handle_config_change(ioa_cfg, hostrcb);
1445 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1446 * @i: index into buffer
1447 * @buf: string to modify
1449 * This function will strip all trailing whitespace, pad the end
1450 * of the string with a single space, and NULL terminate the string.
1453 * new length of string
static int strip_and_pad_whitespace(int i, char *buf)
{
	/* Walk back over trailing spaces starting at index i (never past
	 * index 0), then append a single space and a NUL terminator. */
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
1465 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1466 * @prefix: string to print at start of printk
1467 * @hostrcb: hostrcb pointer
1468 * @vpd: vendor/product id/sn struct
1473 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1474 struct ipr_vpd *vpd)
1476 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1479 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1480 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1482 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1483 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1485 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1486 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1488 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1492 * ipr_log_vpd - Log the passed VPD to the error log.
1493 * @vpd: vendor/product id/sn struct
1498 static void ipr_log_vpd(struct ipr_vpd *vpd)
1500 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1501 + IPR_SERIAL_NUM_LEN];
1503 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1504 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1506 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1507 ipr_err("Vendor/Product ID: %s\n", buffer);
1509 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1510 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1511 ipr_err(" Serial Number: %s\n", buffer);
1515 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1516 * @prefix: string to print at start of printk
1517 * @hostrcb: hostrcb pointer
1518 * @vpd: vendor/product id/sn/wwn struct
1523 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1524 struct ipr_ext_vpd *vpd)
1526 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1527 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1528 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1532 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1533 * @vpd: vendor/product id/sn/wwn struct
1538 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1540 ipr_log_vpd(&vpd->vpd);
1541 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1542 be32_to_cpu(vpd->wwid[1]));
1546 * ipr_log_enhanced_cache_error - Log a cache error.
1547 * @ioa_cfg: ioa config struct
1548 * @hostrcb: hostrcb struct
1553 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1554 struct ipr_hostrcb *hostrcb)
1556 struct ipr_hostrcb_type_12_error *error;
1559 error = &hostrcb->hcam.u.error64.u.type_12_error;
1561 error = &hostrcb->hcam.u.error.u.type_12_error;
1563 ipr_err("-----Current Configuration-----\n");
1564 ipr_err("Cache Directory Card Information:\n");
1565 ipr_log_ext_vpd(&error->ioa_vpd);
1566 ipr_err("Adapter Card Information:\n");
1567 ipr_log_ext_vpd(&error->cfc_vpd);
1569 ipr_err("-----Expected Configuration-----\n");
1570 ipr_err("Cache Directory Card Information:\n");
1571 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1572 ipr_err("Adapter Card Information:\n");
1573 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1575 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1576 be32_to_cpu(error->ioa_data[0]),
1577 be32_to_cpu(error->ioa_data[1]),
1578 be32_to_cpu(error->ioa_data[2]));
1582 * ipr_log_cache_error - Log a cache error.
1583 * @ioa_cfg: ioa config struct
1584 * @hostrcb: hostrcb struct
1589 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1590 struct ipr_hostrcb *hostrcb)
1592 struct ipr_hostrcb_type_02_error *error =
1593 &hostrcb->hcam.u.error.u.type_02_error;
1595 ipr_err("-----Current Configuration-----\n");
1596 ipr_err("Cache Directory Card Information:\n");
1597 ipr_log_vpd(&error->ioa_vpd);
1598 ipr_err("Adapter Card Information:\n");
1599 ipr_log_vpd(&error->cfc_vpd);
1601 ipr_err("-----Expected Configuration-----\n");
1602 ipr_err("Cache Directory Card Information:\n");
1603 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1604 ipr_err("Adapter Card Information:\n");
1605 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1607 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1608 be32_to_cpu(error->ioa_data[0]),
1609 be32_to_cpu(error->ioa_data[1]),
1610 be32_to_cpu(error->ioa_data[2]));
1614 * ipr_log_enhanced_config_error - Log a configuration error.
1615 * @ioa_cfg: ioa config struct
1616 * @hostrcb: hostrcb struct
1621 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1622 struct ipr_hostrcb *hostrcb)
1624 int errors_logged, i;
1625 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1626 struct ipr_hostrcb_type_13_error *error;
1628 error = &hostrcb->hcam.u.error.u.type_13_error;
1629 errors_logged = be32_to_cpu(error->errors_logged);
1631 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1632 be32_to_cpu(error->errors_detected), errors_logged);
1634 dev_entry = error->dev;
1636 for (i = 0; i < errors_logged; i++, dev_entry++) {
1639 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1640 ipr_log_ext_vpd(&dev_entry->vpd);
1642 ipr_err("-----New Device Information-----\n");
1643 ipr_log_ext_vpd(&dev_entry->new_vpd);
1645 ipr_err("Cache Directory Card Information:\n");
1646 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1648 ipr_err("Adapter Card Information:\n");
1649 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1654 * ipr_log_sis64_config_error - Log a device error.
1655 * @ioa_cfg: ioa config struct
1656 * @hostrcb: hostrcb struct
1661 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1662 struct ipr_hostrcb *hostrcb)
1664 int errors_logged, i;
1665 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1666 struct ipr_hostrcb_type_23_error *error;
1667 char buffer[IPR_MAX_RES_PATH_LENGTH];
1669 error = &hostrcb->hcam.u.error64.u.type_23_error;
1670 errors_logged = be32_to_cpu(error->errors_logged);
1672 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1673 be32_to_cpu(error->errors_detected), errors_logged);
1675 dev_entry = error->dev;
1677 for (i = 0; i < errors_logged; i++, dev_entry++) {
1680 ipr_err("Device %d : %s", i + 1,
1681 __ipr_format_res_path(dev_entry->res_path,
1682 buffer, sizeof(buffer)));
1683 ipr_log_ext_vpd(&dev_entry->vpd);
1685 ipr_err("-----New Device Information-----\n");
1686 ipr_log_ext_vpd(&dev_entry->new_vpd);
1688 ipr_err("Cache Directory Card Information:\n");
1689 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1691 ipr_err("Adapter Card Information:\n");
1692 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1697 * ipr_log_config_error - Log a configuration error.
1698 * @ioa_cfg: ioa config struct
1699 * @hostrcb: hostrcb struct
1704 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1705 struct ipr_hostrcb *hostrcb)
1707 int errors_logged, i;
1708 struct ipr_hostrcb_device_data_entry *dev_entry;
1709 struct ipr_hostrcb_type_03_error *error;
1711 error = &hostrcb->hcam.u.error.u.type_03_error;
1712 errors_logged = be32_to_cpu(error->errors_logged);
1714 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1715 be32_to_cpu(error->errors_detected), errors_logged);
1717 dev_entry = error->dev;
1719 for (i = 0; i < errors_logged; i++, dev_entry++) {
1722 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1723 ipr_log_vpd(&dev_entry->vpd);
1725 ipr_err("-----New Device Information-----\n");
1726 ipr_log_vpd(&dev_entry->new_vpd);
1728 ipr_err("Cache Directory Card Information:\n");
1729 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1731 ipr_err("Adapter Card Information:\n");
1732 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1734 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1735 be32_to_cpu(dev_entry->ioa_data[0]),
1736 be32_to_cpu(dev_entry->ioa_data[1]),
1737 be32_to_cpu(dev_entry->ioa_data[2]),
1738 be32_to_cpu(dev_entry->ioa_data[3]),
1739 be32_to_cpu(dev_entry->ioa_data[4]));
1744 * ipr_log_enhanced_array_error - Log an array configuration error.
1745 * @ioa_cfg: ioa config struct
1746 * @hostrcb: hostrcb struct
1751 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1752 struct ipr_hostrcb *hostrcb)
1755 struct ipr_hostrcb_type_14_error *error;
1756 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1757 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1759 error = &hostrcb->hcam.u.error.u.type_14_error;
1763 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1764 error->protection_level,
1765 ioa_cfg->host->host_no,
1766 error->last_func_vset_res_addr.bus,
1767 error->last_func_vset_res_addr.target,
1768 error->last_func_vset_res_addr.lun);
1772 array_entry = error->array_member;
1773 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1774 ARRAY_SIZE(error->array_member));
1776 for (i = 0; i < num_entries; i++, array_entry++) {
1777 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1780 if (be32_to_cpu(error->exposed_mode_adn) == i)
1781 ipr_err("Exposed Array Member %d:\n", i);
1783 ipr_err("Array Member %d:\n", i);
1785 ipr_log_ext_vpd(&array_entry->vpd);
1786 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1787 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1788 "Expected Location");
1795 * ipr_log_array_error - Log an array configuration error.
1796 * @ioa_cfg: ioa config struct
1797 * @hostrcb: hostrcb struct
1802 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1803 struct ipr_hostrcb *hostrcb)
1806 struct ipr_hostrcb_type_04_error *error;
1807 struct ipr_hostrcb_array_data_entry *array_entry;
1808 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1810 error = &hostrcb->hcam.u.error.u.type_04_error;
1814 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1815 error->protection_level,
1816 ioa_cfg->host->host_no,
1817 error->last_func_vset_res_addr.bus,
1818 error->last_func_vset_res_addr.target,
1819 error->last_func_vset_res_addr.lun);
1823 array_entry = error->array_member;
1825 for (i = 0; i < 18; i++) {
1826 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1829 if (be32_to_cpu(error->exposed_mode_adn) == i)
1830 ipr_err("Exposed Array Member %d:\n", i);
1832 ipr_err("Array Member %d:\n", i);
1834 ipr_log_vpd(&array_entry->vpd);
1836 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1837 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1838 "Expected Location");
1843 array_entry = error->array_member2;
1850 * ipr_log_hex_data - Log additional hex IOA error data.
1851 * @ioa_cfg: ioa config struct
1852 * @data: IOA error data
1858 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1865 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1866 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1868 for (i = 0; i < len / 4; i += 4) {
1869 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1870 be32_to_cpu(data[i]),
1871 be32_to_cpu(data[i+1]),
1872 be32_to_cpu(data[i+2]),
1873 be32_to_cpu(data[i+3]));
1878 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1879 * @ioa_cfg: ioa config struct
1880 * @hostrcb: hostrcb struct
1885 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1886 struct ipr_hostrcb *hostrcb)
1888 struct ipr_hostrcb_type_17_error *error;
1891 error = &hostrcb->hcam.u.error64.u.type_17_error;
1893 error = &hostrcb->hcam.u.error.u.type_17_error;
1895 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1896 strim(error->failure_reason);
1898 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1899 be32_to_cpu(hostrcb->hcam.u.error.prc));
1900 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1901 ipr_log_hex_data(ioa_cfg, error->data,
1902 be32_to_cpu(hostrcb->hcam.length) -
1903 (offsetof(struct ipr_hostrcb_error, u) +
1904 offsetof(struct ipr_hostrcb_type_17_error, data)));
1908 * ipr_log_dual_ioa_error - Log a dual adapter error.
1909 * @ioa_cfg: ioa config struct
1910 * @hostrcb: hostrcb struct
1915 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916 struct ipr_hostrcb *hostrcb)
1918 struct ipr_hostrcb_type_07_error *error;
1920 error = &hostrcb->hcam.u.error.u.type_07_error;
1921 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1922 strim(error->failure_reason);
1924 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1925 be32_to_cpu(hostrcb->hcam.u.error.prc));
1926 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1927 ipr_log_hex_data(ioa_cfg, error->data,
1928 be32_to_cpu(hostrcb->hcam.length) -
1929 (offsetof(struct ipr_hostrcb_error, u) +
1930 offsetof(struct ipr_hostrcb_type_07_error, data)));
1933 static const struct {
1936 } path_active_desc[] = {
1937 { IPR_PATH_NO_INFO, "Path" },
1938 { IPR_PATH_ACTIVE, "Active path" },
1939 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1942 static const struct {
1945 } path_state_desc[] = {
1946 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1947 { IPR_PATH_HEALTHY, "is healthy" },
1948 { IPR_PATH_DEGRADED, "is degraded" },
1949 { IPR_PATH_FAILED, "is failed" }
1953 * ipr_log_fabric_path - Log a fabric path error
1954 * @hostrcb: hostrcb struct
1955 * @fabric: fabric descriptor
1960 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1961 struct ipr_hostrcb_fabric_desc *fabric)
1964 u8 path_state = fabric->path_state;
1965 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1966 u8 state = path_state & IPR_PATH_STATE_MASK;
1968 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1969 if (path_active_desc[i].active != active)
1972 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1973 if (path_state_desc[j].state != state)
1976 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1977 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1978 path_active_desc[i].desc, path_state_desc[j].desc,
1980 } else if (fabric->cascaded_expander == 0xff) {
1981 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1982 path_active_desc[i].desc, path_state_desc[j].desc,
1983 fabric->ioa_port, fabric->phy);
1984 } else if (fabric->phy == 0xff) {
1985 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1986 path_active_desc[i].desc, path_state_desc[j].desc,
1987 fabric->ioa_port, fabric->cascaded_expander);
1989 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1990 path_active_desc[i].desc, path_state_desc[j].desc,
1991 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1997 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1998 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2002 * ipr_log64_fabric_path - Log a fabric path error
2003 * @hostrcb: hostrcb struct
2004 * @fabric: fabric descriptor
2009 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2010 struct ipr_hostrcb64_fabric_desc *fabric)
2013 u8 path_state = fabric->path_state;
2014 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2015 u8 state = path_state & IPR_PATH_STATE_MASK;
2016 char buffer[IPR_MAX_RES_PATH_LENGTH];
2018 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2019 if (path_active_desc[i].active != active)
2022 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2023 if (path_state_desc[j].state != state)
2026 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2027 path_active_desc[i].desc, path_state_desc[j].desc,
2028 ipr_format_res_path(hostrcb->ioa_cfg,
2030 buffer, sizeof(buffer)));
2035 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2036 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2037 buffer, sizeof(buffer)));
2040 static const struct {
2043 } path_type_desc[] = {
2044 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2045 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2046 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2047 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2050 static const struct {
2053 } path_status_desc[] = {
2054 { IPR_PATH_CFG_NO_PROB, "Functional" },
2055 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2056 { IPR_PATH_CFG_FAILED, "Failed" },
2057 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2058 { IPR_PATH_NOT_DETECTED, "Missing" },
2059 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
/* SAS link-rate names, indexed by the low 4 bits of cfg->link_rate
 * (IPR_PHY_LINK_RATE_MASK), so the table must have 16 entries.
 * NOTE(review): entries other than the ones visible in the original
 * listing were reconstructed from the upstream ipr driver — confirm
 * against the adapter spec. */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"resetting",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
2082 * ipr_log_path_elem - Log a fabric path element.
2083 * @hostrcb: hostrcb struct
2084 * @cfg: fabric path element struct
2089 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2090 struct ipr_hostrcb_config_element *cfg)
2093 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2094 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2096 if (type == IPR_PATH_CFG_NOT_EXIST)
2099 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2100 if (path_type_desc[i].type != type)
2103 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2104 if (path_status_desc[j].status != status)
2107 if (type == IPR_PATH_CFG_IOA_PORT) {
2108 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2109 path_status_desc[j].desc, path_type_desc[i].desc,
2110 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2111 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2113 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2114 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2115 path_status_desc[j].desc, path_type_desc[i].desc,
2116 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2117 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2118 } else if (cfg->cascaded_expander == 0xff) {
2119 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2120 "WWN=%08X%08X\n", path_status_desc[j].desc,
2121 path_type_desc[i].desc, cfg->phy,
2122 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2123 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2124 } else if (cfg->phy == 0xff) {
2125 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2126 "WWN=%08X%08X\n", path_status_desc[j].desc,
2127 path_type_desc[i].desc, cfg->cascaded_expander,
2128 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2129 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2131 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2132 "WWN=%08X%08X\n", path_status_desc[j].desc,
2133 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2134 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2135 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2143 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2144 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2145 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2149 * ipr_log64_path_elem - Log a fabric path element.
2150 * @hostrcb: hostrcb struct
2151 * @cfg: fabric path element struct
2156 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2157 struct ipr_hostrcb64_config_element *cfg)
2160 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2161 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2162 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2163 char buffer[IPR_MAX_RES_PATH_LENGTH];
2165 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2168 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2169 if (path_type_desc[i].type != type)
2172 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2173 if (path_status_desc[j].status != status)
2176 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2177 path_status_desc[j].desc, path_type_desc[i].desc,
2178 ipr_format_res_path(hostrcb->ioa_cfg,
2179 cfg->res_path, buffer, sizeof(buffer)),
2180 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2181 be32_to_cpu(cfg->wwid[0]),
2182 be32_to_cpu(cfg->wwid[1]));
2186 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2187 "WWN=%08X%08X\n", cfg->type_status,
2188 ipr_format_res_path(hostrcb->ioa_cfg,
2189 cfg->res_path, buffer, sizeof(buffer)),
2190 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2191 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2195 * ipr_log_fabric_error - Log a fabric error.
2196 * @ioa_cfg: ioa config struct
2197 * @hostrcb: hostrcb struct
2202 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2203 struct ipr_hostrcb *hostrcb)
2205 struct ipr_hostrcb_type_20_error *error;
2206 struct ipr_hostrcb_fabric_desc *fabric;
2207 struct ipr_hostrcb_config_element *cfg;
2210 error = &hostrcb->hcam.u.error.u.type_20_error;
2211 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2212 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2214 add_len = be32_to_cpu(hostrcb->hcam.length) -
2215 (offsetof(struct ipr_hostrcb_error, u) +
2216 offsetof(struct ipr_hostrcb_type_20_error, desc));
2218 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2219 ipr_log_fabric_path(hostrcb, fabric);
2220 for_each_fabric_cfg(fabric, cfg)
2221 ipr_log_path_elem(hostrcb, cfg);
2223 add_len -= be16_to_cpu(fabric->length);
2224 fabric = (struct ipr_hostrcb_fabric_desc *)
2225 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2228 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2232 * ipr_log_sis64_array_error - Log a sis64 array error.
2233 * @ioa_cfg: ioa config struct
2234 * @hostrcb: hostrcb struct
2239 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2240 struct ipr_hostrcb *hostrcb)
2243 struct ipr_hostrcb_type_24_error *error;
2244 struct ipr_hostrcb64_array_data_entry *array_entry;
2245 char buffer[IPR_MAX_RES_PATH_LENGTH];
2246 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2248 error = &hostrcb->hcam.u.error64.u.type_24_error;
2252 ipr_err("RAID %s Array Configuration: %s\n",
2253 error->protection_level,
2254 ipr_format_res_path(ioa_cfg, error->last_res_path,
2255 buffer, sizeof(buffer)));
2259 array_entry = error->array_member;
2260 num_entries = min_t(u32, error->num_entries,
2261 ARRAY_SIZE(error->array_member));
2263 for (i = 0; i < num_entries; i++, array_entry++) {
2265 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2268 if (error->exposed_mode_adn == i)
2269 ipr_err("Exposed Array Member %d:\n", i);
2271 ipr_err("Array Member %d:\n", i);
2273 ipr_err("Array Member %d:\n", i);
2274 ipr_log_ext_vpd(&array_entry->vpd);
2275 ipr_err("Current Location: %s\n",
2276 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2277 buffer, sizeof(buffer)));
2278 ipr_err("Expected Location: %s\n",
2279 ipr_format_res_path(ioa_cfg,
2280 array_entry->expected_res_path,
2281 buffer, sizeof(buffer)));
2288 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2289 * @ioa_cfg: ioa config struct
2290 * @hostrcb: hostrcb struct
2295 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2296 struct ipr_hostrcb *hostrcb)
2298 struct ipr_hostrcb_type_30_error *error;
2299 struct ipr_hostrcb64_fabric_desc *fabric;
2300 struct ipr_hostrcb64_config_element *cfg;
2303 error = &hostrcb->hcam.u.error64.u.type_30_error;
2305 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2306 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2308 add_len = be32_to_cpu(hostrcb->hcam.length) -
2309 (offsetof(struct ipr_hostrcb64_error, u) +
2310 offsetof(struct ipr_hostrcb_type_30_error, desc));
2312 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2313 ipr_log64_fabric_path(hostrcb, fabric);
2314 for_each_fabric_cfg(fabric, cfg)
2315 ipr_log64_path_elem(hostrcb, cfg);
2317 add_len -= be16_to_cpu(fabric->length);
2318 fabric = (struct ipr_hostrcb64_fabric_desc *)
2319 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2322 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2326 * ipr_log_generic_error - Log an adapter error.
2327 * @ioa_cfg: ioa config struct
2328 * @hostrcb: hostrcb struct
2333 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2334 struct ipr_hostrcb *hostrcb)
2336 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2337 be32_to_cpu(hostrcb->hcam.length));
2341 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2344 * This function will return the index of into the ipr_error_table
2345 * for the specified IOASC. If the IOASC is not in the table,
2346 * 0 will be returned, which points to the entry used for unknown errors.
2349 * index into the ipr_error_table
2351 static u32 ipr_get_error(u32 ioasc)
2355 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2356 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2363 * ipr_handle_log_data - Log an adapter error.
2364 * @ioa_cfg: ioa config struct
2365 * @hostrcb: hostrcb struct
2367 * This function logs an adapter error to the system.
2372 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2373 struct ipr_hostrcb *hostrcb)
2378 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2381 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2382 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2385 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2387 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2389 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2390 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2391 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2392 scsi_report_bus_reset(ioa_cfg->host,
2393 hostrcb->hcam.u.error.fd_res_addr.bus);
2396 error_index = ipr_get_error(ioasc);
2398 if (!ipr_error_table[error_index].log_hcam)
2401 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2403 /* Set indication we have logged an error */
2404 ioa_cfg->errors_logged++;
2406 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2408 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2409 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2411 switch (hostrcb->hcam.overlay_id) {
2412 case IPR_HOST_RCB_OVERLAY_ID_2:
2413 ipr_log_cache_error(ioa_cfg, hostrcb);
2415 case IPR_HOST_RCB_OVERLAY_ID_3:
2416 ipr_log_config_error(ioa_cfg, hostrcb);
2418 case IPR_HOST_RCB_OVERLAY_ID_4:
2419 case IPR_HOST_RCB_OVERLAY_ID_6:
2420 ipr_log_array_error(ioa_cfg, hostrcb);
2422 case IPR_HOST_RCB_OVERLAY_ID_7:
2423 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2425 case IPR_HOST_RCB_OVERLAY_ID_12:
2426 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2428 case IPR_HOST_RCB_OVERLAY_ID_13:
2429 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2431 case IPR_HOST_RCB_OVERLAY_ID_14:
2432 case IPR_HOST_RCB_OVERLAY_ID_16:
2433 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2435 case IPR_HOST_RCB_OVERLAY_ID_17:
2436 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2438 case IPR_HOST_RCB_OVERLAY_ID_20:
2439 ipr_log_fabric_error(ioa_cfg, hostrcb);
2441 case IPR_HOST_RCB_OVERLAY_ID_23:
2442 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2444 case IPR_HOST_RCB_OVERLAY_ID_24:
2445 case IPR_HOST_RCB_OVERLAY_ID_26:
2446 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2448 case IPR_HOST_RCB_OVERLAY_ID_30:
2449 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2451 case IPR_HOST_RCB_OVERLAY_ID_1:
2452 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2454 ipr_log_generic_error(ioa_cfg, hostrcb);
2460 * ipr_process_error - Op done function for an adapter error log.
2461 * @ipr_cmd: ipr command struct
2463 * This function is the op done function for an error log host
2464 * controlled async from the adapter. It will log the error and
2465 * send the HCAM back to the adapter.
2470 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2472 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2473 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2474 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2478 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2480 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2482 list_del(&hostrcb->queue);
2483 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2486 ipr_handle_log_data(ioa_cfg, hostrcb);
2487 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2488 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2489 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2490 dev_err(&ioa_cfg->pdev->dev,
2491 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2494 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2498 * ipr_timeout - An internally generated op has timed out.
2499 * @ipr_cmd: ipr command struct
2501 * This function blocks host requests and initiates an
2507 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2509 unsigned long lock_flags = 0;
2510 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2513 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2515 ioa_cfg->errors_logged++;
2516 dev_err(&ioa_cfg->pdev->dev,
2517 "Adapter being reset due to command timeout.\n");
2519 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2520 ioa_cfg->sdt_state = GET_DUMP;
2522 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2523 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2525 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2530 * ipr_oper_timeout - Adapter timed out transitioning to operational
2531 * @ipr_cmd: ipr command struct
2533 * This function blocks host requests and initiates an
2539 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2541 unsigned long lock_flags = 0;
2542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2545 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2547 ioa_cfg->errors_logged++;
2548 dev_err(&ioa_cfg->pdev->dev,
2549 "Adapter timed out transitioning to operational.\n");
2551 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2552 ioa_cfg->sdt_state = GET_DUMP;
2554 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2556 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2557 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2560 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2565 * ipr_find_ses_entry - Find matching SES in SES table
2566 * @res: resource entry struct of SES
2569 * pointer to SES table entry / NULL on failure
2571 static const struct ipr_ses_table_entry *
2572 ipr_find_ses_entry(struct ipr_resource_entry *res)
2575 struct ipr_std_inq_vpids *vpids;
2576 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2578 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2579 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2580 if (ste->compare_product_id_byte[j] == 'X') {
2581 vpids = &res->std_inq_data.vpids;
2582 if (vpids->product_id[j] == ste->product_id[j])
2590 if (matches == IPR_PROD_ID_LEN)
2598 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2599 * @ioa_cfg: ioa config struct
2601 * @bus_width: bus width
2604 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2605 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2606 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2607 * max 160MHz = max 320MB/sec).
2609 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2611 struct ipr_resource_entry *res;
2612 const struct ipr_ses_table_entry *ste;
2613 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2615 /* Loop through each config table entry in the config table buffer */
2616 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2617 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2620 if (bus != res->bus)
2623 if (!(ste = ipr_find_ses_entry(res)))
2626 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2629 return max_xfer_rate;
2633 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2634 * @ioa_cfg: ioa config struct
2635 * @max_delay: max delay in micro-seconds to wait
2637 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2640 * 0 on success / other on failure
2642 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2644 volatile u32 pcii_reg;
2647 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2648 while (delay < max_delay) {
2649 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2651 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2654 /* udelay cannot be used if delay is more than a few milliseconds */
2655 if ((delay / 1000) > MAX_UDELAY_MS)
2656 mdelay(delay / 1000);
2666 * ipr_get_sis64_dump_data_section - Dump IOA memory
2667 * @ioa_cfg: ioa config struct
2668 * @start_addr: adapter address to dump
2669 * @dest: destination kernel buffer
2670 * @length_in_words: length to dump in 4 byte words
2675 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2677 __be32 *dest, u32 length_in_words)
2681 for (i = 0; i < length_in_words; i++) {
2682 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2683 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2691 * ipr_get_ldump_data_section - Dump IOA memory
2692 * @ioa_cfg: ioa config struct
2693 * @start_addr: adapter address to dump
2694 * @dest: destination kernel buffer
2695 * @length_in_words: length to dump in 4 byte words
2698 * 0 on success / -EIO on failure
2700 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2702 __be32 *dest, u32 length_in_words)
2704 volatile u32 temp_pcii_reg;
2708 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2709 dest, length_in_words);
2711 /* Write IOA interrupt reg starting LDUMP state */
2712 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2713 ioa_cfg->regs.set_uproc_interrupt_reg32);
2715 /* Wait for IO debug acknowledge */
2716 if (ipr_wait_iodbg_ack(ioa_cfg,
2717 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2718 dev_err(&ioa_cfg->pdev->dev,
2719 "IOA dump long data transfer timeout\n");
2723 /* Signal LDUMP interlocked - clear IO debug ack */
2724 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2725 ioa_cfg->regs.clr_interrupt_reg);
2727 /* Write Mailbox with starting address */
2728 writel(start_addr, ioa_cfg->ioa_mailbox);
2730 /* Signal address valid - clear IOA Reset alert */
2731 writel(IPR_UPROCI_RESET_ALERT,
2732 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2734 for (i = 0; i < length_in_words; i++) {
2735 /* Wait for IO debug acknowledge */
2736 if (ipr_wait_iodbg_ack(ioa_cfg,
2737 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2738 dev_err(&ioa_cfg->pdev->dev,
2739 "IOA dump short data transfer timeout\n");
2743 /* Read data from mailbox and increment destination pointer */
2744 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2747 /* For all but the last word of data, signal data received */
2748 if (i < (length_in_words - 1)) {
2749 /* Signal dump data received - Clear IO debug Ack */
2750 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2751 ioa_cfg->regs.clr_interrupt_reg);
2755 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2756 writel(IPR_UPROCI_RESET_ALERT,
2757 ioa_cfg->regs.set_uproc_interrupt_reg32);
2759 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2760 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2762 /* Signal dump data received - Clear IO debug Ack */
2763 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2764 ioa_cfg->regs.clr_interrupt_reg);
2766 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2767 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2769 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2771 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2781 #ifdef CONFIG_SCSI_IPR_DUMP
2783 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2784 * @ioa_cfg: ioa config struct
2785 * @pci_address: adapter address
2786 * @length: length of data to copy
2788 * Copy data from PCI adapter to kernel buffer.
2789 * Note: length MUST be a 4 byte multiple
2791 * 0 on success / other on failure
2793 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2794 unsigned long pci_address, u32 length)
2796 int bytes_copied = 0;
2797 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2799 unsigned long lock_flags = 0;
2800 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2803 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2805 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2807 while (bytes_copied < length &&
2808 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2809 if (ioa_dump->page_offset >= PAGE_SIZE ||
2810 ioa_dump->page_offset == 0) {
2811 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2815 return bytes_copied;
2818 ioa_dump->page_offset = 0;
2819 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2820 ioa_dump->next_page_index++;
2822 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2824 rem_len = length - bytes_copied;
2825 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2826 cur_len = min(rem_len, rem_page_len);
2828 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2829 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2832 rc = ipr_get_ldump_data_section(ioa_cfg,
2833 pci_address + bytes_copied,
2834 &page[ioa_dump->page_offset / 4],
2835 (cur_len / sizeof(u32)));
2837 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2840 ioa_dump->page_offset += cur_len;
2841 bytes_copied += cur_len;
2849 return bytes_copied;
2853 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2854 * @hdr: dump entry header struct
2859 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2861 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2863 hdr->offset = sizeof(*hdr);
2864 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2868 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2869 * @ioa_cfg: ioa config struct
2870 * @driver_dump: driver dump struct
2875 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2876 struct ipr_driver_dump *driver_dump)
2878 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2880 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2881 driver_dump->ioa_type_entry.hdr.len =
2882 sizeof(struct ipr_dump_ioa_type_entry) -
2883 sizeof(struct ipr_dump_entry_header);
2884 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2885 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2886 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2887 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2888 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2889 ucode_vpd->minor_release[1];
2890 driver_dump->hdr.num_entries++;
2894 * ipr_dump_version_data - Fill in the driver version in the dump.
2895 * @ioa_cfg: ioa config struct
2896 * @driver_dump: driver dump struct
2901 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2902 struct ipr_driver_dump *driver_dump)
2904 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2905 driver_dump->version_entry.hdr.len =
2906 sizeof(struct ipr_dump_version_entry) -
2907 sizeof(struct ipr_dump_entry_header);
2908 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2909 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2910 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2911 driver_dump->hdr.num_entries++;
2915 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2916 * @ioa_cfg: ioa config struct
2917 * @driver_dump: driver dump struct
2922 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2923 struct ipr_driver_dump *driver_dump)
2925 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2926 driver_dump->trace_entry.hdr.len =
2927 sizeof(struct ipr_dump_trace_entry) -
2928 sizeof(struct ipr_dump_entry_header);
2929 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2930 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2931 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2932 driver_dump->hdr.num_entries++;
2936 * ipr_dump_location_data - Fill in the IOA location in the dump.
2937 * @ioa_cfg: ioa config struct
2938 * @driver_dump: driver dump struct
2943 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2944 struct ipr_driver_dump *driver_dump)
2946 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2947 driver_dump->location_entry.hdr.len =
2948 sizeof(struct ipr_dump_location_entry) -
2949 sizeof(struct ipr_dump_entry_header);
2950 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2951 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2952 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2953 driver_dump->hdr.num_entries++;
2957 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2958 * @ioa_cfg: ioa config struct
2959 * @dump: dump struct
/* Collects driver-state entries plus adapter memory sections described by
 * the IOA's Smart Dump Table (SDT) into the dump buffer. Runs from the
 * worker thread once sdt_state == READ_DUMP. */
2964 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2966 unsigned long start_addr, sdt_word;
2967 unsigned long lock_flags = 0;
2968 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2969 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2970 u32 num_entries, max_num_entries, start_off, end_off;
2971 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2972 struct ipr_sdt *sdt;
2978 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail if no dump was actually requested */
2980 if (ioa_cfg->sdt_state != READ_DUMP) {
2981 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* NOTE(review): sis64 sleeps (lock dropped) before reading the mailbox -
 * presumably to let the adapter finish publishing the SDT; confirm */
2985 if (ioa_cfg->sis64) {
2986 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2987 ssleep(IPR_DUMP_DELAY_SECONDS);
2988 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* The mailbox holds the adapter-side address of the SDT */
2991 start_addr = readl(ioa_cfg->ioa_mailbox);
2993 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2994 dev_err(&ioa_cfg->pdev->dev,
2995 "Invalid dump table format: %lx\n", start_addr);
2996 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3000 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3002 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3004 /* Initialize the overall dump header */
3005 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3006 driver_dump->hdr.num_entries = 1;
3007 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3008 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3009 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3010 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Driver-side entries: version, location, adapter type, trace buffer */
3012 ipr_dump_version_data(ioa_cfg, driver_dump);
3013 ipr_dump_location_data(ioa_cfg, driver_dump);
3014 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3015 ipr_dump_trace_data(ioa_cfg, driver_dump);
3017 /* Update dump_header */
3018 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3020 /* IOA Dump entry */
3021 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3022 ioa_dump->hdr.len = 0;
3023 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3024 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3026 /* First entries in sdt are actually a list of dump addresses and
3027 lengths to gather the real dump data. sdt represents the pointer
3028 to the ioa generated dump table. Dump data will be extracted based
3029 on entries in this table */
3030 sdt = &ioa_dump->sdt;
/* Table geometry / size limits differ between fmt3 (sis64) and fmt2 */
3032 if (ioa_cfg->sis64) {
3033 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3034 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3036 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3037 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
/* Pull the whole SDT (header + entry array) from the adapter */
3040 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3041 (max_num_entries * sizeof(struct ipr_sdt_entry));
3042 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3043 bytes_to_copy / sizeof(__be32));
3045 /* Smart Dump table is ready to use and the first entry is valid */
3046 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3047 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3048 dev_err(&ioa_cfg->pdev->dev,
3049 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3050 rc, be32_to_cpu(sdt->hdr.state));
3051 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3052 ioa_cfg->sdt_state = DUMP_OBTAINED;
3053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3057 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Never walk past the end of the locally-sized SDT buffer */
3059 if (num_entries > max_num_entries)
3060 num_entries = max_num_entries;
3062 /* Update dump length to the actual data to be copied */
3063 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3065 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3067 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
/* Drop the host lock for the slow per-section copies below */
3069 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3071 for (i = 0; i < num_entries; i++) {
3072 if (ioa_dump->hdr.len > max_dump_size) {
3073 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3077 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3078 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
/* sis64: end_token is the section length directly */
3080 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
/* fmt2: length = end offset - start offset within the region */
3082 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3083 end_off = be32_to_cpu(sdt->entry[i].end_token);
3085 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3086 bytes_to_copy = end_off - start_off;
/* Oversized sections are invalidated rather than copied */
3091 if (bytes_to_copy > max_dump_size) {
3092 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3096 /* Copy data from adapter to driver buffers */
3097 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3100 ioa_dump->hdr.len += bytes_copied;
/* Short copy => mark dump as only qualified success and stop */
3102 if (bytes_copied != bytes_to_copy) {
3103 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3110 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3112 /* Update dump_header */
3113 driver_dump->hdr.len += ioa_dump->hdr.len;
3115 ioa_cfg->sdt_state = DUMP_OBTAINED;
3120 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3124 * ipr_release_dump - Free adapter dump memory
3125 * @kref: kref struct
3130 static void ipr_release_dump(struct kref *kref)
3132 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3133 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3134 unsigned long lock_flags = 0;
3138 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3139 ioa_cfg->dump = NULL;
3140 ioa_cfg->sdt_state = INACTIVE;
3141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3144 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3146 vfree(dump->ioa_dump.ioa_data);
3152 * ipr_worker_thread - Worker thread
3153 * @work: ioa config struct
3155 * Called at task level from a work thread. This function takes care
3156 * of adding and removing device from the mid-layer as configuration
3157 * changes are detected by the adapter.
3162 static void ipr_worker_thread(struct work_struct *work)
3164 unsigned long lock_flags;
3165 struct ipr_resource_entry *res;
3166 struct scsi_device *sdev;
3167 struct ipr_dump *dump;
3168 struct ipr_ioa_cfg *ioa_cfg =
3169 container_of(work, struct ipr_ioa_cfg, work_q);
3170 u8 bus, target, lun;
3174 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3176 if (ioa_cfg->sdt_state == READ_DUMP) {
3177 dump = ioa_cfg->dump;
3179 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3182 kref_get(&dump->kref);
3183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3184 ipr_get_ioa_dump(ioa_cfg, dump);
3185 kref_put(&dump->kref, ipr_release_dump);
3187 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3188 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3189 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3197 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3198 !ioa_cfg->allow_ml_add_del) {
3199 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3203 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3204 if (res->del_from_ml && res->sdev) {
3207 if (!scsi_device_get(sdev)) {
3208 if (!res->add_to_ml)
3209 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3211 res->del_from_ml = 0;
3212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3213 scsi_remove_device(sdev);
3214 scsi_device_put(sdev);
3215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3223 if (res->add_to_ml) {
3225 target = res->target;
3228 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3229 scsi_add_device(ioa_cfg->host, bus, target, lun);
3230 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3236 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3240 #ifdef CONFIG_SCSI_IPR_TRACE
3242 * ipr_read_trace - Dump the adapter trace
3243 * @filp: open sysfs file
3244 * @kobj: kobject struct
3245 * @bin_attr: bin_attribute struct
3248 * @count: buffer size
3251 * number of bytes printed to buffer
3253 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3254 struct bin_attribute *bin_attr,
3255 char *buf, loff_t off, size_t count)
3257 struct device *dev = container_of(kobj, struct device, kobj);
3258 struct Scsi_Host *shost = class_to_shost(dev);
3259 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3260 unsigned long lock_flags = 0;
3263 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3264 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3266 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3271 static struct bin_attribute ipr_trace_attr = {
3277 .read = ipr_read_trace,
3282 * ipr_show_fw_version - Show the firmware version
3283 * @dev: class device struct
3287 * number of bytes printed to buffer
3289 static ssize_t ipr_show_fw_version(struct device *dev,
3290 struct device_attribute *attr, char *buf)
3292 struct Scsi_Host *shost = class_to_shost(dev);
3293 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3294 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3295 unsigned long lock_flags = 0;
3298 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3299 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3300 ucode_vpd->major_release, ucode_vpd->card_type,
3301 ucode_vpd->minor_release[0],
3302 ucode_vpd->minor_release[1]);
3303 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3307 static struct device_attribute ipr_fw_version_attr = {
3309 .name = "fw_version",
3312 .show = ipr_show_fw_version,
3316 * ipr_show_log_level - Show the adapter's error logging level
3317 * @dev: class device struct
3321 * number of bytes printed to buffer
3323 static ssize_t ipr_show_log_level(struct device *dev,
3324 struct device_attribute *attr, char *buf)
3326 struct Scsi_Host *shost = class_to_shost(dev);
3327 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3328 unsigned long lock_flags = 0;
3331 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3332 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3338 * ipr_store_log_level - Change the adapter's error logging level
3339 * @dev: class device struct
3343 * number of bytes printed to buffer
3345 static ssize_t ipr_store_log_level(struct device *dev,
3346 struct device_attribute *attr,
3347 const char *buf, size_t count)
3349 struct Scsi_Host *shost = class_to_shost(dev);
3350 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3351 unsigned long lock_flags = 0;
3353 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3354 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3359 static struct device_attribute ipr_log_level_attr = {
3361 .name = "log_level",
3362 .mode = S_IRUGO | S_IWUSR,
3364 .show = ipr_show_log_level,
3365 .store = ipr_store_log_level
3369 * ipr_store_diagnostics - IOA Diagnostics interface
3370 * @dev: device struct
3372 * @count: buffer size
3374 * This function will reset the adapter and wait a reasonable
3375 * amount of time for any errors that the adapter might log.
3378 * count on success / other on failure
3380 static ssize_t ipr_store_diagnostics(struct device *dev,
3381 struct device_attribute *attr,
3382 const char *buf, size_t count)
3384 struct Scsi_Host *shost = class_to_shost(dev);
3385 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3386 unsigned long lock_flags = 0;
3389 if (!capable(CAP_SYS_ADMIN))
3392 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3393 while (ioa_cfg->in_reset_reload) {
3394 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3395 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3396 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3399 ioa_cfg->errors_logged = 0;
3400 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3402 if (ioa_cfg->in_reset_reload) {
3403 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3406 /* Wait for a second for any errors to be logged */
3409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3414 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3421 static struct device_attribute ipr_diagnostics_attr = {
3423 .name = "run_diagnostics",
3426 .store = ipr_store_diagnostics
3430 * ipr_show_adapter_state - Show the adapter's state
3431 * @class_dev: device struct
3435 * number of bytes printed to buffer
3437 static ssize_t ipr_show_adapter_state(struct device *dev,
3438 struct device_attribute *attr, char *buf)
3440 struct Scsi_Host *shost = class_to_shost(dev);
3441 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3442 unsigned long lock_flags = 0;
3445 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3446 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3447 len = snprintf(buf, PAGE_SIZE, "offline\n");
3449 len = snprintf(buf, PAGE_SIZE, "online\n");
3450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3455 * ipr_store_adapter_state - Change adapter state
3456 * @dev: device struct
3458 * @count: buffer size
3460 * This function will change the adapter's state.
3463 * count on success / other on failure
3465 static ssize_t ipr_store_adapter_state(struct device *dev,
3466 struct device_attribute *attr,
3467 const char *buf, size_t count)
3469 struct Scsi_Host *shost = class_to_shost(dev);
3470 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3471 unsigned long lock_flags;
3472 int result = count, i;
3474 if (!capable(CAP_SYS_ADMIN))
3477 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3478 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3479 !strncmp(buf, "online", 6)) {
3480 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3481 spin_lock(&ioa_cfg->hrrq[i]._lock);
3482 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3483 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3486 ioa_cfg->reset_retries = 0;
3487 ioa_cfg->in_ioa_bringdown = 0;
3488 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3490 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3491 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3496 static struct device_attribute ipr_ioa_state_attr = {
3498 .name = "online_state",
3499 .mode = S_IRUGO | S_IWUSR,
3501 .show = ipr_show_adapter_state,
3502 .store = ipr_store_adapter_state
3506 * ipr_store_reset_adapter - Reset the adapter
3507 * @dev: device struct
3509 * @count: buffer size
3511 * This function will reset the adapter.
3514 * count on success / other on failure
3516 static ssize_t ipr_store_reset_adapter(struct device *dev,
3517 struct device_attribute *attr,
3518 const char *buf, size_t count)
3520 struct Scsi_Host *shost = class_to_shost(dev);
3521 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3522 unsigned long lock_flags;
3525 if (!capable(CAP_SYS_ADMIN))
3528 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3529 if (!ioa_cfg->in_reset_reload)
3530 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3531 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3532 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3537 static struct device_attribute ipr_ioa_reset_attr = {
3539 .name = "reset_host",
3542 .store = ipr_store_reset_adapter
3545 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3547 * ipr_show_iopoll_weight - Show ipr polling mode
3548 * @dev: class device struct
3552 * number of bytes printed to buffer
3554 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3555 struct device_attribute *attr, char *buf)
3557 struct Scsi_Host *shost = class_to_shost(dev);
3558 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3559 unsigned long lock_flags = 0;
3562 spin_lock_irqsave(shost->host_lock, lock_flags);
3563 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3564 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3570 * ipr_store_iopoll_weight - Change the adapter's polling mode
3571 * @dev: class device struct
3575 * number of bytes printed to buffer
3577 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3578 struct device_attribute *attr,
3579 const char *buf, size_t count)
3581 struct Scsi_Host *shost = class_to_shost(dev);
3582 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3583 unsigned long user_iopoll_weight;
3584 unsigned long lock_flags = 0;
3587 if (!ioa_cfg->sis64) {
3588 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3591 if (kstrtoul(buf, 10, &user_iopoll_weight))
3594 if (user_iopoll_weight > 256) {
3595 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3599 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3600 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
3604 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3605 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3606 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3607 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3610 spin_lock_irqsave(shost->host_lock, lock_flags);
3611 ioa_cfg->iopoll_weight = user_iopoll_weight;
3612 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3613 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3614 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3615 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3616 ioa_cfg->iopoll_weight, ipr_iopoll);
3617 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3620 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3625 static struct device_attribute ipr_iopoll_weight_attr = {
3627 .name = "iopoll_weight",
3628 .mode = S_IRUGO | S_IWUSR,
3630 .show = ipr_show_iopoll_weight,
3631 .store = ipr_store_iopoll_weight
3635 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3636 * @buf_len: buffer length
3638 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3639 * list to use for microcode download
3642 * pointer to sglist / NULL on failure
3644 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3646 int sg_size, order, bsize_elem, num_elem, i, j;
3647 struct ipr_sglist *sglist;
3648 struct scatterlist *scatterlist;
3651 /* Get the minimum size per scatter/gather element */
3652 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3654 /* Get the actual size per element */
3655 order = get_order(sg_size);
3657 /* Determine the actual number of bytes per element */
3658 bsize_elem = PAGE_SIZE * (1 << order);
3660 /* Determine the actual number of sg entries needed */
3661 if (buf_len % bsize_elem)
3662 num_elem = (buf_len / bsize_elem) + 1;
3664 num_elem = buf_len / bsize_elem;
3666 /* Allocate a scatter/gather list for the DMA */
3667 sglist = kzalloc(sizeof(struct ipr_sglist) +
3668 (sizeof(struct scatterlist) * (num_elem - 1)),
3671 if (sglist == NULL) {
3676 scatterlist = sglist->scatterlist;
3677 sg_init_table(scatterlist, num_elem);
3679 sglist->order = order;
3680 sglist->num_sg = num_elem;
3682 /* Allocate a bunch of sg elements */
3683 for (i = 0; i < num_elem; i++) {
3684 page = alloc_pages(GFP_KERNEL, order);
3688 /* Free up what we already allocated */
3689 for (j = i - 1; j >= 0; j--)
3690 __free_pages(sg_page(&scatterlist[j]), order);
3695 sg_set_page(&scatterlist[i], page, 0, 0);
3702 * ipr_free_ucode_buffer - Frees a microcode download buffer
3703 * @p_dnld: scatter/gather list pointer
3705 * Free a DMA'able ucode download buffer previously allocated with
3706 * ipr_alloc_ucode_buffer
3711 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3715 for (i = 0; i < sglist->num_sg; i++)
3716 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3722 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3723 * @sglist: scatter/gather list pointer
3724 * @buffer: buffer pointer
3725 * @len: buffer length
3727 * Copy a microcode image from a user buffer into a buffer allocated by
3728 * ipr_alloc_ucode_buffer
3731 * 0 on success / other on failure
3733 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3734 u8 *buffer, u32 len)
3736 int bsize_elem, i, result = 0;
3737 struct scatterlist *scatterlist;
3740 /* Determine the actual number of bytes per element */
3741 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3743 scatterlist = sglist->scatterlist;
3745 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3746 struct page *page = sg_page(&scatterlist[i]);
3749 memcpy(kaddr, buffer, bsize_elem);
3752 scatterlist[i].length = bsize_elem;
3760 if (len % bsize_elem) {
3761 struct page *page = sg_page(&scatterlist[i]);
3764 memcpy(kaddr, buffer, len % bsize_elem);
3767 scatterlist[i].length = len % bsize_elem;
3770 sglist->buffer_len = len;
3775 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3776 * @ipr_cmd: ipr command struct
3777 * @sglist: scatter/gather list
3779 * Builds a microcode download IOA data list (IOADL).
3782 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3783 struct ipr_sglist *sglist)
3785 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3786 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3787 struct scatterlist *scatterlist = sglist->scatterlist;
3790 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3791 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3792 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3795 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3796 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3797 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3798 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3799 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3802 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3806 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3807 * @ipr_cmd: ipr command struct
3808 * @sglist: scatter/gather list
3810 * Builds a microcode download IOA data list (IOADL).
3813 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3814 struct ipr_sglist *sglist)
3816 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3817 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3818 struct scatterlist *scatterlist = sglist->scatterlist;
3821 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3822 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3823 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3826 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3828 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3829 ioadl[i].flags_and_data_len =
3830 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3832 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3835 ioadl[i-1].flags_and_data_len |=
3836 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3840 * ipr_update_ioa_ucode - Update IOA's microcode
3841 * @ioa_cfg: ioa config struct
3842 * @sglist: scatter/gather list
3844 * Initiate an adapter reset to update the IOA's microcode
3847 * 0 on success / -EIO on failure
3849 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3850 struct ipr_sglist *sglist)
3852 unsigned long lock_flags;
3854 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3855 while (ioa_cfg->in_reset_reload) {
3856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3857 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3858 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3861 if (ioa_cfg->ucode_sglist) {
3862 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3863 dev_err(&ioa_cfg->pdev->dev,
3864 "Microcode download already in progress\n");
3868 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3869 sglist->num_sg, DMA_TO_DEVICE);
3871 if (!sglist->num_dma_sg) {
3872 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3873 dev_err(&ioa_cfg->pdev->dev,
3874 "Failed to map microcode download buffer!\n");
3878 ioa_cfg->ucode_sglist = sglist;
3879 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3880 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3881 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3883 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3884 ioa_cfg->ucode_sglist = NULL;
3885 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3890 * ipr_store_update_fw - Update the firmware on the adapter
3891 * @class_dev: device struct
3893 * @count: buffer size
3895 * This function will update the firmware on the adapter.
3898 * count on success / other on failure
3900 static ssize_t ipr_store_update_fw(struct device *dev,
3901 struct device_attribute *attr,
3902 const char *buf, size_t count)
3904 struct Scsi_Host *shost = class_to_shost(dev);
3905 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3906 struct ipr_ucode_image_header *image_hdr;
3907 const struct firmware *fw_entry;
3908 struct ipr_sglist *sglist;
3911 int len, result, dnld_size;
3913 if (!capable(CAP_SYS_ADMIN))
3916 len = snprintf(fname, 99, "%s", buf);
3917 fname[len-1] = '\0';
3919 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3920 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3924 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3926 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3927 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3928 sglist = ipr_alloc_ucode_buffer(dnld_size);
3931 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3932 release_firmware(fw_entry);
3936 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3939 dev_err(&ioa_cfg->pdev->dev,
3940 "Microcode buffer copy to DMA buffer failed\n");
3944 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3946 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3951 ipr_free_ucode_buffer(sglist);
3952 release_firmware(fw_entry);
3956 static struct device_attribute ipr_update_fw_attr = {
3958 .name = "update_fw",
3961 .store = ipr_store_update_fw
3965 * ipr_show_fw_type - Show the adapter's firmware type.
3966 * @dev: class device struct
3970 * number of bytes printed to buffer
3972 static ssize_t ipr_show_fw_type(struct device *dev,
3973 struct device_attribute *attr, char *buf)
3975 struct Scsi_Host *shost = class_to_shost(dev);
3976 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3977 unsigned long lock_flags = 0;
3980 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3981 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3982 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3986 static struct device_attribute ipr_ioa_fw_type_attr = {
3991 .show = ipr_show_fw_type
3994 static struct device_attribute *ipr_ioa_attrs[] = {
3995 &ipr_fw_version_attr,
3996 &ipr_log_level_attr,
3997 &ipr_diagnostics_attr,
3998 &ipr_ioa_state_attr,
3999 &ipr_ioa_reset_attr,
4000 &ipr_update_fw_attr,
4001 &ipr_ioa_fw_type_attr,
4002 &ipr_iopoll_weight_attr,
4006 #ifdef CONFIG_SCSI_IPR_DUMP
/*
 * ipr_read_dump - sysfs binary read of the adapter dump.
 *
 * The dump image is presented as three consecutive regions:
 *   1. the driver dump header/struct (dump->driver_dump),
 *   2. the SDT (self-describing table) portion of the IOA dump, whose
 *      size depends on the SIS format (sis64 vs. FMT2),
 *   3. the raw IOA data pages (dump->ioa_dump.ioa_data[]).
 * Each region consumes part of (off, count) and advances buf.
 *
 * NOTE(review): this chunk is a lossy extraction - braces, "else" arms,
 * the buf/off/count advancement lines, and return statements were
 * dropped. The lines below are kept byte-for-byte; only comments added.
 */
4008 * ipr_read_dump - Dump the adapter
4009 * @filp: open sysfs file
4010 * @kobj: kobject struct
4011 * @bin_attr: bin_attribute struct
4014 * @count: buffer size
4017 * number of bytes printed to buffer
4019 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4020 struct bin_attribute *bin_attr,
4021 char *buf, loff_t off, size_t count)
4023 struct device *cdev = container_of(kobj, struct device, kobj);
4024 struct Scsi_Host *shost = class_to_shost(cdev);
4025 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4026 struct ipr_dump *dump;
4027 unsigned long lock_flags = 0;
/* Only root may read the dump */
4032 if (!capable(CAP_SYS_ADMIN))
4035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4036 dump = ioa_cfg->dump;
/* Nothing to read until a dump has actually been obtained */
4038 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4039 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pin the dump so a concurrent free cannot pull it out from under us */
4042 kref_get(&dump->kref);
4043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Reads past the end of the dump return 0 (EOF) */
4045 if (off > dump->driver_dump.hdr.len) {
4046 kref_put(&dump->kref, ipr_release_dump);
/* Clamp the request to the dump length */
4050 if (off + count > dump->driver_dump.hdr.len) {
4051 count = dump->driver_dump.hdr.len - off;
/* Region 1: the driver dump structure itself */
4055 if (count && off < sizeof(dump->driver_dump)) {
4056 if (off + count > sizeof(dump->driver_dump))
4057 len = sizeof(dump->driver_dump) - off;
4060 src = (u8 *)&dump->driver_dump + off;
4061 memcpy(buf, src, len);
/* Rebase off so it now indexes into the IOA dump portion */
4067 off -= sizeof(dump->driver_dump);
/* Region 2 size: sis64 uses the actual SDT entry count ... */
4070 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4071 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4072 sizeof(struct ipr_sdt_entry));
/* ... while FMT2 adapters have a fixed SDT entry count */
4074 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4075 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4077 if (count && off < sdt_end) {
4078 if (off + count > sdt_end)
4079 len = sdt_end - off;
4082 src = (u8 *)&dump->ioa_dump + off;
4083 memcpy(buf, src, len);
/* Region 3: raw IOA data, copied page by page; split a request
 * that straddles a page boundary */
4092 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4093 len = PAGE_ALIGN(off) - off;
4096 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4097 src += off & ~PAGE_MASK;
4098 memcpy(buf, src, len);
/* Drop our reference; last put frees via ipr_release_dump */
4104 kref_put(&dump->kref, ipr_release_dump);
4109 * ipr_alloc_dump - Prepare for adapter dump
4110 * @ioa_cfg: ioa config struct
4113 * 0 on success / other on failure
4115 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4117 struct ipr_dump *dump;
4119 unsigned long lock_flags = 0;
4121 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4124 ipr_err("Dump memory allocation failed\n");
4129 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4131 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4134 ipr_err("Dump memory allocation failed\n");
4139 dump->ioa_dump.ioa_data = ioa_data;
4141 kref_init(&dump->kref);
4142 dump->ioa_cfg = ioa_cfg;
4144 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4146 if (INACTIVE != ioa_cfg->sdt_state) {
4147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4148 vfree(dump->ioa_dump.ioa_data);
4153 ioa_cfg->dump = dump;
4154 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4155 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4156 ioa_cfg->dump_taken = 1;
4157 schedule_work(&ioa_cfg->work_q);
4159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4165 * ipr_free_dump - Free adapter dump memory
4166 * @ioa_cfg: ioa config struct
4169 * 0 on success / other on failure
4171 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4173 struct ipr_dump *dump;
4174 unsigned long lock_flags = 0;
4178 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4179 dump = ioa_cfg->dump;
4181 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4185 ioa_cfg->dump = NULL;
4186 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4188 kref_put(&dump->kref, ipr_release_dump);
4195 * ipr_write_dump - Setup dump state of adapter
4196 * @filp: open sysfs file
4197 * @kobj: kobject struct
4198 * @bin_attr: bin_attribute struct
4201 * @count: buffer size
4204 * number of bytes printed to buffer
4206 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4207 struct bin_attribute *bin_attr,
4208 char *buf, loff_t off, size_t count)
4210 struct device *cdev = container_of(kobj, struct device, kobj);
4211 struct Scsi_Host *shost = class_to_shost(cdev);
4212 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4215 if (!capable(CAP_SYS_ADMIN))
4219 rc = ipr_alloc_dump(ioa_cfg);
4220 else if (buf[0] == '0')
4221 rc = ipr_free_dump(ioa_cfg);
4231 static struct bin_attribute ipr_dump_attr = {
4234 .mode = S_IRUSR | S_IWUSR,
4237 .read = ipr_read_dump,
4238 .write = ipr_write_dump
/* CONFIG_SCSI_IPR_DUMP disabled: freeing a dump is a no-op.
 * Dropped the stray ';' after the function body - it was an empty
 * declaration that -Wpedantic warns about.
 */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4245 * ipr_change_queue_depth - Change the device's queue depth
4246 * @sdev: scsi device struct
4247 * @qdepth: depth to set
4248 * @reason: calling context
4253 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4256 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4257 struct ipr_resource_entry *res;
4258 unsigned long lock_flags = 0;
4260 if (reason != SCSI_QDEPTH_DEFAULT)
4263 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4264 res = (struct ipr_resource_entry *)sdev->hostdata;
4266 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4267 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4270 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4271 return sdev->queue_depth;
4275 * ipr_change_queue_type - Change the device's queue type
4276 * @dsev: scsi device struct
4277 * @tag_type: type of tags to use
4280 * actual queue type set
4282 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4284 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4285 struct ipr_resource_entry *res;
4286 unsigned long lock_flags = 0;
4288 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4289 res = (struct ipr_resource_entry *)sdev->hostdata;
4292 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4294 * We don't bother quiescing the device here since the
4295 * adapter firmware does it for us.
4297 scsi_set_tag_type(sdev, tag_type);
4300 scsi_activate_tcq(sdev, sdev->queue_depth);
4302 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4308 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4313 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4314 * @dev: device struct
4315 * @attr: device attribute structure
4319 * number of bytes printed to buffer
4321 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4323 struct scsi_device *sdev = to_scsi_device(dev);
4324 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4325 struct ipr_resource_entry *res;
4326 unsigned long lock_flags = 0;
4327 ssize_t len = -ENXIO;
4329 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4330 res = (struct ipr_resource_entry *)sdev->hostdata;
4332 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4337 static struct device_attribute ipr_adapter_handle_attr = {
4339 .name = "adapter_handle",
4342 .show = ipr_show_adapter_handle
4346 * ipr_show_resource_path - Show the resource path or the resource address for
4348 * @dev: device struct
4349 * @attr: device attribute structure
4353 * number of bytes printed to buffer
4355 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4357 struct scsi_device *sdev = to_scsi_device(dev);
4358 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359 struct ipr_resource_entry *res;
4360 unsigned long lock_flags = 0;
4361 ssize_t len = -ENXIO;
4362 char buffer[IPR_MAX_RES_PATH_LENGTH];
4364 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4365 res = (struct ipr_resource_entry *)sdev->hostdata;
4366 if (res && ioa_cfg->sis64)
4367 len = snprintf(buf, PAGE_SIZE, "%s\n",
4368 __ipr_format_res_path(res->res_path, buffer,
4371 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4372 res->bus, res->target, res->lun);
4374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4378 static struct device_attribute ipr_resource_path_attr = {
4380 .name = "resource_path",
4383 .show = ipr_show_resource_path
4387 * ipr_show_device_id - Show the device_id for this device.
4388 * @dev: device struct
4389 * @attr: device attribute structure
4393 * number of bytes printed to buffer
4395 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4397 struct scsi_device *sdev = to_scsi_device(dev);
4398 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4399 struct ipr_resource_entry *res;
4400 unsigned long lock_flags = 0;
4401 ssize_t len = -ENXIO;
4403 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4404 res = (struct ipr_resource_entry *)sdev->hostdata;
4405 if (res && ioa_cfg->sis64)
4406 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4408 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4414 static struct device_attribute ipr_device_id_attr = {
4416 .name = "device_id",
4419 .show = ipr_show_device_id
4423 * ipr_show_resource_type - Show the resource type for this device.
4424 * @dev: device struct
4425 * @attr: device attribute structure
4429 * number of bytes printed to buffer
4431 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4433 struct scsi_device *sdev = to_scsi_device(dev);
4434 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4435 struct ipr_resource_entry *res;
4436 unsigned long lock_flags = 0;
4437 ssize_t len = -ENXIO;
4439 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4440 res = (struct ipr_resource_entry *)sdev->hostdata;
4443 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4445 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4449 static struct device_attribute ipr_resource_type_attr = {
4451 .name = "resource_type",
4454 .show = ipr_show_resource_type
4457 static struct device_attribute *ipr_dev_attrs[] = {
4458 &ipr_adapter_handle_attr,
4459 &ipr_resource_path_attr,
4460 &ipr_device_id_attr,
4461 &ipr_resource_type_attr,
4466 * ipr_biosparam - Return the HSC mapping
4467 * @sdev: scsi device struct
4468 * @block_device: block device pointer
4469 * @capacity: capacity of the device
4470 * @parm: Array containing returned HSC values.
4472 * This function generates the HSC parms that fdisk uses.
4473 * We want to make sure we return something that places partitions
4474 * on 4k boundaries for best performance with the IOA.
4479 static int ipr_biosparam(struct scsi_device *sdev,
4480 struct block_device *block_device,
4481 sector_t capacity, int *parm)
4489 cylinders = capacity;
4490 sector_div(cylinders, (128 * 32));
4495 parm[2] = cylinders;
4501 * ipr_find_starget - Find target based on bus/target.
4502 * @starget: scsi target struct
4505 * resource entry pointer if found / NULL if not found
4507 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4509 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4510 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4511 struct ipr_resource_entry *res;
4513 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4514 if ((res->bus == starget->channel) &&
4515 (res->target == starget->id)) {
4523 static struct ata_port_info sata_port_info;
4526 * ipr_target_alloc - Prepare for commands to a SCSI target
4527 * @starget: scsi target struct
4529 * If the device is a SATA device, this function allocates an
4530 * ATA port with libata, else it does nothing.
4533 * 0 on success / non-0 on failure
4535 static int ipr_target_alloc(struct scsi_target *starget)
4537 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4538 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4539 struct ipr_sata_port *sata_port;
4540 struct ata_port *ap;
4541 struct ipr_resource_entry *res;
4542 unsigned long lock_flags;
4544 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4545 res = ipr_find_starget(starget);
4546 starget->hostdata = NULL;
4548 if (res && ipr_is_gata(res)) {
4549 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4550 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4554 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4556 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4557 sata_port->ioa_cfg = ioa_cfg;
4559 sata_port->res = res;
4561 res->sata_port = sata_port;
4562 ap->private_data = sata_port;
4563 starget->hostdata = sata_port;
4569 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4575 * ipr_target_destroy - Destroy a SCSI target
4576 * @starget: scsi target struct
4578 * If the device was a SATA device, this function frees the libata
4579 * ATA port, else it does nothing.
4582 static void ipr_target_destroy(struct scsi_target *starget)
4584 struct ipr_sata_port *sata_port = starget->hostdata;
4585 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4586 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4588 if (ioa_cfg->sis64) {
4589 if (!ipr_find_starget(starget)) {
4590 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4591 clear_bit(starget->id, ioa_cfg->array_ids);
4592 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4593 clear_bit(starget->id, ioa_cfg->vset_ids);
4594 else if (starget->channel == 0)
4595 clear_bit(starget->id, ioa_cfg->target_ids);
4600 starget->hostdata = NULL;
4601 ata_sas_port_destroy(sata_port->ap);
4607 * ipr_find_sdev - Find device based on bus/target/lun.
4608 * @sdev: scsi device struct
4611 * resource entry pointer if found / NULL if not found
4613 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4615 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4616 struct ipr_resource_entry *res;
4618 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4619 if ((res->bus == sdev->channel) &&
4620 (res->target == sdev->id) &&
4621 (res->lun == sdev->lun))
4629 * ipr_slave_destroy - Unconfigure a SCSI device
4630 * @sdev: scsi device struct
4635 static void ipr_slave_destroy(struct scsi_device *sdev)
4637 struct ipr_resource_entry *res;
4638 struct ipr_ioa_cfg *ioa_cfg;
4639 unsigned long lock_flags = 0;
4641 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4643 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4644 res = (struct ipr_resource_entry *) sdev->hostdata;
4647 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4648 sdev->hostdata = NULL;
4650 res->sata_port = NULL;
4652 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4656 * ipr_slave_configure - Configure a SCSI device
4657 * @sdev: scsi device struct
4659 * This function configures the specified scsi device.
4664 static int ipr_slave_configure(struct scsi_device *sdev)
4666 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4667 struct ipr_resource_entry *res;
4668 struct ata_port *ap = NULL;
4669 unsigned long lock_flags = 0;
4670 char buffer[IPR_MAX_RES_PATH_LENGTH];
4672 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4673 res = sdev->hostdata;
4675 if (ipr_is_af_dasd_device(res))
4676 sdev->type = TYPE_RAID;
4677 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4678 sdev->scsi_level = 4;
4679 sdev->no_uld_attach = 1;
4681 if (ipr_is_vset_device(res)) {
4682 blk_queue_rq_timeout(sdev->request_queue,
4683 IPR_VSET_RW_TIMEOUT);
4684 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4686 if (ipr_is_gata(res) && res->sata_port)
4687 ap = res->sata_port->ap;
4688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4691 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4692 ata_sas_slave_configure(sdev, ap);
4694 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4696 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4697 ipr_format_res_path(ioa_cfg,
4698 res->res_path, buffer, sizeof(buffer)));
4701 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4706 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4707 * @sdev: scsi device struct
4709 * This function initializes an ATA port so that future commands
4710 * sent through queuecommand will work.
4715 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4717 struct ipr_sata_port *sata_port = NULL;
4721 if (sdev->sdev_target)
4722 sata_port = sdev->sdev_target->hostdata;
4724 rc = ata_sas_port_init(sata_port->ap);
4726 rc = ata_sas_sync_probe(sata_port->ap);
4730 ipr_slave_destroy(sdev);
4737 * ipr_slave_alloc - Prepare for commands to a device.
4738 * @sdev: scsi device struct
4740 * This function saves a pointer to the resource entry
4741 * in the scsi device struct if the device exists. We
4742 * can then use this pointer in ipr_queuecommand when
4743 * handling new commands.
4746 * 0 on success / -ENXIO if device does not exist
4748 static int ipr_slave_alloc(struct scsi_device *sdev)
4750 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4751 struct ipr_resource_entry *res;
4752 unsigned long lock_flags;
4755 sdev->hostdata = NULL;
4757 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4759 res = ipr_find_sdev(sdev);
4764 sdev->hostdata = res;
4765 if (!ipr_is_naca_model(res))
4766 res->needs_sync_complete = 1;
4768 if (ipr_is_gata(res)) {
4769 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4770 return ipr_ata_slave_alloc(sdev);
4774 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4780 * ipr_match_lun - Match function for specified LUN
4781 * @ipr_cmd: ipr command struct
4782 * @device: device to match (sdev)
4785 * 1 if command matches sdev / 0 if command does not match sdev
4787 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4789 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4795 * ipr_wait_for_ops - Wait for matching commands to complete
4796 * @ipr_cmd: ipr command struct
4797 * @device: device to match (sdev)
4798 * @match: match function to use
4803 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4804 int (*match)(struct ipr_cmnd *, void *))
4806 struct ipr_cmnd *ipr_cmd;
4808 unsigned long flags;
4809 struct ipr_hrr_queue *hrrq;
4810 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4811 DECLARE_COMPLETION_ONSTACK(comp);
4817 for_each_hrrq(hrrq, ioa_cfg) {
4818 spin_lock_irqsave(hrrq->lock, flags);
4819 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4820 if (match(ipr_cmd, device)) {
4821 ipr_cmd->eh_comp = ∁
4825 spin_unlock_irqrestore(hrrq->lock, flags);
4829 timeout = wait_for_completion_timeout(&comp, timeout);
4834 for_each_hrrq(hrrq, ioa_cfg) {
4835 spin_lock_irqsave(hrrq->lock, flags);
4836 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4837 if (match(ipr_cmd, device)) {
4838 ipr_cmd->eh_comp = NULL;
4842 spin_unlock_irqrestore(hrrq->lock, flags);
4846 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4848 return wait ? FAILED : SUCCESS;
4857 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4859 struct ipr_ioa_cfg *ioa_cfg;
4860 unsigned long lock_flags = 0;
4864 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4865 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4867 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4868 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4869 dev_err(&ioa_cfg->pdev->dev,
4870 "Adapter being reset as a result of error recovery.\n");
4872 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4873 ioa_cfg->sdt_state = GET_DUMP;
4876 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4877 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4880 /* If we got hit with a host reset while we were already resetting
4881 the adapter for some reason, and the reset failed. */
4882 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4887 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4893 * ipr_device_reset - Reset the device
4894 * @ioa_cfg: ioa config struct
4895 * @res: resource entry struct
4897 * This function issues a device reset to the affected device.
4898 * If the device is a SCSI device, a LUN reset will be sent
4899 * to the device first. If that does not work, a target reset
4900 * will be sent. If the device is a SATA device, a PHY reset will
4904 * 0 on success / non-zero on failure
4906 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4907 struct ipr_resource_entry *res)
4909 struct ipr_cmnd *ipr_cmd;
4910 struct ipr_ioarcb *ioarcb;
4911 struct ipr_cmd_pkt *cmd_pkt;
4912 struct ipr_ioarcb_ata_regs *regs;
4916 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4917 ioarcb = &ipr_cmd->ioarcb;
4918 cmd_pkt = &ioarcb->cmd_pkt;
4920 if (ipr_cmd->ioa_cfg->sis64) {
4921 regs = &ipr_cmd->i.ata_ioadl.regs;
4922 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4924 regs = &ioarcb->u.add_data.u.regs;
4926 ioarcb->res_handle = res->res_handle;
4927 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4928 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4929 if (ipr_is_gata(res)) {
4930 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4931 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4932 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4935 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4936 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4937 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4938 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4939 if (ipr_cmd->ioa_cfg->sis64)
4940 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4941 sizeof(struct ipr_ioasa_gata));
4943 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4944 sizeof(struct ipr_ioasa_gata));
4948 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4952 * ipr_sata_reset - Reset the SATA port
4953 * @link: SATA link to reset
4954 * @classes: class of the attached device
4956 * This function issues a SATA phy reset to the affected ATA link.
4959 * 0 on success / non-zero on failure
4961 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4962 unsigned long deadline)
4964 struct ipr_sata_port *sata_port = link->ap->private_data;
4965 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4966 struct ipr_resource_entry *res;
4967 unsigned long lock_flags = 0;
4971 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4972 while (ioa_cfg->in_reset_reload) {
4973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4974 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4975 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4978 res = sata_port->res;
4980 rc = ipr_device_reset(ioa_cfg, res);
4981 *classes = res->ata_class;
4984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4990 * ipr_eh_dev_reset - Reset the device
4991 * @scsi_cmd: scsi command struct
4993 * This function issues a device reset to the affected device.
4994 * A LUN reset will be sent to the device first. If that does
4995 * not work, a target reset will be sent.
5000 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5002 struct ipr_cmnd *ipr_cmd;
5003 struct ipr_ioa_cfg *ioa_cfg;
5004 struct ipr_resource_entry *res;
5005 struct ata_port *ap;
5007 struct ipr_hrr_queue *hrrq;
5010 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5011 res = scsi_cmd->device->hostdata;
5017 * If we are currently going through reset/reload, return failed. This will force the
5018 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5021 if (ioa_cfg->in_reset_reload)
5023 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5026 for_each_hrrq(hrrq, ioa_cfg) {
5027 spin_lock(&hrrq->_lock);
5028 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5029 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5030 if (ipr_cmd->scsi_cmd)
5031 ipr_cmd->done = ipr_scsi_eh_done;
5033 ipr_cmd->done = ipr_sata_eh_done;
5035 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5036 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5037 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5041 spin_unlock(&hrrq->_lock);
5043 res->resetting_device = 1;
5044 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5046 if (ipr_is_gata(res) && res->sata_port) {
5047 ap = res->sata_port->ap;
5048 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5049 ata_std_error_handler(ap);
5050 spin_lock_irq(scsi_cmd->device->host->host_lock);
5052 for_each_hrrq(hrrq, ioa_cfg) {
5053 spin_lock(&hrrq->_lock);
5054 list_for_each_entry(ipr_cmd,
5055 &hrrq->hrrq_pending_q, queue) {
5056 if (ipr_cmd->ioarcb.res_handle ==
5062 spin_unlock(&hrrq->_lock);
5065 rc = ipr_device_reset(ioa_cfg, res);
5066 res->resetting_device = 0;
5069 return rc ? FAILED : SUCCESS;
5072 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5075 struct ipr_ioa_cfg *ioa_cfg;
5077 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5079 spin_lock_irq(cmd->device->host->host_lock);
5080 rc = __ipr_eh_dev_reset(cmd);
5081 spin_unlock_irq(cmd->device->host->host_lock);
5084 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5090 * ipr_bus_reset_done - Op done function for bus reset.
5091 * @ipr_cmd: ipr command struct
5093 * This function is the op done function for a bus reset
5098 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5100 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5101 struct ipr_resource_entry *res;
5104 if (!ioa_cfg->sis64)
5105 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5106 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5107 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5113 * If abort has not completed, indicate the reset has, else call the
5114 * abort's done function to wake the sleeping eh thread
5116 if (ipr_cmd->sibling->sibling)
5117 ipr_cmd->sibling->sibling = NULL;
5119 ipr_cmd->sibling->done(ipr_cmd->sibling);
5121 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5126 * ipr_abort_timeout - An abort task has timed out
5127 * @ipr_cmd: ipr command struct
5129 * This function handles when an abort task times out. If this
5130 * happens we issue a bus reset since we have resources tied
5131 * up that must be freed before returning to the midlayer.
5136 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5138 struct ipr_cmnd *reset_cmd;
5139 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5140 struct ipr_cmd_pkt *cmd_pkt;
5141 unsigned long lock_flags = 0;
5144 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5145 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5146 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5150 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5151 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5152 ipr_cmd->sibling = reset_cmd;
5153 reset_cmd->sibling = ipr_cmd;
5154 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5155 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5156 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5157 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5158 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5160 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5161 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5166 * ipr_cancel_op - Cancel specified op
5167 * @scsi_cmd: scsi command struct
5169 * This function cancels specified op.
5174 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5176 struct ipr_cmnd *ipr_cmd;
5177 struct ipr_ioa_cfg *ioa_cfg;
5178 struct ipr_resource_entry *res;
5179 struct ipr_cmd_pkt *cmd_pkt;
5182 struct ipr_hrr_queue *hrrq;
5185 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5186 res = scsi_cmd->device->hostdata;
5188 /* If we are currently going through reset/reload, return failed.
5189 * This will force the mid-layer to call ipr_eh_host_reset,
5190 * which will then go to sleep and wait for the reset to complete
5192 if (ioa_cfg->in_reset_reload ||
5193 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5199 * If we are aborting a timed out op, chances are that the timeout was caused
5200 * by a still not detected EEH error. In such cases, reading a register will
5201 * trigger the EEH recovery infrastructure.
5203 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5205 if (!ipr_is_gscsi(res))
5208 for_each_hrrq(hrrq, ioa_cfg) {
5209 spin_lock(&hrrq->_lock);
5210 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5211 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5212 ipr_cmd->done = ipr_scsi_eh_done;
5217 spin_unlock(&hrrq->_lock);
5223 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5224 ipr_cmd->ioarcb.res_handle = res->res_handle;
5225 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5226 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5227 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5228 ipr_cmd->u.sdev = scsi_cmd->device;
5230 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5232 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5233 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5236 * If the abort task timed out and we sent a bus reset, we will get
5237 * one the following responses to the abort
5239 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5244 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5245 if (!ipr_is_naca_model(res))
5246 res->needs_sync_complete = 1;
5249 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5253 * ipr_eh_abort - Abort a single op
5254 * @scsi_cmd: scsi command struct
5259 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5261 unsigned long flags;
5263 struct ipr_ioa_cfg *ioa_cfg;
5267 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5269 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5270 rc = ipr_cancel_op(scsi_cmd);
5271 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5274 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5280 * ipr_handle_other_interrupt - Handle "other" interrupts
5281 * @ioa_cfg: ioa config struct
5282 * @int_reg: interrupt register
5285 * IRQ_NONE / IRQ_HANDLED
5287 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5290 irqreturn_t rc = IRQ_HANDLED;
5293 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5294 int_reg &= ~int_mask_reg;
5296 /* If an interrupt on the adapter did not occur, ignore it.
5297 * Or in the case of SIS 64, check for a stage change interrupt.
5299 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5300 if (ioa_cfg->sis64) {
5301 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5302 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5303 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5305 /* clear stage change */
5306 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5307 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5308 list_del(&ioa_cfg->reset_cmd->queue);
5309 del_timer(&ioa_cfg->reset_cmd->timer);
5310 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5318 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5319 /* Mask the interrupt */
5320 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5322 /* Clear the interrupt */
5323 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5324 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5326 list_del(&ioa_cfg->reset_cmd->queue);
5327 del_timer(&ioa_cfg->reset_cmd->timer);
5328 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5329 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5330 if (ioa_cfg->clear_isr) {
5331 if (ipr_debug && printk_ratelimit())
5332 dev_err(&ioa_cfg->pdev->dev,
5333 "Spurious interrupt detected. 0x%08X\n", int_reg);
5334 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5335 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5339 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5340 ioa_cfg->ioa_unit_checked = 1;
5341 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5342 dev_err(&ioa_cfg->pdev->dev,
5343 "No Host RRQ. 0x%08X\n", int_reg);
5345 dev_err(&ioa_cfg->pdev->dev,
5346 "Permanent IOA failure. 0x%08X\n", int_reg);
5348 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5349 ioa_cfg->sdt_state = GET_DUMP;
5351 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5352 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5359 * ipr_isr_eh - Interrupt service routine error handler
5360 * @ioa_cfg: ioa config struct
5361 * @msg: message to log
5366 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5368 ioa_cfg->errors_logged++;
5369 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5371 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5372 ioa_cfg->sdt_state = GET_DUMP;
5374 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5377 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5378 struct list_head *doneq)
5382 struct ipr_cmnd *ipr_cmd;
5383 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5386 /* If interrupts are disabled, ignore the interrupt */
5387 if (!hrr_queue->allow_interrupts)
5390 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5391 hrr_queue->toggle_bit) {
5393 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5394 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5395 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5397 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5398 cmd_index < hrr_queue->min_cmd_id)) {
5400 "Invalid response handle from IOA: ",
5405 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5406 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5408 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5410 list_move_tail(&ipr_cmd->queue, doneq);
5412 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5413 hrr_queue->hrrq_curr++;
5415 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5416 hrr_queue->toggle_bit ^= 1u;
5419 if (budget > 0 && num_hrrq >= budget)
5426 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5428 struct ipr_ioa_cfg *ioa_cfg;
5429 struct ipr_hrr_queue *hrrq;
5430 struct ipr_cmnd *ipr_cmd, *temp;
5431 unsigned long hrrq_flags;
5435 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5436 ioa_cfg = hrrq->ioa_cfg;
5438 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5439 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5441 if (completed_ops < budget)
5442 blk_iopoll_complete(iop);
5443 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5445 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5446 list_del(&ipr_cmd->queue);
5447 del_timer(&ipr_cmd->timer);
5448 ipr_cmd->fast_done(ipr_cmd);
5451 return completed_ops;
5455 * ipr_isr - Interrupt service routine
5457 * @devp: pointer to ioa config struct
5460 * IRQ_NONE / IRQ_HANDLED
5462 static irqreturn_t ipr_isr(int irq, void *devp)
5464 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5465 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5466 unsigned long hrrq_flags = 0;
5470 struct ipr_cmnd *ipr_cmd, *temp;
5471 irqreturn_t rc = IRQ_NONE;
5474 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5475 /* If interrupts are disabled, ignore the interrupt */
5476 if (!hrrq->allow_interrupts) {
5477 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5482 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5485 if (!ioa_cfg->clear_isr)
5488 /* Clear the PCI interrupt */
5491 writel(IPR_PCII_HRRQ_UPDATED,
5492 ioa_cfg->regs.clr_interrupt_reg32);
5493 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5494 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5495 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5497 } else if (rc == IRQ_NONE && irq_none == 0) {
5498 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5500 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5501 int_reg & IPR_PCII_HRRQ_UPDATED) {
5503 "Error clearing HRRQ: ", num_hrrq);
5510 if (unlikely(rc == IRQ_NONE))
5511 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5513 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5514 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5515 list_del(&ipr_cmd->queue);
5516 del_timer(&ipr_cmd->timer);
5517 ipr_cmd->fast_done(ipr_cmd);
5523 * ipr_isr_mhrrq - Interrupt service routine
5525 * @devp: pointer to ioa config struct
5528 * IRQ_NONE / IRQ_HANDLED
5530 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5532 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5533 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5534 unsigned long hrrq_flags = 0;
5535 struct ipr_cmnd *ipr_cmd, *temp;
5536 irqreturn_t rc = IRQ_NONE;
5539 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5541 /* If interrupts are disabled, ignore the interrupt */
5542 if (!hrrq->allow_interrupts) {
5543 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5547 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5548 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5549 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5551 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5552 blk_iopoll_sched(&hrrq->iopoll);
5553 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5557 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5560 if (ipr_process_hrrq(hrrq, -1, &doneq))
5564 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5566 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5567 list_del(&ipr_cmd->queue);
5568 del_timer(&ipr_cmd->timer);
5569 ipr_cmd->fast_done(ipr_cmd);
5575 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5576 * @ioa_cfg: ioa config struct
5577 * @ipr_cmd: ipr command struct
5580 * 0 on success / -1 on failure
5582 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5583 struct ipr_cmnd *ipr_cmd)
5586 struct scatterlist *sg;
5588 u32 ioadl_flags = 0;
5589 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5590 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5591 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5593 length = scsi_bufflen(scsi_cmd);
5597 nseg = scsi_dma_map(scsi_cmd);
5599 if (printk_ratelimit())
5600 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5604 ipr_cmd->dma_use_sg = nseg;
5606 ioarcb->data_transfer_length = cpu_to_be32(length);
5608 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5610 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5611 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5612 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5613 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5614 ioadl_flags = IPR_IOADL_FLAGS_READ;
5616 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5617 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5618 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5619 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5622 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5627 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5628 * @ioa_cfg: ioa config struct
5629 * @ipr_cmd: ipr command struct
5632 * 0 on success / -1 on failure
5634 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5635 struct ipr_cmnd *ipr_cmd)
5638 struct scatterlist *sg;
5640 u32 ioadl_flags = 0;
5641 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5642 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5643 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5645 length = scsi_bufflen(scsi_cmd);
5649 nseg = scsi_dma_map(scsi_cmd);
5651 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5655 ipr_cmd->dma_use_sg = nseg;
5657 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5658 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5659 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5660 ioarcb->data_transfer_length = cpu_to_be32(length);
5662 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5663 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5664 ioadl_flags = IPR_IOADL_FLAGS_READ;
5665 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5666 ioarcb->read_ioadl_len =
5667 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5670 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5671 ioadl = ioarcb->u.add_data.u.ioadl;
5672 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5673 offsetof(struct ipr_ioarcb, u.add_data));
5674 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5677 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5678 ioadl[i].flags_and_data_len =
5679 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5680 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5683 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5688 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5689 * @scsi_cmd: scsi command struct
5694 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5697 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5699 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5701 case MSG_SIMPLE_TAG:
5702 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5705 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5707 case MSG_ORDERED_TAG:
5708 rc = IPR_FLAGS_LO_ORDERED_TASK;
5717 * ipr_erp_done - Process completion of ERP for a device
5718 * @ipr_cmd: ipr command struct
5720 * This function copies the sense buffer into the scsi_cmd
5721 * struct and pushes the scsi_done function.
5726 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5728 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5729 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5730 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5732 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5733 scsi_cmd->result |= (DID_ERROR << 16);
5734 scmd_printk(KERN_ERR, scsi_cmd,
5735 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5737 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5738 SCSI_SENSE_BUFFERSIZE);
5742 if (!ipr_is_naca_model(res))
5743 res->needs_sync_complete = 1;
5746 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5747 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5748 scsi_cmd->scsi_done(scsi_cmd);
5752 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5753 * @ipr_cmd: ipr command struct
5758 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5760 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5761 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5762 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5764 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5765 ioarcb->data_transfer_length = 0;
5766 ioarcb->read_data_transfer_length = 0;
5767 ioarcb->ioadl_len = 0;
5768 ioarcb->read_ioadl_len = 0;
5769 ioasa->hdr.ioasc = 0;
5770 ioasa->hdr.residual_data_len = 0;
5772 if (ipr_cmd->ioa_cfg->sis64)
5773 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5774 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5776 ioarcb->write_ioadl_addr =
5777 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5778 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5783 * ipr_erp_request_sense - Send request sense to a device
5784 * @ipr_cmd: ipr command struct
5786 * This function sends a request sense to a device as a result
5787 * of a check condition.
5792 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5794 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5795 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5797 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5798 ipr_erp_done(ipr_cmd);
5802 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5804 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5805 cmd_pkt->cdb[0] = REQUEST_SENSE;
5806 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5807 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5808 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5809 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5811 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5812 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5814 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5815 IPR_REQUEST_SENSE_TIMEOUT * 2);
5819 * ipr_erp_cancel_all - Send cancel all to a device
5820 * @ipr_cmd: ipr command struct
5822 * This function sends a cancel all to a device to clear the
5823 * queue. If we are running TCQ on the device, QERR is set to 1,
5824 * which means all outstanding ops have been dropped on the floor.
5825 * Cancel all will return them to us.
5830 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5832 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5833 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5834 struct ipr_cmd_pkt *cmd_pkt;
5838 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5840 if (!scsi_get_tag_type(scsi_cmd->device)) {
5841 ipr_erp_request_sense(ipr_cmd);
5845 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5846 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5847 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5849 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5850 IPR_CANCEL_ALL_TIMEOUT);
5854 * ipr_dump_ioasa - Dump contents of IOASA
5855 * @ioa_cfg: ioa config struct
5856 * @ipr_cmd: ipr command struct
5857 * @res: resource entry struct
5859 * This function is invoked by the interrupt handler when ops
5860 * fail. It will log the IOASA if appropriate. Only called
5866 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5867 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5871 u32 ioasc, fd_ioasc;
5872 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5873 __be32 *ioasa_data = (__be32 *)ioasa;
5876 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5877 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5882 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5885 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5886 error_index = ipr_get_error(fd_ioasc);
5888 error_index = ipr_get_error(ioasc);
5890 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5891 /* Don't log an error if the IOA already logged one */
5892 if (ioasa->hdr.ilid != 0)
5895 if (!ipr_is_gscsi(res))
5898 if (ipr_error_table[error_index].log_ioasa == 0)
5902 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5904 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5905 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5906 data_len = sizeof(struct ipr_ioasa64);
5907 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5908 data_len = sizeof(struct ipr_ioasa);
5910 ipr_err("IOASA Dump:\n");
5912 for (i = 0; i < data_len / 4; i += 4) {
5913 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5914 be32_to_cpu(ioasa_data[i]),
5915 be32_to_cpu(ioasa_data[i+1]),
5916 be32_to_cpu(ioasa_data[i+2]),
5917 be32_to_cpu(ioasa_data[i+3]));
/*
 * NOTE(review): elided listing — gaps in the embedded line numbers
 * (e.g. 5929 -> 5932) show that braces, locals (e.g. failing_lba) and
 * some statements are missing from this view.
 */
5922 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5924 * @sense_buf: sense data buffer
5929 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5932 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5933 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5934 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5935 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
/* Always start from a zeroed sense buffer. */
5937 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense — bail out (elided body). */
5939 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5942 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/*
 * Volume set with a non-zero high LBA word: build descriptor-format
 * sense (response code 0x72) so the full 64-bit failing LBA fits.
 */
5944 if (ipr_is_vset_device(res) &&
5945 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5946 ioasa->u.vset.failing_lba_hi != 0) {
5947 sense_buf[0] = 0x72;
5948 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5949 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5950 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5954 sense_buf[9] = 0x0A;
5955 sense_buf[10] = 0x80;
/* High 32 bits of the failing LBA, stored big-endian into bytes 12..15. */
5957 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5959 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5960 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5961 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5962 sense_buf[15] = failing_lba & 0x000000ff;
/* Low 32 bits into bytes 16..19. */
5964 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5966 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5967 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5968 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5969 sense_buf[19] = failing_lba & 0x000000ff;
/* Otherwise: fixed-format sense (response code 0x70). */
5971 sense_buf[0] = 0x70;
5972 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5973 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5974 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5976 /* Illegal request */
5977 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5978 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5979 sense_buf[7] = 10; /* additional length */
5981 /* IOARCB was in error */
5982 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5983 sense_buf[15] = 0xC0;
5984 else /* Parameter data was invalid */
5985 sense_buf[15] = 0x80;
/* Field pointer (bytes 16/17) extracted from ioasc_specific; the
 * assignment targets on the elided lines are not visible here. */
5988 ((IPR_FIELD_POINTER_MASK &
5989 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5991 (IPR_FIELD_POINTER_MASK &
5992 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
/* Medium error: report the failing LBA in fixed-format bytes 3..6. */
5994 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5995 if (ipr_is_vset_device(res))
5996 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5998 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6000 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6001 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6002 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6003 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6004 sense_buf[6] = failing_lba & 0x000000ff;
6007 sense_buf[7] = 6; /* additional length */
/* NOTE(review): elided listing — braces and return statements are missing
 * between the visible lines (number gaps, e.g. 6027 -> 6030). */
6013 * ipr_get_autosense - Copy autosense data to sense buffer
6014 * @ipr_cmd: ipr command struct
6016 * This function copies the autosense buffer to the buffer
6017 * in the scsi_cmd, if there is autosense available.
6020 * 1 if autosense was available / 0 if not
6022 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6024 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6025 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
/* No autosense bit set in ioasc_specific -> nothing to copy. */
6027 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
/* Pick the 64-bit or 32-bit IOASA layout; clamp to SCSI_SENSE_BUFFERSIZE. */
6030 if (ipr_cmd->ioa_cfg->sis64)
6031 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6032 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6033 SCSI_SENSE_BUFFERSIZE));
6035 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6036 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6037 SCSI_SENSE_BUFFERSIZE));
/* NOTE(review): elided listing — `break` statements and several branch
 * bodies are missing between the visible lines (number gaps). */
6042 * ipr_erp_start - Process an error response for a SCSI op
6043 * @ioa_cfg: ioa config struct
6044 * @ipr_cmd: ipr command struct
6046 * This function determines whether or not to initiate ERP
6047 * on the affected device.
6052 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6053 struct ipr_cmnd *ipr_cmd)
6055 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6056 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6057 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6058 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
/* (Elided guard above) — finish via the EH done path in that case. */
6061 ipr_scsi_eh_done(ipr_cmd);
/* Non-GSCSI devices get driver-built sense data, except for the
 * device-bus-status case which carries its own status/autosense. */
6065 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6066 ipr_gen_sense(ipr_cmd);
6068 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
/* Map the masked IOASC onto a mid-layer result / retry decision. */
6070 switch (masked_ioasc) {
6071 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6072 if (ipr_is_naca_model(res))
6073 scsi_cmd->result |= (DID_ABORT << 16);
6075 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6077 case IPR_IOASC_IR_RESOURCE_HANDLE:
6078 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6079 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6081 case IPR_IOASC_HW_SEL_TIMEOUT:
6082 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6083 if (!ipr_is_naca_model(res))
6084 res->needs_sync_complete = 1;
6086 case IPR_IOASC_SYNC_REQUIRED:
6088 res->needs_sync_complete = 1;
6089 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6091 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6092 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6093 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6095 case IPR_IOASC_BUS_WAS_RESET:
6096 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6098 * Report the bus reset and ask for a retry. The device
6099 * will give CC/UA the next command.
6101 if (!res->resetting_device)
6102 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6103 scsi_cmd->result |= (DID_ERROR << 16);
6104 if (!ipr_is_naca_model(res))
6105 res->needs_sync_complete = 1;
6107 case IPR_IOASC_HW_DEV_BUS_STATUS:
/* Pass the device's SCSI status up; try autosense before cancel-all. */
6108 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6109 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6110 if (!ipr_get_autosense(ipr_cmd)) {
6111 if (!ipr_is_naca_model(res)) {
6112 ipr_erp_cancel_all(ipr_cmd);
6117 if (!ipr_is_naca_model(res))
6118 res->needs_sync_complete = 1;
6120 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
/* Default case (elided label): error out anything above RECOVERED_ERROR. */
6123 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6124 scsi_cmd->result |= (DID_ERROR << 16);
6125 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6126 res->needs_sync_complete = 1;
/* Release DMA mappings, recycle the command, and complete to mid-layer. */
6130 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6131 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6132 scsi_cmd->scsi_done(scsi_cmd);
/* NOTE(review): elided listing — braces and the if/else structure
 * connecting the two paths are partly missing (number gaps). */
6136 * ipr_scsi_done - mid-layer done function
6137 * @ipr_cmd: ipr command struct
6139 * This function is invoked by the interrupt handler for
6140 * ops generated by the SCSI mid-layer
6145 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6147 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6148 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6149 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6150 unsigned long lock_flags;
6152 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
/* Fast path: no sense key -> complete directly under the hrrq lock. */
6154 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6155 scsi_dma_unmap(scsi_cmd);
6157 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6158 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6159 scsi_cmd->scsi_done(scsi_cmd);
6160 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
/* Error path: run ERP with host_lock held and the hrrq _lock nested. */
6162 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6163 spin_lock(&ipr_cmd->hrrq->_lock);
6164 ipr_erp_start(ioa_cfg, ipr_cmd);
6165 spin_unlock(&ipr_cmd->hrrq->_lock);
6166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* NOTE(review): elided listing — several returns, gotos and closing braces
 * are missing between the visible lines (number gaps, e.g. 6202 -> 6206). */
6171 * ipr_queuecommand - Queue a mid-layer request
6172 * @shost: scsi host struct
6173 * @scsi_cmd: scsi command struct
6175 * This function queues a request generated by the mid-layer.
6179 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6180 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6182 static int ipr_queuecommand(struct Scsi_Host *shost,
6183 struct scsi_cmnd *scsi_cmd)
6185 struct ipr_ioa_cfg *ioa_cfg;
6186 struct ipr_resource_entry *res;
6187 struct ipr_ioarcb *ioarcb;
6188 struct ipr_cmnd *ipr_cmd;
6189 unsigned long hrrq_flags, lock_flags;
6191 struct ipr_hrr_queue *hrrq;
6194 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6196 scsi_cmd->result = (DID_OK << 16);
6197 res = scsi_cmd->device->hostdata;
/* SATA devices are routed to libata under the host lock. */
6199 if (ipr_is_gata(res) && res->sata_port) {
6200 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6201 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Select an HRR queue for this command. */
6206 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6207 hrrq = &ioa_cfg->hrrq[hrrq_id];
6209 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6211 * We are currently blocking all devices due to a host reset
6212 * We have told the host to stop giving us new requests, but
6213 * ERP ops don't count. FIXME
6215 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6216 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6217 return SCSI_MLQUEUE_HOST_BUSY;
6221 * FIXME - Create scsi_set_host_offline interface
6222 * and the ioa_is_dead check can be removed
6224 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6225 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* (Elided goto to the err_nodev path at the bottom — presumably; confirm
 * against the full source.) */
6229 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6230 if (ipr_cmd == NULL) {
6231 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6232 return SCSI_MLQUEUE_HOST_BUSY;
6234 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Build the IOARCB from the SCSI command. */
6236 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6237 ioarcb = &ipr_cmd->ioarcb;
6239 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6240 ipr_cmd->scsi_cmd = scsi_cmd;
6241 ipr_cmd->done = ipr_scsi_eh_done;
6243 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6244 if (scsi_cmd->underflow == 0)
6245 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6247 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6248 if (ipr_is_gscsi(res))
6249 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6250 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6251 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
/* Vendor-specific CDB opcodes (>= 0xC0) go to the IOA itself. */
6254 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6255 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6256 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* Build the scatter/gather list (64-bit vs 32-bit IOADL format). */
6260 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6262 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
/* Re-check adapter state under the lock before committing the command. */
6264 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6265 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6266 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6267 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6269 scsi_dma_unmap(scsi_cmd);
6270 return SCSI_MLQUEUE_HOST_BUSY;
6273 if (unlikely(hrrq->ioa_is_dead)) {
6274 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6275 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6276 scsi_dma_unmap(scsi_cmd);
6280 ioarcb->res_handle = res->res_handle;
6281 if (res->needs_sync_complete) {
6282 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6283 res->needs_sync_complete = 0;
6285 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6286 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6287 ipr_send_command(ipr_cmd);
6288 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* (Elided label) error path: complete the command as DID_NO_CONNECT. */
6292 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6293 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6294 scsi_cmd->result = (DID_NO_CONNECT << 16);
6295 scsi_cmd->scsi_done(scsi_cmd);
6296 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* NOTE(review): elided listing — braces and the non-GATA return are
 * missing (number gaps, e.g. 6317 -> 6324). */
6301 * ipr_ioctl - IOCTL handler
6302 * @sdev: scsi device struct
6307 * 0 on success / other on failure
6309 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6311 struct ipr_resource_entry *res;
6313 res = (struct ipr_resource_entry *)sdev->hostdata;
/* Only SATA devices forward ioctls to libata; HDIO_GET_IDENTITY is
 * special-cased (its body is elided here). */
6314 if (res && ipr_is_gata(res)) {
6315 if (cmd == HDIO_GET_IDENTITY)
6317 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
/* NOTE(review): elided listing — opening/closing braces and the final
 * `return buffer;` are not visible. */
6324 * ipr_info - Get information about the card/driver
6325 * @scsi_host: scsi host struct
6328 * pointer to buffer with description string
6330 static const char *ipr_ioa_info(struct Scsi_Host *host)
/* Static buffer: the returned string is shared, not per-call. */
6332 static char buffer[512];
6333 struct ipr_ioa_cfg *ioa_cfg;
6334 unsigned long lock_flags = 0;
6336 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
/* host_lock taken while formatting so ioa_cfg->type is read consistently. */
6338 spin_lock_irqsave(host->host_lock, lock_flags);
6339 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6340 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI mid-layer host template for the ipr driver. NOTE(review): elided
 * listing — some initializers (e.g. lines 6347, 6349, 6363) and the
 * closing brace are not visible here. */
6345 static struct scsi_host_template driver_template = {
6346 .module = THIS_MODULE,
6348 .info = ipr_ioa_info,
6350 .queuecommand = ipr_queuecommand,
6351 .eh_abort_handler = ipr_eh_abort,
6352 .eh_device_reset_handler = ipr_eh_dev_reset,
6353 .eh_host_reset_handler = ipr_eh_host_reset,
6354 .slave_alloc = ipr_slave_alloc,
6355 .slave_configure = ipr_slave_configure,
6356 .slave_destroy = ipr_slave_destroy,
6357 .target_alloc = ipr_target_alloc,
6358 .target_destroy = ipr_target_destroy,
6359 .change_queue_depth = ipr_change_queue_depth,
6360 .change_queue_type = ipr_change_queue_type,
6361 .bios_param = ipr_biosparam,
6362 .can_queue = IPR_MAX_COMMANDS,
6364 .sg_tablesize = IPR_MAX_SGLIST,
6365 .max_sectors = IPR_IOA_MAX_SECTORS,
6366 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6367 .use_clustering = ENABLE_CLUSTERING,
6368 .shost_attrs = ipr_ioa_attrs,
6369 .sdev_attrs = ipr_dev_attrs,
6370 .proc_name = IPR_NAME,
/* NOTE(review): elided listing — locals (rc), braces, and the body that
 * handles a failed device reset are partially missing (number gaps). */
6375 * ipr_ata_phy_reset - libata phy_reset handler
6376 * @ap: ata port to reset
6379 static void ipr_ata_phy_reset(struct ata_port *ap)
6381 unsigned long flags;
6382 struct ipr_sata_port *sata_port = ap->private_data;
6383 struct ipr_resource_entry *res = sata_port->res;
6384 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
/* Wait out any in-progress adapter reset, dropping/retaking host_lock
 * around the sleep. */
6388 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6389 while (ioa_cfg->in_reset_reload) {
6390 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6391 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6392 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6395 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6398 rc = ipr_device_reset(ioa_cfg, res);
/* On reset failure (elided branch) mark the device absent ... */
6401 ap->link.device[0].class = ATA_DEV_NONE;
/* ... otherwise take the class reported by the resource entry. */
6405 ap->link.device[0].class = res->ata_class;
6406 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6407 ap->link.device[0].class = ATA_DEV_NONE;
6410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6415 * ipr_ata_post_internal - Cleanup after an internal command
6416 * @qc: ATA queued command
/* NOTE(review): elided listing — braces and a `break`-equivalent after the
 * reset call are not visible (number gaps). */
6421 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6423 struct ipr_sata_port *sata_port = qc->ap->private_data;
6424 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6425 struct ipr_cmnd *ipr_cmd;
6426 struct ipr_hrr_queue *hrrq;
6427 unsigned long flags;
/* Wait out any in-progress adapter reset before scanning queues. */
6429 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6430 while (ioa_cfg->in_reset_reload) {
6431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6432 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6433 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* If the internal qc is still pending on any HRR queue, reset the device
 * to flush it out. */
6436 for_each_hrrq(hrrq, ioa_cfg) {
6437 spin_lock(&hrrq->_lock);
6438 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6439 if (ipr_cmd->qc == qc) {
6440 ipr_device_reset(ioa_cfg, sata_port->res);
6444 spin_unlock(&hrrq->_lock);
6446 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6450 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6451 * @regs: destination
6452 * @tf: source ATA taskfile
/* Straight field-by-field copy of the ATA taskfile (including the HOB
 * registers for 48-bit LBA commands) into the IOARCB register image.
 * NOTE(review): opening/closing braces elided from this listing. */
6457 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6458 struct ata_taskfile *tf)
6460 regs->feature = tf->feature;
6461 regs->nsect = tf->nsect;
6462 regs->lbal = tf->lbal;
6463 regs->lbam = tf->lbam;
6464 regs->lbah = tf->lbah;
6465 regs->device = tf->device;
6466 regs->command = tf->command;
6467 regs->hob_feature = tf->hob_feature;
6468 regs->hob_nsect = tf->hob_nsect;
6469 regs->hob_lbal = tf->hob_lbal;
6470 regs->hob_lbam = tf->hob_lbam;
6471 regs->hob_lbah = tf->hob_lbah;
6472 regs->ctl = tf->ctl;
/* NOTE(review): elided listing — braces and an `else` keyword between the
 * two memcpy paths are not visible (number gaps). */
6476 * ipr_sata_done - done function for SATA commands
6477 * @ipr_cmd: ipr command struct
6479 * This function is invoked by the interrupt handler for
6480 * ops generated by the SCSI mid-layer to SATA devices
6485 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6487 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6488 struct ata_queued_cmd *qc = ipr_cmd->qc;
6489 struct ipr_sata_port *sata_port = qc->ap->private_data;
6490 struct ipr_resource_entry *res = sata_port->res;
6491 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6493 spin_lock(&ipr_cmd->hrrq->_lock);
/* Snapshot the GATA IOASA (64- or 32-bit layout) into the sata_port so
 * ipr_qc_fill_rtf() can read it after completion. */
6494 if (ipr_cmd->ioa_cfg->sis64)
6495 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6496 sizeof(struct ipr_ioasa_gata));
6498 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6499 sizeof(struct ipr_ioasa_gata));
6500 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6502 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6503 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
/* Sense key above RECOVERED_ERROR -> force an error mask; otherwise derive
 * it from the ATA status register. */
6505 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6506 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6508 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6509 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6510 spin_unlock(&ipr_cmd->hrrq->_lock);
6511 ata_qc_complete(qc);
/* NOTE(review): elided listing — the loop index declaration, the pointer
 * increment inside the loop, and braces are not visible (number gaps). */
6515 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6516 * @ipr_cmd: ipr command struct
6517 * @qc: ATA queued command
6520 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6521 struct ata_queued_cmd *qc)
6523 u32 ioadl_flags = 0;
6524 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6525 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6526 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6527 int len = qc->nbytes;
6528 struct scatterlist *sg;
6530 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Direction determines the IOADL flags; writes also set the IOARCB flag. */
6535 if (qc->dma_dir == DMA_TO_DEVICE) {
6536 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6537 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6538 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6539 ioadl_flags = IPR_IOADL_FLAGS_READ;
6541 ioarcb->data_transfer_length = cpu_to_be32(len);
6543 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* 64-bit SIS: the IOADL lives inside the ipr_cmnd; point the IOARCB at it. */
6544 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6545 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
/* One descriptor per scatterlist element; remember the last one. */
6547 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6548 ioadl64->flags = cpu_to_be32(ioadl_flags);
6549 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6550 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6552 last_ioadl64 = ioadl64;
/* Tag the final descriptor so the adapter knows where the list ends. */
6556 if (likely(last_ioadl64))
6557 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/* NOTE(review): elided listing — loop index, pointer increment and the
 * `last_ioadl = ioadl` assignment inside the loop are not visible. */
6561 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6562 * @ipr_cmd: ipr command struct
6563 * @qc: ATA queued command
6566 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6567 struct ata_queued_cmd *qc)
6569 u32 ioadl_flags = 0;
6570 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6571 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6572 struct ipr_ioadl_desc *last_ioadl = NULL;
6573 int len = qc->nbytes;
6574 struct scatterlist *sg;
/* 32-bit SIS uses separate write vs read length/ioadl-len fields. */
6580 if (qc->dma_dir == DMA_TO_DEVICE) {
6581 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6582 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6583 ioarcb->data_transfer_length = cpu_to_be32(len);
6585 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6586 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6587 ioadl_flags = IPR_IOADL_FLAGS_READ;
6588 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6589 ioarcb->read_ioadl_len =
6590 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Flags and length are packed into one 32-bit field per descriptor. */
6593 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6594 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6595 ioadl->address = cpu_to_be32(sg_dma_address(sg));
/* Mark the final descriptor as the end of the list. */
6601 if (likely(last_ioadl))
6602 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/* NOTE(review): elided listing — the return after the ioa_is_dead branch
 * and the final `return 0` are not visible (number gaps). */
6606 * ipr_qc_defer - Get a free ipr_cmd
6607 * @qc: queued command
6612 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6614 struct ata_port *ap = qc->ap;
6615 struct ipr_sata_port *sata_port = ap->private_data;
6616 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6617 struct ipr_cmnd *ipr_cmd;
6618 struct ipr_hrr_queue *hrrq;
6621 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6622 hrrq = &ioa_cfg->hrrq[hrrq_id];
/* Reserve an ipr_cmnd now, under the hrrq lock, and stash it on the qc;
 * ipr_qc_issue() consumes it later. */
6624 qc->lldd_task = NULL;
6625 spin_lock(&hrrq->_lock);
6626 if (unlikely(hrrq->ioa_is_dead)) {
6627 spin_unlock(&hrrq->_lock);
/* Adapter not accepting commands -> ask libata to retry later. */
6631 if (unlikely(!hrrq->allow_cmds)) {
6632 spin_unlock(&hrrq->_lock);
6633 return ATA_DEFER_LINK;
6636 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6637 if (ipr_cmd == NULL) {
6638 spin_unlock(&hrrq->_lock);
6639 return ATA_DEFER_LINK;
6642 qc->lldd_task = ipr_cmd;
6643 spin_unlock(&hrrq->_lock);
/* NOTE(review): elided listing — braces, `break`s in the switch, some
 * case labels (e.g. the DMA protocol cases before line 6717) and the
 * final `return 0` are not visible (number gaps). */
6648 * ipr_qc_issue - Issue a SATA qc to a device
6649 * @qc: queued command
6654 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6656 struct ata_port *ap = qc->ap;
6657 struct ipr_sata_port *sata_port = ap->private_data;
6658 struct ipr_resource_entry *res = sata_port->res;
6659 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6660 struct ipr_cmnd *ipr_cmd;
6661 struct ipr_ioarcb *ioarcb;
6662 struct ipr_ioarcb_ata_regs *regs;
/* The ipr_cmnd was reserved by ipr_qc_defer() and parked on lldd_task. */
6664 if (qc->lldd_task == NULL)
6667 ipr_cmd = qc->lldd_task;
6668 if (ipr_cmd == NULL)
6669 return AC_ERR_SYSTEM;
6671 qc->lldd_task = NULL;
/* Re-check adapter state under the hrrq lock before issuing. */
6672 spin_lock(&ipr_cmd->hrrq->_lock);
6673 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6674 ipr_cmd->hrrq->ioa_is_dead)) {
6675 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6676 spin_unlock(&ipr_cmd->hrrq->_lock);
6677 return AC_ERR_SYSTEM;
6680 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6681 ioarcb = &ipr_cmd->ioarcb;
/* ATA register image location differs between 64- and 32-bit SIS. */
6683 if (ioa_cfg->sis64) {
6684 regs = &ipr_cmd->i.ata_ioadl.regs;
6685 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6687 regs = &ioarcb->u.add_data.u.regs;
6689 memset(regs, 0, sizeof(*regs));
6690 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6692 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6694 ipr_cmd->done = ipr_sata_done;
6695 ipr_cmd->ioarcb.res_handle = res->res_handle;
6696 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6697 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6698 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6699 ipr_cmd->dma_use_sg = qc->n_elem;
6702 ipr_build_ata_ioadl64(ipr_cmd, qc);
6704 ipr_build_ata_ioadl(ipr_cmd, qc);
6706 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6707 ipr_copy_sata_tf(regs, &qc->tf);
6708 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6709 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* Translate the ATA protocol into IPR ATA flags. */
6711 switch (qc->tf.protocol) {
6712 case ATA_PROT_NODATA:
6717 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6720 case ATAPI_PROT_PIO:
6721 case ATAPI_PROT_NODATA:
6722 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6725 case ATAPI_PROT_DMA:
6726 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6727 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
/* Unsupported protocol (elided default label): undo and reject. */
6732 spin_unlock(&ipr_cmd->hrrq->_lock);
6733 return AC_ERR_INVALID;
6736 ipr_send_command(ipr_cmd);
6737 spin_unlock(&ipr_cmd->hrrq->_lock);
/* NOTE(review): elided listing — the lbal/lbam/lbah copies (lines
 * 6757-6759) and the `return true` are not visible. */
6743 * ipr_qc_fill_rtf - Read result TF
6744 * @qc: ATA queued command
6749 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6751 struct ipr_sata_port *sata_port = qc->ap->private_data;
6752 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6753 struct ata_taskfile *tf = &qc->result_tf;
/* Copy the completion registers cached by ipr_sata_done() into the
 * result taskfile libata hands back to the caller. */
6755 tf->feature = g->error;
6756 tf->nsect = g->nsect;
6760 tf->device = g->device;
6761 tf->command = g->status;
6762 tf->hob_nsect = g->hob_nsect;
6763 tf->hob_lbal = g->hob_lbal;
6764 tf->hob_lbam = g->hob_lbam;
6765 tf->hob_lbah = g->hob_lbah;
6766 tf->ctl = g->alt_status;
/* libata port operations implemented by this driver. NOTE(review):
 * closing brace elided from this listing. */
6771 static struct ata_port_operations ipr_sata_ops = {
6772 .phy_reset = ipr_ata_phy_reset,
6773 .hardreset = ipr_sata_reset,
6774 .post_internal_cmd = ipr_ata_post_internal,
6775 .qc_prep = ata_noop_qc_prep,
6776 .qc_defer = ipr_qc_defer,
6777 .qc_issue = ipr_qc_issue,
6778 .qc_fill_rtf = ipr_qc_fill_rtf,
6779 .port_start = ata_sas_port_start,
6780 .port_stop = ata_sas_port_stop
/* Port capabilities advertised to libata: PIO4, MWDMA2, up to UDMA6.
 * NOTE(review): closing brace elided from this listing. */
6783 static struct ata_port_info sata_port_info = {
6784 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6785 .pio_mask = ATA_PIO4_ONLY,
6786 .mwdma_mask = ATA_MWDMA2,
6787 .udma_mask = ATA_UDMA6,
6788 .port_ops = &ipr_sata_ops
/* NOTE(review): elided listing — the PVR table entries (lines 6793-6803),
 * braces, returns and the #else/#endif around the stub are not visible. */
6791 #ifdef CONFIG_PPC_PSERIES
6792 static const u16 ipr_blocked_processors[] = {
6804 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6805 * @ioa_cfg: ioa cfg struct
6807 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6808 * certain pSeries hardware. This function determines if the given
6809 * adapter is in one of these confgurations or not.
6812 * 1 if adapter is not supported / 0 if adapter is supported
6814 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
/* Only type 0x5702 adapters below PCI revision 4 are suspect; check the
 * running CPU's PVR against the blocked-processor table. */
6818 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6819 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6820 if (pvr_version_is(ipr_blocked_processors[i]))
/* Non-pSeries builds: never invalid. */
6827 #define ipr_invalid_adapter(ioa_cfg) 0
/* NOTE(review): elided listing — braces and some statements between the
 * visible lines are missing (number gaps, e.g. 6846 -> 6848). */
6831 * ipr_ioa_bringdown_done - IOA bring down completion.
6832 * @ipr_cmd: ipr command struct
6834 * This function processes the completion of an adapter bring down.
6835 * It wakes any reset sleepers.
6840 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6842 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Unless the adapter is being removed, let the mid-layer resume;
 * host_lock is dropped around scsi_unblock_requests(). */
6846 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6848 spin_unlock_irq(ioa_cfg->host->host_lock);
6849 scsi_unblock_requests(ioa_cfg->host);
6850 spin_lock_irq(ioa_cfg->host->host_lock);
6853 ioa_cfg->in_reset_reload = 0;
6854 ioa_cfg->reset_retries = 0;
/* Mark every HRR queue dead under its own _lock. */
6855 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6856 spin_lock(&ioa_cfg->hrrq[i]._lock);
6857 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6858 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6862 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6863 wake_up_all(&ioa_cfg->reset_wait_q);
6866 return IPR_RC_JOB_RETURN;
/* NOTE(review): elided listing — braces, the res->add_to_ml/del_from_ml
 * handling body, and several statements are missing (number gaps). */
6870 * ipr_ioa_reset_done - IOA reset completion.
6871 * @ipr_cmd: ipr command struct
6873 * This function processes the completion of an adapter reset.
6874 * It schedules any necessary mid-layer add/removes and
6875 * wakes any reset sleepers.
6880 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6882 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6883 struct ipr_resource_entry *res;
6884 struct ipr_hostrcb *hostrcb, *temp;
6888 ioa_cfg->in_reset_reload = 0;
/* Re-enable command issue on every HRR queue. */
6889 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6890 spin_lock(&ioa_cfg->hrrq[j]._lock);
6891 ioa_cfg->hrrq[j].allow_cmds = 1;
6892 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6895 ioa_cfg->reset_cmd = NULL;
6896 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Queue mid-layer add/remove work for resources that changed during
 * the reset (body elided in this listing). */
6898 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6899 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6904 schedule_work(&ioa_cfg->work_q);
/* Re-arm the HCAMs: the first IPR_NUM_LOG_HCAMS as log-data, the rest as
 * config-change notifications. */
6906 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6907 list_del(&hostrcb->queue);
6908 if (i++ < IPR_NUM_LOG_HCAMS)
6909 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6911 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6914 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6915 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6917 ioa_cfg->reset_retries = 0;
6918 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6919 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop host_lock around scsi_unblock_requests() to avoid re-entry. */
6921 spin_unlock(ioa_cfg->host->host_lock);
6922 scsi_unblock_requests(ioa_cfg->host);
6923 spin_lock(ioa_cfg->host->host_lock);
6925 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6926 scsi_block_requests(ioa_cfg->host);
6929 return IPR_RC_JOB_RETURN;
6933 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6934 * @supported_dev: supported device struct
6935 * @vpids: vendor product id struct
/* Zero the buffer, copy in the vendor/product IDs, and fill in the
 * fixed header fields for a single-record request. NOTE(review):
 * opening/closing braces elided from this listing. */
6940 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6941 struct ipr_std_inq_vpids *vpids)
6943 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6944 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6945 supported_dev->num_records = 1;
6946 supported_dev->data_length =
6947 cpu_to_be16(sizeof(struct ipr_supported_device));
6948 supported_dev->reserved = 0;
/* NOTE(review): elided listing — braces, a `continue`, and loop-closing
 * lines are missing (number gaps, e.g. 6970 -> 6973). */
6952 * ipr_set_supported_devs - Send Set Supported Devices for a device
6953 * @ipr_cmd: ipr command struct
6955 * This function sends a Set Supported Devices to the adapter
6958 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6960 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6962 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6963 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6964 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6965 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step: finish the reset job once all disks are done. */
6967 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume iteration from the resource saved in ipr_cmd->u.res; only SCSI
 * disks get a Set Supported Devices command. */
6969 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6970 if (!ipr_is_scsi_disk(res))
6973 ipr_cmd->u.res = res;
6974 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6976 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6977 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6978 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6980 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6981 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6982 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6983 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6985 ipr_init_ioadl(ipr_cmd,
6986 ioa_cfg->vpd_cbs_dma +
6987 offsetof(struct ipr_misc_cbs, supp_dev),
6988 sizeof(struct ipr_supported_device),
6989 IPR_IOADL_FLAGS_WRITE_LAST);
6991 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6992 IPR_SET_SUP_DEVICE_TIMEOUT);
/* On 32-bit SIS, loop back here for the next disk. */
6994 if (!ioa_cfg->sis64)
6995 ipr_cmd->job_step = ipr_set_supported_devs;
6997 return IPR_RC_JOB_RETURN;
7001 return IPR_RC_JOB_CONTINUE;
/* NOTE(review): elided listing — local declarations, the search loop's
 * `while` line, and returns are not visible (number gaps). */
7005 * ipr_get_mode_page - Locate specified mode page
7006 * @mode_pages: mode page buffer
7007 * @page_code: page code to find
7008 * @len: minimum required length for mode page
7011 * pointer to mode page / NULL on failure
7013 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7014 u32 page_code, u32 len)
7016 struct ipr_mode_page_hdr *mode_hdr;
7020 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Remaining data after the 4-byte header and the block descriptors. */
7023 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7024 mode_hdr = (struct ipr_mode_page_hdr *)
7025 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Walk page headers until the requested page_code is found with at
 * least `len` bytes of payload. */
7028 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7029 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
/* Advance to the next page: header size plus this page's length. */
7033 page_length = (sizeof(struct ipr_mode_page_hdr) +
7034 mode_hdr->page_length);
7035 length -= page_length;
7036 mode_hdr = (struct ipr_mode_page_hdr *)
7037 ((unsigned long)mode_hdr + page_length);
/* NOTE(review): elided listing — local declarations and the dev_err
 * argument line carrying the bus number are not visible (number gaps). */
7044 * ipr_check_term_power - Check for term power errors
7045 * @ioa_cfg: ioa config struct
7046 * @mode_pages: IOAFP mode pages buffer
7048 * Check the IOAFP's mode page 28 for term power errors
7053 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7054 struct ipr_mode_pages *mode_pages)
7058 struct ipr_dev_bus_entry *bus;
7059 struct ipr_mode_page28 *mode_page;
7061 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7062 sizeof(struct ipr_mode_page28));
7064 entry_length = mode_page->entry_length;
7066 bus = mode_page->bus;
/* Walk the per-bus entries; warn for any bus missing term power. */
7068 for (i = 0; i < mode_page->num_entries; i++) {
7069 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7070 dev_err(&ioa_cfg->pdev->dev,
7071 "Term power is absent on scsi bus %d\n",
/* Entries are variable-sized: step by entry_length, not sizeof(*bus). */
7075 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
/* NOTE(review): elided listing — local declarations and braces are not
 * visible (number gaps, e.g. 7090 -> 7095). */
7080 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7081 * @ioa_cfg: ioa config struct
7083 * Looks through the config table checking for SES devices. If
7084 * the SES device is in the SES table indicating a maximum SCSI
7085 * bus speed, the speed is limited for the bus.
7090 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
/* Clamp each bus's configured max transfer rate to the SES limit. */
7095 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7096 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7097 ioa_cfg->bus_attr[i].bus_width);
7099 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7100 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
/* NOTE(review): elided listing — braces and a `continue` after the
 * invalid-address report are not visible (number gaps). */
7105 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7106 * @ioa_cfg: ioa config struct
7107 * @mode_pages: mode page 28 buffer
7109 * Updates mode page 28 based on driver configuration
7114 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7115 struct ipr_mode_pages *mode_pages)
7117 int i, entry_length;
7118 struct ipr_dev_bus_entry *bus;
7119 struct ipr_bus_attributes *bus_attr;
7120 struct ipr_mode_page28 *mode_page;
7122 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7123 sizeof(struct ipr_mode_page28));
7125 entry_length = mode_page->entry_length;
7127 /* Loop for each device bus entry */
7128 for (i = 0, bus = mode_page->bus;
7129 i < mode_page->num_entries;
7130 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7131 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7132 dev_err(&ioa_cfg->pdev->dev,
7133 "Invalid resource address reported: 0x%08X\n",
7134 IPR_GET_PHYS_LOC(bus->res_addr));
/* Push the driver's per-bus attributes into the page entry. */
7138 bus_attr = &ioa_cfg->bus_attr[i];
7139 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7140 bus->bus_width = bus_attr->bus_width;
7141 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7142 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7143 if (bus_attr->qas_enabled)
7144 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7146 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7151 * ipr_build_mode_select - Build a mode select command
7152 * @ipr_cmd: ipr command struct
7153 * @res_handle: resource handle to send command to
7154 * @parm: Byte 2 of Mode Sense command
7155 * @dma_addr: DMA buffer address
7156 * @xfer_len: data transfer length
7161 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7162 __be32 res_handle, u8 parm,
7163 dma_addr_t dma_addr, u8 xfer_len)
7165 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* Fill in the IOARCB for a 6-byte MODE SELECT: byte 1 carries the
 * caller-supplied parameter flags, byte 4 the parameter list length. */
7167 ioarcb->res_handle = res_handle;
7168 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7169 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7170 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7171 ioarcb->cmd_pkt.cdb[1] = parm;
7172 ioarcb->cmd_pkt.cdb[4] = xfer_len;
/* Data flows host -> adapter, so build a write IOADL. */
7174 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7178 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7179 * @ipr_cmd: ipr command struct
7181 * This function sets up the SCSI bus attributes and sends
7182 * a Mode Select for Page 28 to activate them.
7187 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7189 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7190 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
/* Apply the driver's bus policy to the page 28 buffer fetched by the
 * preceding mode sense, then write it back to the adapter. */
7194 ipr_scsi_bus_speed_limit(ioa_cfg);
7195 ipr_check_term_power(ioa_cfg, mode_pages);
7196 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
/* hdr.length excludes its own byte, so total transfer is length + 1;
 * the length field itself must be zero in a mode select. */
7197 length = mode_pages->hdr.length + 1;
7198 mode_pages->hdr.length = 0;
7200 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7201 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
/* Next job step tells each attached device it is supported, starting
 * from the first entry on the used resource queue. */
7204 ipr_cmd->job_step = ipr_set_supported_devs;
7205 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7206 struct ipr_resource_entry, queue);
7207 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7210 return IPR_RC_JOB_RETURN;
7214 * ipr_build_mode_sense - Builds a mode sense command
7215 * @ipr_cmd: ipr command struct
7216 * @res: resource entry struct
7217 * @parm: Byte 2 of mode sense command
7218 * @dma_addr: DMA address of mode sense buffer
7219 * @xfer_len: Size of DMA buffer
7224 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7226 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7228 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* 6-byte MODE SENSE: byte 2 selects the page (caller's parm), byte 4
 * is the allocation length.  Data flows adapter -> host (read IOADL). */
7230 ioarcb->res_handle = res_handle;
7231 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7232 ioarcb->cmd_pkt.cdb[2] = parm;
7233 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7234 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7236 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7240 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7241 * @ipr_cmd: ipr command struct
7243 * This function handles the failure of an IOA bringup command.
7248 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7250 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7251 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* A bringup command failed: log opcode and IOASC, then kick off a
 * fresh adapter reset rather than continuing the broken sequence. */
7253 dev_err(&ioa_cfg->pdev->dev,
7254 "0x%02X failed with IOASC: 0x%08X\n",
7255 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7257 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* Return the command block to the free queue; the new reset job
 * allocates its own. */
7258 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7259 return IPR_RC_JOB_RETURN;
7263 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7264 * @ipr_cmd: ipr command struct
7266 * This function handles the failure of a Mode Sense to the IOAFP.
7267 * Some adapters do not handle all mode pages.
7270 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7272 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7274 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7275 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* "Invalid request" means this adapter simply does not implement the
 * mode page; skip straight to the set-supported-devices step instead
 * of treating it as a fatal bringup failure. */
7277 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7278 ipr_cmd->job_step = ipr_set_supported_devs;
7279 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7280 struct ipr_resource_entry, queue);
7281 return IPR_RC_JOB_CONTINUE;
/* Any other IOASC is a genuine failure. */
7284 return ipr_reset_cmd_failed(ipr_cmd);
7288 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7289 * @ipr_cmd: ipr command struct
7291 * This function send a Page 28 mode sense to the IOA to
7292 * retrieve SCSI bus attributes.
7297 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7299 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Read page 0x28 (SCSI bus attributes) into the shared vpd_cbs DMA
 * buffer; ipr_ioafp_mode_select_page28 consumes it next. */
7302 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7303 0x28, ioa_cfg->vpd_cbs_dma +
7304 offsetof(struct ipr_misc_cbs, mode_pages),
7305 sizeof(struct ipr_mode_pages));
7307 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
/* Unsupported-page failures are tolerated via this failure handler. */
7308 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7310 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7313 return IPR_RC_JOB_RETURN;
7317 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7318 * @ipr_cmd: ipr command struct
7320 * This function enables dual IOA RAID support if possible.
7325 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7327 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7328 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7329 struct ipr_mode_page24 *mode_page;
/* Find page 0x24 in the buffer filled by the preceding mode sense. */
7333 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7334 sizeof(struct ipr_mode_page24));
/* Turn on dual-IOA advanced function (RAID) support. */
7337 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
/* hdr.length excludes its own byte; zero it for the mode select. */
7339 length = mode_pages->hdr.length + 1;
7340 mode_pages->hdr.length = 0;
7342 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7343 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7346 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7347 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7350 return IPR_RC_JOB_RETURN;
7354 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7355 * @ipr_cmd: ipr command struct
7357 * This function handles the failure of a Mode Sense to the IOAFP.
7358 * Some adapters do not handle all mode pages.
7361 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7363 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7365 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Page 0x24 is optional: an "invalid request" IOASC just means this
 * adapter lacks it, so continue the bringup at the page 28 step. */
7367 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7368 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7369 return IPR_RC_JOB_CONTINUE;
7372 return ipr_reset_cmd_failed(ipr_cmd);
7376 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7377 * @ipr_cmd: ipr command struct
7379 * This function send a mode sense to the IOA to retrieve
7380 * the IOA Advanced Function Control mode page.
7385 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7387 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Fetch page 0x24 (IOA Advanced Function Control) into the shared
 * vpd_cbs buffer so page24's mode select can modify and re-send it. */
7390 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7391 0x24, ioa_cfg->vpd_cbs_dma +
7392 offsetof(struct ipr_misc_cbs, mode_pages),
7393 sizeof(struct ipr_mode_pages));
7395 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7396 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7398 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7401 return IPR_RC_JOB_RETURN;
7405 * ipr_init_res_table - Initialize the resource table
7406 * @ipr_cmd: ipr command struct
7408 * This function looks through the existing resource table, comparing
7409 * it with the config table. This function will take care of old/new
7410 * devices and schedule adding/removing them from the mid-layer
7414 * IPR_RC_JOB_CONTINUE
7416 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7418 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7419 struct ipr_resource_entry *res, *temp;
7420 struct ipr_config_table_entry_wrapper cfgtew;
7421 int entries, found, flag, i;
/* The config table layout differs between SIS32 and SIS64 adapters;
 * the wrapper/cfg_table union hides that from the matching logic. */
7426 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7428 flag = ioa_cfg->u.cfg_table->hdr.flags;
7430 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7431 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Park every currently-known resource on old_res; entries still in
 * the new config table get moved back, the rest are stale. */
7433 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7434 list_move_tail(&res->queue, &old_res)&
7437 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7439 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7441 for (i = 0; i < entries; i++) {
7443 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7445 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
/* Try to match this config-table entry against a previously known
 * resource; on match move it back to the used queue. */
7448 list_for_each_entry_safe(res, temp, &old_res, queue) {
7449 if (ipr_is_same_device(res, &cfgtew)) {
7450 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* Unmatched entry: it is a newly reported device.  Allocate a free
 * resource entry for it (error if the pool is exhausted). */
7457 if (list_empty(&ioa_cfg->free_res_q)) {
7458 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7463 res = list_entry(ioa_cfg->free_res_q.next,
7464 struct ipr_resource_entry, queue);
7465 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7466 ipr_init_res_entry(res, &cfgtew);
7468 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7469 res->sdev->allow_restart = 1;
7472 ipr_update_res_entry(res, &cfgtew);
/* Leftovers on old_res are devices that disappeared.  Those with a
 * mid-layer sdev are flagged for deletion (handled later by work_q);
 * the rest are cleared and returned to the free pool. */
7475 list_for_each_entry_safe(res, temp, &old_res, queue) {
7477 res->del_from_ml = 1;
7478 res->res_handle = IPR_INVALID_RES_HANDLE;
7479 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7483 list_for_each_entry_safe(res, temp, &old_res, queue) {
7484 ipr_clear_res_target(res);
7485 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
/* Only take the page 0x24 detour when dual IOA RAID is both supported
 * by the adapter and enabled via the module parameter. */
7488 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7489 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7491 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7494 return IPR_RC_JOB_CONTINUE;
7498 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7499 * @ipr_cmd: ipr command struct
7501 * This function sends a Query IOA Configuration command
7502 * to the adapter to retrieve the IOA configuration table.
7507 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7509 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7510 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7511 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7512 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
/* Latch the dual-IOA RAID capability learned from the 0xD0 inquiry. */
7515 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7516 ioa_cfg->dual_raid = 1;
7517 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7518 ucode_vpd->major_release, ucode_vpd->card_type,
7519 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7520 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7521 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 6..8 carry the config table size as a 24-bit big-endian
 * allocation length. */
7523 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7524 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7525 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7526 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7528 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7529 IPR_IOADL_FLAGS_READ_LAST);
/* Once the table arrives, reconcile it with our resource list. */
7531 ipr_cmd->job_step = ipr_init_res_table;
7533 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7536 return IPR_RC_JOB_RETURN;
7540 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7541 * @ipr_cmd: ipr command struct
7543 * This utility function sends an inquiry to the adapter.
7548 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7549 dma_addr_t dma_addr, u8 xfer_len)
7551 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* Standard 6-byte INQUIRY addressed to the IOA itself: byte 1 is the
 * EVPD flag, byte 2 the VPD page code, byte 4 the allocation length. */
7554 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7555 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7557 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7558 ioarcb->cmd_pkt.cdb[1] = flags;
7559 ioarcb->cmd_pkt.cdb[2] = page;
7560 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7562 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
/* Issues the command immediately; the caller only sets job_step. */
7564 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7569 * ipr_inquiry_page_supported - Is the given inquiry page supported
7570 * @page0: inquiry page 0 buffer
 * @page: page code to check for
7573 * This function determines if the specified inquiry page is supported.
7576 * 1 if page is supported / 0 if not
7578 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
/* Scan the supported-pages list from inquiry page 0, bounded by both
 * the reported length and the driver's array capacity. */
7582 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7583 if (page0->page[i] == page)
7590 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7591 * @ipr_cmd: ipr command struct
7593 * This function sends a Page 0xD0 inquiry to the adapter
7594 * to retrieve adapter capabilities.
7597 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7599 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7602 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7603 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
/* Default next step; zero cap first so an unsupported page reads as
 * "no capabilities" rather than stale data. */
7606 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7607 memset(cap, 0, sizeof(*cap));
/* Only issue the 0xD0 inquiry when page 0 says it is supported. */
7609 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7610 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7611 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7612 sizeof(struct ipr_inquiry_cap));
7613 return IPR_RC_JOB_RETURN;
7617 return IPR_RC_JOB_CONTINUE;
7621 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7622 * @ipr_cmd: ipr command struct
7624 * This function sends a Page 3 inquiry to the adapter
7625 * to retrieve software VPD information.
7628 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7630 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Fetch VPD page 3 (firmware/software revision data) into the shared
 * buffer, then move on to the capabilities inquiry. */
7636 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7638 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7639 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7640 sizeof(struct ipr_inquiry_page3));
7643 return IPR_RC_JOB_RETURN;
7647 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7648 * @ipr_cmd: ipr command struct
7650 * This function sends a Page 0 inquiry to the adapter
7651 * to retrieve supported inquiry pages.
7654 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7656 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7658 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7663 /* Grab the type out of the VPD and store it away */
/* First 4 chars of the product id encode the adapter type as hex
 * digits (e.g. "5702"); parsed with base 16 into ioa_cfg->type. */
7664 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7666 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7668 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
/* Page 0 lists which VPD pages the adapter supports. */
7670 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7671 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7672 sizeof(struct ipr_inquiry_page0));
7675 return IPR_RC_JOB_RETURN;
7679 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7680 * @ipr_cmd: ipr command struct
7682 * This function sends a standard inquiry to the adapter.
7687 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7689 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Standard (non-EVPD) inquiry of the IOA; its VPD feeds the adapter
 * type detection in the page 0 step that follows. */
7692 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7694 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7695 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7696 sizeof(struct ipr_ioa_vpd));
7699 return IPR_RC_JOB_RETURN;
7703 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7704 * @ipr_cmd: ipr command struct
7706 * This function send an Identify Host Request Response Queue
7707 * command to establish the HRRQ with the adapter.
7712 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7715 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7716 struct ipr_hrr_queue *hrrq;
/* Default next step once every HRRQ has been identified. */
7719 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7720 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
/* One Identify Host RRQ command is sent per queue; this function is
 * re-entered (see identify_hrrq_index below) until all are done. */
7722 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7723 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7725 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7726 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7728 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7730 ioarcb->cmd_pkt.cdb[1] = 0x1;
/* Queue-selection bit only matters with multiple MSI-X vectors. */
7732 if (ioa_cfg->nvectors == 1)
7733 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7735 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
/* CDB bytes 2..5 carry the low 32 bits of the queue's DMA address,
 * most-significant byte first. */
7737 ioarcb->cmd_pkt.cdb[2] =
7738 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7739 ioarcb->cmd_pkt.cdb[3] =
7740 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7741 ioarcb->cmd_pkt.cdb[4] =
7742 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7743 ioarcb->cmd_pkt.cdb[5] =
7744 ((u64) hrrq->host_rrq_dma) & 0xff;
/* Bytes 7..8: queue size in bytes (hrrq->size 32-bit entries). */
7745 ioarcb->cmd_pkt.cdb[7] =
7746 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7747 ioarcb->cmd_pkt.cdb[8] =
7748 (sizeof(u32) * hrrq->size) & 0xff;
7750 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7751 ioarcb->cmd_pkt.cdb[9] =
7752 ioa_cfg->identify_hrrq_index;
/* SIS64 adapters take the upper 32 address bits in bytes 10..13. */
7754 if (ioa_cfg->sis64) {
7755 ioarcb->cmd_pkt.cdb[10] =
7756 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7757 ioarcb->cmd_pkt.cdb[11] =
7758 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7759 ioarcb->cmd_pkt.cdb[12] =
7760 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7761 ioarcb->cmd_pkt.cdb[13] =
7762 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7765 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7766 ioarcb->cmd_pkt.cdb[14] =
7767 ioa_cfg->identify_hrrq_index;
7769 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7770 IPR_INTERNAL_TIMEOUT);
/* More queues left: make this function the next job step again so
 * the following queue gets identified on completion. */
7772 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7773 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7776 return IPR_RC_JOB_RETURN;
7780 return IPR_RC_JOB_CONTINUE;
7784 * ipr_reset_timer_done - Adapter reset timer function
7785 * @ipr_cmd: ipr command struct
7787 * Description: This function is used in adapter reset processing
7788 * for timing events. If the reset_cmd pointer in the IOA
7789 * config struct is not this adapter's we are doing nested
7790 * resets and fail_all_ops will take care of freeing the
7796 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7798 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7799 unsigned long lock_flags = 0;
/* Timer callback context: take the host lock before touching the
 * reset state. */
7801 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only advance the job if this command still owns the reset; if a
 * nested reset superseded it, fail_all_ops frees the command. */
7803 if (ioa_cfg->reset_cmd == ipr_cmd) {
7804 list_del(&ipr_cmd->queue);
7805 ipr_cmd->done(ipr_cmd);
7808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7812 * ipr_reset_start_timer - Start a timer for adapter reset job
7813 * @ipr_cmd: ipr command struct
7814 * @timeout: timeout value
7816 * Description: This function is used in adapter reset processing
7817 * for timing events. If the reset_cmd pointer in the IOA
7818 * config struct is not this adapter's we are doing nested
7819 * resets and fail_all_ops will take care of freeing the
7825 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7826 unsigned long timeout)
/* Park the command on the pending queue and arm a one-shot timer that
 * resumes the reset job (via ipr_reset_timer_done) after 'timeout'. */
7830 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7831 ipr_cmd->done = ipr_reset_ioa_job;
7833 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7834 ipr_cmd->timer.expires = jiffies + timeout;
/* Old-style (pre-timer_setup) kernel timer API: handler takes the
 * command pointer cast through unsigned long. */
7835 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7836 add_timer(&ipr_cmd->timer);
7840 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7841 * @ioa_cfg: ioa cfg struct
7846 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7848 struct ipr_hrr_queue *hrrq;
/* Reset every host response queue to an empty, toggle-bit-1 state
 * under its own lock. */
7850 for_each_hrrq(hrrq, ioa_cfg) {
7851 spin_lock(&hrrq->_lock);
7852 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7854 /* Initialize Host RRQ pointers */
7855 hrrq->hrrq_start = hrrq->host_rrq;
7856 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7857 hrrq->hrrq_curr = hrrq->hrrq_start;
7858 hrrq->toggle_bit = 1;
7859 spin_unlock(&hrrq->_lock);
7863 ioa_cfg->identify_hrrq_index = 0;
/* With multiple queues, queue 0 is reserved, so round-robin
 * dispatch starts at index 1. */
7864 if (ioa_cfg->hrrq_num == 1)
7865 atomic_set(&ioa_cfg->hrrq_index, 0);
7867 atomic_set(&ioa_cfg->hrrq_index, 1);
7869 /* Zero out config table */
7870 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7874 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7875 * @ipr_cmd: ipr command struct
7878 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7880 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7882 unsigned long stage, stage_time;
7884 volatile u32 int_reg;
7885 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* The init feedback register packs the current IPL stage and a
 * firmware-suggested per-stage timeout. */
7888 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7889 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7890 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7892 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7894 /* sanity check the stage_time value */
/* Clamp the firmware value into [MIN_STAGE_TIME, LONG_OPERATIONAL]. */
7895 if (stage_time == 0)
7896 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7897 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7898 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7899 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7900 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
/* Unknown stage: mask stage-change interrupts, fall back to the
 * overall transop timeout and wait for operational via identify. */
7902 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7903 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7904 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7905 stage_time = ioa_cfg->transop_timeout;
7906 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7907 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7908 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* Already operational: mask the bringup interrupts and continue the
 * job immediately, no timer needed. */
7909 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7910 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7911 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7912 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7913 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7914 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7915 return IPR_RC_JOB_CONTINUE;
/* Otherwise arm a stage timer; ipr_oper_timeout fires if the stage
 * never completes, while an interrupt resumes the job sooner. */
7919 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7920 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7921 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7922 ipr_cmd->done = ipr_reset_ioa_job;
7923 add_timer(&ipr_cmd->timer);
7925 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7927 return IPR_RC_JOB_RETURN;
7931 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7932 * @ipr_cmd: ipr command struct
7934 * This function reinitializes some control blocks and
7935 * enables destructive diagnostics on the adapter.
7940 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7942 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7943 volatile u32 int_reg;
7944 volatile u64 maskval;
7948 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7949 ipr_init_ioa_mem(ioa_cfg);
/* Re-enable interrupt handling on every queue before unmasking. */
7951 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7952 spin_lock(&ioa_cfg->hrrq[i]._lock);
7953 ioa_cfg->hrrq[i].allow_interrupts = 1;
7954 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7957 if (ioa_cfg->sis64) {
7958 /* Set the adapter to the correct endian mode. */
7959 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
/* Read-back flushes the posted write. */
7960 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7963 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* Adapter is already operational: just unmask error/HRRQ interrupts
 * and continue to the identify step without waiting. */
7965 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7966 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7967 ioa_cfg->regs.clr_interrupt_mask_reg32);
7968 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7969 return IPR_RC_JOB_CONTINUE;
7972 /* Enable destructive diagnostics on IOA */
7973 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7975 if (ioa_cfg->sis64) {
7976 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7977 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7978 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7980 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7982 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7984 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
/* SIS64 walks the staged IPL feedback path; SIS32 simply waits for
 * the transition-to-operational interrupt under a transop timer. */
7986 if (ioa_cfg->sis64) {
7987 ipr_cmd->job_step = ipr_reset_next_stage;
7988 return IPR_RC_JOB_CONTINUE;
7991 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7992 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7993 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7994 ipr_cmd->done = ipr_reset_ioa_job;
7995 add_timer(&ipr_cmd->timer);
7996 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7999 return IPR_RC_JOB_RETURN;
8003 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8004 * @ipr_cmd: ipr command struct
8006 * This function is invoked when an adapter dump has run out
8007 * of processing time.
8010 * IPR_RC_JOB_CONTINUE
8012 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8014 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Dump collection overran its time budget: demote the dump state
 * (not-yet-started -> wait, in-progress -> abort) and move on. */
8016 if (ioa_cfg->sdt_state == GET_DUMP)
8017 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8018 else if (ioa_cfg->sdt_state == READ_DUMP)
8019 ioa_cfg->sdt_state = ABORT_DUMP;
8021 ioa_cfg->dump_timeout = 1;
8022 ipr_cmd->job_step = ipr_reset_alert;
8024 return IPR_RC_JOB_CONTINUE;
8028 * ipr_unit_check_no_data - Log a unit check/no data error log
8029 * @ioa_cfg: ioa config struct
8031 * Logs an error indicating the adapter unit checked, but for some
8032 * reason, we were unable to fetch the unit check buffer.
8037 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
/* Count and report a unit check whose buffer could not be fetched. */
8039 ioa_cfg->errors_logged++;
8040 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8044 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8045 * @ioa_cfg: ioa config struct
8047 * Fetches the unit check buffer from the adapter by clocking the data
8048 * through the mailbox register.
8053 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8055 unsigned long mailbox;
8056 struct ipr_hostrcb *hostrcb;
8057 struct ipr_uc_sdt sdt;
/* The mailbox register points at the SDT (dump table) describing
 * where the unit check buffer lives. */
8061 mailbox = readl(ioa_cfg->ioa_mailbox);
/* SIS32 requires a format-2 SDT; bail with a no-data log otherwise. */
8063 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8064 ipr_unit_check_no_data(ioa_cfg);
8068 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8069 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8070 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
/* Validate the table: fetch succeeded, first entry valid, and the
 * table state is ready-to-use in either fmt2 or fmt3 flavor. */
8072 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8073 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8074 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8075 ipr_unit_check_no_data(ioa_cfg);
8079 /* Find length of the first sdt entry (UC buffer) */
/* fmt3 stores the byte count directly; fmt2 encodes it as a masked
 * end - start address difference. */
8080 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8081 length = be32_to_cpu(sdt.entry[0].end_token);
8083 length = (be32_to_cpu(sdt.entry[0].end_token) -
8084 be32_to_cpu(sdt.entry[0].start_token)) &
8085 IPR_FMT2_MBX_ADDR_MASK;
/* Borrow a free hostrcb to hold the fetched error data.
 * NOTE(review): assumes hostrcb_free_q is never empty here — confirm. */
8087 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8088 struct ipr_hostrcb, queue);
8089 list_del(&hostrcb->queue);
8090 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
/* Clamp the copy to the hcam buffer size. */
8092 rc = ipr_get_ldump_data_section(ioa_cfg,
8093 be32_to_cpu(sdt.entry[0].start_token),
8094 (__be32 *)&hostrcb->hcam,
8095 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8098 ipr_handle_log_data(ioa_cfg, hostrcb);
8099 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
/* A reset-required error while a dump is pending defers the dump. */
8100 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8101 ioa_cfg->sdt_state == GET_DUMP)
8102 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8104 ipr_unit_check_no_data(ioa_cfg);
8106 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8110 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8111 * @ipr_cmd: ipr command struct
8113 * Description: This function will call to get the unit check buffer.
8118 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8120 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Clear the flag first so a nested reset does not refetch, then pull
 * the unit check buffer and alert the adapter on the next step. */
8123 ioa_cfg->ioa_unit_checked = 0;
8124 ipr_get_unit_check_buffer(ioa_cfg);
8125 ipr_cmd->job_step = ipr_reset_alert;
/* Zero timeout: just bounce through the timer to re-enter the job. */
8126 ipr_reset_start_timer(ipr_cmd, 0);
8129 return IPR_RC_JOB_RETURN;
8133 * ipr_reset_restore_cfg_space - Restore PCI config space.
8134 * @ipr_cmd: ipr command struct
8136 * Description: This function restores the saved PCI config space of
8137 * the adapter, fails all outstanding ops back to the callers, and
8138 * fetches the dump/unit check if applicable to this reset.
8141 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8143 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8145 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Mark state as saved so pci_restore_state actually restores it. */
8149 ioa_cfg->pdev->state_saved = true;
8150 pci_restore_state(ioa_cfg->pdev);
/* Failing to reprogram the PCI-X command register is a PCI access
 * error; propagate it through the job's IOASC. */
8152 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8153 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8154 return IPR_RC_JOB_CONTINUE;
/* Everything that was in flight across the reset is now dead. */
8157 ipr_fail_all_ops(ioa_cfg);
8159 if (ioa_cfg->sis64) {
8160 /* Set the adapter to the correct endian mode. */
8161 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8162 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
/* Unit check pending: fetch its buffer before continuing.  SIS64
 * defers via a delay timer; SIS32 fetches inline. */
8165 if (ioa_cfg->ioa_unit_checked) {
8166 if (ioa_cfg->sis64) {
8167 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8168 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8169 return IPR_RC_JOB_RETURN;
8171 ioa_cfg->ioa_unit_checked = 0;
8172 ipr_get_unit_check_buffer(ioa_cfg);
8173 ipr_cmd->job_step = ipr_reset_alert;
8174 ipr_reset_start_timer(ipr_cmd, 0);
8175 return IPR_RC_JOB_RETURN;
/* Either finish the bringdown or re-enable the IOA; if a dump was
 * requested, start it (worker thread) with a platform-sized timeout. */
8179 if (ioa_cfg->in_ioa_bringdown) {
8180 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8182 ipr_cmd->job_step = ipr_reset_enable_ioa;
8184 if (GET_DUMP == ioa_cfg->sdt_state) {
8185 ioa_cfg->sdt_state = READ_DUMP;
8186 ioa_cfg->dump_timeout = 0;
8188 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8190 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8191 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8192 schedule_work(&ioa_cfg->work_q);
8193 return IPR_RC_JOB_RETURN;
8198 return IPR_RC_JOB_CONTINUE;
8202 * ipr_reset_bist_done - BIST has completed on the adapter.
8203 * @ipr_cmd: ipr command struct
8205 * Description: Unblock config space and resume the reset process.
8208 * IPR_RC_JOB_CONTINUE
8210 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8212 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* BIST finished: release the config-space lock (if we took it) and
 * proceed to restoring PCI config space. */
8215 if (ioa_cfg->cfg_locked)
8216 pci_cfg_access_unlock(ioa_cfg->pdev);
8217 ioa_cfg->cfg_locked = 0;
8218 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8220 return IPR_RC_JOB_CONTINUE;
8224 * ipr_reset_start_bist - Run BIST on the adapter.
8225 * @ipr_cmd: ipr command struct
8227 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8230 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8232 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8234 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8235 int rc = PCIBIOS_SUCCESSFUL;
/* Chips using MMIO BIST are kicked through the uproc interrupt
 * register; others via the standard PCI BIST config byte. */
8238 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8239 writel(IPR_UPROCI_SIS64_START_BIST,
8240 ioa_cfg->regs.set_uproc_interrupt_reg32);
8242 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
/* On success, give BIST time to run before checking completion. */
8244 if (rc == PCIBIOS_SUCCESSFUL) {
8245 ipr_cmd->job_step = ipr_reset_bist_done;
8246 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8247 rc = IPR_RC_JOB_RETURN;
/* Failure path: drop the config-space lock and surface a PCI access
 * error IOASC so the job's failure handling runs. */
8249 if (ioa_cfg->cfg_locked)
8250 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8251 ioa_cfg->cfg_locked = 0;
8252 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8253 rc = IPR_RC_JOB_CONTINUE;
8261 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8262 * @ipr_cmd: ipr command struct
8264 * Description: This clears PCI reset to the adapter and delays two seconds.
8269 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
/* De-assert PCI reset, then reuse the BIST settle delay before
 * unblocking config space and continuing the bringup. */
8272 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8273 ipr_cmd->job_step = ipr_reset_bist_done;
8274 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8276 return IPR_RC_JOB_RETURN;
8280 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8281 * @ipr_cmd: ipr command struct
8283 * Description: This asserts PCI reset to the adapter.
8288 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8290 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8291 struct pci_dev *pdev = ioa_cfg->pdev;
/* Assert warm reset on the slot and hold it for the PCI reset
 * interval; the done step de-asserts it. */
8294 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8295 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8296 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8298 return IPR_RC_JOB_RETURN;
8302 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8303 * @ipr_cmd: ipr command struct
8305 * Description: This attempts to block config access to the IOA.
8308 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8310 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8312 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8313 int rc = IPR_RC_JOB_CONTINUE;
/* Try to lock out userspace/other config-space access; on success
 * proceed straight to the chip-specific reset method. */
8315 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8316 ioa_cfg->cfg_locked = 1;
8317 ipr_cmd->job_step = ioa_cfg->reset;
/* Lock busy: retry on a timer while budget remains; when the budget
 * (set by ipr_reset_block_config_access) runs out, reset anyway. */
8319 if (ipr_cmd->u.time_left) {
8320 rc = IPR_RC_JOB_RETURN;
8321 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8322 ipr_reset_start_timer(ipr_cmd,
8323 IPR_CHECK_FOR_RESET_TIMEOUT);
8325 ipr_cmd->job_step = ioa_cfg->reset;
8326 dev_err(&ioa_cfg->pdev->dev,
8327 "Timed out waiting to lock config access. Resetting anyway.\n");
8335 * ipr_reset_block_config_access - Block config access to the IOA
8336 * @ipr_cmd: ipr command struct
8338 * Description: This attempts to block config access to the IOA
8341 * IPR_RC_JOB_CONTINUE
8343 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8345 ipr_cmd->ioa_cfg->cfg_locked = 0;
8346 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8347 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8348 return IPR_RC_JOB_CONTINUE;
8352 * ipr_reset_allowed - Query whether or not IOA can be reset
8353 * @ioa_cfg: ioa config struct
8356 * 0 if reset not allowed / non-zero if reset is allowed
8358 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8360 volatile u32 temp_reg;
8362 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8363 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8367 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8368 * @ipr_cmd: ipr command struct
8370 * Description: This function waits for adapter permission to run BIST,
8371 * then runs BIST. If the adapter does not give permission after a
8372 * reasonable time, we will reset the adapter anyway. The impact of
8373 * resetting the adapter without warning the adapter is the risk of
8374 * losing the persistent error log on the adapter. If the adapter is
8375 * reset while it is writing to the flash on the adapter, the flash
8376 * segment will have bad ECC and be zeroed.
8379 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		/* Adapter has not granted permission yet; spend some of the
		 * wait budget and poll again on timer expiry. */
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	/* Permission granted (or we gave up waiting — see the header
	 * comment about the flash-corruption risk): move on to blocking
	 * config access. */
	ipr_cmd->job_step = ipr_reset_block_config_access;
	rc = IPR_RC_JOB_CONTINUE;
8398 * ipr_reset_alert - Alert the adapter of a pending reset
8399 * @ipr_cmd: ipr command struct
8401 * Description: This function alerts the adapter that it will be reset.
8402 * If memory space is not currently enabled, proceed directly
8403 * to running BIST on the adapter. The timer must always be started
8404 * so we guarantee we do not run BIST from ipr_isr.
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		/* Memory space is enabled, so the MMIO registers are usable:
		 * raise RESET_ALERT and wait for the adapter to grant
		 * permission before running BIST. */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	/* Cannot alert the adapter; skip straight to blocking config access. */
	ipr_cmd->job_step = ipr_reset_block_config_access;

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	/* Always start the timer so BIST never runs from ipr_isr context
	 * (see function header). */
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
8434 * ipr_reset_ucode_download_done - Microcode download completion
8435 * @ipr_cmd: ipr command struct
8437 * Description: This function unmaps the microcode download buffer.
8440 * IPR_RC_JOB_CONTINUE
8442 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8444 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8445 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8447 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8448 sglist->num_sg, DMA_TO_DEVICE);
8450 ipr_cmd->job_step = ipr_reset_alert;
8451 return IPR_RC_JOB_CONTINUE;
8455 * ipr_reset_ucode_download - Download microcode to the adapter
8456 * @ipr_cmd: ipr command struct
8458 * Description: This function checks to see if it there is microcode
8459 * to download to the adapter. If there is, a download is performed.
8462 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	/* NOTE(review): this early exit presumably sits under a !sglist
	 * guard (no microcode staged) — confirm against full source. */
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;

	/* Build a WRITE BUFFER addressed to the IOA itself; the download
	 * length is encoded big-endian across CDB bytes 6-8. */
	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	/* sis64 and sis32 adapters use different IOADL descriptor formats. */
	ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	return IPR_RC_JOB_RETURN;
8497 * ipr_reset_shutdown_ioa - Shutdown the adapter
8498 * @ipr_cmd: ipr command struct
8500 * Description: This function issues an adapter shutdown of the
8501 * specified type to the specified adapter as part of the
8502 * adapter reset job.
8505 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	/* Only issue a shutdown if one was requested and the IOA is still
	 * responsive enough to process it. */
	if (shutdown_type != IPR_SHUTDOWN_NONE &&
	    !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		/* Pick a timeout matching the shutdown flavor. */
		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		/* Shutdown issued: wait for completion, then continue the
		 * reset with a possible microcode download. */
		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	ipr_cmd->job_step = ipr_reset_alert;
8543 * ipr_reset_ioa_job - Adapter reset job
8544 * @ipr_cmd: ipr command struct
8546 * Description: This function is the job router for the adapter reset job.
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioa_cfg->reset_cmd != ipr_cmd) {
		/*
		 * We are doing nested adapter resets and this is
		 * not the current reset job.
		 */
		list_add_tail(&ipr_cmd->queue,
			      &ipr_cmd->hrrq->hrrq_free_q);

	if (IPR_IOASC_SENSE_KEY(ioasc)) {
		/* The previous job step failed; let its failure handler
		 * decide whether the job continues. */
		rc = ipr_cmd->job_step_failed(ipr_cmd);
		if (rc == IPR_RC_JOB_RETURN)

	/* Run successive job steps synchronously until one returns
	 * IPR_RC_JOB_RETURN to wait for an async completion. */
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
	rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
8582 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8583 * @ioa_cfg: ioa config struct
8584 * @job_step: first job step of reset job
8585 * @shutdown_type: shutdown type
8587 * Description: This function will initiate the reset of the given adapter
8588 * starting at the selected job step.
8589 * If the caller needs to wait on the completion of the reset,
8590 * the caller must sleep on the reset_wait_q.
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	/* Stop every HRRQ from accepting new commands for the duration
	 * of the reset. */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
		scsi_block_requests(ioa_cfg->host);

	/* Grab a command block for the reset job and start it at the
	 * caller-selected step. */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
8621 * ipr_initiate_ioa_reset - Initiate an adapter reset
8622 * @ioa_cfg: ioa config struct
8623 * @shutdown_type: shutdown type
8625 * Description: This function will initiate the reset of the given adapter.
8626 * If the caller needs to wait on the completion of the reset,
8627 * the caller must sleep on the reset_wait_q.
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)

	if (ioa_cfg->in_reset_reload) {
		/* A reset is already in flight; adjust dump state rather
		 * than starting a nested reset. */
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		/* Recovery retries exhausted: take the adapter offline by
		 * marking every HRRQ dead. */
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);

		if (ioa_cfg->in_ioa_bringdown) {
			/* Already being brought down: fail outstanding ops
			 * and wake anyone waiting on the reset. */
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				/* host_lock must be dropped around
				 * scsi_unblock_requests(). */
				spin_unlock_irq(ioa_cfg->host->host_lock);
				scsi_unblock_requests(ioa_cfg->host);
				spin_lock_irq(ioa_cfg->host->host_lock);
		/* Dead adapter: demote to a bring-down with no shutdown cmd. */
		ioa_cfg->in_ioa_bringdown = 1;
		shutdown_type = IPR_SHUTDOWN_NONE;

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8682 * ipr_reset_freeze - Hold off all I/O activity
8683 * @ipr_cmd: ipr command struct
8685 * Description: If the PCI slot is frozen, hold off all I/O
8686 * activity; then, as soon as the slot is available again,
8687 * initiate an adapter reset.
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);

	/* Park this command on the pending queue; its done handler will
	 * resume the reset job once the PCI slot recovers. */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
8707 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8708 * @pdev: PCI device struct
8710 * Description: This routine is called to tell us that the PCI bus
8711 * is down. Can't do anything here, except put the device driver
8712 * into a holding pattern, waiting for the PCI bus to come back.
8714 static void ipr_pci_frozen(struct pci_dev *pdev)
8716 unsigned long flags = 0;
8717 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8719 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8720 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8725 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8726 * @pdev: PCI device struct
8728 * Description: This routine is called by the pci error recovery
8729 * code after the PCI slot has been reset, just before we
8730 * should resume normal operations.
8732 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8734 unsigned long flags = 0;
8735 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8737 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8738 if (ioa_cfg->needs_warm_reset)
8739 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8741 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8743 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8744 return PCI_ERS_RESULT_RECOVERED;
8748 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8749 * @pdev: PCI device struct
8751 * Description: This routine is called when the PCI bus has
8752 * permanently failed.
static void ipr_pci_perm_failure(struct pci_dev *pdev)
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	/* Pre-load the retry counter so the reset below is treated as the
	 * final attempt and the adapter is taken offline. */
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
	ioa_cfg->in_ioa_bringdown = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8776 * ipr_pci_error_detected - Called when a PCI error is detected.
8777 * @pdev: PCI device struct
8778 * @state: PCI channel state
8780 * Description: Called when a PCI error is detected.
8783 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8785 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8786 pci_channel_state_t state)
8789 case pci_channel_io_frozen:
8790 ipr_pci_frozen(pdev);
8791 return PCI_ERS_RESULT_NEED_RESET;
8792 case pci_channel_io_perm_failure:
8793 ipr_pci_perm_failure(pdev);
8794 return PCI_ERS_RESULT_DISCONNECT;
8799 return PCI_ERS_RESULT_NEED_RESET;
8803 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8804 * @ioa_cfg: ioa cfg struct
8806 * Description: This is the second phase of adapter intialization
8807 * This function takes care of initilizing the adapter to the point
8808 * where it can accept new commands.
8811 * 0 on success / -EIO on failure
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	/* A hard reset goes through the full reset path; otherwise start
	 * the reset job directly at the enable step. */
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	/* Drop the host lock while sleeping until the reset job finishes. */
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
	} else if (ipr_invalid_adapter(ioa_cfg)) {

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8848 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8849 * @ioa_cfg: ioa config struct
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	/* Clear the bookkeeping pointers so stale values can never be
	 * freed twice. */
	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
8878 * ipr_free_mem - Frees memory allocated for an adapter
8879 * @ioa_cfg: ioa cfg struct
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
	/* Release everything ipr_alloc_mem() set up: resource table,
	 * VPD buffers, command blocks, HRRQs, config table, HCAMs,
	 * dump and trace buffers. */
	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);

	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
8915 * ipr_free_all_resources - Free all allocated resources for an adapter.
8916 * @ipr_cmd: ipr command struct
8918 * This function frees all allocated resources for the
8919 * specified adapter.
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		/* One IRQ was requested per allocated MSI/MSI-X vector. */
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
		pci_disable_msi(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
		pci_disable_msix(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;

	/* Tear down in reverse order of ipr_probe_ioa() setup. */
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
8955 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8956 * @ioa_cfg: ioa config struct
8959 * 0 on success / -ENOMEM on allocation failure
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	/* Command blocks are DMA'd by the adapter; 512-byte alignment. */
	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);

	/* Partition the command-id space among the HRRQs. */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			/* With multiple HRRQs, a dedicated range of ids is
			 * reserved for internal commands. */
			entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id =
				(entries_each_hrrq - 1);
				IPR_NUM_BASE_CMD_BLKS/
				(ioa_cfg->hrrq_num - 1);
			ioa_cfg->hrrq[i].min_cmd_id =
				IPR_NUM_INTERNAL_CMD_BLKS +
				(i - 1) * entries_each_hrrq;
			ioa_cfg->hrrq[i].max_cmd_id =
				(IPR_NUM_INTERNAL_CMD_BLKS +
				 i * entries_each_hrrq - 1);
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		ioa_cfg->hrrq[i].size = entries_each_hrrq;

	BUG_ON(ioa_cfg->hrrq_num == 0);

	/* Hand any leftover command blocks to the last HRRQ. */
	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
	ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
			ipr_free_cmd_blks(ioa_cfg);

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		/* sis64 IOARCBs carry 64-bit host addresses; sis32, 32-bit. */
		ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		/* Steer this block onto its owning HRRQ's free list; advance
		 * to the next HRRQ once its id range is filled. */
		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9066 * ipr_alloc_mem - Allocate memory for an adapter
9067 * @ioa_cfg: ioa config struct
9070 * 0 on success / non-zero for error
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)

	/* All resource entries start out on the free queue. */
	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			/* Unwind the HRRQ buffers allocated so far. */
				pci_free_consistent(pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

/* Error unwind: free in reverse order of allocation. */
out_free_hostrcb_dma:
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);

	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		pci_free_consistent(pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);

out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);

	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
9184 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9185 * @ioa_cfg: ioa config struct
9190 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9194 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9195 ioa_cfg->bus_attr[i].bus = i;
9196 ioa_cfg->bus_attr[i].qas_enabled = 0;
9197 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9198 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9199 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9201 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9206 * ipr_init_ioa_cfg - Initialize IOA config struct
9207 * @ioa_cfg: ioa config struct
9208 * @host: scsi host struct
9209 * @pdev: PCI dev struct
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	/* Eye-catcher label strings make these structures easy to locate
	 * in an adapter/system dump. */
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	/* SIS64 chips support larger target/LUN/device limits. */
	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	/* Turn the chip-specific register offsets into absolute mapped
	 * addresses off the BAR mapping. */
	p = &ioa_cfg->chip_cfg->regs;

	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	/* These registers only exist on SIS64 adapters. */
	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
9291 * ipr_get_chip_info - Find adapter chip information
9292 * @dev_id: PCI device id struct
9295 * ptr to chip information on success / NULL on failure
9297 static const struct ipr_chip_t *
9298 ipr_get_chip_info(const struct pci_device_id *dev_id)
9302 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9303 if (ipr_chip[i].vendor == dev_id->vendor &&
9304 ipr_chip[i].device == dev_id->device)
9305 return &ipr_chip[i];
static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, err, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = ipr_number_of_msix;

	/* A positive return from pci_enable_msix() means "retry with at
	 * most this many vectors". */
	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
		pci_disable_msix(ioa_cfg->pdev);

	/* Record the vector numbers actually granted. */
	for (i = 0; i < vectors; i++)
		ioa_cfg->vectors_info[i].vec = entries[i].vector;
	ioa_cfg->nvectors = vectors;
static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
	int i, err, vectors;

	vectors = ipr_number_of_msix;

	/* A positive return from pci_enable_msi_block() means "retry with
	 * this many vectors". */
	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
		pci_disable_msi(ioa_cfg->pdev);

	/* MSI block vectors are consecutive starting at pdev->irq. */
	for (i = 0; i < vectors; i++)
		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
	ioa_cfg->nvectors = vectors;
9359 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9361 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9363 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9364 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9365 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9366 ioa_cfg->vectors_info[vec_idx].
9367 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
	/* Vector 0 is requested separately; hook up handlers for the
	 * remaining vectors here, unwinding on failure. */
	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
				 ioa_cfg->vectors_info[i].desc,
			free_irq(ioa_cfg->vectors_info[i].vec,
9392 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9393 * @pdev: PCI device struct
9395 * Description: Simply set the msi_received flag to 1 indicating that
9396 * Message Signaled Interrupts are supported.
9399 * 0 on success / non-zero on failure
9401 static irqreturn_t ipr_test_intr(int irq, void *devp)
9403 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9404 unsigned long lock_flags = 0;
9405 irqreturn_t rc = IRQ_HANDLED;
9407 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9410 ioa_cfg->msi_received = 1;
9411 wake_up(&ioa_cfg->msi_wait_q);
9413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9418 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9419 * @pdev: PCI device struct
9421 * Description: The return value from pci_enable_msi() can not always be
9422 * trusted. This routine sets up and initiates a test interrupt to determine
9423 * if the interrupt is received via the ipr_test_intr() service routine.
9424 * If the tests fails, the driver will fall back to LSI.
9427 * 0 on success / non-zero on failure
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	/* Arm the test: clear the received flag and unmask only the
	 * IO debug-acknowledge interrupt used as the test vector. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	/* Fire the test interrupt and wait up to one second (HZ jiffies)
	 * for ipr_test_intr() to set msi_received. */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Release the temporary test IRQ. */
	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
		free_irq(pdev->irq, ioa_cfg);
/* NOTE(review): degraded extract — fused line numbers, dropped braces/labels
 * (several goto targets such as out_free_irq/cleanup_nolog are not visible).
 * Code text preserved byte-identical; comments only.
 *
 * ipr_probe_ioa - first-stage PCI probe: enable the device, map registers,
 * set up DMA masks and interrupts (MSI-X/MSI/LSI), allocate driver memory,
 * and register the adapter on ipr_ioa_head. Second-stage init happens in
 * ipr_probe(). */
9480 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9481 * @pdev: PCI device struct
9482 * @dev_id: PCI device id struct
9485 * 0 on success / non-zero on failure
9487 static int ipr_probe_ioa(struct pci_dev *pdev,
9488 const struct pci_device_id *dev_id)
9490 struct ipr_ioa_cfg *ioa_cfg;
9491 struct Scsi_Host *host;
9492 unsigned long ipr_regs_pci;
9493 void __iomem *ipr_regs;
9494 int rc = PCIBIOS_SUCCESSFUL;
9495 volatile u32 mask, uproc, interrupts;
9496 unsigned long lock_flags;
9500 if ((rc = pci_enable_device(pdev))) {
9501 dev_err(&pdev->dev, "Cannot enable adapter\n");
9505 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* The ioa_cfg lives in the Scsi_Host's hostdata allocation. */
9507 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9510 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9515 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9516 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9517 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9519 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9521 if (!ioa_cfg->ipr_chip) {
9522 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9523 dev_id->vendor, dev_id->device);
9524 goto out_scsi_host_put;
/* set SIS 32 or SIS 64 */
9527 /* set SIS 32 or SIS 64 */
9528 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9529 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9530 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9531 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
/* Module parameter overrides the per-device transop timeout choice. */
9533 if (ipr_transop_timeout)
9534 ioa_cfg->transop_timeout = ipr_transop_timeout;
9535 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9536 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9538 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9540 ioa_cfg->revid = pdev->revision;
9542 ipr_regs_pci = pci_resource_start(pdev, 0);
9544 rc = pci_request_regions(pdev, IPR_NAME);
9547 "Couldn't register memory range of registers\n");
9548 goto out_scsi_host_put;
9551 ipr_regs = pci_ioremap_bar(pdev, 0);
9555 "Couldn't map memory range of registers\n");
9557 goto out_release_regions;
9560 ioa_cfg->hdw_dma_regs = ipr_regs;
9561 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9562 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9564 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9566 pci_set_master(pdev);
/* SIS64 chips prefer a 64-bit DMA mask; fall back to 32-bit on failure. */
9568 if (ioa_cfg->sis64) {
9569 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9571 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9572 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9576 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9579 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9583 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9584 ioa_cfg->chip_cfg->cache_line_size);
9586 if (rc != PCIBIOS_SUCCESSFUL) {
9587 dev_err(&pdev->dev, "Write of cache line size failed\n");
/* Clamp the user-supplied MSI-X vector count to the driver maximum. */
9592 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9593 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9594 IPR_MAX_MSIX_VECTORS);
9595 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
/* Interrupt mode preference: MSI-X, then MSI, then legacy LSI. */
9598 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9599 ipr_enable_msix(ioa_cfg) == 0)
9600 ioa_cfg->intr_flag = IPR_USE_MSIX;
9601 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9602 ipr_enable_msi(ioa_cfg) == 0)
9603 ioa_cfg->intr_flag = IPR_USE_MSI;
9605 ioa_cfg->intr_flag = IPR_USE_LSI;
9606 ioa_cfg->nvectors = 1;
9607 dev_info(&pdev->dev, "Cannot enable MSI.\n");
/* Prove MSI/MSI-X delivery actually works; -EOPNOTSUPP means fall back
 * to LSI, any other error aborts the probe. */
9610 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9611 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9612 rc = ipr_test_msi(ioa_cfg, pdev);
9613 if (rc == -EOPNOTSUPP) {
9614 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9615 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9616 pci_disable_msi(pdev);
9617 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9618 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9619 pci_disable_msix(pdev);
9622 ioa_cfg->intr_flag = IPR_USE_LSI;
9623 ioa_cfg->nvectors = 1;
9626 goto out_msi_disable;
9628 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9629 dev_info(&pdev->dev,
9630 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9631 ioa_cfg->nvectors, pdev->irq);
9632 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9633 dev_info(&pdev->dev,
9634 "Request for %d MSIXs succeeded.",
/* One HRRQ per vector, bounded by online CPUs and the driver maximum. */
9639 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9640 (unsigned int)num_online_cpus(),
9641 (unsigned int)IPR_MAX_HRRQ_NUM);
9643 /* Save away PCI config space for use following IOA reset */
9644 rc = pci_save_state(pdev);
9646 if (rc != PCIBIOS_SUCCESSFUL) {
9647 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9649 goto out_msi_disable;
9652 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9653 goto out_msi_disable;
9655 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9656 goto out_msi_disable;
/* Config table sizing differs between SIS64 and SIS32 layouts. */
9659 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9660 + ((sizeof(struct ipr_config_table_entry64)
9661 * ioa_cfg->max_devs_supported)));
9663 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9664 + ((sizeof(struct ipr_config_table_entry)
9665 * ioa_cfg->max_devs_supported)));
9667 rc = ipr_alloc_mem(ioa_cfg);
9670 "Couldn't allocate enough memory for device driver!\n");
9671 goto out_msi_disable;
9675 * If HRRQ updated interrupt is not masked, or reset alert is set,
9676 * the card is in an unknown state and needs a hard reset
9678 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9679 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9680 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9681 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9682 ioa_cfg->needs_hard_reset = 1;
9683 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9684 ioa_cfg->needs_hard_reset = 1;
9685 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9686 ioa_cfg->ioa_unit_checked = 1;
9688 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9689 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9690 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Install the real ISR(s): one per named MSI/MSI-X vector, or a single
 * shared handler on pdev->irq for LSI. */
9692 if (ioa_cfg->intr_flag == IPR_USE_MSI
9693 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9694 name_msi_vectors(ioa_cfg);
9695 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9697 ioa_cfg->vectors_info[0].desc,
9700 rc = ipr_request_other_msi_irqs(ioa_cfg);
9702 rc = request_irq(pdev->irq, ipr_isr,
9704 IPR_NAME, &ioa_cfg->hrrq[0]);
9707 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Some Obsidian-E revisions need a PCI warm reset instead of BIST. */
9712 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9713 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9714 ioa_cfg->needs_warm_reset = 1;
9715 ioa_cfg->reset = ipr_reset_slot_reset;
9717 ioa_cfg->reset = ipr_reset_start_bist;
9719 spin_lock(&ipr_driver_lock);
9720 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9721 spin_unlock(&ipr_driver_lock);
/* Error-unwind labels (partially stripped in this extract): free memory,
 * disable MSI/MSI-X, release regions, drop the host, disable the device. */
9728 ipr_free_mem(ioa_cfg);
9730 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9731 pci_disable_msi(pdev);
9732 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9733 pci_disable_msix(pdev);
9736 out_release_regions:
9737 pci_release_regions(pdev);
9739 scsi_host_put(host);
9741 pci_disable_device(pdev);
/* NOTE(review): extract keeps fused line numbers; braces and the
 * target/lun declarations were dropped. Comments only added. */
9746 * ipr_scan_vsets - Scans for VSET devices
9747 * @ioa_cfg: ioa config struct
9749 * Description: Since the VSET resources do not follow SAM in that we can have
9750 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9755 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
/* Exhaustively probe every target/LUN on the VSET bus, since the SCSI
 * mid-layer's LUN-0-based scan would miss sparse LUNs. */
9759 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9760 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9761 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
/* NOTE(review): fused line numbers retained; braces dropped. Comments only. */
9765 * ipr_initiate_ioa_bringdown - Bring down an adapter
9766 * @ioa_cfg: ioa config struct
9767 * @shutdown_type: shutdown type
9769 * Description: This function will initiate bringing down the adapter.
9770 * This consists of issuing an IOA shutdown to the adapter
9771 * to flush the cache, and running BIST.
9772 * If the caller needs to wait on the completion of the reset,
9773 * the caller must sleep on the reset_wait_q.
9778 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9779 enum ipr_shutdown_type shutdown_type)
/* Abort any dump in progress, clear the retry counter, flag bringdown,
 * then kick off the reset state machine with the requested shutdown type. */
9782 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9783 ioa_cfg->sdt_state = ABORT_DUMP;
9784 ioa_cfg->reset_retries = 0;
9785 ioa_cfg->in_ioa_bringdown = 1;
9786 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
/* NOTE(review): fused line numbers retained; braces and some declarations
 * (e.g. loop variable i) dropped. Comments only added. */
9791 * __ipr_remove - Remove a single adapter
9792 * @pdev: pci device struct
9794 * Adapter hot plug remove entry point.
9799 static void __ipr_remove(struct pci_dev *pdev)
9801 unsigned long host_lock_flags = 0;
9802 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Wait out any in-flight reset/reload; the lock is dropped while sleeping
 * on reset_wait_q and re-taken, so the condition is re-checked in a loop. */
9806 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9807 while (ioa_cfg->in_reset_reload) {
9808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9809 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9810 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Mark every HRRQ as being removed so no new commands are accepted. */
9813 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9814 spin_lock(&ioa_cfg->hrrq[i]._lock);
9815 ioa_cfg->hrrq[i].removing_ioa = 1;
9816 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9819 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
/* Wait for the bringdown to complete and flush deferred work before
 * tearing down the resource queue and unlinking from the global list. */
9821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9822 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9823 flush_work(&ioa_cfg->work_q);
9824 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9825 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9827 spin_lock(&ipr_driver_lock);
9828 list_del(&ioa_cfg->queue);
9829 spin_unlock(&ipr_driver_lock);
9831 if (ioa_cfg->sdt_state == ABORT_DUMP)
9832 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9835 ipr_free_all_resources(ioa_cfg);
/* NOTE(review): fused line numbers retained; continuation arguments of the
 * remove-file calls and the trailing __ipr_remove() call appear dropped. */
9841 * ipr_remove - IOA hot plug remove entry point
9842 * @pdev: pci device struct
9844 * Adapter hot plug remove entry point.
9849 static void ipr_remove(struct pci_dev *pdev)
9851 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Tear down sysfs trace/dump attributes and the SCSI host before the
 * low-level adapter teardown. */
9855 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9857 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9859 scsi_remove_host(ioa_cfg->host);
/* NOTE(review): fused line numbers retained; braces, `int rc, i;`, and the
 * error-path calls after each failing step were dropped. Comments only. */
9867 * ipr_probe - Adapter hot plug add entry point
9870 * 0 on success / non-zero on failure
9872 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9874 struct ipr_ioa_cfg *ioa_cfg;
/* Stage 1: low-level PCI/interrupt/memory setup. */
9877 rc = ipr_probe_ioa(pdev, dev_id);
/* Stage 2: bring the adapter operational. */
9882 ioa_cfg = pci_get_drvdata(pdev);
9883 rc = ipr_probe_ioa_part2(ioa_cfg);
9890 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* sysfs trace file; on failure the host added above is removed. */
9897 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9901 scsi_remove_host(ioa_cfg->host);
/* sysfs dump file; on failure both trace file and host are unwound. */
9906 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9910 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9912 scsi_remove_host(ioa_cfg->host);
/* Scan attached devices, then expose the VSET bus and the IOA itself. */
9917 scsi_scan_host(ioa_cfg->host);
9918 ipr_scan_vsets(ioa_cfg);
9919 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9920 ioa_cfg->allow_ml_add_del = 1;
9921 ioa_cfg->host->max_channel = IPR_VSET_BUS;
9922 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
/* Enable blk-iopoll on the secondary HRRQs only for SIS64 multi-vector
 * configurations; HRRQ 0 stays interrupt-driven. */
9924 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9925 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9926 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9927 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9928 ioa_cfg->iopoll_weight, ipr_iopoll);
9929 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9933 schedule_work(&ioa_cfg->work_q);
/* NOTE(review): fused line numbers retained; braces and `int i;` dropped. */
9938 * ipr_shutdown - Shutdown handler.
9939 * @pdev: pci device struct
9941 * This function is invoked upon system shutdown/reboot. It will issue
9942 * an adapter shutdown to the adapter to flush the write cache.
9947 static void ipr_shutdown(struct pci_dev *pdev)
9949 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9950 unsigned long lock_flags = 0;
/* Disable iopoll on the secondary HRRQs (mirrors the enable in
 * ipr_probe()) before initiating the shutdown. */
9953 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9954 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9955 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9956 ioa_cfg->iopoll_weight = 0;
9957 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9958 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
/* Wait for any in-flight reset/reload to settle, then bring the adapter
 * down normally and wait for completion so the write cache is flushed. */
9961 while (ioa_cfg->in_reset_reload) {
9962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9963 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9964 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9967 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9968 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9969 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* PCI device id table: one entry per supported adapter, keyed by
 * vendor/device and IBM subsystem id. The last (driver_data) field carries
 * per-device flags such as IPR_USE_LONG_TRANSOP_TIMEOUT and
 * IPR_USE_PCI_WARM_RESET, consumed in ipr_probe_ioa().
 * NOTE(review): the terminating all-zero sentinel entry was dropped from
 * this extract along with other lines. */
9972 static struct pci_device_id ipr_pci_table[] = {
9973 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9974 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9975 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9976 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9977 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9978 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9979 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9980 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9981 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9982 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9983 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9984 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9985 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9986 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9987 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9988 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9989 IPR_USE_LONG_TRANSOP_TIMEOUT },
9990 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9991 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9992 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9993 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9994 IPR_USE_LONG_TRANSOP_TIMEOUT },
9995 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9996 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9997 IPR_USE_LONG_TRANSOP_TIMEOUT },
9998 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9999 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10000 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10001 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10002 IPR_USE_LONG_TRANSOP_TIMEOUT},
10003 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10004 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10005 IPR_USE_LONG_TRANSOP_TIMEOUT },
10006 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10007 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10008 IPR_USE_LONG_TRANSOP_TIMEOUT },
10009 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10010 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10011 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10012 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10013 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10014 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10015 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10016 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10017 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10018 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10019 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10020 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10021 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10022 IPR_USE_LONG_TRANSOP_TIMEOUT },
10023 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10024 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10025 IPR_USE_LONG_TRANSOP_TIMEOUT },
10026 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10027 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10028 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10029 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10030 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10031 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10032 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10033 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10034 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10035 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10036 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10037 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10038 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10039 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10040 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10041 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10042 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10043 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10044 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10045 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10046 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10047 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10048 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10049 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10050 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10051 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10052 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10053 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10054 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10055 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
/* Export the table for module autoloading via modalias. */
10058 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI AER/EEH error callbacks; the closing brace of this initializer was
 * dropped from the extract. */
10060 static const struct pci_error_handlers ipr_err_handler = {
10061 .error_detected = ipr_pci_error_detected,
10062 .slot_reset = ipr_pci_slot_reset,
/* pci_driver registration block; the .name initializer line and closing
 * brace were dropped from the extract. */
10065 static struct pci_driver ipr_driver = {
10067 .id_table = ipr_pci_table,
10068 .probe = ipr_probe,
10069 .remove = ipr_remove,
10070 .shutdown = ipr_shutdown,
10071 .err_handler = &ipr_err_handler,
/* Completion callback for the shutdown-prepare command issued by
 * ipr_halt(): simply return the command block to its HRRQ free list. */
10075 * ipr_halt_done - Shutdown prepare completion
10080 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10082 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* NOTE(review): fused line numbers retained; braces, `continue;` in the
 * !allow_cmds branch, and the final return were dropped. Comments only. */
10086 * ipr_halt - Issue shutdown prepare to all adapters
10089 * NOTIFY_OK on success / NOTIFY_DONE on failure
10091 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10093 struct ipr_cmnd *ipr_cmd;
10094 struct ipr_ioa_cfg *ioa_cfg;
10095 unsigned long flags = 0;
/* Only act on real shutdown-class events. */
10097 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10098 return NOTIFY_DONE;
10100 spin_lock(&ipr_driver_lock);
/* Send IOA_SHUTDOWN (prepare-for-normal) to every registered adapter
 * that is currently accepting commands. */
10102 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10103 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10104 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10105 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10109 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10110 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10111 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10112 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10113 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10115 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10116 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10118 spin_unlock(&ipr_driver_lock);
/* Reboot notifier wired to ipr_halt(); the initializer body (presumably
 * .notifier_call = ipr_halt — TODO confirm) was dropped from the extract. */
10123 static struct notifier_block ipr_notifier = {
/* Module entry point: announce the driver version, register the reboot
 * notifier so caches get flushed on halt, then register the PCI driver. */
10128 * ipr_init - Module entry point
10131 * 0 on success / negative value on failure
10133 static int __init ipr_init(void)
10135 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10136 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10138 register_reboot_notifier(&ipr_notifier);
10139 return pci_register_driver(&ipr_driver);
/* Module exit: undo ipr_init() in reverse order — drop the reboot
 * notifier, then unregister the PCI driver (which removes all adapters). */
10143 * ipr_exit - Module unload
10145 * Module unload entry point.
10150 static void __exit ipr_exit(void)
10152 unregister_reboot_notifier(&ipr_notifier);
10153 pci_unregister_driver(&ipr_driver);
10156 module_init(ipr_init);
10157 module_exit(ipr_exit);