drivers/scsi/ipr.c
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5).  (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
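
/*
 * Example (hypothetical values): the module parameters above can be
 * supplied at load time, e.g.
 *
 *   modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * or, when the driver is built in, on the kernel command line as
 * ipr.max_speed=2 and so on.
 */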

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"}
};

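/*
 * Enclosure table: an 'X' in the compare string means that byte of the
 * enclosure product ID participates in the match; any other character
 * (e.g. '*') is ignored. Matching enclosures have their SCSI bus speed
 * capped at the listed rate in MB/s.
 */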
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

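        /* Preserve the command's HRRQ assignment across the cmd_pkt re-init */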
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
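        /* Order the allow_interrupts updates before the mask register writes */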
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
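        /* Read back to ensure the mask and clear writes have posted */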
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:               done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

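        /*
         * Drop the host lock while sleeping; ipr_internal_cmd_done
         * completes us from interrupt context.
         */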
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

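/**
 * ipr_get_hrrq_index - Select the HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * With multiple queues configured, queue 0 is reserved for internal
 * commands and the remaining queues are used round-robin.
 *
 * Return value:
 *      hrrq index
 **/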
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

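                /*
                 * Assign a virtual bus/target id: reuse the id of an
                 * existing path to the same device where possible,
                 * otherwise take a free bit from the relevant bitmap.
                 */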
                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
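 *
 * A path of bytes 0x00 0x01 0xff, for example, formats as "00-01".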
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:    ioa config struct
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
                                 u8 *res_path, char *buffer, int len)
{
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
        __ipr_format_res_path(res_path, p, len - (p - buffer));
        return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
                                 struct ipr_config_table_entry_wrapper *cfgtew)
{
        char buffer[IPR_MAX_RES_PATH_LENGTH];
        unsigned int proto;
        int new_path = 0;

        if (res->ioa_cfg->sis64) {
                res->flags = cfgtew->u.cfgte64->flags;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL64(res);
                proto = cfgtew->u.cfgte64->proto;
                res->res_handle = cfgtew->u.cfgte64->res_handle;
                res->dev_id = cfgtew->u.cfgte64->dev_id;

                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));

                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
                                        sizeof(res->res_path))) {
                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                                sizeof(res->res_path));
                        new_path = 1;
                }

                if (res->sdev && new_path)
                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
                                    ipr_format_res_path(res->ioa_cfg,
                                        res->res_path, buffer, sizeof(buffer)));
        } else {
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL(res);
                proto = cfgtew->u.cfgte->proto;
                res->res_handle = cfgtew->u.cfgte->res_handle;
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *                        for the resource.
 * @res:        resource entry struct
1319  *
1320  * Return value:
1321  *      none
1322  **/
1323 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1324 {
1325         struct ipr_resource_entry *gscsi_res = NULL;
1326         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1327
1328         if (!ioa_cfg->sis64)
1329                 return;
1330
1331         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1332                 clear_bit(res->target, ioa_cfg->array_ids);
1333         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1334                 clear_bit(res->target, ioa_cfg->vset_ids);
1335         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1336                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1337                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1338                                 return;
1339                 clear_bit(res->target, ioa_cfg->target_ids);
1340
1341         } else if (res->bus == 0)
1342                 clear_bit(res->target, ioa_cfg->target_ids);
1343 }
1344
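/*
 * Illustration only: virtual-bus target ids live in per-adapter
 * bitmaps (array_ids, vset_ids, target_ids), and ipr_clear_res_target()
 * is the release half of the pairing. A minimal sketch of the
 * allocation side, assuming it takes the first clear bit; the helper
 * name is hypothetical:
 */
#if 0
static int example_alloc_target_id(unsigned long *ids, int max_ids)
{
        int id = find_first_zero_bit(ids, max_ids);

        if (id < max_ids)
                set_bit(id, ids);       /* released later via clear_bit() */
        return id;                      /* == max_ids when the map is full */
}
#endif
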
1345 /**
1346  * ipr_handle_config_change - Handle a config change from the adapter
1347  * @ioa_cfg:    ioa config struct
1348  * @hostrcb:    hostrcb
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1354                                      struct ipr_hostrcb *hostrcb)
1355 {
1356         struct ipr_resource_entry *res = NULL;
1357         struct ipr_config_table_entry_wrapper cfgtew;
1358         __be32 cc_res_handle;
1359
1360         u32 is_ndn = 1;
1361
1362         if (ioa_cfg->sis64) {
1363                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1364                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1365         } else {
1366                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1367                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1368         }
1369
1370         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1371                 if (res->res_handle == cc_res_handle) {
1372                         is_ndn = 0;
1373                         break;
1374                 }
1375         }
1376
1377         if (is_ndn) {
1378                 if (list_empty(&ioa_cfg->free_res_q)) {
1379                         ipr_send_hcam(ioa_cfg,
1380                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1381                                       hostrcb);
1382                         return;
1383                 }
1384
1385                 res = list_entry(ioa_cfg->free_res_q.next,
1386                                  struct ipr_resource_entry, queue);
1387
1388                 list_del(&res->queue);
1389                 ipr_init_res_entry(res, &cfgtew);
1390                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1391         }
1392
1393         ipr_update_res_entry(res, &cfgtew);
1394
1395         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1396                 if (res->sdev) {
1397                         res->del_from_ml = 1;
1398                         res->res_handle = IPR_INVALID_RES_HANDLE;
1399                         if (ioa_cfg->allow_ml_add_del)
1400                                 schedule_work(&ioa_cfg->work_q);
1401                 } else {
1402                         ipr_clear_res_target(res);
1403                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1404                 }
1405         } else if (!res->sdev || res->del_from_ml) {
1406                 res->add_to_ml = 1;
1407                 if (ioa_cfg->allow_ml_add_del)
1408                         schedule_work(&ioa_cfg->work_q);
1409         }
1410
1411         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1412 }
1413
1414 /**
1415  * ipr_process_ccn - Op done function for a CCN.
1416  * @ipr_cmd:    ipr command struct
1417  *
1418  * This function is the op done function for a configuration change
1419  * notification HCAM (host controlled asynchronous message) from the adapter.
1420  *
1421  * Return value:
1422  *      none
1423  **/
1424 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1425 {
1426         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1427         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1428         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1429
1430         list_del(&hostrcb->queue);
1431         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1432
1433         if (ioasc) {
1434                 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1435                         dev_err(&ioa_cfg->pdev->dev,
1436                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1437
1438                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1439         } else {
1440                 ipr_handle_config_change(ioa_cfg, hostrcb);
1441         }
1442 }
1443
1444 /**
1445  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1446  * @i:          index into buffer
1447  * @buf:                string to modify
1448  *
1449  * This function will strip all trailing whitespace, pad the end
1450  * of the string with a single space, and NUL terminate the string.
1451  *
1452  * Return value:
1453  *      new length of string
1454  **/
1455 static int strip_and_pad_whitespace(int i, char *buf)
1456 {
1457         while (i && buf[i] == ' ')
1458                 i--;
1459         buf[i+1] = ' ';
1460         buf[i+2] = '\0';
1461         return i + 2;
1462 }
1463
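/*
 * Illustration only: a worked example of strip_and_pad_whitespace().
 * With buf = "IBM     " (three letters, five trailing blanks) and
 * i = 7 indexing the last blank, the blanks collapse to one space,
 * the string is NUL terminated, and the returned index (4) addresses
 * the terminator - exactly where the next field is copied in
 * ipr_log_vpd_compact() below.
 */
#if 0
static void example_strip_pad(void)
{
        char buf[16] = "IBM     ";
        int i = strip_and_pad_whitespace(7, buf);       /* buf == "IBM ", i == 4 */
}
#endif
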
1464 /**
1465  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1466  * @prefix:             string to print at start of printk
1467  * @hostrcb:    hostrcb pointer
1468  * @vpd:                vendor/product id/sn struct
1469  *
1470  * Return value:
1471  *      none
1472  **/
1473 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1474                                 struct ipr_vpd *vpd)
1475 {
1476         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1477         int i = 0;
1478
1479         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1480         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1481
1482         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1483         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1484
1485         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1486         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1487
1488         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1489 }
1490
1491 /**
1492  * ipr_log_vpd - Log the passed VPD to the error log.
1493  * @vpd:                vendor/product id/sn struct
1494  *
1495  * Return value:
1496  *      none
1497  **/
1498 static void ipr_log_vpd(struct ipr_vpd *vpd)
1499 {
1500         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1501                     + IPR_SERIAL_NUM_LEN];
1502
1503         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1504         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1505                IPR_PROD_ID_LEN);
1506         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1507         ipr_err("Vendor/Product ID: %s\n", buffer);
1508
1509         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1510         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1511         ipr_err("    Serial Number: %s\n", buffer);
1512 }
1513
1514 /**
1515  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1516  * @prefix:             string to print at start of printk
1517  * @hostrcb:    hostrcb pointer
1518  * @vpd:                vendor/product id/sn/wwn struct
1519  *
1520  * Return value:
1521  *      none
1522  **/
1523 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1524                                     struct ipr_ext_vpd *vpd)
1525 {
1526         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1527         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1528                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1529 }
1530
1531 /**
1532  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1533  * @vpd:                vendor/product id/sn/wwn struct
1534  *
1535  * Return value:
1536  *      none
1537  **/
1538 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1539 {
1540         ipr_log_vpd(&vpd->vpd);
1541         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1542                 be32_to_cpu(vpd->wwid[1]));
1543 }
1544
1545 /**
1546  * ipr_log_enhanced_cache_error - Log a cache error.
1547  * @ioa_cfg:    ioa config struct
1548  * @hostrcb:    hostrcb struct
1549  *
1550  * Return value:
1551  *      none
1552  **/
1553 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1554                                          struct ipr_hostrcb *hostrcb)
1555 {
1556         struct ipr_hostrcb_type_12_error *error;
1557
1558         if (ioa_cfg->sis64)
1559                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1560         else
1561                 error = &hostrcb->hcam.u.error.u.type_12_error;
1562
1563         ipr_err("-----Current Configuration-----\n");
1564         ipr_err("Cache Directory Card Information:\n");
1565         ipr_log_ext_vpd(&error->ioa_vpd);
1566         ipr_err("Adapter Card Information:\n");
1567         ipr_log_ext_vpd(&error->cfc_vpd);
1568
1569         ipr_err("-----Expected Configuration-----\n");
1570         ipr_err("Cache Directory Card Information:\n");
1571         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1572         ipr_err("Adapter Card Information:\n");
1573         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1574
1575         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1576                      be32_to_cpu(error->ioa_data[0]),
1577                      be32_to_cpu(error->ioa_data[1]),
1578                      be32_to_cpu(error->ioa_data[2]));
1579 }
1580
1581 /**
1582  * ipr_log_cache_error - Log a cache error.
1583  * @ioa_cfg:    ioa config struct
1584  * @hostrcb:    hostrcb struct
1585  *
1586  * Return value:
1587  *      none
1588  **/
1589 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1590                                 struct ipr_hostrcb *hostrcb)
1591 {
1592         struct ipr_hostrcb_type_02_error *error =
1593                 &hostrcb->hcam.u.error.u.type_02_error;
1594
1595         ipr_err("-----Current Configuration-----\n");
1596         ipr_err("Cache Directory Card Information:\n");
1597         ipr_log_vpd(&error->ioa_vpd);
1598         ipr_err("Adapter Card Information:\n");
1599         ipr_log_vpd(&error->cfc_vpd);
1600
1601         ipr_err("-----Expected Configuration-----\n");
1602         ipr_err("Cache Directory Card Information:\n");
1603         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1604         ipr_err("Adapter Card Information:\n");
1605         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1606
1607         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1608                      be32_to_cpu(error->ioa_data[0]),
1609                      be32_to_cpu(error->ioa_data[1]),
1610                      be32_to_cpu(error->ioa_data[2]));
1611 }
1612
1613 /**
1614  * ipr_log_enhanced_config_error - Log a configuration error.
1615  * @ioa_cfg:    ioa config struct
1616  * @hostrcb:    hostrcb struct
1617  *
1618  * Return value:
1619  *      none
1620  **/
1621 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1622                                           struct ipr_hostrcb *hostrcb)
1623 {
1624         int errors_logged, i;
1625         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1626         struct ipr_hostrcb_type_13_error *error;
1627
1628         error = &hostrcb->hcam.u.error.u.type_13_error;
1629         errors_logged = be32_to_cpu(error->errors_logged);
1630
1631         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1632                 be32_to_cpu(error->errors_detected), errors_logged);
1633
1634         dev_entry = error->dev;
1635
1636         for (i = 0; i < errors_logged; i++, dev_entry++) {
1637                 ipr_err_separator;
1638
1639                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1640                 ipr_log_ext_vpd(&dev_entry->vpd);
1641
1642                 ipr_err("-----New Device Information-----\n");
1643                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1644
1645                 ipr_err("Cache Directory Card Information:\n");
1646                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1647
1648                 ipr_err("Adapter Card Information:\n");
1649                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1650         }
1651 }
1652
1653 /**
1654  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1655  * @ioa_cfg:    ioa config struct
1656  * @hostrcb:    hostrcb struct
1657  *
1658  * Return value:
1659  *      none
1660  **/
1661 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1662                                        struct ipr_hostrcb *hostrcb)
1663 {
1664         int errors_logged, i;
1665         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1666         struct ipr_hostrcb_type_23_error *error;
1667         char buffer[IPR_MAX_RES_PATH_LENGTH];
1668
1669         error = &hostrcb->hcam.u.error64.u.type_23_error;
1670         errors_logged = be32_to_cpu(error->errors_logged);
1671
1672         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1673                 be32_to_cpu(error->errors_detected), errors_logged);
1674
1675         dev_entry = error->dev;
1676
1677         for (i = 0; i < errors_logged; i++, dev_entry++) {
1678                 ipr_err_separator;
1679
1680                 ipr_err("Device %d : %s", i + 1,
1681                         __ipr_format_res_path(dev_entry->res_path,
1682                                               buffer, sizeof(buffer)));
1683                 ipr_log_ext_vpd(&dev_entry->vpd);
1684
1685                 ipr_err("-----New Device Information-----\n");
1686                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1687
1688                 ipr_err("Cache Directory Card Information:\n");
1689                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1690
1691                 ipr_err("Adapter Card Information:\n");
1692                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1693         }
1694 }
1695
1696 /**
1697  * ipr_log_config_error - Log a configuration error.
1698  * @ioa_cfg:    ioa config struct
1699  * @hostrcb:    hostrcb struct
1700  *
1701  * Return value:
1702  *      none
1703  **/
1704 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1705                                  struct ipr_hostrcb *hostrcb)
1706 {
1707         int errors_logged, i;
1708         struct ipr_hostrcb_device_data_entry *dev_entry;
1709         struct ipr_hostrcb_type_03_error *error;
1710
1711         error = &hostrcb->hcam.u.error.u.type_03_error;
1712         errors_logged = be32_to_cpu(error->errors_logged);
1713
1714         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1715                 be32_to_cpu(error->errors_detected), errors_logged);
1716
1717         dev_entry = error->dev;
1718
1719         for (i = 0; i < errors_logged; i++, dev_entry++) {
1720                 ipr_err_separator;
1721
1722                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1723                 ipr_log_vpd(&dev_entry->vpd);
1724
1725                 ipr_err("-----New Device Information-----\n");
1726                 ipr_log_vpd(&dev_entry->new_vpd);
1727
1728                 ipr_err("Cache Directory Card Information:\n");
1729                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1730
1731                 ipr_err("Adapter Card Information:\n");
1732                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1733
1734                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1735                         be32_to_cpu(dev_entry->ioa_data[0]),
1736                         be32_to_cpu(dev_entry->ioa_data[1]),
1737                         be32_to_cpu(dev_entry->ioa_data[2]),
1738                         be32_to_cpu(dev_entry->ioa_data[3]),
1739                         be32_to_cpu(dev_entry->ioa_data[4]));
1740         }
1741 }
1742
1743 /**
1744  * ipr_log_enhanced_array_error - Log an array configuration error.
1745  * @ioa_cfg:    ioa config struct
1746  * @hostrcb:    hostrcb struct
1747  *
1748  * Return value:
1749  *      none
1750  **/
1751 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1752                                          struct ipr_hostrcb *hostrcb)
1753 {
1754         int i, num_entries;
1755         struct ipr_hostrcb_type_14_error *error;
1756         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1757         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1758
1759         error = &hostrcb->hcam.u.error.u.type_14_error;
1760
1761         ipr_err_separator;
1762
1763         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1764                 error->protection_level,
1765                 ioa_cfg->host->host_no,
1766                 error->last_func_vset_res_addr.bus,
1767                 error->last_func_vset_res_addr.target,
1768                 error->last_func_vset_res_addr.lun);
1769
1770         ipr_err_separator;
1771
1772         array_entry = error->array_member;
1773         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1774                             ARRAY_SIZE(error->array_member));
1775
1776         for (i = 0; i < num_entries; i++, array_entry++) {
1777                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1778                         continue;
1779
1780                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1781                         ipr_err("Exposed Array Member %d:\n", i);
1782                 else
1783                         ipr_err("Array Member %d:\n", i);
1784
1785                 ipr_log_ext_vpd(&array_entry->vpd);
1786                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1787                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1788                                  "Expected Location");
1789
1790                 ipr_err_separator;
1791         }
1792 }
1793
1794 /**
1795  * ipr_log_array_error - Log an array configuration error.
1796  * @ioa_cfg:    ioa config struct
1797  * @hostrcb:    hostrcb struct
1798  *
1799  * Return value:
1800  *      none
1801  **/
1802 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1803                                 struct ipr_hostrcb *hostrcb)
1804 {
1805         int i;
1806         struct ipr_hostrcb_type_04_error *error;
1807         struct ipr_hostrcb_array_data_entry *array_entry;
1808         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1809
1810         error = &hostrcb->hcam.u.error.u.type_04_error;
1811
1812         ipr_err_separator;
1813
1814         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1815                 error->protection_level,
1816                 ioa_cfg->host->host_no,
1817                 error->last_func_vset_res_addr.bus,
1818                 error->last_func_vset_res_addr.target,
1819                 error->last_func_vset_res_addr.lun);
1820
1821         ipr_err_separator;
1822
1823         array_entry = error->array_member;
1824
1825         for (i = 0; i < 18; i++) {
1826                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1827                         continue;
1828
1829                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1830                         ipr_err("Exposed Array Member %d:\n", i);
1831                 else
1832                         ipr_err("Array Member %d:\n", i);
1833
1834                 ipr_log_vpd(&array_entry->vpd);
1835
1836                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1837                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1838                                  "Expected Location");
1839
1840                 ipr_err_separator;
1841
1842                 if (i == 9)
1843                         array_entry = error->array_member2;
1844                 else
1845                         array_entry++;
1846         }
1847 }
1848
1849 /**
1850  * ipr_log_hex_data - Log additional hex IOA error data.
1851  * @ioa_cfg:    ioa config struct
1852  * @data:               IOA error data
1853  * @len:                data length
1854  *
1855  * Return value:
1856  *      none
1857  **/
1858 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1859 {
1860         int i;
1861
1862         if (len == 0)
1863                 return;
1864
1865         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1866                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1867
1868         for (i = 0; i < len / 4; i += 4) {
1869                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1870                         be32_to_cpu(data[i]),
1871                         be32_to_cpu(data[i+1]),
1872                         be32_to_cpu(data[i+2]),
1873                         be32_to_cpu(data[i+3]));
1874         }
1875 }
1876
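/*
 * Illustration only: ipr_log_hex_data() prints four big-endian words
 * per line, prefixed by the byte offset, so a 32-byte buffer produces
 * two lines shaped like (values hypothetical):
 *
 *      00000000: DEADBEEF 00000001 00000002 00000003
 *      00000010: 00000004 00000005 00000006 00000007
 */
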
1877 /**
1878  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1879  * @ioa_cfg:    ioa config struct
1880  * @hostrcb:    hostrcb struct
1881  *
1882  * Return value:
1883  *      none
1884  **/
1885 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1886                                             struct ipr_hostrcb *hostrcb)
1887 {
1888         struct ipr_hostrcb_type_17_error *error;
1889
1890         if (ioa_cfg->sis64)
1891                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1892         else
1893                 error = &hostrcb->hcam.u.error.u.type_17_error;
1894
1895         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1896         strim(error->failure_reason);
1897
1898         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1899                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1900         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1901         ipr_log_hex_data(ioa_cfg, error->data,
1902                          be32_to_cpu(hostrcb->hcam.length) -
1903                          (offsetof(struct ipr_hostrcb_error, u) +
1904                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1905 }
1906
1907 /**
1908  * ipr_log_dual_ioa_error - Log a dual adapter error.
1909  * @ioa_cfg:    ioa config struct
1910  * @hostrcb:    hostrcb struct
1911  *
1912  * Return value:
1913  *      none
1914  **/
1915 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916                                    struct ipr_hostrcb *hostrcb)
1917 {
1918         struct ipr_hostrcb_type_07_error *error;
1919
1920         error = &hostrcb->hcam.u.error.u.type_07_error;
1921         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1922         strim(error->failure_reason);
1923
1924         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1925                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1926         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1927         ipr_log_hex_data(ioa_cfg, error->data,
1928                          be32_to_cpu(hostrcb->hcam.length) -
1929                          (offsetof(struct ipr_hostrcb_error, u) +
1930                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1931 }
1932
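/*
 * Illustration only: in both dual-IOA loggers above, the hex-data
 * length is whatever remains of the HCAM once the headers are skipped:
 *
 *      be32_to_cpu(hcam.length)
 *          - offsetof(struct ipr_hostrcb_error, u)             (outer header)
 *          - offsetof(struct ipr_hostrcb_type_XX_error, data)  (typed header)
 *
 * so only the trailing variable-size area reaches ipr_log_hex_data().
 */
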
1933 static const struct {
1934         u8 active;
1935         char *desc;
1936 } path_active_desc[] = {
1937         { IPR_PATH_NO_INFO, "Path" },
1938         { IPR_PATH_ACTIVE, "Active path" },
1939         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1940 };
1941
1942 static const struct {
1943         u8 state;
1944         char *desc;
1945 } path_state_desc[] = {
1946         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1947         { IPR_PATH_HEALTHY, "is healthy" },
1948         { IPR_PATH_DEGRADED, "is degraded" },
1949         { IPR_PATH_FAILED, "is failed" }
1950 };
1951
1952 /**
1953  * ipr_log_fabric_path - Log a fabric path error
1954  * @hostrcb:    hostrcb struct
1955  * @fabric:             fabric descriptor
1956  *
1957  * Return value:
1958  *      none
1959  **/
1960 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1961                                 struct ipr_hostrcb_fabric_desc *fabric)
1962 {
1963         int i, j;
1964         u8 path_state = fabric->path_state;
1965         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1966         u8 state = path_state & IPR_PATH_STATE_MASK;
1967
1968         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1969                 if (path_active_desc[i].active != active)
1970                         continue;
1971
1972                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1973                         if (path_state_desc[j].state != state)
1974                                 continue;
1975
1976                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1977                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1978                                              path_active_desc[i].desc, path_state_desc[j].desc,
1979                                              fabric->ioa_port);
1980                         } else if (fabric->cascaded_expander == 0xff) {
1981                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1982                                              path_active_desc[i].desc, path_state_desc[j].desc,
1983                                              fabric->ioa_port, fabric->phy);
1984                         } else if (fabric->phy == 0xff) {
1985                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1986                                              path_active_desc[i].desc, path_state_desc[j].desc,
1987                                              fabric->ioa_port, fabric->cascaded_expander);
1988                         } else {
1989                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1990                                              path_active_desc[i].desc, path_state_desc[j].desc,
1991                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1992                         }
1993                         return;
1994                 }
1995         }
1996
1997         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1998                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1999 }
2000
2001 /**
2002  * ipr_log64_fabric_path - Log a fabric path error
2003  * @hostrcb:    hostrcb struct
2004  * @fabric:             fabric descriptor
2005  *
2006  * Return value:
2007  *      none
2008  **/
2009 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2010                                   struct ipr_hostrcb64_fabric_desc *fabric)
2011 {
2012         int i, j;
2013         u8 path_state = fabric->path_state;
2014         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2015         u8 state = path_state & IPR_PATH_STATE_MASK;
2016         char buffer[IPR_MAX_RES_PATH_LENGTH];
2017
2018         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2019                 if (path_active_desc[i].active != active)
2020                         continue;
2021
2022                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2023                         if (path_state_desc[j].state != state)
2024                                 continue;
2025
2026                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2027                                      path_active_desc[i].desc, path_state_desc[j].desc,
2028                                      ipr_format_res_path(hostrcb->ioa_cfg,
2029                                                 fabric->res_path,
2030                                                 buffer, sizeof(buffer)));
2031                         return;
2032                 }
2033         }
2034
2035         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2036                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2037                                     buffer, sizeof(buffer)));
2038 }
2039
2040 static const struct {
2041         u8 type;
2042         char *desc;
2043 } path_type_desc[] = {
2044         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2045         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2046         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2047         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2048 };
2049
2050 static const struct {
2051         u8 status;
2052         char *desc;
2053 } path_status_desc[] = {
2054         { IPR_PATH_CFG_NO_PROB, "Functional" },
2055         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2056         { IPR_PATH_CFG_FAILED, "Failed" },
2057         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2058         { IPR_PATH_NOT_DETECTED, "Missing" },
2059         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2060 };
2061
2062 static const char *link_rate[] = {
2063         "unknown",
2064         "disabled",
2065         "phy reset problem",
2066         "spinup hold",
2067         "port selector",
2068         "unknown",
2069         "unknown",
2070         "unknown",
2071         "1.5Gbps",
2072         "3.0Gbps",
2073         "unknown",
2074         "unknown",
2075         "unknown",
2076         "unknown",
2077         "unknown",
2078         "unknown"
2079 };
2080
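/*
 * Illustration only: the low nibble of cfg->link_rate indexes the
 * link_rate[] table above, so a phy reporting 0x08 logs as "1.5Gbps"
 * and 0x09 as "3.0Gbps". A minimal decode sketch, assuming
 * IPR_PHY_LINK_RATE_MASK covers exactly that nibble:
 */
#if 0
static const char *example_link_rate(u8 raw)
{
        return link_rate[raw & IPR_PHY_LINK_RATE_MASK];
}
#endif
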
2081 /**
2082  * ipr_log_path_elem - Log a fabric path element.
2083  * @hostrcb:    hostrcb struct
2084  * @cfg:                fabric path element struct
2085  *
2086  * Return value:
2087  *      none
2088  **/
2089 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2090                               struct ipr_hostrcb_config_element *cfg)
2091 {
2092         int i, j;
2093         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2094         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2095
2096         if (type == IPR_PATH_CFG_NOT_EXIST)
2097                 return;
2098
2099         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2100                 if (path_type_desc[i].type != type)
2101                         continue;
2102
2103                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2104                         if (path_status_desc[j].status != status)
2105                                 continue;
2106
2107                         if (type == IPR_PATH_CFG_IOA_PORT) {
2108                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2109                                              path_status_desc[j].desc, path_type_desc[i].desc,
2110                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2111                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2112                         } else {
2113                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2114                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2115                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2116                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2117                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2118                                 } else if (cfg->cascaded_expander == 0xff) {
2119                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2120                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2121                                                      path_type_desc[i].desc, cfg->phy,
2122                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2123                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2124                                 } else if (cfg->phy == 0xff) {
2125                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2126                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2127                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2128                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2129                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2130                                 } else {
2131                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
2132                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2133                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2134                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2135                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2136                                 }
2137                         }
2138                         return;
2139                 }
2140         }
2141
2142         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2143                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2144                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2145                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2146 }
2147
2148 /**
2149  * ipr_log64_path_elem - Log a fabric path element.
2150  * @hostrcb:    hostrcb struct
2151  * @cfg:                fabric path element struct
2152  *
2153  * Return value:
2154  *      none
2155  **/
2156 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2157                                 struct ipr_hostrcb64_config_element *cfg)
2158 {
2159         int i, j;
2160         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2161         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2162         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2163         char buffer[IPR_MAX_RES_PATH_LENGTH];
2164
2165         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2166                 return;
2167
2168         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2169                 if (path_type_desc[i].type != type)
2170                         continue;
2171
2172                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2173                         if (path_status_desc[j].status != status)
2174                                 continue;
2175
2176                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2177                                      path_status_desc[j].desc, path_type_desc[i].desc,
2178                                      ipr_format_res_path(hostrcb->ioa_cfg,
2179                                         cfg->res_path, buffer, sizeof(buffer)),
2180                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2181                                         be32_to_cpu(cfg->wwid[0]),
2182                                         be32_to_cpu(cfg->wwid[1]));
2183                         return;
2184                 }
2185         }
2186         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s, "
2187                      "WWN=%08X%08X\n", cfg->type_status,
2188                      ipr_format_res_path(hostrcb->ioa_cfg,
2189                         cfg->res_path, buffer, sizeof(buffer)),
2190                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2191                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2192 }
2193
2194 /**
2195  * ipr_log_fabric_error - Log a fabric error.
2196  * @ioa_cfg:    ioa config struct
2197  * @hostrcb:    hostrcb struct
2198  *
2199  * Return value:
2200  *      none
2201  **/
2202 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2203                                  struct ipr_hostrcb *hostrcb)
2204 {
2205         struct ipr_hostrcb_type_20_error *error;
2206         struct ipr_hostrcb_fabric_desc *fabric;
2207         struct ipr_hostrcb_config_element *cfg;
2208         int i, add_len;
2209
2210         error = &hostrcb->hcam.u.error.u.type_20_error;
2211         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2212         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2213
2214         add_len = be32_to_cpu(hostrcb->hcam.length) -
2215                 (offsetof(struct ipr_hostrcb_error, u) +
2216                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2217
2218         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2219                 ipr_log_fabric_path(hostrcb, fabric);
2220                 for_each_fabric_cfg(fabric, cfg)
2221                         ipr_log_path_elem(hostrcb, cfg);
2222
2223                 add_len -= be16_to_cpu(fabric->length);
2224                 fabric = (struct ipr_hostrcb_fabric_desc *)
2225                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2226         }
2227
2228         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2229 }
2230
2231 /**
2232  * ipr_log_sis64_array_error - Log a sis64 array error.
2233  * @ioa_cfg:    ioa config struct
2234  * @hostrcb:    hostrcb struct
2235  *
2236  * Return value:
2237  *      none
2238  **/
2239 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2240                                       struct ipr_hostrcb *hostrcb)
2241 {
2242         int i, num_entries;
2243         struct ipr_hostrcb_type_24_error *error;
2244         struct ipr_hostrcb64_array_data_entry *array_entry;
2245         char buffer[IPR_MAX_RES_PATH_LENGTH];
2246         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2247
2248         error = &hostrcb->hcam.u.error64.u.type_24_error;
2249
2250         ipr_err_separator;
2251
2252         ipr_err("RAID %s Array Configuration: %s\n",
2253                 error->protection_level,
2254                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2255                         buffer, sizeof(buffer)));
2256
2257         ipr_err_separator;
2258
2259         array_entry = error->array_member;
2260         num_entries = min_t(u32, error->num_entries,
2261                             ARRAY_SIZE(error->array_member));
2262
2263         for (i = 0; i < num_entries; i++, array_entry++) {
2264
2265                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2266                         continue;
2267
2268                 if (error->exposed_mode_adn == i)
2269                         ipr_err("Exposed Array Member %d:\n", i);
2270                 else
2271                         ipr_err("Array Member %d:\n", i);
2272
2274                 ipr_log_ext_vpd(&array_entry->vpd);
2275                 ipr_err("Current Location: %s\n",
2276                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2277                                 buffer, sizeof(buffer)));
2278                 ipr_err("Expected Location: %s\n",
2279                          ipr_format_res_path(ioa_cfg,
2280                                 array_entry->expected_res_path,
2281                                 buffer, sizeof(buffer)));
2282
2283                 ipr_err_separator;
2284         }
2285 }
2286
2287 /**
2288  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2289  * @ioa_cfg:    ioa config struct
2290  * @hostrcb:    hostrcb struct
2291  *
2292  * Return value:
2293  *      none
2294  **/
2295 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2296                                        struct ipr_hostrcb *hostrcb)
2297 {
2298         struct ipr_hostrcb_type_30_error *error;
2299         struct ipr_hostrcb64_fabric_desc *fabric;
2300         struct ipr_hostrcb64_config_element *cfg;
2301         int i, add_len;
2302
2303         error = &hostrcb->hcam.u.error64.u.type_30_error;
2304
2305         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2306         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2307
2308         add_len = be32_to_cpu(hostrcb->hcam.length) -
2309                 (offsetof(struct ipr_hostrcb64_error, u) +
2310                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2311
2312         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2313                 ipr_log64_fabric_path(hostrcb, fabric);
2314                 for_each_fabric_cfg(fabric, cfg)
2315                         ipr_log64_path_elem(hostrcb, cfg);
2316
2317                 add_len -= be16_to_cpu(fabric->length);
2318                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2319                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2320         }
2321
2322         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2323 }
2324
2325 /**
2326  * ipr_log_generic_error - Log an adapter error.
2327  * @ioa_cfg:    ioa config struct
2328  * @hostrcb:    hostrcb struct
2329  *
2330  * Return value:
2331  *      none
2332  **/
2333 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2334                                   struct ipr_hostrcb *hostrcb)
2335 {
2336         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2337                          be32_to_cpu(hostrcb->hcam.length));
2338 }
2339
2340 /**
2341  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2342  * @ioasc:      IOASC
2343  *
2344  * This function will return the index into the ipr_error_table
2345  * for the specified IOASC. If the IOASC is not in the table,
2346  * 0 will be returned, which points to the entry used for unknown errors.
2347  *
2348  * Return value:
2349  *      index into the ipr_error_table
2350  **/
2351 static u32 ipr_get_error(u32 ioasc)
2352 {
2353         int i;
2354
2355         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2356                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2357                         return i;
2358
2359         return 0;
2360 }
2361
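/*
 * Illustration only: the scan masks the IOASC first, so related codes
 * differing only in status bits share one table row, and index 0 is
 * the catch-all "unknown" entry - which is why callers index
 * ipr_error_table[] without a range check. A hypothetical usage sketch:
 */
#if 0
static void example_log_ioasc(struct ipr_hostrcb *hostrcb, u32 ioasc)
{
        int idx = ipr_get_error(ioasc);         /* 0 == catch-all row */

        ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[idx].error);
}
#endif
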
2362 /**
2363  * ipr_handle_log_data - Log an adapter error.
2364  * @ioa_cfg:    ioa config struct
2365  * @hostrcb:    hostrcb struct
2366  *
2367  * This function logs an adapter error to the system.
2368  *
2369  * Return value:
2370  *      none
2371  **/
2372 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2373                                 struct ipr_hostrcb *hostrcb)
2374 {
2375         u32 ioasc;
2376         int error_index;
2377
2378         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2379                 return;
2380
2381         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2382                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2383
2384         if (ioa_cfg->sis64)
2385                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2386         else
2387                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2388
2389         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2390             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2391                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2392                 scsi_report_bus_reset(ioa_cfg->host,
2393                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2394         }
2395
2396         error_index = ipr_get_error(ioasc);
2397
2398         if (!ipr_error_table[error_index].log_hcam)
2399                 return;
2400
2401         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2402
2403         /* Set indication we have logged an error */
2404         ioa_cfg->errors_logged++;
2405
2406         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2407                 return;
2408         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2409                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2410
2411         switch (hostrcb->hcam.overlay_id) {
2412         case IPR_HOST_RCB_OVERLAY_ID_2:
2413                 ipr_log_cache_error(ioa_cfg, hostrcb);
2414                 break;
2415         case IPR_HOST_RCB_OVERLAY_ID_3:
2416                 ipr_log_config_error(ioa_cfg, hostrcb);
2417                 break;
2418         case IPR_HOST_RCB_OVERLAY_ID_4:
2419         case IPR_HOST_RCB_OVERLAY_ID_6:
2420                 ipr_log_array_error(ioa_cfg, hostrcb);
2421                 break;
2422         case IPR_HOST_RCB_OVERLAY_ID_7:
2423                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2424                 break;
2425         case IPR_HOST_RCB_OVERLAY_ID_12:
2426                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2427                 break;
2428         case IPR_HOST_RCB_OVERLAY_ID_13:
2429                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2430                 break;
2431         case IPR_HOST_RCB_OVERLAY_ID_14:
2432         case IPR_HOST_RCB_OVERLAY_ID_16:
2433                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2434                 break;
2435         case IPR_HOST_RCB_OVERLAY_ID_17:
2436                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2437                 break;
2438         case IPR_HOST_RCB_OVERLAY_ID_20:
2439                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2440                 break;
2441         case IPR_HOST_RCB_OVERLAY_ID_23:
2442                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2443                 break;
2444         case IPR_HOST_RCB_OVERLAY_ID_24:
2445         case IPR_HOST_RCB_OVERLAY_ID_26:
2446                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2447                 break;
2448         case IPR_HOST_RCB_OVERLAY_ID_30:
2449                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2450                 break;
2451         case IPR_HOST_RCB_OVERLAY_ID_1:
2452         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2453         default:
2454                 ipr_log_generic_error(ioa_cfg, hostrcb);
2455                 break;
2456         }
2457 }
2458
2459 /**
2460  * ipr_process_error - Op done function for an adapter error log.
2461  * @ipr_cmd:    ipr command struct
2462  *
2463  * This function is the op done function for an error log HCAM (host
2464  * controlled asynchronous message) from the adapter. It will log the error and
2465  * send the HCAM back to the adapter.
2466  *
2467  * Return value:
2468  *      none
2469  **/
2470 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2471 {
2472         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2473         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2474         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2475         u32 fd_ioasc;
2476
2477         if (ioa_cfg->sis64)
2478                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2479         else
2480                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2481
2482         list_del(&hostrcb->queue);
2483         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2484
2485         if (!ioasc) {
2486                 ipr_handle_log_data(ioa_cfg, hostrcb);
2487                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2488                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2489         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2490                 dev_err(&ioa_cfg->pdev->dev,
2491                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2492         }
2493
2494         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2495 }
2496
2497 /**
2498  * ipr_timeout -  An internally generated op has timed out.
2499  * @ipr_cmd:    ipr command struct
2500  *
2501  * This function blocks host requests and initiates an
2502  * adapter reset.
2503  *
2504  * Return value:
2505  *      none
2506  **/
2507 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2508 {
2509         unsigned long lock_flags = 0;
2510         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2511
2512         ENTER;
2513         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2514
2515         ioa_cfg->errors_logged++;
2516         dev_err(&ioa_cfg->pdev->dev,
2517                 "Adapter being reset due to command timeout.\n");
2518
2519         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2520                 ioa_cfg->sdt_state = GET_DUMP;
2521
2522         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2523                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2524
2525         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2526         LEAVE;
2527 }
2528
2529 /**
2530  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2531  * @ipr_cmd:    ipr command struct
2532  *
2533  * This function blocks host requests and initiates an
2534  * adapter reset.
2535  *
2536  * Return value:
2537  *      none
2538  **/
2539 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2540 {
2541         unsigned long lock_flags = 0;
2542         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2543
2544         ENTER;
2545         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2546
2547         ioa_cfg->errors_logged++;
2548         dev_err(&ioa_cfg->pdev->dev,
2549                 "Adapter timed out transitioning to operational.\n");
2550
2551         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2552                 ioa_cfg->sdt_state = GET_DUMP;
2553
2554         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2555                 if (ipr_fastfail)
2556                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2557                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2558         }
2559
2560         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2561         LEAVE;
2562 }
2563
2564 /**
2565  * ipr_find_ses_entry - Find matching SES in SES table
2566  * @res:        resource entry struct of SES
2567  *
2568  * Return value:
2569  *      pointer to SES table entry / NULL on failure
2570  **/
2571 static const struct ipr_ses_table_entry *
2572 ipr_find_ses_entry(struct ipr_resource_entry *res)
2573 {
2574         int i, j, matches;
2575         struct ipr_std_inq_vpids *vpids;
2576         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2577
2578         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2579                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2580                         if (ste->compare_product_id_byte[j] == 'X') {
2581                                 vpids = &res->std_inq_data.vpids;
2582                                 if (vpids->product_id[j] == ste->product_id[j])
2583                                         matches++;
2584                                 else
2585                                         break;
2586                         } else
2587                                 matches++;
2588                 }
2589
2590                 if (matches == IPR_PROD_ID_LEN)
2591                         return ste;
2592         }
2593
2594         return NULL;
2595 }
2596
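/*
 * Illustration only: compare_product_id_byte[] is a per-byte
 * significance mask - 'X' means "this product_id byte must match",
 * anything else means "don't care". The matching loop above is
 * equivalent to this sketch (helper name hypothetical):
 */
#if 0
static bool example_ses_match(const struct ipr_ses_table_entry *ste,
                              const u8 *product_id)
{
        int j;

        for (j = 0; j < IPR_PROD_ID_LEN; j++)
                if (ste->compare_product_id_byte[j] == 'X' &&
                    product_id[j] != ste->product_id[j])
                        return false;
        return true;
}
#endif
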
2597 /**
2598  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2599  * @ioa_cfg:    ioa config struct
2600  * @bus:                SCSI bus
2601  * @bus_width:  bus width
2602  *
2603  * Return value:
2604  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2605  *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2606  *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2607  *      max 160MHz = max 320MB/sec).
2608  **/
2609 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2610 {
2611         struct ipr_resource_entry *res;
2612         const struct ipr_ses_table_entry *ste;
2613         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2614
2615         /* Loop through each config table entry in the config table buffer */
2616         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2617                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2618                         continue;
2619
2620                 if (bus != res->bus)
2621                         continue;
2622
2623                 if (!(ste = ipr_find_ses_entry(res)))
2624                         continue;
2625
2626                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2627         }
2628
2629         return max_xfer_rate;
2630 }
2631
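/*
 * Illustration only, assuming max_bus_speed_limit is expressed in
 * MB/sec: a hypothetical SES entry limiting a wide (16-bit) bus to
 * 160 MB/sec yields
 *
 *      (160 * 10) / (16 / 8) = 800
 *
 * i.e. 80 MHz in the 100KHz units this function returns.
 */
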
2632 /**
2633  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2634  * @ioa_cfg:            ioa config struct
2635  * @max_delay:          max delay in micro-seconds to wait
2636  *
2637  * Waits for an IODEBUG ACK from the IOA by busy looping.
2638  *
2639  * Return value:
2640  *      0 on success / other on failure
2641  **/
2642 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2643 {
2644         volatile u32 pcii_reg;
2645         int delay = 1;
2646
2647         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2648         while (delay < max_delay) {
2649                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2650
2651                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2652                         return 0;
2653
2654                 /* udelay cannot be used if delay is more than a few milliseconds */
2655                 if ((delay / 1000) > MAX_UDELAY_MS)
2656                         mdelay(delay / 1000);
2657                 else
2658                         udelay(delay);
2659
2660                 delay += delay;
2661         }
2662         return -EIO;
2663 }
2664
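/*
 * Illustration only: the poll interval doubles each pass (1, 2, 4, ...
 * microseconds), so the loop issues only about log2(max_delay) MMIO
 * reads while keeping the worst-case total wait under roughly twice
 * max_delay. With a hypothetical max_delay of 200000 us that is about
 * 18 polls.
 */
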
2665 /**
2666  * ipr_get_sis64_dump_data_section - Dump IOA memory
2667  * @ioa_cfg:                    ioa config struct
2668  * @start_addr:                 adapter address to dump
2669  * @dest:                       destination kernel buffer
2670  * @length_in_words:            length to dump in 4 byte words
2671  *
2672  * Return value:
2673  *      0 on success
2674  **/
2675 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2676                                            u32 start_addr,
2677                                            __be32 *dest, u32 length_in_words)
2678 {
2679         int i;
2680
2681         for (i = 0; i < length_in_words; i++) {
2682                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2683                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2684                 dest++;
2685         }
2686
2687         return 0;
2688 }
2689
2690 /**
2691  * ipr_get_ldump_data_section - Dump IOA memory
2692  * @ioa_cfg:                    ioa config struct
2693  * @start_addr:                 adapter address to dump
2694  * @dest:                               destination kernel buffer
2695  * @length_in_words:    length to dump in 4 byte words
2696  *
2697  * Return value:
2698  *      0 on success / -EIO on failure
2699  **/
2700 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2701                                       u32 start_addr,
2702                                       __be32 *dest, u32 length_in_words)
2703 {
2704         volatile u32 temp_pcii_reg;
2705         int i, delay = 0;
2706
2707         if (ioa_cfg->sis64)
2708                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2709                                                        dest, length_in_words);
2710
2711         /* Write IOA interrupt reg starting LDUMP state  */
2712         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2713                ioa_cfg->regs.set_uproc_interrupt_reg32);
2714
2715         /* Wait for IO debug acknowledge */
2716         if (ipr_wait_iodbg_ack(ioa_cfg,
2717                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2718                 dev_err(&ioa_cfg->pdev->dev,
2719                         "IOA dump long data transfer timeout\n");
2720                 return -EIO;
2721         }
2722
2723         /* Signal LDUMP interlocked - clear IO debug ack */
2724         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2725                ioa_cfg->regs.clr_interrupt_reg);
2726
2727         /* Write Mailbox with starting address */
2728         writel(start_addr, ioa_cfg->ioa_mailbox);
2729
2730         /* Signal address valid - clear IOA Reset alert */
2731         writel(IPR_UPROCI_RESET_ALERT,
2732                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2733
2734         for (i = 0; i < length_in_words; i++) {
2735                 /* Wait for IO debug acknowledge */
2736                 if (ipr_wait_iodbg_ack(ioa_cfg,
2737                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2738                         dev_err(&ioa_cfg->pdev->dev,
2739                                 "IOA dump short data transfer timeout\n");
2740                         return -EIO;
2741                 }
2742
2743                 /* Read data from mailbox and increment destination pointer */
2744                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2745                 dest++;
2746
2747                 /* For all but the last word of data, signal data received */
2748                 if (i < (length_in_words - 1)) {
2749                         /* Signal dump data received - Clear IO debug Ack */
2750                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2751                                ioa_cfg->regs.clr_interrupt_reg);
2752                 }
2753         }
2754
2755         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2756         writel(IPR_UPROCI_RESET_ALERT,
2757                ioa_cfg->regs.set_uproc_interrupt_reg32);
2758
2759         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2760                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2761
2762         /* Signal dump data received - Clear IO debug Ack */
2763         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2764                ioa_cfg->regs.clr_interrupt_reg);
2765
2766         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2767         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2768                 temp_pcii_reg =
2769                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2770
2771                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2772                         return 0;
2773
2774                 udelay(10);
2775                 delay += 10;
2776         }
2777
2778         return 0;
2779 }
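/*
 * Editor's summary of the legacy (non-SIS64) LDUMP handshake coded
 * above:
 *
 *      1. Set RESET_ALERT | IO_DEBUG_ALERT to start the LDUMP state.
 *      2. Wait for IO_DEBUG_ACKNOWLEDGE, then clear it (interlock).
 *      3. Write the start address to the mailbox and clear RESET_ALERT.
 *      4. For each word: wait for the ack, read the mailbox, and clear
 *         the ack again (for all but the last word).
 *      5. Set RESET_ALERT, clear IO_DEBUG_ALERT, clear the final ack.
 *      6. Poll until the IOA drops RESET_ALERT, signalling LDUMP exit.
 */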
2780
2781 #ifdef CONFIG_SCSI_IPR_DUMP
2782 /**
2783  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2784  * @ioa_cfg:            ioa config struct
2785  * @pci_address:        adapter address
2786  * @length:             length of data to copy
2787  *
2788  * Copy data from PCI adapter to kernel buffer.
2789  * Note: length MUST be a 4 byte multiple
2790  * Return value:
2791  *      0 on success / other on failure
2792  **/
2793 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2794                         unsigned long pci_address, u32 length)
2795 {
2796         int bytes_copied = 0;
2797         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2798         __be32 *page;
2799         unsigned long lock_flags = 0;
2800         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2801
2802         if (ioa_cfg->sis64)
2803                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2804         else
2805                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2806
2807         while (bytes_copied < length &&
2808                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2809                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2810                     ioa_dump->page_offset == 0) {
2811                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2812
2813                         if (!page) {
2814                                 ipr_trace;
2815                                 return bytes_copied;
2816                         }
2817
2818                         ioa_dump->page_offset = 0;
2819                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2820                         ioa_dump->next_page_index++;
2821                 } else
2822                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2823
2824                 rem_len = length - bytes_copied;
2825                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2826                 cur_len = min(rem_len, rem_page_len);
2827
2828                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2829                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2830                         rc = -EIO;
2831                 } else {
2832                         rc = ipr_get_ldump_data_section(ioa_cfg,
2833                                                         pci_address + bytes_copied,
2834                                                         &page[ioa_dump->page_offset / 4],
2835                                                         (cur_len / sizeof(u32)));
2836                 }
2837                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2838
2839                 if (!rc) {
2840                         ioa_dump->page_offset += cur_len;
2841                         bytes_copied += cur_len;
2842                 } else {
2843                         ipr_trace;
2844                         break;
2845                 }
2846                 schedule();
2847         }
2848
2849         return bytes_copied;
2850 }
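/*
 * Editor's note: the copy is staged through single pages so it can be
 * fed by GFP_ATOMIC allocations.  With 4 KiB pages, a page_offset of
 * 3072 and 10240 bytes still to copy, cur_len = min(10240, 1024) =
 * 1024 -- the current page is finished before a fresh one is
 * allocated on the next pass.
 */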
2851
2852 /**
2853  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2854  * @hdr:        dump entry header struct
2855  *
2856  * Return value:
2857  *      nothing
2858  **/
2859 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2860 {
2861         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2862         hdr->num_elems = 1;
2863         hdr->offset = sizeof(*hdr);
2864         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2865 }
2866
2867 /**
2868  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2869  * @ioa_cfg:    ioa config struct
2870  * @driver_dump:        driver dump struct
2871  *
2872  * Return value:
2873  *      nothing
2874  **/
2875 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2876                                    struct ipr_driver_dump *driver_dump)
2877 {
2878         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2879
2880         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2881         driver_dump->ioa_type_entry.hdr.len =
2882                 sizeof(struct ipr_dump_ioa_type_entry) -
2883                 sizeof(struct ipr_dump_entry_header);
2884         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2885         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2886         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2887         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2888                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2889                 ucode_vpd->minor_release[1];
2890         driver_dump->hdr.num_entries++;
2891 }
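/*
 * Editor's note: fw_version packs the inquiry page 3 fields into one
 * 32-bit word.  With hypothetical values major_release == 0x02,
 * card_type == 0x57 and minor_release == {0x0B, 0x01}:
 *
 *      (0x02 << 24) | (0x57 << 16) | (0x0B << 8) | 0x01 == 0x02570B01
 */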
2892
2893 /**
2894  * ipr_dump_version_data - Fill in the driver version in the dump.
2895  * @ioa_cfg:    ioa config struct
2896  * @driver_dump:        driver dump struct
2897  *
2898  * Return value:
2899  *      nothing
2900  **/
2901 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2902                                   struct ipr_driver_dump *driver_dump)
2903 {
2904         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2905         driver_dump->version_entry.hdr.len =
2906                 sizeof(struct ipr_dump_version_entry) -
2907                 sizeof(struct ipr_dump_entry_header);
2908         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2909         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2910         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2911         driver_dump->hdr.num_entries++;
2912 }
2913
2914 /**
2915  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2916  * @ioa_cfg:    ioa config struct
2917  * @driver_dump:        driver dump struct
2918  *
2919  * Return value:
2920  *      nothing
2921  **/
2922 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2923                                    struct ipr_driver_dump *driver_dump)
2924 {
2925         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2926         driver_dump->trace_entry.hdr.len =
2927                 sizeof(struct ipr_dump_trace_entry) -
2928                 sizeof(struct ipr_dump_entry_header);
2929         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2930         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2931         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2932         driver_dump->hdr.num_entries++;
2933 }
2934
2935 /**
2936  * ipr_dump_location_data - Fill in the IOA location in the dump.
2937  * @ioa_cfg:    ioa config struct
2938  * @driver_dump:        driver dump struct
2939  *
2940  * Return value:
2941  *      nothing
2942  **/
2943 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2944                                    struct ipr_driver_dump *driver_dump)
2945 {
2946         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2947         driver_dump->location_entry.hdr.len =
2948                 sizeof(struct ipr_dump_location_entry) -
2949                 sizeof(struct ipr_dump_entry_header);
2950         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2951         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2952         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2953         driver_dump->hdr.num_entries++;
2954 }
2955
2956 /**
2957  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2958  * @ioa_cfg:    ioa config struct
2959  * @dump:               dump struct
2960  *
2961  * Return value:
2962  *      nothing
2963  **/
2964 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2965 {
2966         unsigned long start_addr, sdt_word;
2967         unsigned long lock_flags = 0;
2968         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2969         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2970         u32 num_entries, max_num_entries, start_off, end_off;
2971         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2972         struct ipr_sdt *sdt;
2973         int valid = 1;
2974         int i;
2975
2976         ENTER;
2977
2978         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2979
2980         if (ioa_cfg->sdt_state != READ_DUMP) {
2981                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2982                 return;
2983         }
2984
2985         if (ioa_cfg->sis64) {
2986                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2987                 ssleep(IPR_DUMP_DELAY_SECONDS);
2988                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2989         }
2990
2991         start_addr = readl(ioa_cfg->ioa_mailbox);
2992
2993         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2994                 dev_err(&ioa_cfg->pdev->dev,
2995                         "Invalid dump table format: %lx\n", start_addr);
2996                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2997                 return;
2998         }
2999
3000         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3001
3002         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3003
3004         /* Initialize the overall dump header */
3005         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3006         driver_dump->hdr.num_entries = 1;
3007         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3008         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3009         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3010         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3011
3012         ipr_dump_version_data(ioa_cfg, driver_dump);
3013         ipr_dump_location_data(ioa_cfg, driver_dump);
3014         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3015         ipr_dump_trace_data(ioa_cfg, driver_dump);
3016
3017         /* Update dump_header */
3018         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3019
3020         /* IOA Dump entry */
3021         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3022         ioa_dump->hdr.len = 0;
3023         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3024         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3025
3026         /* First entries in sdt are actually a list of dump addresses and
3027          * lengths to gather the real dump data.  sdt represents the pointer
3028          * to the IOA-generated dump table.  Dump data will be extracted
3029          * based on entries in this table. */
3030         sdt = &ioa_dump->sdt;
3031
3032         if (ioa_cfg->sis64) {
3033                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3034                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3035         } else {
3036                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3037                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3038         }
3039
3040         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3041                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3042         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3043                                         bytes_to_copy / sizeof(__be32));
3044
3045         /* Smart Dump table is ready to use and the first entry is valid */
3046         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3047             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3048                 dev_err(&ioa_cfg->pdev->dev,
3049                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3050                         rc, be32_to_cpu(sdt->hdr.state));
3051                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3052                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3053                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3054                 return;
3055         }
3056
3057         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3058
3059         if (num_entries > max_num_entries)
3060                 num_entries = max_num_entries;
3061
3062         /* Update dump length to the actual data to be copied */
3063         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3064         if (ioa_cfg->sis64)
3065                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3066         else
3067                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3068
3069         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3070
3071         for (i = 0; i < num_entries; i++) {
3072                 if (ioa_dump->hdr.len > max_dump_size) {
3073                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3074                         break;
3075                 }
3076
3077                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3078                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3079                         if (ioa_cfg->sis64)
3080                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3081                         else {
3082                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3083                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3084
3085                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3086                                         bytes_to_copy = end_off - start_off;
3087                                 else
3088                                         valid = 0;
3089                         }
3090                         if (valid) {
3091                                 if (bytes_to_copy > max_dump_size) {
3092                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3093                                         continue;
3094                                 }
3095
3096                                 /* Copy data from adapter to driver buffers */
3097                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3098                                                             bytes_to_copy);
3099
3100                                 ioa_dump->hdr.len += bytes_copied;
3101
3102                                 if (bytes_copied != bytes_to_copy) {
3103                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3104                                         break;
3105                                 }
3106                         }
3107                 }
3108         }
3109
3110         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3111
3112         /* Update dump_header */
3113         driver_dump->hdr.len += ioa_dump->hdr.len;
3114         wmb();
3115         ioa_cfg->sdt_state = DUMP_OBTAINED;
3116         LEAVE;
3117 }
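/*
 * Editor's note on the resulting dump layout: the driver dump header,
 * the version/location/ioa-type/trace entries filled in above, then
 * the IOA dump entry whose payload is the smart dump table (SDT)
 * followed by the memory regions its entries describe.  hdr.len is
 * only finalized, and sdt_state advanced to DUMP_OBTAINED, after the
 * wmb() so no reader can observe a length covering unwritten data.
 */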
3118
3119 #else
3120 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3121 #endif
3122
3123 /**
3124  * ipr_release_dump - Free adapter dump memory
3125  * @kref:       kref struct
3126  *
3127  * Return value:
3128  *      nothing
3129  **/
3130 static void ipr_release_dump(struct kref *kref)
3131 {
3132         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3133         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3134         unsigned long lock_flags = 0;
3135         int i;
3136
3137         ENTER;
3138         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3139         ioa_cfg->dump = NULL;
3140         ioa_cfg->sdt_state = INACTIVE;
3141         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142
3143         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3144                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3145
3146         vfree(dump->ioa_dump.ioa_data);
3147         kfree(dump);
3148         LEAVE;
3149 }
3150
3151 /**
3152  * ipr_worker_thread - Worker thread
3153  * @work:               ioa config struct
3154  *
3155  * Called at task level from a work thread. This function takes care
3156  * of adding and removing devices from the mid-layer as configuration
3157  * changes are detected by the adapter.
3158  *
3159  * Return value:
3160  *      nothing
3161  **/
3162 static void ipr_worker_thread(struct work_struct *work)
3163 {
3164         unsigned long lock_flags;
3165         struct ipr_resource_entry *res;
3166         struct scsi_device *sdev;
3167         struct ipr_dump *dump;
3168         struct ipr_ioa_cfg *ioa_cfg =
3169                 container_of(work, struct ipr_ioa_cfg, work_q);
3170         u8 bus, target, lun;
3171         int did_work;
3172
3173         ENTER;
3174         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3175
3176         if (ioa_cfg->sdt_state == READ_DUMP) {
3177                 dump = ioa_cfg->dump;
3178                 if (!dump) {
3179                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3180                         return;
3181                 }
3182                 kref_get(&dump->kref);
3183                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3184                 ipr_get_ioa_dump(ioa_cfg, dump);
3185                 kref_put(&dump->kref, ipr_release_dump);
3186
3187                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3188                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3189                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3190                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3191                 return;
3192         }
3193
3194 restart:
3195         do {
3196                 did_work = 0;
3197                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3198                     !ioa_cfg->allow_ml_add_del) {
3199                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3200                         return;
3201                 }
3202
3203                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3204                         if (res->del_from_ml && res->sdev) {
3205                                 did_work = 1;
3206                                 sdev = res->sdev;
3207                                 if (!scsi_device_get(sdev)) {
3208                                         if (!res->add_to_ml)
3209                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3210                                         else
3211                                                 res->del_from_ml = 0;
3212                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3213                                         scsi_remove_device(sdev);
3214                                         scsi_device_put(sdev);
3215                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3216                                 }
3217                                 break;
3218                         }
3219                 }
3220         } while (did_work);
3221
3222         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3223                 if (res->add_to_ml) {
3224                         bus = res->bus;
3225                         target = res->target;
3226                         lun = res->lun;
3227                         res->add_to_ml = 0;
3228                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3229                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3230                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3231                         goto restart;
3232                 }
3233         }
3234
3235         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3236         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3237         LEAVE;
3238 }
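/*
 * Editor's note: scsi_add_device() and scsi_remove_device() can
 * sleep, so the host lock is dropped around them.  Since the resource
 * list may change while the lock is released, every successful
 * add/remove restarts the scan from the list head (the did_work loop
 * and the restart label) instead of continuing a stale iteration.
 */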
3239
3240 #ifdef CONFIG_SCSI_IPR_TRACE
3241 /**
3242  * ipr_read_trace - Dump the adapter trace
3243  * @filp:               open sysfs file
3244  * @kobj:               kobject struct
3245  * @bin_attr:           bin_attribute struct
3246  * @buf:                buffer
3247  * @off:                offset
3248  * @count:              buffer size
3249  *
3250  * Return value:
3251  *      number of bytes printed to buffer
3252  **/
3253 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3254                               struct bin_attribute *bin_attr,
3255                               char *buf, loff_t off, size_t count)
3256 {
3257         struct device *dev = container_of(kobj, struct device, kobj);
3258         struct Scsi_Host *shost = class_to_shost(dev);
3259         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3260         unsigned long lock_flags = 0;
3261         ssize_t ret;
3262
3263         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3264         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3265                                 IPR_TRACE_SIZE);
3266         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3267
3268         return ret;
3269 }
3270
3271 static struct bin_attribute ipr_trace_attr = {
3272         .attr = {
3273                 .name = "trace",
3274                 .mode = S_IRUGO,
3275         },
3276         .size = 0,
3277         .read = ipr_read_trace,
3278 };
3279 #endif
3280
3281 /**
3282  * ipr_show_fw_version - Show the firmware version
3283  * @dev:        class device struct
3284  * @buf:        buffer
3285  *
3286  * Return value:
3287  *      number of bytes printed to buffer
3288  **/
3289 static ssize_t ipr_show_fw_version(struct device *dev,
3290                                    struct device_attribute *attr, char *buf)
3291 {
3292         struct Scsi_Host *shost = class_to_shost(dev);
3293         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3294         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3295         unsigned long lock_flags = 0;
3296         int len;
3297
3298         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3299         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3300                        ucode_vpd->major_release, ucode_vpd->card_type,
3301                        ucode_vpd->minor_release[0],
3302                        ucode_vpd->minor_release[1]);
3303         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3304         return len;
3305 }
3306
3307 static struct device_attribute ipr_fw_version_attr = {
3308         .attr = {
3309                 .name =         "fw_version",
3310                 .mode =         S_IRUGO,
3311         },
3312         .show = ipr_show_fw_version,
3313 };
3314
3315 /**
3316  * ipr_show_log_level - Show the adapter's error logging level
3317  * @dev:        class device struct
3318  * @buf:        buffer
3319  *
3320  * Return value:
3321  *      number of bytes printed to buffer
3322  **/
3323 static ssize_t ipr_show_log_level(struct device *dev,
3324                                    struct device_attribute *attr, char *buf)
3325 {
3326         struct Scsi_Host *shost = class_to_shost(dev);
3327         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3328         unsigned long lock_flags = 0;
3329         int len;
3330
3331         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3332         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3333         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3334         return len;
3335 }
3336
3337 /**
3338  * ipr_store_log_level - Change the adapter's error logging level
3339  * @dev:        class device struct
3340  * @buf:        buffer
3341  *
3342  * Return value:
3343  *      number of bytes consumed from buffer
3344  **/
3345 static ssize_t ipr_store_log_level(struct device *dev,
3346                                    struct device_attribute *attr,
3347                                    const char *buf, size_t count)
3348 {
3349         struct Scsi_Host *shost = class_to_shost(dev);
3350         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3351         unsigned long lock_flags = 0;
3352
3353         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3354         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3355         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356         return strlen(buf);
3357 }
3358
3359 static struct device_attribute ipr_log_level_attr = {
3360         .attr = {
3361                 .name =         "log_level",
3362                 .mode =         S_IRUGO | S_IWUSR,
3363         },
3364         .show = ipr_show_log_level,
3365         .store = ipr_store_log_level
3366 };
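/*
 * Editor's usage sketch (hypothetical host number): the attribute
 * pair above appears as a plain decimal sysfs file, e.g.
 *
 *      # cat /sys/class/scsi_host/host0/log_level
 *      2
 *      # echo 4 > /sys/class/scsi_host/host0/log_level
 */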
3367
3368 /**
3369  * ipr_store_diagnostics - IOA Diagnostics interface
3370  * @dev:        device struct
3371  * @buf:        buffer
3372  * @count:      buffer size
3373  *
3374  * This function will reset the adapter and wait a reasonable
3375  * amount of time for any errors that the adapter might log.
3376  *
3377  * Return value:
3378  *      count on success / other on failure
3379  **/
3380 static ssize_t ipr_store_diagnostics(struct device *dev,
3381                                      struct device_attribute *attr,
3382                                      const char *buf, size_t count)
3383 {
3384         struct Scsi_Host *shost = class_to_shost(dev);
3385         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3386         unsigned long lock_flags = 0;
3387         int rc = count;
3388
3389         if (!capable(CAP_SYS_ADMIN))
3390                 return -EACCES;
3391
3392         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3393         while (ioa_cfg->in_reset_reload) {
3394                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3395                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3396                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3397         }
3398
3399         ioa_cfg->errors_logged = 0;
3400         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3401
3402         if (ioa_cfg->in_reset_reload) {
3403                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3405
3406                 /* Wait for a second for any errors to be logged */
3407                 msleep(1000);
3408         } else {
3409                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3410                 return -EIO;
3411         }
3412
3413         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3414         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3415                 rc = -EIO;
3416         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3417
3418         return rc;
3419 }
3420
3421 static struct device_attribute ipr_diagnostics_attr = {
3422         .attr = {
3423                 .name =         "run_diagnostics",
3424                 .mode =         S_IWUSR,
3425         },
3426         .store = ipr_store_diagnostics
3427 };
3428
3429 /**
3430  * ipr_show_adapter_state - Show the adapter's state
3431  * @dev:        device struct
3432  * @buf:        buffer
3433  *
3434  * Return value:
3435  *      number of bytes printed to buffer
3436  **/
3437 static ssize_t ipr_show_adapter_state(struct device *dev,
3438                                       struct device_attribute *attr, char *buf)
3439 {
3440         struct Scsi_Host *shost = class_to_shost(dev);
3441         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3442         unsigned long lock_flags = 0;
3443         int len;
3444
3445         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3446         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3447                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3448         else
3449                 len = snprintf(buf, PAGE_SIZE, "online\n");
3450         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3451         return len;
3452 }
3453
3454 /**
3455  * ipr_store_adapter_state - Change adapter state
3456  * @dev:        device struct
3457  * @buf:        buffer
3458  * @count:      buffer size
3459  *
3460  * This function will change the adapter's state.
3461  *
3462  * Return value:
3463  *      count on success / other on failure
3464  **/
3465 static ssize_t ipr_store_adapter_state(struct device *dev,
3466                                        struct device_attribute *attr,
3467                                        const char *buf, size_t count)
3468 {
3469         struct Scsi_Host *shost = class_to_shost(dev);
3470         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3471         unsigned long lock_flags;
3472         int result = count, i;
3473
3474         if (!capable(CAP_SYS_ADMIN))
3475                 return -EACCES;
3476
3477         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3478         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3479             !strncmp(buf, "online", 6)) {
3480                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3481                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3482                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3483                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3484                 }
3485                 wmb();
3486                 ioa_cfg->reset_retries = 0;
3487                 ioa_cfg->in_ioa_bringdown = 0;
3488                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3489         }
3490         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3491         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3492
3493         return result;
3494 }
3495
3496 static struct device_attribute ipr_ioa_state_attr = {
3497         .attr = {
3498                 .name =         "online_state",
3499                 .mode =         S_IRUGO | S_IWUSR,
3500         },
3501         .show = ipr_show_adapter_state,
3502         .store = ipr_store_adapter_state
3503 };
3504
3505 /**
3506  * ipr_store_reset_adapter - Reset the adapter
3507  * @dev:        device struct
3508  * @buf:        buffer
3509  * @count:      buffer size
3510  *
3511  * This function will reset the adapter.
3512  *
3513  * Return value:
3514  *      count on success / other on failure
3515  **/
3516 static ssize_t ipr_store_reset_adapter(struct device *dev,
3517                                        struct device_attribute *attr,
3518                                        const char *buf, size_t count)
3519 {
3520         struct Scsi_Host *shost = class_to_shost(dev);
3521         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3522         unsigned long lock_flags;
3523         int result = count;
3524
3525         if (!capable(CAP_SYS_ADMIN))
3526                 return -EACCES;
3527
3528         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3529         if (!ioa_cfg->in_reset_reload)
3530                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3531         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3532         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3533
3534         return result;
3535 }
3536
3537 static struct device_attribute ipr_ioa_reset_attr = {
3538         .attr = {
3539                 .name =         "reset_host",
3540                 .mode =         S_IWUSR,
3541         },
3542         .store = ipr_store_reset_adapter
3543 };
3544
3545 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3546 /**
3547  * ipr_show_iopoll_weight - Show ipr polling mode
3548  * @dev:        class device struct
3549  * @buf:        buffer
3550  *
3551  * Return value:
3552  *      number of bytes printed to buffer
3553  **/
3554 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3555                                    struct device_attribute *attr, char *buf)
3556 {
3557         struct Scsi_Host *shost = class_to_shost(dev);
3558         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3559         unsigned long lock_flags = 0;
3560         int len;
3561
3562         spin_lock_irqsave(shost->host_lock, lock_flags);
3563         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3564         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3565
3566         return len;
3567 }
3568
3569 /**
3570  * ipr_store_iopoll_weight - Change the adapter's polling mode
3571  * @dev:        class device struct
3572  * @buf:        buffer
3573  *
3574  * Return value:
3575  *      number of bytes consumed from buffer
3576  **/
3577 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3578                                         struct device_attribute *attr,
3579                                         const char *buf, size_t count)
3580 {
3581         struct Scsi_Host *shost = class_to_shost(dev);
3582         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3583         unsigned long user_iopoll_weight;
3584         unsigned long lock_flags = 0;
3585         int i;
3586
3587         if (!ioa_cfg->sis64) {
3588                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3589                 return -EINVAL;
3590         }
3591         if (kstrtoul(buf, 10, &user_iopoll_weight))
3592                 return -EINVAL;
3593
3594         if (user_iopoll_weight > 256) {
3595                 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It cannot exceed 256\n");
3596                 return -EINVAL;
3597         }
3598
3599         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3600                 dev_info(&ioa_cfg->pdev->dev, "Specified blk-iopoll weight is already set\n");
3601                 return strlen(buf);
3602         }
3603
3604         if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3605                         ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3606                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3607                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3608         }
3609
3610         spin_lock_irqsave(shost->host_lock, lock_flags);
3611         ioa_cfg->iopoll_weight = user_iopoll_weight;
3612         if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3613                         ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3614                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3615                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3616                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3617                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3618                 }
3619         }
3620         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3621
3622         return strlen(buf);
3623 }
3624
3625 static struct device_attribute ipr_iopoll_weight_attr = {
3626         .attr = {
3627                 .name =         "iopoll_weight",
3628                 .mode =         S_IRUGO | S_IWUSR,
3629         },
3630         .show = ipr_show_iopoll_weight,
3631         .store = ipr_store_iopoll_weight
3632 };
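/*
 * Editor's usage sketch (hypothetical host number): polling only
 * takes effect on SIS64 adapters with more than one MSI-X vector,
 * and the weight may not exceed 256:
 *
 *      # echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */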
3633
3634 /**
3635  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3636  * @buf_len:            buffer length
3637  *
3638  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3639  * list to use for microcode download
3640  *
3641  * Return value:
3642  *      pointer to sglist / NULL on failure
3643  **/
3644 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3645 {
3646         int sg_size, order, bsize_elem, num_elem, i, j;
3647         struct ipr_sglist *sglist;
3648         struct scatterlist *scatterlist;
3649         struct page *page;
3650
3651         /* Get the minimum size per scatter/gather element */
3652         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3653
3654         /* Get the actual size per element */
3655         order = get_order(sg_size);
3656
3657         /* Determine the actual number of bytes per element */
3658         bsize_elem = PAGE_SIZE * (1 << order);
3659
3660         /* Determine the actual number of sg entries needed */
3661         if (buf_len % bsize_elem)
3662                 num_elem = (buf_len / bsize_elem) + 1;
3663         else
3664                 num_elem = buf_len / bsize_elem;
3665
3666         /* Allocate a scatter/gather list for the DMA */
3667         sglist = kzalloc(sizeof(struct ipr_sglist) +
3668                          (sizeof(struct scatterlist) * (num_elem - 1)),
3669                          GFP_KERNEL);
3670
3671         if (sglist == NULL) {
3672                 ipr_trace;
3673                 return NULL;
3674         }
3675
3676         scatterlist = sglist->scatterlist;
3677         sg_init_table(scatterlist, num_elem);
3678
3679         sglist->order = order;
3680         sglist->num_sg = num_elem;
3681
3682         /* Allocate a bunch of sg elements */
3683         for (i = 0; i < num_elem; i++) {
3684                 page = alloc_pages(GFP_KERNEL, order);
3685                 if (!page) {
3686                         ipr_trace;
3687
3688                         /* Free up what we already allocated */
3689                         for (j = i - 1; j >= 0; j--)
3690                                 __free_pages(sg_page(&scatterlist[j]), order);
3691                         kfree(sglist);
3692                         return NULL;
3693                 }
3694
3695                 sg_set_page(&scatterlist[i], page, 0, 0);
3696         }
3697
3698         return sglist;
3699 }
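/*
 * Editor's worked example of the sizing above, assuming 4 KiB pages
 * and IPR_MAX_SGLIST == 64: a 1 MiB image gives sg_size = 1048576 /
 * 63 ~= 16644 bytes, get_order() rounds that up to order 3 (32 KiB
 * chunks), so bsize_elem = 32768 and num_elem = 1048576 / 32768 = 32
 * scatter/gather elements.
 */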
3700
3701 /**
3702  * ipr_free_ucode_buffer - Frees a microcode download buffer
3703  * @sglist:             scatter/gather list pointer
3704  *
3705  * Free a DMA'able ucode download buffer previously allocated with
3706  * ipr_alloc_ucode_buffer
3707  *
3708  * Return value:
3709  *      nothing
3710  **/
3711 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3712 {
3713         int i;
3714
3715         for (i = 0; i < sglist->num_sg; i++)
3716                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3717
3718         kfree(sglist);
3719 }
3720
3721 /**
3722  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3723  * @sglist:             scatter/gather list pointer
3724  * @buffer:             buffer pointer
3725  * @len:                buffer length
3726  *
3727  * Copy a microcode image from a user buffer into a buffer allocated by
3728  * ipr_alloc_ucode_buffer
3729  *
3730  * Return value:
3731  *      0 on success / other on failure
3732  **/
3733 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3734                                  u8 *buffer, u32 len)
3735 {
3736         int bsize_elem, i, result = 0;
3737         struct scatterlist *scatterlist;
3738         void *kaddr;
3739
3740         /* Determine the actual number of bytes per element */
3741         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3742
3743         scatterlist = sglist->scatterlist;
3744
3745         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3746                 struct page *page = sg_page(&scatterlist[i]);
3747
3748                 kaddr = kmap(page);
3749                 memcpy(kaddr, buffer, bsize_elem);
3750                 kunmap(page);
3751
3752                 scatterlist[i].length = bsize_elem;
3753
3754                 if (result != 0) {
3755                         ipr_trace;
3756                         return result;
3757                 }
3758         }
3759
3760         if (len % bsize_elem) {
3761                 struct page *page = sg_page(&scatterlist[i]);
3762
3763                 kaddr = kmap(page);
3764                 memcpy(kaddr, buffer, len % bsize_elem);
3765                 kunmap(page);
3766
3767                 scatterlist[i].length = len % bsize_elem;
3768         }
3769
3770         sglist->buffer_len = len;
3771         return result;
3772 }
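/*
 * Editor's note: the image is copied one element at a time through
 * kmap()/kunmap(), so the download buffer never needs a contiguous
 * kernel mapping; a trailing partial element (len % bsize_elem) gets
 * a correspondingly shorter scatterlist length.
 */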
3773
3774 /**
3775  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3776  * @ipr_cmd:            ipr command struct
3777  * @sglist:             scatter/gather list
3778  *
3779  * Builds a microcode download IOA data list (IOADL).
3780  *
3781  **/
3782 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3783                                     struct ipr_sglist *sglist)
3784 {
3785         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3786         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3787         struct scatterlist *scatterlist = sglist->scatterlist;
3788         int i;
3789
3790         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3791         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3792         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3793
3794         ioarcb->ioadl_len =
3795                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3796         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3797                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3798                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3799                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3800         }
3801
3802         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3803 }
3804
3805 /**
3806  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3807  * @ipr_cmd:    ipr command struct
3808  * @sglist:             scatter/gather list
3809  *
3810  * Builds a microcode download IOA data list (IOADL).
3811  *
3812  **/
3813 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3814                                   struct ipr_sglist *sglist)
3815 {
3816         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3817         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3818         struct scatterlist *scatterlist = sglist->scatterlist;
3819         int i;
3820
3821         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3822         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3823         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3824
3825         ioarcb->ioadl_len =
3826                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3827
3828         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3829                 ioadl[i].flags_and_data_len =
3830                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3831                 ioadl[i].address =
3832                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3833         }
3834
3835         ioadl[i-1].flags_and_data_len |=
3836                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3837 }
3838
3839 /**
3840  * ipr_update_ioa_ucode - Update IOA's microcode
3841  * @ioa_cfg:    ioa config struct
3842  * @sglist:             scatter/gather list
3843  *
3844  * Initiate an adapter reset to update the IOA's microcode
3845  *
3846  * Return value:
3847  *      0 on success / -EIO on failure
3848  **/
3849 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3850                                 struct ipr_sglist *sglist)
3851 {
3852         unsigned long lock_flags;
3853
3854         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3855         while (ioa_cfg->in_reset_reload) {
3856                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3857                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3858                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3859         }
3860
3861         if (ioa_cfg->ucode_sglist) {
3862                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3863                 dev_err(&ioa_cfg->pdev->dev,
3864                         "Microcode download already in progress\n");
3865                 return -EIO;
3866         }
3867
3868         sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3869                                         sglist->num_sg, DMA_TO_DEVICE);
3870
3871         if (!sglist->num_dma_sg) {
3872                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3873                 dev_err(&ioa_cfg->pdev->dev,
3874                         "Failed to map microcode download buffer!\n");
3875                 return -EIO;
3876         }
3877
3878         ioa_cfg->ucode_sglist = sglist;
3879         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3880         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3881         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3882
3883         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3884         ioa_cfg->ucode_sglist = NULL;
3885         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3886         return 0;
3887 }
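/*
 * Editor's note: the download itself rides on an adapter reset.
 * ucode_sglist is published under the host lock, a normal shutdown
 * reset is initiated, and the reset job consumes the sglist while
 * bringing the IOA back up; the caller just waits for
 * in_reset_reload to clear.
 */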
3888
3889 /**
3890  * ipr_store_update_fw - Update the firmware on the adapter
3891  * @dev:        device struct
3892  * @buf:        buffer
3893  * @count:      buffer size
3894  *
3895  * This function will update the firmware on the adapter.
3896  *
3897  * Return value:
3898  *      count on success / other on failure
3899  **/
3900 static ssize_t ipr_store_update_fw(struct device *dev,
3901                                    struct device_attribute *attr,
3902                                    const char *buf, size_t count)
3903 {
3904         struct Scsi_Host *shost = class_to_shost(dev);
3905         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3906         struct ipr_ucode_image_header *image_hdr;
3907         const struct firmware *fw_entry;
3908         struct ipr_sglist *sglist;
3909         char fname[100];
3910         char *src;
3911         int len, result, dnld_size;
3912
3913         if (!capable(CAP_SYS_ADMIN))
3914                 return -EACCES;
3915
3916         snprintf(fname, sizeof(fname), "%s", buf);
3917         len = strlen(fname);
3918         if (len && fname[len - 1] == '\n')
3919                 fname[len - 1] = '\0';
3918
3919         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3920                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3921                 return -EIO;
3922         }
3923
3924         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3925
3926         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3927         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3928         sglist = ipr_alloc_ucode_buffer(dnld_size);
3929
3930         if (!sglist) {
3931                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3932                 release_firmware(fw_entry);
3933                 return -ENOMEM;
3934         }
3935
3936         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3937
3938         if (result) {
3939                 dev_err(&ioa_cfg->pdev->dev,
3940                         "Microcode buffer copy to DMA buffer failed\n");
3941                 goto out;
3942         }
3943
3944         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
3945
3946         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3947
3948         if (!result)
3949                 result = count;
3950 out:
3951         ipr_free_ucode_buffer(sglist);
3952         release_firmware(fw_entry);
3953         return result;
3954 }
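/*
 * Editor's usage sketch (hypothetical host number): the string
 * written is a firmware file name resolved by request_firmware()
 * against the usual firmware search path, e.g.
 *
 *      # echo <image-file> > /sys/class/scsi_host/host0/update_fw
 *
 * As the ipr_info() above warns, the resulting reset can take up to
 * 30 minutes.
 */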
3955
3956 static struct device_attribute ipr_update_fw_attr = {
3957         .attr = {
3958                 .name =         "update_fw",
3959                 .mode =         S_IWUSR,
3960         },
3961         .store = ipr_store_update_fw
3962 };
3963
3964 /**
3965  * ipr_show_fw_type - Show the adapter's firmware type.
3966  * @dev:        class device struct
3967  * @buf:        buffer
3968  *
3969  * Return value:
3970  *      number of bytes printed to buffer
3971  **/
3972 static ssize_t ipr_show_fw_type(struct device *dev,
3973                                 struct device_attribute *attr, char *buf)
3974 {
3975         struct Scsi_Host *shost = class_to_shost(dev);
3976         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3977         unsigned long lock_flags = 0;
3978         int len;
3979
3980         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3981         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3982         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3983         return len;
3984 }
3985
3986 static struct device_attribute ipr_ioa_fw_type_attr = {
3987         .attr = {
3988                 .name =         "fw_type",
3989                 .mode =         S_IRUGO,
3990         },
3991         .show = ipr_show_fw_type
3992 };
3993
3994 static struct device_attribute *ipr_ioa_attrs[] = {
3995         &ipr_fw_version_attr,
3996         &ipr_log_level_attr,
3997         &ipr_diagnostics_attr,
3998         &ipr_ioa_state_attr,
3999         &ipr_ioa_reset_attr,
4000         &ipr_update_fw_attr,
4001         &ipr_ioa_fw_type_attr,
4002         &ipr_iopoll_weight_attr,
4003         NULL,
4004 };
4005
4006 #ifdef CONFIG_SCSI_IPR_DUMP
4007 /**
4008  * ipr_read_dump - Dump the adapter
4009  * @filp:               open sysfs file
4010  * @kobj:               kobject struct
4011  * @bin_attr:           bin_attribute struct
4012  * @buf:                buffer
4013  * @off:                offset
4014  * @count:              buffer size
4015  *
4016  * Return value:
4017  *      number of bytes printed to buffer
4018  **/
4019 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4020                              struct bin_attribute *bin_attr,
4021                              char *buf, loff_t off, size_t count)
4022 {
4023         struct device *cdev = container_of(kobj, struct device, kobj);
4024         struct Scsi_Host *shost = class_to_shost(cdev);
4025         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4026         struct ipr_dump *dump;
4027         unsigned long lock_flags = 0;
4028         char *src;
4029         int len, sdt_end;
4030         size_t rc = count;
4031
4032         if (!capable(CAP_SYS_ADMIN))
4033                 return -EACCES;
4034
4035         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4036         dump = ioa_cfg->dump;
4037
4038         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4039                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4040                 return 0;
4041         }
4042         kref_get(&dump->kref);
4043         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044
4045         if (off > dump->driver_dump.hdr.len) {
4046                 kref_put(&dump->kref, ipr_release_dump);
4047                 return 0;
4048         }
4049
4050         if (off + count > dump->driver_dump.hdr.len) {
4051                 count = dump->driver_dump.hdr.len - off;
4052                 rc = count;
4053         }
4054
4055         if (count && off < sizeof(dump->driver_dump)) {
4056                 if (off + count > sizeof(dump->driver_dump))
4057                         len = sizeof(dump->driver_dump) - off;
4058                 else
4059                         len = count;
4060                 src = (u8 *)&dump->driver_dump + off;
4061                 memcpy(buf, src, len);
4062                 buf += len;
4063                 off += len;
4064                 count -= len;
4065         }
4066
4067         off -= sizeof(dump->driver_dump);
4068
4069         if (ioa_cfg->sis64)
4070                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4071                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4072                            sizeof(struct ipr_sdt_entry));
4073         else
4074                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4075                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4076
4077         if (count && off < sdt_end) {
4078                 if (off + count > sdt_end)
4079                         len = sdt_end - off;
4080                 else
4081                         len = count;
4082                 src = (u8 *)&dump->ioa_dump + off;
4083                 memcpy(buf, src, len);
4084                 buf += len;
4085                 off += len;
4086                 count -= len;
4087         }
4088
4089         off -= sdt_end;
4090
4091         while (count) {
4092                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4093                         len = PAGE_ALIGN(off) - off;
4094                 else
4095                         len = count;
4096                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4097                 src += off & ~PAGE_MASK;
4098                 memcpy(buf, src, len);
4099                 buf += len;
4100                 off += len;
4101                 count -= len;
4102         }
4103
4104         kref_put(&dump->kref, ipr_release_dump);
4105         return rc;
4106 }
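/*
 * Editor's note: reads are satisfied from three back-to-back regions,
 * with "off" rebased after each one -- the driver_dump header block,
 * the smart dump table (up to sdt_end), then the page array holding
 * the raw IOA data, copied at most one page per loop iteration.
 */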
4107
4108 /**
4109  * ipr_alloc_dump - Prepare for adapter dump
4110  * @ioa_cfg:    ioa config struct
4111  *
4112  * Return value:
4113  *      0 on success / other on failure
4114  **/
4115 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4116 {
4117         struct ipr_dump *dump;
4118         __be32 **ioa_data;
4119         unsigned long lock_flags = 0;
4120
4121         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4122
4123         if (!dump) {
4124                 ipr_err("Dump memory allocation failed\n");
4125                 return -ENOMEM;
4126         }
4127
4128         if (ioa_cfg->sis64)
4129                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4130         else
4131                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4132
4133         if (!ioa_data) {
4134                 ipr_err("Dump memory allocation failed\n");
4135                 kfree(dump);
4136                 return -ENOMEM;
4137         }
4138
4139         dump->ioa_dump.ioa_data = ioa_data;
4140
4141         kref_init(&dump->kref);
4142         dump->ioa_cfg = ioa_cfg;
4143
4144         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4145
4146         if (INACTIVE != ioa_cfg->sdt_state) {
4147                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4148                 vfree(dump->ioa_dump.ioa_data);
4149                 kfree(dump);
4150                 return 0;
4151         }
4152
4153         ioa_cfg->dump = dump;
4154         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4155         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4156                 ioa_cfg->dump_taken = 1;
4157                 schedule_work(&ioa_cfg->work_q);
4158         }
4159         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4160
4161         return 0;
4162 }
4163
4164 /**
4165  * ipr_free_dump - Free adapter dump memory
4166  * @ioa_cfg:    ioa config struct
4167  *
4168  * Return value:
4169  *      0 on success / other on failure
4170  **/
4171 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4172 {
4173         struct ipr_dump *dump;
4174         unsigned long lock_flags = 0;
4175
4176         ENTER;
4177
4178         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4179         dump = ioa_cfg->dump;
4180         if (!dump) {
4181                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4182                 return 0;
4183         }
4184
4185         ioa_cfg->dump = NULL;
4186         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4187
4188         kref_put(&dump->kref, ipr_release_dump);
4189
4190         LEAVE;
4191         return 0;
4192 }
4193
4194 /**
4195  * ipr_write_dump - Setup dump state of adapter
4196  * @filp:               open sysfs file
4197  * @kobj:               kobject struct
4198  * @bin_attr:           bin_attribute struct
4199  * @buf:                buffer
4200  * @off:                offset
4201  * @count:              buffer size
4202  *
4203  * Return value:
4204  *      count on success / negative errno on failure
4205  **/
4206 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4207                               struct bin_attribute *bin_attr,
4208                               char *buf, loff_t off, size_t count)
4209 {
4210         struct device *cdev = container_of(kobj, struct device, kobj);
4211         struct Scsi_Host *shost = class_to_shost(cdev);
4212         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4213         int rc;
4214
4215         if (!capable(CAP_SYS_ADMIN))
4216                 return -EACCES;
4217
4218         if (buf[0] == '1')
4219                 rc = ipr_alloc_dump(ioa_cfg);
4220         else if (buf[0] == '0')
4221                 rc = ipr_free_dump(ioa_cfg);
4222         else
4223                 return -EINVAL;
4224
4225         if (rc)
4226                 return rc;
4227         else
4228                 return count;
4229 }
4230
4231 static struct bin_attribute ipr_dump_attr = {
4232         .attr = {
4233                 .name = "dump",
4234                 .mode = S_IRUSR | S_IWUSR,
4235         },
4236         .size = 0,
4237         .read = ipr_read_dump,
4238         .write = ipr_write_dump
4239 };
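/*
 * Usage sketch, illustrative only (sysfs path and host number assumed):
 * the bin attribute above exposes the dump as a "dump" file, so it can
 * be driven from the shell:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump	# arm:  ipr_alloc_dump()
 *	cat /sys/class/scsi_host/host0/dump > ioa.dump	# read: ipr_read_dump()
 *	echo 0 > /sys/class/scsi_host/host0/dump	# free: ipr_free_dump()
 *
 * Note the read returns data only once the adapter has actually produced
 * a dump (sdt_state == DUMP_OBTAINED); until then it returns 0 bytes.
 */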
4240 #else
4241 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4242 #endif
4243
4244 /**
4245  * ipr_change_queue_depth - Change the device's queue depth
4246  * @sdev:       scsi device struct
4247  * @qdepth:     depth to set
4248  * @reason:     calling context
4249  *
4250  * Return value:
4251  *      actual depth set
4252  **/
4253 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4254                                   int reason)
4255 {
4256         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4257         struct ipr_resource_entry *res;
4258         unsigned long lock_flags = 0;
4259
4260         if (reason != SCSI_QDEPTH_DEFAULT)
4261                 return -EOPNOTSUPP;
4262
4263         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4264         res = (struct ipr_resource_entry *)sdev->hostdata;
4265
4266         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4267                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4268         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4269
4270         scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4271         return sdev->queue_depth;
4272 }
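/*
 * Illustrative only (device address assumed): the midlayer invokes this
 * hook when userspace writes the standard per-device attribute, e.g.
 *
 *	echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
 *
 * For SATA (GATA) resources the requested depth is first clamped to
 * IPR_MAX_CMD_PER_ATA_LUN.
 */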
4273
4274 /**
4275  * ipr_change_queue_type - Change the device's queue type
4276  * @sdev:               scsi device struct
4277  * @tag_type:   type of tags to use
4278  *
4279  * Return value:
4280  *      actual queue type set
4281  **/
4282 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4283 {
4284         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4285         struct ipr_resource_entry *res;
4286         unsigned long lock_flags = 0;
4287
4288         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4289         res = (struct ipr_resource_entry *)sdev->hostdata;
4290
4291         if (res) {
4292                 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4293                         /*
4294                          * We don't bother quiescing the device here since the
4295                          * adapter firmware does it for us.
4296                          */
4297                         scsi_set_tag_type(sdev, tag_type);
4298
4299                         if (tag_type)
4300                                 scsi_activate_tcq(sdev, sdev->queue_depth);
4301                         else
4302                                 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4303                 } else
4304                         tag_type = 0;
4305         } else
4306                 tag_type = 0;
4307
4308         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4309         return tag_type;
4310 }
4311
4312 /**
4313  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4314  * @dev:        device struct
4315  * @attr:       device attribute structure
4316  * @buf:        buffer
4317  *
4318  * Return value:
4319  *      number of bytes printed to buffer
4320  **/
4321 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4322 {
4323         struct scsi_device *sdev = to_scsi_device(dev);
4324         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4325         struct ipr_resource_entry *res;
4326         unsigned long lock_flags = 0;
4327         ssize_t len = -ENXIO;
4328
4329         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4330         res = (struct ipr_resource_entry *)sdev->hostdata;
4331         if (res)
4332                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4333         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4334         return len;
4335 }
4336
4337 static struct device_attribute ipr_adapter_handle_attr = {
4338         .attr = {
4339                 .name =         "adapter_handle",
4340                 .mode =         S_IRUSR,
4341         },
4342         .show = ipr_show_adapter_handle
4343 };
4344
4345 /**
4346  * ipr_show_resource_path - Show the resource path or the resource address for
4347  *                          this device.
4348  * @dev:        device struct
4349  * @attr:       device attribute structure
4350  * @buf:        buffer
4351  *
4352  * Return value:
4353  *      number of bytes printed to buffer
4354  **/
4355 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4356 {
4357         struct scsi_device *sdev = to_scsi_device(dev);
4358         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359         struct ipr_resource_entry *res;
4360         unsigned long lock_flags = 0;
4361         ssize_t len = -ENXIO;
4362         char buffer[IPR_MAX_RES_PATH_LENGTH];
4363
4364         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4365         res = (struct ipr_resource_entry *)sdev->hostdata;
4366         if (res && ioa_cfg->sis64)
4367                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4368                                __ipr_format_res_path(res->res_path, buffer,
4369                                                      sizeof(buffer)));
4370         else if (res)
4371                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4372                                res->bus, res->target, res->lun);
4373
4374         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4375         return len;
4376 }
4377
4378 static struct device_attribute ipr_resource_path_attr = {
4379         .attr = {
4380                 .name =         "resource_path",
4381                 .mode =         S_IRUGO,
4382         },
4383         .show = ipr_show_resource_path
4384 };
4385
4386 /**
4387  * ipr_show_device_id - Show the device_id for this device.
4388  * @dev:        device struct
4389  * @attr:       device attribute structure
4390  * @buf:        buffer
4391  *
4392  * Return value:
4393  *      number of bytes printed to buffer
4394  **/
4395 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4396 {
4397         struct scsi_device *sdev = to_scsi_device(dev);
4398         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4399         struct ipr_resource_entry *res;
4400         unsigned long lock_flags = 0;
4401         ssize_t len = -ENXIO;
4402
4403         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4404         res = (struct ipr_resource_entry *)sdev->hostdata;
4405         if (res && ioa_cfg->sis64)
4406                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4407         else if (res)
4408                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4409
4410         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4411         return len;
4412 }
4413
4414 static struct device_attribute ipr_device_id_attr = {
4415         .attr = {
4416                 .name =         "device_id",
4417                 .mode =         S_IRUGO,
4418         },
4419         .show = ipr_show_device_id
4420 };
4421
4422 /**
4423  * ipr_show_resource_type - Show the resource type for this device.
4424  * @dev:        device struct
4425  * @attr:       device attribute structure
4426  * @buf:        buffer
4427  *
4428  * Return value:
4429  *      number of bytes printed to buffer
4430  **/
4431 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4432 {
4433         struct scsi_device *sdev = to_scsi_device(dev);
4434         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4435         struct ipr_resource_entry *res;
4436         unsigned long lock_flags = 0;
4437         ssize_t len = -ENXIO;
4438
4439         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4440         res = (struct ipr_resource_entry *)sdev->hostdata;
4441
4442         if (res)
4443                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4444
4445         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4446         return len;
4447 }
4448
4449 static struct device_attribute ipr_resource_type_attr = {
4450         .attr = {
4451                 .name =         "resource_type",
4452                 .mode =         S_IRUGO,
4453         },
4454         .show = ipr_show_resource_type
4455 };
4456
4457 static struct device_attribute *ipr_dev_attrs[] = {
4458         &ipr_adapter_handle_attr,
4459         &ipr_resource_path_attr,
4460         &ipr_device_id_attr,
4461         &ipr_resource_type_attr,
4462         NULL,
4463 };
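/*
 * The attributes above appear in each SCSI device's sysfs directory; a
 * hedged example of inspecting them (device address assumed):
 *
 *	cat /sys/bus/scsi/devices/0:0:1:0/adapter_handle
 *	cat /sys/bus/scsi/devices/0:0:1:0/resource_path
 *	cat /sys/bus/scsi/devices/0:0:1:0/device_id
 *	cat /sys/bus/scsi/devices/0:0:1:0/resource_type
 */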
4464
4465 /**
4466  * ipr_biosparam - Return the HSC mapping
4467  * @sdev:                       scsi device struct
4468  * @block_device:       block device pointer
4469  * @capacity:           capacity of the device
4470  * @parm:                       Array containing returned HSC values.
4471  *
4472  * This function generates the HSC parms that fdisk uses.
4473  * We want to make sure we return something that places partitions
4474  * on 4k boundaries for best performance with the IOA.
4475  *
4476  * Return value:
4477  *      0 on success
4478  **/
4479 static int ipr_biosparam(struct scsi_device *sdev,
4480                          struct block_device *block_device,
4481                          sector_t capacity, int *parm)
4482 {
4483         int heads, sectors;
4484         sector_t cylinders;
4485
4486         heads = 128;
4487         sectors = 32;
4488
4489         cylinders = capacity;
4490         sector_div(cylinders, (128 * 32));
4491
4492         /* return result */
4493         parm[0] = heads;
4494         parm[1] = sectors;
4495         parm[2] = cylinders;
4496
4497         return 0;
4498 }
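/*
 * Worked example: a disk of 71,096,640 512-byte sectors maps to
 * heads = 128, sectors = 32, cylinders = 71096640 / (128 * 32) = 17357.
 * Each cylinder spans 128 * 32 * 512 bytes = 2 MB, so cylinder-aligned
 * partitions always begin on a 4k boundary, as the IOA prefers.
 */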
4499
4500 /**
4501  * ipr_find_starget - Find target based on bus/target.
4502  * @starget:    scsi target struct
4503  *
4504  * Return value:
4505  *      resource entry pointer if found / NULL if not found
4506  **/
4507 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4508 {
4509         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4510         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4511         struct ipr_resource_entry *res;
4512
4513         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4514                 if ((res->bus == starget->channel) &&
4515                     (res->target == starget->id)) {
4516                         return res;
4517                 }
4518         }
4519
4520         return NULL;
4521 }
4522
4523 static struct ata_port_info sata_port_info;
4524
4525 /**
4526  * ipr_target_alloc - Prepare for commands to a SCSI target
4527  * @starget:    scsi target struct
4528  *
4529  * If the device is a SATA device, this function allocates an
4530  * ATA port with libata, else it does nothing.
4531  *
4532  * Return value:
4533  *      0 on success / non-0 on failure
4534  **/
4535 static int ipr_target_alloc(struct scsi_target *starget)
4536 {
4537         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4538         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4539         struct ipr_sata_port *sata_port;
4540         struct ata_port *ap;
4541         struct ipr_resource_entry *res;
4542         unsigned long lock_flags;
4543
4544         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4545         res = ipr_find_starget(starget);
4546         starget->hostdata = NULL;
4547
4548         if (res && ipr_is_gata(res)) {
4549                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4550                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4551                 if (!sata_port)
4552                         return -ENOMEM;
4553
4554                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4555                 if (ap) {
4556                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4557                         sata_port->ioa_cfg = ioa_cfg;
4558                         sata_port->ap = ap;
4559                         sata_port->res = res;
4560
4561                         res->sata_port = sata_port;
4562                         ap->private_data = sata_port;
4563                         starget->hostdata = sata_port;
4564                 } else {
4565                         kfree(sata_port);
4566                         return -ENOMEM;
4567                 }
4568         }
4569         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4570
4571         return 0;
4572 }
4573
4574 /**
4575  * ipr_target_destroy - Destroy a SCSI target
4576  * @starget:    scsi target struct
4577  *
4578  * If the device was a SATA device, this function frees the libata
4579  * ATA port, else it does nothing.
4580  *
4581  **/
4582 static void ipr_target_destroy(struct scsi_target *starget)
4583 {
4584         struct ipr_sata_port *sata_port = starget->hostdata;
4585         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4586         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4587
4588         if (ioa_cfg->sis64) {
4589                 if (!ipr_find_starget(starget)) {
4590                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4591                                 clear_bit(starget->id, ioa_cfg->array_ids);
4592                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4593                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4594                         else if (starget->channel == 0)
4595                                 clear_bit(starget->id, ioa_cfg->target_ids);
4596                 }
4597         }
4598
4599         if (sata_port) {
4600                 starget->hostdata = NULL;
4601                 ata_sas_port_destroy(sata_port->ap);
4602                 kfree(sata_port);
4603         }
4604 }
4605
4606 /**
4607  * ipr_find_sdev - Find device based on bus/target/lun.
4608  * @sdev:       scsi device struct
4609  *
4610  * Return value:
4611  *      resource entry pointer if found / NULL if not found
4612  **/
4613 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4614 {
4615         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4616         struct ipr_resource_entry *res;
4617
4618         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4619                 if ((res->bus == sdev->channel) &&
4620                     (res->target == sdev->id) &&
4621                     (res->lun == sdev->lun))
4622                         return res;
4623         }
4624
4625         return NULL;
4626 }
4627
4628 /**
4629  * ipr_slave_destroy - Unconfigure a SCSI device
4630  * @sdev:       scsi device struct
4631  *
4632  * Return value:
4633  *      nothing
4634  **/
4635 static void ipr_slave_destroy(struct scsi_device *sdev)
4636 {
4637         struct ipr_resource_entry *res;
4638         struct ipr_ioa_cfg *ioa_cfg;
4639         unsigned long lock_flags = 0;
4640
4641         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4642
4643         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4644         res = (struct ipr_resource_entry *) sdev->hostdata;
4645         if (res) {
4646                 if (res->sata_port)
4647                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4648                 sdev->hostdata = NULL;
4649                 res->sdev = NULL;
4650                 res->sata_port = NULL;
4651         }
4652         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4653 }
4654
4655 /**
4656  * ipr_slave_configure - Configure a SCSI device
4657  * @sdev:       scsi device struct
4658  *
4659  * This function configures the specified scsi device.
4660  *
4661  * Return value:
4662  *      0 on success
4663  **/
4664 static int ipr_slave_configure(struct scsi_device *sdev)
4665 {
4666         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4667         struct ipr_resource_entry *res;
4668         struct ata_port *ap = NULL;
4669         unsigned long lock_flags = 0;
4670         char buffer[IPR_MAX_RES_PATH_LENGTH];
4671
4672         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4673         res = sdev->hostdata;
4674         if (res) {
4675                 if (ipr_is_af_dasd_device(res))
4676                         sdev->type = TYPE_RAID;
4677                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4678                         sdev->scsi_level = 4;
4679                         sdev->no_uld_attach = 1;
4680                 }
4681                 if (ipr_is_vset_device(res)) {
4682                         blk_queue_rq_timeout(sdev->request_queue,
4683                                              IPR_VSET_RW_TIMEOUT);
4684                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4685                 }
4686                 if (ipr_is_gata(res) && res->sata_port)
4687                         ap = res->sata_port->ap;
4688                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4689
4690                 if (ap) {
4691                         scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4692                         ata_sas_slave_configure(sdev, ap);
4693                 } else
4694                         scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4695                 if (ioa_cfg->sis64)
4696                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4697                                     ipr_format_res_path(ioa_cfg, res->res_path,
4698                                                         buffer, sizeof(buffer)));
4699                 return 0;
4700         }
4701         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4702         return 0;
4703 }
4704
4705 /**
4706  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4707  * @sdev:       scsi device struct
4708  *
4709  * This function initializes an ATA port so that future commands
4710  * sent through queuecommand will work.
4711  *
4712  * Return value:
4713  *      0 on success
4714  **/
4715 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4716 {
4717         struct ipr_sata_port *sata_port = NULL;
4718         int rc = -ENXIO;
4719
4720         ENTER;
4721         if (sdev->sdev_target)
4722                 sata_port = sdev->sdev_target->hostdata;
4723         if (sata_port) {
4724                 rc = ata_sas_port_init(sata_port->ap);
4725                 if (rc == 0)
4726                         rc = ata_sas_sync_probe(sata_port->ap);
4727         }
4728
4729         if (rc)
4730                 ipr_slave_destroy(sdev);
4731
4732         LEAVE;
4733         return rc;
4734 }
4735
4736 /**
4737  * ipr_slave_alloc - Prepare for commands to a device.
4738  * @sdev:       scsi device struct
4739  *
4740  * This function saves a pointer to the resource entry
4741  * in the scsi device struct if the device exists. We
4742  * can then use this pointer in ipr_queuecommand when
4743  * handling new commands.
4744  *
4745  * Return value:
4746  *      0 on success / -ENXIO if device does not exist
4747  **/
4748 static int ipr_slave_alloc(struct scsi_device *sdev)
4749 {
4750         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4751         struct ipr_resource_entry *res;
4752         unsigned long lock_flags;
4753         int rc = -ENXIO;
4754
4755         sdev->hostdata = NULL;
4756
4757         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4758
4759         res = ipr_find_sdev(sdev);
4760         if (res) {
4761                 res->sdev = sdev;
4762                 res->add_to_ml = 0;
4763                 res->in_erp = 0;
4764                 sdev->hostdata = res;
4765                 if (!ipr_is_naca_model(res))
4766                         res->needs_sync_complete = 1;
4767                 rc = 0;
4768                 if (ipr_is_gata(res)) {
4769                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4770                         return ipr_ata_slave_alloc(sdev);
4771                 }
4772         }
4773
4774         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4775
4776         return rc;
4777 }
4778
4779 /**
4780  * ipr_match_lun - Match function for specified LUN
4781  * @ipr_cmd:    ipr command struct
4782  * @device:             device to match (sdev)
4783  *
4784  * Returns:
4785  *      1 if command matches sdev / 0 if command does not match sdev
4786  **/
4787 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4788 {
4789         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4790                 return 1;
4791         return 0;
4792 }
4793
4794 /**
4795  * ipr_wait_for_ops - Wait for matching commands to complete
4796  * @ioa_cfg:    ioa config struct
4797  * @device:             device to match (sdev)
4798  * @match:              match function to use
4799  *
4800  * Returns:
4801  *      SUCCESS / FAILED
4802  **/
4803 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4804                             int (*match)(struct ipr_cmnd *, void *))
4805 {
4806         struct ipr_cmnd *ipr_cmd;
4807         int wait;
4808         unsigned long flags;
4809         struct ipr_hrr_queue *hrrq;
4810         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4811         DECLARE_COMPLETION_ONSTACK(comp);
4812
4813         ENTER;
4814         do {
4815                 wait = 0;
4816
4817                 for_each_hrrq(hrrq, ioa_cfg) {
4818                         spin_lock_irqsave(hrrq->lock, flags);
4819                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4820                                 if (match(ipr_cmd, device)) {
4821                                         ipr_cmd->eh_comp = &comp;
4822                                         wait++;
4823                                 }
4824                         }
4825                         spin_unlock_irqrestore(hrrq->lock, flags);
4826                 }
4827
4828                 if (wait) {
4829                         timeout = wait_for_completion_timeout(&comp, timeout);
4830
4831                         if (!timeout) {
4832                                 wait = 0;
4833
4834                                 for_each_hrrq(hrrq, ioa_cfg) {
4835                                         spin_lock_irqsave(hrrq->lock, flags);
4836                                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4837                                                 if (match(ipr_cmd, device)) {
4838                                                         ipr_cmd->eh_comp = NULL;
4839                                                         wait++;
4840                                                 }
4841                                         }
4842                                         spin_unlock_irqrestore(hrrq->lock, flags);
4843                                 }
4844
4845                                 if (wait)
4846                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4847                                 LEAVE;
4848                                 return wait ? FAILED : SUCCESS;
4849                         }
4850                 }
4851         } while (wait);
4852
4853         LEAVE;
4854         return SUCCESS;
4855 }
4856
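/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/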
4857 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4858 {
4859         struct ipr_ioa_cfg *ioa_cfg;
4860         unsigned long lock_flags = 0;
4861         int rc = SUCCESS;
4862
4863         ENTER;
4864         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4865         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4866
4867         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4868                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4869                 dev_err(&ioa_cfg->pdev->dev,
4870                         "Adapter being reset as a result of error recovery.\n");
4871
4872                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4873                         ioa_cfg->sdt_state = GET_DUMP;
4874         }
4875
4876         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4877         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4878         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4879
4880         /* If we got hit with a host reset while we were already resetting
4881          * the adapter for some reason and that reset failed, fail this one too. */
4882         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4883                 ipr_trace;
4884                 rc = FAILED;
4885         }
4886
4887         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4888         LEAVE;
4889         return rc;
4890 }
4891
4892 /**
4893  * ipr_device_reset - Reset the device
4894  * @ioa_cfg:    ioa config struct
4895  * @res:                resource entry struct
4896  *
4897  * This function issues a device reset to the affected device.
4898  * If the device is a SCSI device, a LUN reset will be sent
4899  * to the device first. If that does not work, a target reset
4900  * will be sent. If the device is a SATA device, a PHY reset will
4901  * be sent.
4902  *
4903  * Return value:
4904  *      0 on success / non-zero on failure
4905  **/
4906 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4907                             struct ipr_resource_entry *res)
4908 {
4909         struct ipr_cmnd *ipr_cmd;
4910         struct ipr_ioarcb *ioarcb;
4911         struct ipr_cmd_pkt *cmd_pkt;
4912         struct ipr_ioarcb_ata_regs *regs;
4913         u32 ioasc;
4914
4915         ENTER;
4916         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4917         ioarcb = &ipr_cmd->ioarcb;
4918         cmd_pkt = &ioarcb->cmd_pkt;
4919
4920         if (ipr_cmd->ioa_cfg->sis64) {
4921                 regs = &ipr_cmd->i.ata_ioadl.regs;
4922                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4923         } else
4924                 regs = &ioarcb->u.add_data.u.regs;
4925
4926         ioarcb->res_handle = res->res_handle;
4927         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4928         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4929         if (ipr_is_gata(res)) {
4930                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4931                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4932                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4933         }
4934
4935         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4936         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4937         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4938         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4939                 if (ipr_cmd->ioa_cfg->sis64)
4940                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4941                                sizeof(struct ipr_ioasa_gata));
4942                 else
4943                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4944                                sizeof(struct ipr_ioasa_gata));
4945         }
4946
4947         LEAVE;
4948         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4949 }
4950
4951 /**
4952  * ipr_sata_reset - Reset the SATA port
4953  * @link:       SATA link to reset
4954  * @classes:    class of the attached device
4955  *
4956  * This function issues a SATA phy reset to the affected ATA link.
4957  *
4958  * Return value:
4959  *      0 on success / non-zero on failure
4960  **/
4961 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4962                                 unsigned long deadline)
4963 {
4964         struct ipr_sata_port *sata_port = link->ap->private_data;
4965         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4966         struct ipr_resource_entry *res;
4967         unsigned long lock_flags = 0;
4968         int rc = -ENXIO;
4969
4970         ENTER;
4971         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4972         while (ioa_cfg->in_reset_reload) {
4973                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4974                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4975                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4976         }
4977
4978         res = sata_port->res;
4979         if (res) {
4980                 rc = ipr_device_reset(ioa_cfg, res);
4981                 *classes = res->ata_class;
4982         }
4983
4984         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4985         LEAVE;
4986         return rc;
4987 }
4988
4989 /**
4990  * ipr_eh_dev_reset - Reset the device
4991  * @scsi_cmd:   scsi command struct
4992  *
4993  * This function issues a device reset to the affected device.
4994  * A LUN reset will be sent to the device first. If that does
4995  * not work, a target reset will be sent.
4996  *
4997  * Return value:
4998  *      SUCCESS / FAILED
4999  **/
5000 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5001 {
5002         struct ipr_cmnd *ipr_cmd;
5003         struct ipr_ioa_cfg *ioa_cfg;
5004         struct ipr_resource_entry *res;
5005         struct ata_port *ap;
5006         int rc = 0;
5007         struct ipr_hrr_queue *hrrq;
5008
5009         ENTER;
5010         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5011         res = scsi_cmd->device->hostdata;
5012
5013         if (!res)
5014                 return FAILED;
5015
5016         /*
5017          * If we are currently going through reset/reload, return failed. This will force the
5018          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5019          * reset to complete.
5020          */
5021         if (ioa_cfg->in_reset_reload)
5022                 return FAILED;
5023         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5024                 return FAILED;
5025
5026         for_each_hrrq(hrrq, ioa_cfg) {
5027                 spin_lock(&hrrq->_lock);
5028                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5029                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5030                                 if (ipr_cmd->scsi_cmd)
5031                                         ipr_cmd->done = ipr_scsi_eh_done;
5032                                 if (ipr_cmd->qc)
5033                                         ipr_cmd->done = ipr_sata_eh_done;
5034                                 if (ipr_cmd->qc &&
5035                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5036                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5037                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5038                                 }
5039                         }
5040                 }
5041                 spin_unlock(&hrrq->_lock);
5042         }
5043         res->resetting_device = 1;
5044         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5045
5046         if (ipr_is_gata(res) && res->sata_port) {
5047                 ap = res->sata_port->ap;
5048                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5049                 ata_std_error_handler(ap);
5050                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5051
5052                 for_each_hrrq(hrrq, ioa_cfg) {
5053                         spin_lock(&hrrq->_lock);
5054                         list_for_each_entry(ipr_cmd,
5055                                             &hrrq->hrrq_pending_q, queue) {
5056                                 if (ipr_cmd->ioarcb.res_handle ==
5057                                     res->res_handle) {
5058                                         rc = -EIO;
5059                                         break;
5060                                 }
5061                         }
5062                         spin_unlock(&hrrq->_lock);
5063                 }
5064         } else
5065                 rc = ipr_device_reset(ioa_cfg, res);
5066         res->resetting_device = 0;
5067
5068         LEAVE;
5069         return rc ? FAILED : SUCCESS;
5070 }
5071
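/**
 * ipr_eh_dev_reset - Reset the device, then wait for its outstanding ops
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/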
5072 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5073 {
5074         int rc;
5075         struct ipr_ioa_cfg *ioa_cfg;
5076
5077         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5078
5079         spin_lock_irq(cmd->device->host->host_lock);
5080         rc = __ipr_eh_dev_reset(cmd);
5081         spin_unlock_irq(cmd->device->host->host_lock);
5082
5083         if (rc == SUCCESS)
5084                 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5085
5086         return rc;
5087 }
5088
5089 /**
5090  * ipr_bus_reset_done - Op done function for bus reset.
5091  * @ipr_cmd:    ipr command struct
5092  *
5093  * This function is the op done function for a bus reset
5094  *
5095  * Return value:
5096  *      none
5097  **/
5098 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5099 {
5100         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5101         struct ipr_resource_entry *res;
5102
5103         ENTER;
5104         if (!ioa_cfg->sis64)
5105                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5106                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5107                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5108                                 break;
5109                         }
5110                 }
5111
5112         /*
5113          * If abort has not completed, indicate the reset has, else call the
5114          * abort's done function to wake the sleeping eh thread
5115          */
5116         if (ipr_cmd->sibling->sibling)
5117                 ipr_cmd->sibling->sibling = NULL;
5118         else
5119                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5120
5121         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5122         LEAVE;
5123 }
5124
5125 /**
5126  * ipr_abort_timeout - An abort task has timed out
5127  * @ipr_cmd:    ipr command struct
5128  *
5129  * This function handles when an abort task times out. If this
5130  * happens we issue a bus reset since we have resources tied
5131  * up that must be freed before returning to the midlayer.
5132  *
5133  * Return value:
5134  *      none
5135  **/
5136 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5137 {
5138         struct ipr_cmnd *reset_cmd;
5139         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5140         struct ipr_cmd_pkt *cmd_pkt;
5141         unsigned long lock_flags = 0;
5142
5143         ENTER;
5144         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5145         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5146                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5147                 return;
5148         }
5149
5150         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5151         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5152         ipr_cmd->sibling = reset_cmd;
5153         reset_cmd->sibling = ipr_cmd;
5154         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5155         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5156         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5157         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5158         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5159
5160         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5161         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5162         LEAVE;
5163 }
5164
5165 /**
5166  * ipr_cancel_op - Cancel specified op
5167  * @scsi_cmd:   scsi command struct
5168  *
5169  * This function cancels specified op.
5170  *
5171  * Return value:
5172  *      SUCCESS / FAILED
5173  **/
5174 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5175 {
5176         struct ipr_cmnd *ipr_cmd;
5177         struct ipr_ioa_cfg *ioa_cfg;
5178         struct ipr_resource_entry *res;
5179         struct ipr_cmd_pkt *cmd_pkt;
5180         u32 ioasc, int_reg;
5181         int op_found = 0;
5182         struct ipr_hrr_queue *hrrq;
5183
5184         ENTER;
5185         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5186         res = scsi_cmd->device->hostdata;
5187
5188         /* If we are currently going through reset/reload, return failed.
5189          * This will force the mid-layer to call ipr_eh_host_reset,
5190          * which will then go to sleep and wait for the reset to complete.
5191          */
5192         if (ioa_cfg->in_reset_reload ||
5193             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5194                 return FAILED;
5195         if (!res)
5196                 return FAILED;
5197
5198         /*
5199          * If we are aborting a timed out op, chances are that the timeout was caused
5200          * by a still not detected EEH error. In such cases, reading a register will
5201          * trigger the EEH recovery infrastructure.
5202          */
5203         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5204
5205         if (!ipr_is_gscsi(res))
5206                 return FAILED;
5207
5208         for_each_hrrq(hrrq, ioa_cfg) {
5209                 spin_lock(&hrrq->_lock);
5210                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5211                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5212                                 ipr_cmd->done = ipr_scsi_eh_done;
5213                                 op_found = 1;
5214                                 break;
5215                         }
5216                 }
5217                 spin_unlock(&hrrq->_lock);
5218         }
5219
5220         if (!op_found)
5221                 return SUCCESS;
5222
5223         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5224         ipr_cmd->ioarcb.res_handle = res->res_handle;
5225         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5226         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5227         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5228         ipr_cmd->u.sdev = scsi_cmd->device;
5229
5230         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5231                     scsi_cmd->cmnd[0]);
5232         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5233         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5234
5235         /*
5236          * If the abort task timed out and we sent a bus reset, we will get
5237          * one the following responses to the abort
5238          * one of the following responses to the abort.
5239         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5240                 ioasc = 0;
5241                 ipr_trace;
5242         }
5243
5244         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5245         if (!ipr_is_naca_model(res))
5246                 res->needs_sync_complete = 1;
5247
5248         LEAVE;
5249         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5250 }
5251
5252 /**
5253  * ipr_eh_abort - Abort a single op
5254  * @scsi_cmd:   scsi command struct
5255  *
5256  * Return value:
5257  *      SUCCESS / FAILED
5258  **/
5259 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5260 {
5261         unsigned long flags;
5262         int rc;
5263         struct ipr_ioa_cfg *ioa_cfg;
5264
5265         ENTER;
5266
5267         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5268
5269         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5270         rc = ipr_cancel_op(scsi_cmd);
5271         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5272
5273         if (rc == SUCCESS)
5274                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5275         LEAVE;
5276         return rc;
5277 }
5278
5279 /**
5280  * ipr_handle_other_interrupt - Handle "other" interrupts
5281  * @ioa_cfg:    ioa config struct
5282  * @int_reg:    interrupt register
5283  *
5284  * Return value:
5285  *      IRQ_NONE / IRQ_HANDLED
5286  **/
5287 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5288                                               u32 int_reg)
5289 {
5290         irqreturn_t rc = IRQ_HANDLED;
5291         u32 int_mask_reg;
5292
5293         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5294         int_reg &= ~int_mask_reg;
5295
5296         /* If an interrupt on the adapter did not occur, ignore it.
5297          * Or in the case of SIS 64, check for a stage change interrupt.
5298          */
5299         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5300                 if (ioa_cfg->sis64) {
5301                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5302                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5303                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5304
5305                                 /* clear stage change */
5306                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5307                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5308                                 list_del(&ioa_cfg->reset_cmd->queue);
5309                                 del_timer(&ioa_cfg->reset_cmd->timer);
5310                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5311                                 return IRQ_HANDLED;
5312                         }
5313                 }
5314
5315                 return IRQ_NONE;
5316         }
5317
5318         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5319                 /* Mask the interrupt */
5320                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5321
5322                 /* Clear the interrupt */
5323                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5324                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5325
5326                 list_del(&ioa_cfg->reset_cmd->queue);
5327                 del_timer(&ioa_cfg->reset_cmd->timer);
5328                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5329         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5330                 if (ioa_cfg->clear_isr) {
5331                         if (ipr_debug && printk_ratelimit())
5332                                 dev_err(&ioa_cfg->pdev->dev,
5333                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5334                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5335                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5336                         return IRQ_NONE;
5337                 }
5338         } else {
5339                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5340                         ioa_cfg->ioa_unit_checked = 1;
5341                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5342                         dev_err(&ioa_cfg->pdev->dev,
5343                                 "No Host RRQ. 0x%08X\n", int_reg);
5344                 else
5345                         dev_err(&ioa_cfg->pdev->dev,
5346                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5347
5348                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5349                         ioa_cfg->sdt_state = GET_DUMP;
5350
5351                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5352                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5353         }
5354
5355         return rc;
5356 }
5357
5358 /**
5359  * ipr_isr_eh - Interrupt service routine error handler
5360  * @ioa_cfg:    ioa config struct
5361  * @msg:        message to log
 * @number:     numeric detail logged with the message
5362  *
5363  * Return value:
5364  *      none
5365  **/
5366 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5367 {
5368         ioa_cfg->errors_logged++;
5369         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5370
5371         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5372                 ioa_cfg->sdt_state = GET_DUMP;
5373
5374         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5375 }
5376
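/**
 * ipr_process_hrrq - Pull completed responses off a host RRQ
 * @hrr_queue:  hrr queue to drain
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list on which completed commands are collected
 *
 * Return value:
 *      number of responses processed
 **/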
5377 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5378                                                 struct list_head *doneq)
5379 {
5380         u32 ioasc;
5381         u16 cmd_index;
5382         struct ipr_cmnd *ipr_cmd;
5383         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5384         int num_hrrq = 0;
5385
5386         /* If interrupts are disabled, ignore the interrupt */
5387         if (!hrr_queue->allow_interrupts)
5388                 return 0;
5389
5390         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5391                hrr_queue->toggle_bit) {
5392
5393                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5394                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5395                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5396
5397                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5398                              cmd_index < hrr_queue->min_cmd_id)) {
5399                         ipr_isr_eh(ioa_cfg,
5400                                 "Invalid response handle from IOA:",
5401                                 cmd_index);
5402                         break;
5403                 }
5404
5405                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5406                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5407
5408                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5409
5410                 list_move_tail(&ipr_cmd->queue, doneq);
5411
5412                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5413                         hrr_queue->hrrq_curr++;
5414                 } else {
5415                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5416                         hrr_queue->toggle_bit ^= 1u;
5417                 }
5418                 num_hrrq++;
5419                 if (budget > 0 && num_hrrq >= budget)
5420                         break;
5421         }
5422
5423         return num_hrrq;
5424 }
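/*
 * Sketch of the handshake implemented above: each 32-bit response entry
 * packs a toggle bit and a response handle. An entry is "new" while its
 * toggle bit matches the host's copy, which is flipped each time
 * hrrq_curr wraps from hrrq_end back to hrrq_start, so entries left over
 * from the previous lap no longer match:
 *
 *	u32 entry = be32_to_cpu(*hrr_queue->hrrq_curr);
 *	u16 index = (entry & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
 *		    IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;    (index into ipr_cmnd_list)
 */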
5425
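/**
 * ipr_iopoll - blk_iopoll handler for processing responses off the irq path
 * @iop:        blk_iopoll struct embedded in the hrr queue
 * @budget:     maximum number of responses to process per poll
 *
 * Completing fewer than @budget operations signals the queue is drained,
 * so blk_iopoll_complete() re-enables interrupt-driven completion until
 * ipr_isr_mhrrq() schedules the poller again.
 *
 * Return value:
 *      number of operations completed
 **/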
5426 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5427 {
5428         struct ipr_ioa_cfg *ioa_cfg;
5429         struct ipr_hrr_queue *hrrq;
5430         struct ipr_cmnd *ipr_cmd, *temp;
5431         unsigned long hrrq_flags;
5432         int completed_ops;
5433         LIST_HEAD(doneq);
5434
5435         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5436         ioa_cfg = hrrq->ioa_cfg;
5437
5438         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5439         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5440
5441         if (completed_ops < budget)
5442                 blk_iopoll_complete(iop);
5443         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5444
5445         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5446                 list_del(&ipr_cmd->queue);
5447                 del_timer(&ipr_cmd->timer);
5448                 ipr_cmd->fast_done(ipr_cmd);
5449         }
5450
5451         return completed_ops;
5452 }
5453
5454 /**
5455  * ipr_isr - Interrupt service routine
5456  * @irq:        irq number
5457  * @devp:       pointer to the hrr queue
5458  *
5459  * Return value:
5460  *      IRQ_NONE / IRQ_HANDLED
5461  **/
5462 static irqreturn_t ipr_isr(int irq, void *devp)
5463 {
5464         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5465         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5466         unsigned long hrrq_flags = 0;
5467         u32 int_reg = 0;
5468         int num_hrrq = 0;
5469         int irq_none = 0;
5470         struct ipr_cmnd *ipr_cmd, *temp;
5471         irqreturn_t rc = IRQ_NONE;
5472         LIST_HEAD(doneq);
5473
5474         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5475         /* If interrupts are disabled, ignore the interrupt */
5476         if (!hrrq->allow_interrupts) {
5477                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5478                 return IRQ_NONE;
5479         }
5480
5481         while (1) {
5482                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5483                         rc = IRQ_HANDLED;
5484
5485                         if (!ioa_cfg->clear_isr)
5486                                 break;
5487
5488                         /* Clear the PCI interrupt */
5489                         num_hrrq = 0;
5490                         do {
5491                                 writel(IPR_PCII_HRRQ_UPDATED,
5492                                      ioa_cfg->regs.clr_interrupt_reg32);
5493                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5494                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5495                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5496
5497                 } else if (rc == IRQ_NONE && irq_none == 0) {
5498                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5499                         irq_none++;
5500                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5501                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5502                         ipr_isr_eh(ioa_cfg,
5503                                 "Error clearing HRRQ:", num_hrrq);
5504                         rc = IRQ_HANDLED;
5505                         break;
5506                 } else
5507                         break;
5508         }
5509
5510         if (unlikely(rc == IRQ_NONE))
5511                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5512
5513         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5514         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5515                 list_del(&ipr_cmd->queue);
5516                 del_timer(&ipr_cmd->timer);
5517                 ipr_cmd->fast_done(ipr_cmd);
5518         }
5519         return rc;
5520 }
5521
5522 /**
5523  * ipr_isr_mhrrq - Interrupt service routine
5524  * @irq:        irq number
5525  * @devp:       pointer to the hrr queue
5526  *
5527  * Return value:
5528  *      IRQ_NONE / IRQ_HANDLED
5529  **/
5530 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5531 {
5532         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5533         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5534         unsigned long hrrq_flags = 0;
5535         struct ipr_cmnd *ipr_cmd, *temp;
5536         irqreturn_t rc = IRQ_NONE;
5537         LIST_HEAD(doneq);
5538
5539         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5540
5541         /* If interrupts are disabled, ignore the interrupt */
5542         if (!hrrq->allow_interrupts) {
5543                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5544                 return IRQ_NONE;
5545         }
5546
5547         if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5548                         ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5549                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5550                        hrrq->toggle_bit) {
5551                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5552                                 blk_iopoll_sched(&hrrq->iopoll);
5553                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5554                         return IRQ_HANDLED;
5555                 }
5556         } else {
5557                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5558                         hrrq->toggle_bit)
5560                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5561                                 rc = IRQ_HANDLED;
5562         }
5563
5564         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5565
5566         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5567                 list_del(&ipr_cmd->queue);
5568                 del_timer(&ipr_cmd->timer);
5569                 ipr_cmd->fast_done(ipr_cmd);
5570         }
5571         return rc;
5572 }
5573
5574 /**
5575  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5576  * @ioa_cfg:    ioa config struct
5577  * @ipr_cmd:    ipr command struct
5578  *
5579  * Return value:
5580  *      0 on success / -1 on failure
5581  **/
5582 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5583                              struct ipr_cmnd *ipr_cmd)
5584 {
5585         int i, nseg;
5586         struct scatterlist *sg;
5587         u32 length;
5588         u32 ioadl_flags = 0;
5589         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5590         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5591         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5592
5593         length = scsi_bufflen(scsi_cmd);
5594         if (!length)
5595                 return 0;
5596
5597         nseg = scsi_dma_map(scsi_cmd);
5598         if (nseg < 0) {
5599                 if (printk_ratelimit())
5600                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5601                 return -1;
5602         }
5603
5604         ipr_cmd->dma_use_sg = nseg;
5605
5606         ioarcb->data_transfer_length = cpu_to_be32(length);
5607         ioarcb->ioadl_len =
5608                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5609
5610         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5611                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5612                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5613         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5614                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5615
5616         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5617                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5618                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5619                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5620         }
5621
5622         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5623         return 0;
5624 }
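/*
 * Illustrative example with hypothetical values (seg0_dma and seg1_dma
 * stand in for real DMA addresses): after the loop and the final
 * LAST-flag fixup above, a two-segment DMA_FROM_DEVICE transfer leaves
 * the descriptor list as:
 *
 *	ioadl64[0].flags    = cpu_to_be32(IPR_IOADL_FLAGS_READ);
 *	ioadl64[0].data_len = cpu_to_be32(4096);
 *	ioadl64[0].address  = cpu_to_be64(seg0_dma);
 *	ioadl64[1].flags    = cpu_to_be32(IPR_IOADL_FLAGS_READ |
 *					  IPR_IOADL_FLAGS_LAST);
 *	ioadl64[1].data_len = cpu_to_be32(4096);
 *	ioadl64[1].address  = cpu_to_be64(seg1_dma);
 */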
5625
5626 /**
5627  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5628  * @ioa_cfg:    ioa config struct
5629  * @ipr_cmd:    ipr command struct
5630  *
5631  * Return value:
5632  *      0 on success / -1 on failure
5633  **/
5634 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5635                            struct ipr_cmnd *ipr_cmd)
5636 {
5637         int i, nseg;
5638         struct scatterlist *sg;
5639         u32 length;
5640         u32 ioadl_flags = 0;
5641         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5642         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5643         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5644
5645         length = scsi_bufflen(scsi_cmd);
5646         if (!length)
5647                 return 0;
5648
5649         nseg = scsi_dma_map(scsi_cmd);
5650         if (nseg < 0) {
5651                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5652                 return -1;
5653         }
5654
5655         ipr_cmd->dma_use_sg = nseg;
5656
5657         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5658                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5659                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5660                 ioarcb->data_transfer_length = cpu_to_be32(length);
5661                 ioarcb->ioadl_len =
5662                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5663         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5664                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5665                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5666                 ioarcb->read_ioadl_len =
5667                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5668         }
5669
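        /*
         * Small S/G lists fit in the spare add_data space inside the
         * IOARCB itself; pointing the read/write IOADL address back
         * into the IOARCB saves the adapter a separate DMA fetch of
         * the descriptor list.
         */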
5670         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5671                 ioadl = ioarcb->u.add_data.u.ioadl;
5672                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5673                                     offsetof(struct ipr_ioarcb, u.add_data));
5674                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5675         }
5676
5677         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5678                 ioadl[i].flags_and_data_len =
5679                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5680                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5681         }
5682
5683         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5684         return 0;
5685 }
5686
5687 /**
5688  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5689  * @scsi_cmd:   scsi command struct
5690  *
5691  * Return value:
5692  *      task attributes
5693  **/
5694 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5695 {
5696         u8 tag[2];
5697         u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5698
5699         if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5700                 switch (tag[0]) {
5701                 case MSG_SIMPLE_TAG:
5702                         rc = IPR_FLAGS_LO_SIMPLE_TASK;
5703                         break;
5704                 case MSG_HEAD_TAG:
5705                         rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5706                         break;
5707                 case MSG_ORDERED_TAG:
5708                         rc = IPR_FLAGS_LO_ORDERED_TASK;
5709                         break;
5710                 }
5711         }
5712
5713         return rc;
5714 }
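/*
 * Usage, taken verbatim from ipr_queuecommand() below: the returned
 * task attribute is OR'd into the low flags of the command packet:
 *
 *	ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
 */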
5715
5716 /**
5717  * ipr_erp_done - Process completion of ERP for a device
5718  * @ipr_cmd:            ipr command struct
5719  *
5720  * This function copies the sense buffer into the scsi_cmd
5721  * struct and calls the scsi_done function.
5722  *
5723  * Return value:
5724  *      nothing
5725  **/
5726 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5727 {
5728         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5729         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5730         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5731
5732         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5733                 scsi_cmd->result |= (DID_ERROR << 16);
5734                 scmd_printk(KERN_ERR, scsi_cmd,
5735                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5736         } else {
5737                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5738                        SCSI_SENSE_BUFFERSIZE);
5739         }
5740
5741         if (res) {
5742                 if (!ipr_is_naca_model(res))
5743                         res->needs_sync_complete = 1;
5744                 res->in_erp = 0;
5745         }
5746         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5747         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5748         scsi_cmd->scsi_done(scsi_cmd);
5749 }
5750
5751 /**
5752  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5753  * @ipr_cmd:    ipr command struct
5754  *
5755  * Return value:
5756  *      none
5757  **/
5758 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5759 {
5760         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5761         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5762         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5763
5764         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5765         ioarcb->data_transfer_length = 0;
5766         ioarcb->read_data_transfer_length = 0;
5767         ioarcb->ioadl_len = 0;
5768         ioarcb->read_ioadl_len = 0;
5769         ioasa->hdr.ioasc = 0;
5770         ioasa->hdr.residual_data_len = 0;
5771
5772         if (ipr_cmd->ioa_cfg->sis64)
5773                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5774                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5775         else {
5776                 ioarcb->write_ioadl_addr =
5777                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5778                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5779         }
5780 }
5781
5782 /**
5783  * ipr_erp_request_sense - Send request sense to a device
5784  * @ipr_cmd:    ipr command struct
5785  *
5786  * This function sends a request sense to a device as a result
5787  * of a check condition.
5788  *
5789  * Return value:
5790  *      nothing
5791  **/
5792 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5793 {
5794         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5795         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5796
5797         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5798                 ipr_erp_done(ipr_cmd);
5799                 return;
5800         }
5801
5802         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5803
5804         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5805         cmd_pkt->cdb[0] = REQUEST_SENSE;
5806         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5807         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5808         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5809         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5810
5811         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5812                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5813
5814         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5815                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5816 }
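/*
 * For reference, the CDB assembled above is a plain 6-byte REQUEST
 * SENSE (byte layout per SPC):
 *
 *	cdb[0] = 0x03				REQUEST_SENSE opcode
 *	cdb[1] .. cdb[3] = 0
 *	cdb[4] = SCSI_SENSE_BUFFERSIZE		allocation length
 *	cdb[5] = 0				control byte
 */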
5817
5818 /**
5819  * ipr_erp_cancel_all - Send cancel all to a device
5820  * @ipr_cmd:    ipr command struct
5821  *
5822  * This function sends a cancel all to a device to clear the
5823  * queue. If we are running TCQ on the device, QERR is set to 1,
5824  * which means all outstanding ops have been dropped on the floor.
5825  * Cancel all will return them to us.
5826  *
5827  * Return value:
5828  *      nothing
5829  **/
5830 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5831 {
5832         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5833         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5834         struct ipr_cmd_pkt *cmd_pkt;
5835
5836         res->in_erp = 1;
5837
5838         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5839
5840         if (!scsi_get_tag_type(scsi_cmd->device)) {
5841                 ipr_erp_request_sense(ipr_cmd);
5842                 return;
5843         }
5844
5845         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5846         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5847         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5848
5849         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5850                    IPR_CANCEL_ALL_TIMEOUT);
5851 }
5852
5853 /**
5854  * ipr_dump_ioasa - Dump contents of IOASA
5855  * @ioa_cfg:    ioa config struct
5856  * @ipr_cmd:    ipr command struct
5857  * @res:                resource entry struct
5858  *
5859  * This function is invoked by the interrupt handler when ops
5860  * fail. It will log the IOASA if appropriate. Only called
5861  * for GPDD ops.
5862  *
5863  * Return value:
5864  *      none
5865  **/
5866 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5867                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5868 {
5869         int i;
5870         u16 data_len;
5871         u32 ioasc, fd_ioasc;
5872         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5873         __be32 *ioasa_data = (__be32 *)ioasa;
5874         int error_index;
5875
5876         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5877         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5878
5879         if (0 == ioasc)
5880                 return;
5881
5882         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5883                 return;
5884
5885         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5886                 error_index = ipr_get_error(fd_ioasc);
5887         else
5888                 error_index = ipr_get_error(ioasc);
5889
5890         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5891                 /* Don't log an error if the IOA already logged one */
5892                 if (ioasa->hdr.ilid != 0)
5893                         return;
5894
5895                 if (!ipr_is_gscsi(res))
5896                         return;
5897
5898                 if (ipr_error_table[error_index].log_ioasa == 0)
5899                         return;
5900         }
5901
5902         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5903
5904         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5905         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5906                 data_len = sizeof(struct ipr_ioasa64);
5907         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5908                 data_len = sizeof(struct ipr_ioasa);
5909
5910         ipr_err("IOASA Dump:\n");
5911
5912         for (i = 0; i < data_len / 4; i += 4) {
5913                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5914                         be32_to_cpu(ioasa_data[i]),
5915                         be32_to_cpu(ioasa_data[i+1]),
5916                         be32_to_cpu(ioasa_data[i+2]),
5917                         be32_to_cpu(ioasa_data[i+3]));
5918         }
5919 }
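/*
 * The loop above prints the IOASA sixteen bytes per line; the
 * resulting output looks like this (contents hypothetical):
 *
 *	IOASA Dump:
 *	00000000: 04448500 00000020 00000000 00000000
 *	00000010: 00000000 00000000 00000000 00000000
 */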
5920
5921 /**
5922  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5923  * @ipr_cmd:    ipr command struct containing the IOASA to translate
5924  *              and the scsi_cmd whose sense buffer is filled in
5925  *
5926  * Return value:
5927  *      none
5928  **/
5929 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5930 {
5931         u32 failing_lba;
5932         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5933         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5934         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5935         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5936
5937         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5938
5939         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5940                 return;
5941
5942         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5943
5944         if (ipr_is_vset_device(res) &&
5945             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5946             ioasa->u.vset.failing_lba_hi != 0) {
5947                 sense_buf[0] = 0x72;
5948                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5949                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5950                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5951
5952                 sense_buf[7] = 12;
5953                 sense_buf[8] = 0;
5954                 sense_buf[9] = 0x0A;
5955                 sense_buf[10] = 0x80;
5956
5957                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5958
5959                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5960                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5961                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5962                 sense_buf[15] = failing_lba & 0x000000ff;
5963
5964                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5965
5966                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5967                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5968                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5969                 sense_buf[19] = failing_lba & 0x000000ff;
5970         } else {
5971                 sense_buf[0] = 0x70;
5972                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5973                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5974                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5975
5976                 /* Illegal request */
5977                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5978                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5979                         sense_buf[7] = 10;      /* additional length */
5980
5981                         /* IOARCB was in error */
5982                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5983                                 sense_buf[15] = 0xC0;
5984                         else    /* Parameter data was invalid */
5985                                 sense_buf[15] = 0x80;
5986
5987                         sense_buf[16] =
5988                             ((IPR_FIELD_POINTER_MASK &
5989                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5990                         sense_buf[17] =
5991                             (IPR_FIELD_POINTER_MASK &
5992                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5993                 } else {
5994                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5995                                 if (ipr_is_vset_device(res))
5996                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5997                                 else
5998                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5999
6000                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6001                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6002                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6003                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6004                                 sense_buf[6] = failing_lba & 0x000000ff;
6005                         }
6006
6007                         sense_buf[7] = 6;       /* additional length */
6008                 }
6009         }
6010 }
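/*
 * Illustrative sketch (not driver code): a consumer can recover the
 * failing LBA from the fixed-format sense built in the else branch
 * above.  The Valid bit in byte 0 indicates that the information
 * field (bytes 3-6) holds the LBA:
 *
 *	if (sense_buf[0] & 0x80) {
 *		u32 lba = ((u32)sense_buf[3] << 24) |
 *			  ((u32)sense_buf[4] << 16) |
 *			  ((u32)sense_buf[5] << 8) |
 *			  (u32)sense_buf[6];
 *	}
 */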
6011
6012 /**
6013  * ipr_get_autosense - Copy autosense data to sense buffer
6014  * @ipr_cmd:    ipr command struct
6015  *
6016  * This function copies the autosense buffer to the buffer
6017  * in the scsi_cmd, if there is autosense available.
6018  *
6019  * Return value:
6020  *      1 if autosense was available / 0 if not
6021  **/
6022 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6023 {
6024         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6025         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6026
6027         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6028                 return 0;
6029
6030         if (ipr_cmd->ioa_cfg->sis64)
6031                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6032                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6033                            SCSI_SENSE_BUFFERSIZE));
6034         else
6035                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6036                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6037                            SCSI_SENSE_BUFFERSIZE));
6038         return 1;
6039 }
6040
6041 /**
6042  * ipr_erp_start - Process an error response for a SCSI op
6043  * @ioa_cfg:    ioa config struct
6044  * @ipr_cmd:    ipr command struct
6045  *
6046  * This function determines whether or not to initiate ERP
6047  * on the affected device.
6048  *
6049  * Return value:
6050  *      nothing
6051  **/
6052 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6053                               struct ipr_cmnd *ipr_cmd)
6054 {
6055         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6056         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6057         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6058         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6059
6060         if (!res) {
6061                 ipr_scsi_eh_done(ipr_cmd);
6062                 return;
6063         }
6064
6065         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6066                 ipr_gen_sense(ipr_cmd);
6067
6068         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6069
6070         switch (masked_ioasc) {
6071         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6072                 if (ipr_is_naca_model(res))
6073                         scsi_cmd->result |= (DID_ABORT << 16);
6074                 else
6075                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6076                 break;
6077         case IPR_IOASC_IR_RESOURCE_HANDLE:
6078         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6079                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6080                 break;
6081         case IPR_IOASC_HW_SEL_TIMEOUT:
6082                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6083                 if (!ipr_is_naca_model(res))
6084                         res->needs_sync_complete = 1;
6085                 break;
6086         case IPR_IOASC_SYNC_REQUIRED:
6087                 if (!res->in_erp)
6088                         res->needs_sync_complete = 1;
6089                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6090                 break;
6091         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6092         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6093                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6094                 break;
6095         case IPR_IOASC_BUS_WAS_RESET:
6096         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6097                 /*
6098                  * Report the bus reset and ask for a retry. The device
6099                  * will return a CC/UA on the next command.
6100                  */
6101                 if (!res->resetting_device)
6102                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6103                 scsi_cmd->result |= (DID_ERROR << 16);
6104                 if (!ipr_is_naca_model(res))
6105                         res->needs_sync_complete = 1;
6106                 break;
6107         case IPR_IOASC_HW_DEV_BUS_STATUS:
6108                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6109                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6110                         if (!ipr_get_autosense(ipr_cmd)) {
6111                                 if (!ipr_is_naca_model(res)) {
6112                                         ipr_erp_cancel_all(ipr_cmd);
6113                                         return;
6114                                 }
6115                         }
6116                 }
6117                 if (!ipr_is_naca_model(res))
6118                         res->needs_sync_complete = 1;
6119                 break;
6120         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6121                 break;
6122         default:
6123                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6124                         scsi_cmd->result |= (DID_ERROR << 16);
6125                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6126                         res->needs_sync_complete = 1;
6127                 break;
6128         }
6129
6130         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6131         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6132         scsi_cmd->scsi_done(scsi_cmd);
6133 }
6134
6135 /**
6136  * ipr_scsi_done - mid-layer done function
6137  * @ipr_cmd:    ipr command struct
6138  *
6139  * This function is invoked by the interrupt handler for
6140  * ops generated by the SCSI mid-layer
6141  *
6142  * Return value:
6143  *      none
6144  **/
6145 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6146 {
6147         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6148         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6149         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6150         unsigned long lock_flags;
6151
6152         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6153
6154         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6155                 scsi_dma_unmap(scsi_cmd);
6156
6157                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6158                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6159                 scsi_cmd->scsi_done(scsi_cmd);
6160                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6161         } else {
6162                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6163                 spin_lock(&ipr_cmd->hrrq->_lock);
6164                 ipr_erp_start(ioa_cfg, ipr_cmd);
6165                 spin_unlock(&ipr_cmd->hrrq->_lock);
6166                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6167         }
6168 }
6169
6170 /**
6171  * ipr_queuecommand - Queue a mid-layer request
6172  * @shost:              scsi host struct
6173  * @scsi_cmd:   scsi command struct
6174  *
6175  * This function queues a request generated by the mid-layer.
6176  *
6177  * Return value:
6178  *      0 on success
6179  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6180  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6181  **/
6182 static int ipr_queuecommand(struct Scsi_Host *shost,
6183                             struct scsi_cmnd *scsi_cmd)
6184 {
6185         struct ipr_ioa_cfg *ioa_cfg;
6186         struct ipr_resource_entry *res;
6187         struct ipr_ioarcb *ioarcb;
6188         struct ipr_cmnd *ipr_cmd;
6189         unsigned long hrrq_flags, lock_flags;
6190         int rc;
6191         struct ipr_hrr_queue *hrrq;
6192         int hrrq_id;
6193
6194         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6195
6196         scsi_cmd->result = (DID_OK << 16);
6197         res = scsi_cmd->device->hostdata;
6198
6199         if (ipr_is_gata(res) && res->sata_port) {
6200                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6201                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6202                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6203                 return rc;
6204         }
6205
6206         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6207         hrrq = &ioa_cfg->hrrq[hrrq_id];
6208
6209         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6210         /*
6211          * We are currently blocking all devices due to a host reset.
6212          * We have told the host to stop giving us new requests, but
6213          * ERP ops don't count. FIXME
6214          */
6215         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6216                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6217                 return SCSI_MLQUEUE_HOST_BUSY;
6218         }
6219
6220         /*
6221          * FIXME - Create scsi_set_host_offline interface
6222          *  and the ioa_is_dead check can be removed
6223          */
6224         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6225                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6226                 goto err_nodev;
6227         }
6228
6229         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6230         if (ipr_cmd == NULL) {
6231                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6232                 return SCSI_MLQUEUE_HOST_BUSY;
6233         }
6234         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6235
6236         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6237         ioarcb = &ipr_cmd->ioarcb;
6238
6239         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6240         ipr_cmd->scsi_cmd = scsi_cmd;
6241         ipr_cmd->done = ipr_scsi_eh_done;
6242
6243         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6244                 if (scsi_cmd->underflow == 0)
6245                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6246
6247                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6248                 if (ipr_is_gscsi(res))
6249                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6250                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6251                 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6252         }
6253
6254         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6255             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6256                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6257         }
6258
6259         if (ioa_cfg->sis64)
6260                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6261         else
6262                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6263
6264         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6265         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6266                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6267                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6268                 if (!rc)
6269                         scsi_dma_unmap(scsi_cmd);
6270                 return SCSI_MLQUEUE_HOST_BUSY;
6271         }
6272
6273         if (unlikely(hrrq->ioa_is_dead)) {
6274                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6275                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6276                 scsi_dma_unmap(scsi_cmd);
6277                 goto err_nodev;
6278         }
6279
6280         ioarcb->res_handle = res->res_handle;
6281         if (res->needs_sync_complete) {
6282                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6283                 res->needs_sync_complete = 0;
6284         }
6285         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6286         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6287         ipr_send_command(ipr_cmd);
6288         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6289         return 0;
6290
6291 err_nodev:
6292         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6293         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6294         scsi_cmd->result = (DID_NO_CONNECT << 16);
6295         scsi_cmd->scsi_done(scsi_cmd);
6296         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6297         return 0;
6298 }
6299
6300 /**
6301  * ipr_ioctl - IOCTL handler
6302  * @sdev:       scsi device struct
6303  * @cmd:        IOCTL cmd
6304  * @arg:        IOCTL arg
6305  *
6306  * Return value:
6307  *      0 on success / other on failure
6308  **/
6309 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6310 {
6311         struct ipr_resource_entry *res;
6312
6313         res = (struct ipr_resource_entry *)sdev->hostdata;
6314         if (res && ipr_is_gata(res)) {
6315                 if (cmd == HDIO_GET_IDENTITY)
6316                         return -ENOTTY;
6317                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6318         }
6319
6320         return -EINVAL;
6321 }
6322
6323 /**
6324  * ipr_ioa_info - Get information about the card/driver
6325  * @host:       scsi host struct
6326  *
6327  * Return value:
6328  *      pointer to buffer with description string
6329  **/
6330 static const char *ipr_ioa_info(struct Scsi_Host *host)
6331 {
6332         static char buffer[512];
6333         struct ipr_ioa_cfg *ioa_cfg;
6334         unsigned long lock_flags = 0;
6335
6336         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6337
6338         spin_lock_irqsave(host->host_lock, lock_flags);
6339         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6340         spin_unlock_irqrestore(host->host_lock, lock_flags);
6341
6342         return buffer;
6343 }
6344
6345 static struct scsi_host_template driver_template = {
6346         .module = THIS_MODULE,
6347         .name = "IPR",
6348         .info = ipr_ioa_info,
6349         .ioctl = ipr_ioctl,
6350         .queuecommand = ipr_queuecommand,
6351         .eh_abort_handler = ipr_eh_abort,
6352         .eh_device_reset_handler = ipr_eh_dev_reset,
6353         .eh_host_reset_handler = ipr_eh_host_reset,
6354         .slave_alloc = ipr_slave_alloc,
6355         .slave_configure = ipr_slave_configure,
6356         .slave_destroy = ipr_slave_destroy,
6357         .target_alloc = ipr_target_alloc,
6358         .target_destroy = ipr_target_destroy,
6359         .change_queue_depth = ipr_change_queue_depth,
6360         .change_queue_type = ipr_change_queue_type,
6361         .bios_param = ipr_biosparam,
6362         .can_queue = IPR_MAX_COMMANDS,
6363         .this_id = -1,
6364         .sg_tablesize = IPR_MAX_SGLIST,
6365         .max_sectors = IPR_IOA_MAX_SECTORS,
6366         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6367         .use_clustering = ENABLE_CLUSTERING,
6368         .shost_attrs = ipr_ioa_attrs,
6369         .sdev_attrs = ipr_dev_attrs,
6370         .proc_name = IPR_NAME,
6371         .no_write_same = 1,
6372 };
6373
6374 /**
6375  * ipr_ata_phy_reset - libata phy_reset handler
6376  * @ap:         ata port to reset
6377  *
6378  **/
6379 static void ipr_ata_phy_reset(struct ata_port *ap)
6380 {
6381         unsigned long flags;
6382         struct ipr_sata_port *sata_port = ap->private_data;
6383         struct ipr_resource_entry *res = sata_port->res;
6384         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6385         int rc;
6386
6387         ENTER;
6388         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6389         while (ioa_cfg->in_reset_reload) {
6390                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6391                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6392                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6393         }
6394
6395         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6396                 goto out_unlock;
6397
6398         rc = ipr_device_reset(ioa_cfg, res);
6399
6400         if (rc) {
6401                 ap->link.device[0].class = ATA_DEV_NONE;
6402                 goto out_unlock;
6403         }
6404
6405         ap->link.device[0].class = res->ata_class;
6406         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6407                 ap->link.device[0].class = ATA_DEV_NONE;
6408
6409 out_unlock:
6410         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6411         LEAVE;
6412 }
6413
6414 /**
6415  * ipr_ata_post_internal - Cleanup after an internal command
6416  * @qc: ATA queued command
6417  *
6418  * Return value:
6419  *      none
6420  **/
6421 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6422 {
6423         struct ipr_sata_port *sata_port = qc->ap->private_data;
6424         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6425         struct ipr_cmnd *ipr_cmd;
6426         struct ipr_hrr_queue *hrrq;
6427         unsigned long flags;
6428
6429         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6430         while (ioa_cfg->in_reset_reload) {
6431                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6432                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6433                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6434         }
6435
6436         for_each_hrrq(hrrq, ioa_cfg) {
6437                 spin_lock(&hrrq->_lock);
6438                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6439                         if (ipr_cmd->qc == qc) {
6440                                 ipr_device_reset(ioa_cfg, sata_port->res);
6441                                 break;
6442                         }
6443                 }
6444                 spin_unlock(&hrrq->_lock);
6445         }
6446         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6447 }
6448
6449 /**
6450  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6451  * @regs:       destination IOARCB ATA register buffer
6452  * @tf: source ATA taskfile
6453  *
6454  * Return value:
6455  *      none
6456  **/
6457 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6458                              struct ata_taskfile *tf)
6459 {
6460         regs->feature = tf->feature;
6461         regs->nsect = tf->nsect;
6462         regs->lbal = tf->lbal;
6463         regs->lbam = tf->lbam;
6464         regs->lbah = tf->lbah;
6465         regs->device = tf->device;
6466         regs->command = tf->command;
6467         regs->hob_feature = tf->hob_feature;
6468         regs->hob_nsect = tf->hob_nsect;
6469         regs->hob_lbal = tf->hob_lbal;
6470         regs->hob_lbam = tf->hob_lbam;
6471         regs->hob_lbah = tf->hob_lbah;
6472         regs->ctl = tf->ctl;
6473 }
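/*
 * Note: the hob_* ("high order byte") fields copied above carry the
 * upper halves of the taskfile register pairs used by 48-bit LBA
 * commands.  Illustrative reassembly of the full LBA:
 *
 *	u64 lba = ((u64)regs->hob_lbah << 40) |
 *		  ((u64)regs->hob_lbam << 32) |
 *		  ((u64)regs->hob_lbal << 24) |
 *		  ((u64)regs->lbah << 16) |
 *		  ((u64)regs->lbam << 8) |
 *		  regs->lbal;
 */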
6474
6475 /**
6476  * ipr_sata_done - done function for SATA commands
6477  * @ipr_cmd:    ipr command struct
6478  *
6479  * This function is invoked by the interrupt handler for
6480  * ops generated by the SCSI mid-layer to SATA devices
6481  *
6482  * Return value:
6483  *      none
6484  **/
6485 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6486 {
6487         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6488         struct ata_queued_cmd *qc = ipr_cmd->qc;
6489         struct ipr_sata_port *sata_port = qc->ap->private_data;
6490         struct ipr_resource_entry *res = sata_port->res;
6491         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6492
6493         spin_lock(&ipr_cmd->hrrq->_lock);
6494         if (ipr_cmd->ioa_cfg->sis64)
6495                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6496                        sizeof(struct ipr_ioasa_gata));
6497         else
6498                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6499                        sizeof(struct ipr_ioasa_gata));
6500         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6501
6502         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6503                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6504
6505         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6506                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6507         else
6508                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6509         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6510         spin_unlock(&ipr_cmd->hrrq->_lock);
6511         ata_qc_complete(qc);
6512 }
6513
6514 /**
6515  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6516  * @ipr_cmd:    ipr command struct
6517  * @qc:         ATA queued command
6518  *
6519  **/
6520 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6521                                   struct ata_queued_cmd *qc)
6522 {
6523         u32 ioadl_flags = 0;
6524         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6525         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6526         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6527         int len = qc->nbytes;
6528         struct scatterlist *sg;
6529         unsigned int si;
6530         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6531
6532         if (len == 0)
6533                 return;
6534
6535         if (qc->dma_dir == DMA_TO_DEVICE) {
6536                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6537                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6538         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6539                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6540
6541         ioarcb->data_transfer_length = cpu_to_be32(len);
6542         ioarcb->ioadl_len =
6543                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6544         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6545                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6546
6547         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6548                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6549                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6550                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6551
6552                 last_ioadl64 = ioadl64;
6553                 ioadl64++;
6554         }
6555
6556         if (likely(last_ioadl64))
6557                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6558 }
6559
6560 /**
6561  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6562  * @ipr_cmd:    ipr command struct
6563  * @qc:         ATA queued command
6564  *
6565  **/
6566 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6567                                 struct ata_queued_cmd *qc)
6568 {
6569         u32 ioadl_flags = 0;
6570         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6571         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6572         struct ipr_ioadl_desc *last_ioadl = NULL;
6573         int len = qc->nbytes;
6574         struct scatterlist *sg;
6575         unsigned int si;
6576
6577         if (len == 0)
6578                 return;
6579
6580         if (qc->dma_dir == DMA_TO_DEVICE) {
6581                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6582                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6583                 ioarcb->data_transfer_length = cpu_to_be32(len);
6584                 ioarcb->ioadl_len =
6585                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6586         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6587                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6588                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6589                 ioarcb->read_ioadl_len =
6590                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6591         }
6592
6593         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6594                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6595                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6596
6597                 last_ioadl = ioadl;
6598                 ioadl++;
6599         }
6600
6601         if (likely(last_ioadl))
6602                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6603 }
6604
6605 /**
6606  * ipr_qc_defer - Get a free ipr_cmd
6607  * @qc: queued command
6608  *
6609  * Return value:
6610  *      0 if not deferred / ATA_DEFER_LINK if deferred
6611  **/
6612 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6613 {
6614         struct ata_port *ap = qc->ap;
6615         struct ipr_sata_port *sata_port = ap->private_data;
6616         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6617         struct ipr_cmnd *ipr_cmd;
6618         struct ipr_hrr_queue *hrrq;
6619         int hrrq_id;
6620
6621         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6622         hrrq = &ioa_cfg->hrrq[hrrq_id];
6623
6624         qc->lldd_task = NULL;
6625         spin_lock(&hrrq->_lock);
6626         if (unlikely(hrrq->ioa_is_dead)) {
6627                 spin_unlock(&hrrq->_lock);
6628                 return 0;
6629         }
6630
6631         if (unlikely(!hrrq->allow_cmds)) {
6632                 spin_unlock(&hrrq->_lock);
6633                 return ATA_DEFER_LINK;
6634         }
6635
6636         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6637         if (ipr_cmd == NULL) {
6638                 spin_unlock(&hrrq->_lock);
6639                 return ATA_DEFER_LINK;
6640         }
6641
6642         qc->lldd_task = ipr_cmd;
6643         spin_unlock(&hrrq->_lock);
6644         return 0;
6645 }
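/*
 * For context: libata calls ->qc_defer() before ->qc_issue(), so the
 * command block reserved here is handed across in qc->lldd_task and
 * claimed at the top of ipr_qc_issue() below (sketch):
 *
 *	ipr_qc_defer(qc);		reserves an ipr_cmd
 *	ipr_cmd = qc->lldd_task;	claimed in ipr_qc_issue()
 *	qc->lldd_task = NULL;
 */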
6646
6647 /**
6648  * ipr_qc_issue - Issue a SATA qc to a device
6649  * @qc: queued command
6650  *
6651  * Return value:
6652  *      0 if success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
6653  **/
6654 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6655 {
6656         struct ata_port *ap = qc->ap;
6657         struct ipr_sata_port *sata_port = ap->private_data;
6658         struct ipr_resource_entry *res = sata_port->res;
6659         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6660         struct ipr_cmnd *ipr_cmd;
6661         struct ipr_ioarcb *ioarcb;
6662         struct ipr_ioarcb_ata_regs *regs;
6663
6664         if (qc->lldd_task == NULL)
6665                 ipr_qc_defer(qc);
6666
6667         ipr_cmd = qc->lldd_task;
6668         if (ipr_cmd == NULL)
6669                 return AC_ERR_SYSTEM;
6670
6671         qc->lldd_task = NULL;
6672         spin_lock(&ipr_cmd->hrrq->_lock);
6673         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6674                         ipr_cmd->hrrq->ioa_is_dead)) {
6675                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6676                 spin_unlock(&ipr_cmd->hrrq->_lock);
6677                 return AC_ERR_SYSTEM;
6678         }
6679
6680         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6681         ioarcb = &ipr_cmd->ioarcb;
6682
6683         if (ioa_cfg->sis64) {
6684                 regs = &ipr_cmd->i.ata_ioadl.regs;
6685                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6686         } else
6687                 regs = &ioarcb->u.add_data.u.regs;
6688
6689         memset(regs, 0, sizeof(*regs));
6690         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6691
6692         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6693         ipr_cmd->qc = qc;
6694         ipr_cmd->done = ipr_sata_done;
6695         ipr_cmd->ioarcb.res_handle = res->res_handle;
6696         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6697         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6698         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6699         ipr_cmd->dma_use_sg = qc->n_elem;
6700
6701         if (ioa_cfg->sis64)
6702                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6703         else
6704                 ipr_build_ata_ioadl(ipr_cmd, qc);
6705
6706         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6707         ipr_copy_sata_tf(regs, &qc->tf);
6708         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6709         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6710
6711         switch (qc->tf.protocol) {
6712         case ATA_PROT_NODATA:
6713         case ATA_PROT_PIO:
6714                 break;
6715
6716         case ATA_PROT_DMA:
6717                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6718                 break;
6719
6720         case ATAPI_PROT_PIO:
6721         case ATAPI_PROT_NODATA:
6722                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6723                 break;
6724
6725         case ATAPI_PROT_DMA:
6726                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6727                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6728                 break;
6729
6730         default:
6731                 WARN_ON(1);
6732                 spin_unlock(&ipr_cmd->hrrq->_lock);
6733                 return AC_ERR_INVALID;
6734         }
6735
6736         ipr_send_command(ipr_cmd);
6737         spin_unlock(&ipr_cmd->hrrq->_lock);
6738
6739         return 0;
6740 }
6741
6742 /**
6743  * ipr_qc_fill_rtf - Read result TF
6744  * @qc: ATA queued command
6745  *
6746  * Return value:
6747  *      true
6748  **/
6749 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6750 {
6751         struct ipr_sata_port *sata_port = qc->ap->private_data;
6752         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6753         struct ata_taskfile *tf = &qc->result_tf;
6754
6755         tf->feature = g->error;
6756         tf->nsect = g->nsect;
6757         tf->lbal = g->lbal;
6758         tf->lbam = g->lbam;
6759         tf->lbah = g->lbah;
6760         tf->device = g->device;
6761         tf->command = g->status;
6762         tf->hob_nsect = g->hob_nsect;
6763         tf->hob_lbal = g->hob_lbal;
6764         tf->hob_lbam = g->hob_lbam;
6765         tf->hob_lbah = g->hob_lbah;
6766         tf->ctl = g->alt_status;
6767
6768         return true;
6769 }
6770
6771 static struct ata_port_operations ipr_sata_ops = {
6772         .phy_reset = ipr_ata_phy_reset,
6773         .hardreset = ipr_sata_reset,
6774         .post_internal_cmd = ipr_ata_post_internal,
6775         .qc_prep = ata_noop_qc_prep,
6776         .qc_defer = ipr_qc_defer,
6777         .qc_issue = ipr_qc_issue,
6778         .qc_fill_rtf = ipr_qc_fill_rtf,
6779         .port_start = ata_sas_port_start,
6780         .port_stop = ata_sas_port_stop
6781 };
6782
6783 static struct ata_port_info sata_port_info = {
6784         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6785         .pio_mask       = ATA_PIO4_ONLY,
6786         .mwdma_mask     = ATA_MWDMA2,
6787         .udma_mask      = ATA_UDMA6,
6788         .port_ops       = &ipr_sata_ops
6789 };
6790
6791 #ifdef CONFIG_PPC_PSERIES
6792 static const u16 ipr_blocked_processors[] = {
6793         PVR_NORTHSTAR,
6794         PVR_PULSAR,
6795         PVR_POWER4,
6796         PVR_ICESTAR,
6797         PVR_SSTAR,
6798         PVR_POWER4p,
6799         PVR_630,
6800         PVR_630p
6801 };
6802
6803 /**
6804  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6805  * @ioa_cfg:    ioa cfg struct
6806  *
6807  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6808  * certain pSeries hardware. This function determines if the given
6809  * adapter is in one of these configurations or not.
6810  *
6811  * Return value:
6812  *      1 if adapter is not supported / 0 if adapter is supported
6813  **/
6814 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6815 {
6816         int i;
6817
6818         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6819                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6820                         if (pvr_version_is(ipr_blocked_processors[i]))
6821                                 return 1;
6822                 }
6823         }
6824         return 0;
6825 }
6826 #else
6827 #define ipr_invalid_adapter(ioa_cfg) 0
6828 #endif
6829
6830 /**
6831  * ipr_ioa_bringdown_done - IOA bring down completion.
6832  * @ipr_cmd:    ipr command struct
6833  *
6834  * This function processes the completion of an adapter bring down.
6835  * It wakes any reset sleepers.
6836  *
6837  * Return value:
6838  *      IPR_RC_JOB_RETURN
6839  **/
6840 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6841 {
6842         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6843         int i;
6844
6845         ENTER;
6846         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6847                 ipr_trace;
6848                 spin_unlock_irq(ioa_cfg->host->host_lock);
6849                 scsi_unblock_requests(ioa_cfg->host);
6850                 spin_lock_irq(ioa_cfg->host->host_lock);
6851         }
6852
6853         ioa_cfg->in_reset_reload = 0;
6854         ioa_cfg->reset_retries = 0;
6855         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6856                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6857                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6858                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6859         }
6860         wmb();
6861
6862         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6863         wake_up_all(&ioa_cfg->reset_wait_q);
6864         LEAVE;
6865
6866         return IPR_RC_JOB_RETURN;
6867 }
6868
6869 /**
6870  * ipr_ioa_reset_done - IOA reset completion.
6871  * @ipr_cmd:    ipr command struct
6872  *
6873  * This function processes the completion of an adapter reset.
6874  * It schedules any necessary mid-layer add/removes and
6875  * wakes any reset sleepers.
6876  *
6877  * Return value:
6878  *      IPR_RC_JOB_RETURN
6879  **/
6880 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6881 {
6882         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6883         struct ipr_resource_entry *res;
6884         struct ipr_hostrcb *hostrcb, *temp;
6885         int i = 0, j;
6886
6887         ENTER;
6888         ioa_cfg->in_reset_reload = 0;
6889         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6890                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6891                 ioa_cfg->hrrq[j].allow_cmds = 1;
6892                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6893         }
6894         wmb();
6895         ioa_cfg->reset_cmd = NULL;
6896         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6897
6898         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6899                 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6900                         ipr_trace;
6901                         break;
6902                 }
6903         }
6904         schedule_work(&ioa_cfg->work_q);
6905
6906         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6907                 list_del(&hostrcb->queue);
6908                 if (i++ < IPR_NUM_LOG_HCAMS)
6909                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6910                 else
6911                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6912         }
6913
6914         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6915         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6916
6917         ioa_cfg->reset_retries = 0;
6918         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6919         wake_up_all(&ioa_cfg->reset_wait_q);
6920
6921         spin_unlock(ioa_cfg->host->host_lock);
6922         scsi_unblock_requests(ioa_cfg->host);
6923         spin_lock(ioa_cfg->host->host_lock);
6924
6925         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6926                 scsi_block_requests(ioa_cfg->host);
6927
6928         LEAVE;
6929         return IPR_RC_JOB_RETURN;
6930 }
6931
6932 /**
6933  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6934  * @supported_dev:      supported device struct
6935  * @vpids:                      vendor product id struct
6936  *
6937  * Return value:
6938  *      none
6939  **/
6940 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6941                                  struct ipr_std_inq_vpids *vpids)
6942 {
6943         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6944         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6945         supported_dev->num_records = 1;
6946         supported_dev->data_length =
6947                 cpu_to_be16(sizeof(struct ipr_supported_device));
6948         supported_dev->reserved = 0;
6949 }
6950
6951 /**
6952  * ipr_set_supported_devs - Send Set Supported Devices for a device
6953  * @ipr_cmd:    ipr command struct
6954  *
6955  * This function sends a Set Supported Devices to the adapter
6956  *
6957  * Return value:
6958  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6959  **/
6960 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6961 {
6962         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6963         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6964         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6965         struct ipr_resource_entry *res = ipr_cmd->u.res;
6966
6967         ipr_cmd->job_step = ipr_ioa_reset_done;
6968
6969         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6970                 if (!ipr_is_scsi_disk(res))
6971                         continue;
6972
6973                 ipr_cmd->u.res = res;
6974                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6975
6976                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6977                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6978                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6979
6980                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6981                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6982                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6983                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6984
6985                 ipr_init_ioadl(ipr_cmd,
6986                                ioa_cfg->vpd_cbs_dma +
6987                                  offsetof(struct ipr_misc_cbs, supp_dev),
6988                                sizeof(struct ipr_supported_device),
6989                                IPR_IOADL_FLAGS_WRITE_LAST);
6990
6991                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6992                            IPR_SET_SUP_DEVICE_TIMEOUT);
6993
6994                 if (!ioa_cfg->sis64)
6995                         ipr_cmd->job_step = ipr_set_supported_devs;
6996                 LEAVE;
6997                 return IPR_RC_JOB_RETURN;
6998         }
6999
7000         LEAVE;
7001         return IPR_RC_JOB_CONTINUE;
7002 }
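/*
 * Illustrative note: cdb[7] and cdb[8] above carry the big-endian
 * 16-bit transfer length.  For a hypothetical 24-byte buffer:
 *
 *	cdb[7] = (24 >> 8) & 0xff;	yields 0x00
 *	cdb[8] = 24 & 0xff;		yields 0x18
 */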
7003
7004 /**
7005  * ipr_get_mode_page - Locate specified mode page
7006  * @mode_pages: mode page buffer
7007  * @page_code:  page code to find
7008  * @len:                minimum required length for mode page
7009  *
7010  * Return value:
7011  *      pointer to mode page / NULL on failure
7012  **/
7013 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7014                                u32 page_code, u32 len)
7015 {
7016         struct ipr_mode_page_hdr *mode_hdr;
7017         u32 page_length;
7018         u32 length;
7019
7020         if (!mode_pages || (mode_pages->hdr.length == 0))
7021                 return NULL;
7022
7023         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7024         mode_hdr = (struct ipr_mode_page_hdr *)
7025                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7026
7027         while (length) {
7028                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7029                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7030                                 return mode_hdr;
7031                         break;
7032                 } else {
7033                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7034                                        mode_hdr->page_length);
7035                         length -= page_length;
7036                         mode_hdr = (struct ipr_mode_page_hdr *)
7037                                 ((unsigned long)mode_hdr + page_length);
7038                 }
7039         }
7040         return NULL;
7041 }
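
/*
 * Usage sketch (hypothetical caller, not in the driver): callers pass
 * the page code and the minimum length they require, and must treat a
 * NULL return as "page absent or too short":
 *
 *	struct ipr_mode_page28 *page28;
 *
 *	page28 = ipr_get_mode_page(mode_pages, 0x28,
 *				   sizeof(struct ipr_mode_page28));
 *	if (!page28)
 *		return;
 */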
7042
7043 /**
7044  * ipr_check_term_power - Check for term power errors
7045  * @ioa_cfg:    ioa config struct
7046  * @mode_pages: IOAFP mode pages buffer
7047  *
7048  * Check the IOAFP's mode page 28 for term power errors
7049  *
7050  * Return value:
7051  *      nothing
7052  **/
7053 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7054                                  struct ipr_mode_pages *mode_pages)
7055 {
7056         int i;
7057         int entry_length;
7058         struct ipr_dev_bus_entry *bus;
7059         struct ipr_mode_page28 *mode_page;
7060
7061         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7062                                       sizeof(struct ipr_mode_page28));
7063
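        /*
         * Page 0x28 is assumed to always be present in the IOAFP mode
         * page buffer here; ipr_get_mode_page() returns NULL for a
         * missing page, so a hardened caller would check before the
         * dereference below.
         */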
7064         entry_length = mode_page->entry_length;
7065
7066         bus = mode_page->bus;
7067
7068         for (i = 0; i < mode_page->num_entries; i++) {
7069                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7070                         dev_err(&ioa_cfg->pdev->dev,
7071                                 "Term power is absent on scsi bus %d\n",
7072                                 bus->res_addr.bus);
7073                 }
7074
7075                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7076         }
7077 }
7078
7079 /**
7080  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7081  * @ioa_cfg:    ioa config struct
7082  *
7083  * Looks through the config table for SES devices. If an SES
7084  * device appears in the SES table with a maximum SCSI bus speed,
7085  * the bus speed is limited accordingly.
7086  *
7087  * Return value:
7088  *      none
7089  **/
7090 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7091 {
7092         u32 max_xfer_rate;
7093         int i;
7094
7095         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7096                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7097                                                        ioa_cfg->bus_attr[i].bus_width);
7098
7099                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7100                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7101         }
7102 }
7103
7104 /**
7105  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7106  * @ioa_cfg:    ioa config struct
7107  * @mode_pages: mode page 28 buffer
7108  *
7109  * Updates mode page 28 based on driver configuration
7110  *
7111  * Return value:
7112  *      none
7113  **/
7114 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7115                                           struct ipr_mode_pages *mode_pages)
7116 {
7117         int i, entry_length;
7118         struct ipr_dev_bus_entry *bus;
7119         struct ipr_bus_attributes *bus_attr;
7120         struct ipr_mode_page28 *mode_page;
7121
7122         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7123                                       sizeof(struct ipr_mode_page28));
7124
7125         entry_length = mode_page->entry_length;
7126
7127         /* Loop for each device bus entry */
7128         for (i = 0, bus = mode_page->bus;
7129              i < mode_page->num_entries;
7130              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7131                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7132                         dev_err(&ioa_cfg->pdev->dev,
7133                                 "Invalid resource address reported: 0x%08X\n",
7134                                 IPR_GET_PHYS_LOC(bus->res_addr));
7135                         continue;
7136                 }
7137
7138                 bus_attr = &ioa_cfg->bus_attr[i];
7139                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7140                 bus->bus_width = bus_attr->bus_width;
7141                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7142                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7143                 if (bus_attr->qas_enabled)
7144                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7145                 else
7146                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7147         }
7148 }
7149
7150 /**
7151  * ipr_build_mode_select - Build a mode select command
7152  * @ipr_cmd:    ipr command struct
7153  * @res_handle: resource handle to send command to
7154  * @parm:               Byte 1 of Mode Select command
7155  * @dma_addr:   DMA buffer address
7156  * @xfer_len:   data transfer length
7157  *
7158  * Return value:
7159  *      none
7160  **/
7161 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7162                                   __be32 res_handle, u8 parm,
7163                                   dma_addr_t dma_addr, u8 xfer_len)
7164 {
7165         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7166
7167         ioarcb->res_handle = res_handle;
7168         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7169         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7170         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7171         ioarcb->cmd_pkt.cdb[1] = parm;
7172         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7173
7174         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7175 }
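
/*
 * For reference, the 6-byte MODE SELECT CDB built above.  Byte 1
 * carries the flags supplied via @parm (callers in this driver pass
 * 0x11, i.e. PF | SP per SPC) and byte 4 the parameter list length:
 *
 *	cdb[0] = MODE_SELECT	(0x15)
 *	cdb[1] = parm		(0x10 = PF, 0x01 = SP)
 *	cdb[4] = xfer_len	(parameter list length in bytes)
 */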
7176
7177 /**
7178  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7179  * @ipr_cmd:    ipr command struct
7180  *
7181  * This function sets up the SCSI bus attributes and sends
7182  * a Mode Select for Page 28 to activate them.
7183  *
7184  * Return value:
7185  *      IPR_RC_JOB_RETURN
7186  **/
7187 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7188 {
7189         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7190         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7191         int length;
7192
7193         ENTER;
7194         ipr_scsi_bus_speed_limit(ioa_cfg);
7195         ipr_check_term_power(ioa_cfg, mode_pages);
7196         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7197         length = mode_pages->hdr.length + 1;
7198         mode_pages->hdr.length = 0;
7199
7200         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7201                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7202                               length);
7203
7204         ipr_cmd->job_step = ipr_set_supported_devs;
7205         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7206                                     struct ipr_resource_entry, queue);
7207         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7208
7209         LEAVE;
7210         return IPR_RC_JOB_RETURN;
7211 }
7212
7213 /**
7214  * ipr_build_mode_sense - Builds a mode sense command
7215  * @ipr_cmd:    ipr command struct
7216  * @res_handle:  resource handle to send command to
7217  * @parm:               Byte 2 of mode sense command
7218  * @dma_addr:   DMA address of mode sense buffer
7219  * @xfer_len:   Size of DMA buffer
7220  *
7221  * Return value:
7222  *      none
7223  **/
7224 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7225                                  __be32 res_handle,
7226                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7227 {
7228         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7229
7230         ioarcb->res_handle = res_handle;
7231         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7232         ioarcb->cmd_pkt.cdb[2] = parm;
7233         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7234         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7235
7236         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7237 }
7238
7239 /**
7240  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7241  * @ipr_cmd:    ipr command struct
7242  *
7243  * This function handles the failure of an IOA bringup command.
7244  *
7245  * Return value:
7246  *      IPR_RC_JOB_RETURN
7247  **/
7248 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7249 {
7250         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7251         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7252
7253         dev_err(&ioa_cfg->pdev->dev,
7254                 "0x%02X failed with IOASC: 0x%08X\n",
7255                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7256
7257         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7258         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7259         return IPR_RC_JOB_RETURN;
7260 }
7261
7262 /**
7263  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7264  * @ipr_cmd:    ipr command struct
7265  *
7266  * This function handles the failure of a Mode Sense to the IOAFP.
7267  * Some adapters do not handle all mode pages.
7268  *
7269  * Return value:
7270  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7271  **/
7272 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7273 {
7274         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7275         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7276
7277         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7278                 ipr_cmd->job_step = ipr_set_supported_devs;
7279                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7280                                             struct ipr_resource_entry, queue);
7281                 return IPR_RC_JOB_CONTINUE;
7282         }
7283
7284         return ipr_reset_cmd_failed(ipr_cmd);
7285 }
7286
7287 /**
7288  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7289  * @ipr_cmd:    ipr command struct
7290  *
7291  * This function sends a Page 28 mode sense to the IOA to
7292  * retrieve SCSI bus attributes.
7293  *
7294  * Return value:
7295  *      IPR_RC_JOB_RETURN
7296  **/
7297 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7298 {
7299         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7300
7301         ENTER;
7302         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7303                              0x28, ioa_cfg->vpd_cbs_dma +
7304                              offsetof(struct ipr_misc_cbs, mode_pages),
7305                              sizeof(struct ipr_mode_pages));
7306
7307         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7308         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7309
7310         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7311
7312         LEAVE;
7313         return IPR_RC_JOB_RETURN;
7314 }
7315
7316 /**
7317  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7318  * @ipr_cmd:    ipr command struct
7319  *
7320  * This function enables dual IOA RAID support if possible.
7321  *
7322  * Return value:
7323  *      IPR_RC_JOB_RETURN
7324  **/
7325 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7326 {
7327         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7328         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7329         struct ipr_mode_page24 *mode_page;
7330         int length;
7331
7332         ENTER;
7333         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7334                                       sizeof(struct ipr_mode_page24));
7335
7336         if (mode_page)
7337                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7338
7339         length = mode_pages->hdr.length + 1;
7340         mode_pages->hdr.length = 0;
7341
7342         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7343                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7344                               length);
7345
7346         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7347         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7348
7349         LEAVE;
7350         return IPR_RC_JOB_RETURN;
7351 }
7352
7353 /**
7354  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7355  * @ipr_cmd:    ipr command struct
7356  *
7357  * This function handles the failure of a Mode Sense to the IOAFP.
7358  * Some adapters do not handle all mode pages.
7359  *
7360  * Return value:
7361  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7362  **/
7363 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7364 {
7365         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7366
7367         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7368                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7369                 return IPR_RC_JOB_CONTINUE;
7370         }
7371
7372         return ipr_reset_cmd_failed(ipr_cmd);
7373 }
7374
7375 /**
7376  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7377  * @ipr_cmd:    ipr command struct
7378  *
7379  * This function sends a mode sense to the IOA to retrieve
7380  * the IOA Advanced Function Control mode page.
7381  *
7382  * Return value:
7383  *      IPR_RC_JOB_RETURN
7384  **/
7385 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7386 {
7387         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7388
7389         ENTER;
7390         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7391                              0x24, ioa_cfg->vpd_cbs_dma +
7392                              offsetof(struct ipr_misc_cbs, mode_pages),
7393                              sizeof(struct ipr_mode_pages));
7394
7395         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7396         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7397
7398         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7399
7400         LEAVE;
7401         return IPR_RC_JOB_RETURN;
7402 }
7403
7404 /**
7405  * ipr_init_res_table - Initialize the resource table
7406  * @ipr_cmd:    ipr command struct
7407  *
7408  * This function looks through the existing resource table, comparing
7409  * it with the config table. It reconciles old and new devices and
7410  * schedules them to be added to or removed from the mid-layer
7411  * as appropriate.
7412  *
7413  * Return value:
7414  *      IPR_RC_JOB_CONTINUE
7415  **/
7416 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7417 {
7418         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7419         struct ipr_resource_entry *res, *temp;
7420         struct ipr_config_table_entry_wrapper cfgtew;
7421         int entries, found, flag, i;
7422         LIST_HEAD(old_res);
7423
7424         ENTER;
7425         if (ioa_cfg->sis64)
7426                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7427         else
7428                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7429
7430         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7431                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7432
7433         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7434                 list_move_tail(&res->queue, &old_res);
7435
7436         if (ioa_cfg->sis64)
7437                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7438         else
7439                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7440
7441         for (i = 0; i < entries; i++) {
7442                 if (ioa_cfg->sis64)
7443                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7444                 else
7445                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7446                 found = 0;
7447
7448                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7449                         if (ipr_is_same_device(res, &cfgtew)) {
7450                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7451                                 found = 1;
7452                                 break;
7453                         }
7454                 }
7455
7456                 if (!found) {
7457                         if (list_empty(&ioa_cfg->free_res_q)) {
7458                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7459                                 break;
7460                         }
7461
7462                         found = 1;
7463                         res = list_entry(ioa_cfg->free_res_q.next,
7464                                          struct ipr_resource_entry, queue);
7465                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7466                         ipr_init_res_entry(res, &cfgtew);
7467                         res->add_to_ml = 1;
7468                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7469                         res->sdev->allow_restart = 1;
7470
7471                 if (found)
7472                         ipr_update_res_entry(res, &cfgtew);
7473         }
7474
7475         list_for_each_entry_safe(res, temp, &old_res, queue) {
7476                 if (res->sdev) {
7477                         res->del_from_ml = 1;
7478                         res->res_handle = IPR_INVALID_RES_HANDLE;
7479                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7480                 }
7481         }
7482
7483         list_for_each_entry_safe(res, temp, &old_res, queue) {
7484                 ipr_clear_res_target(res);
7485                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7486         }
7487
7488         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7489                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7490         else
7491                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7492
7493         LEAVE;
7494         return IPR_RC_JOB_CONTINUE;
7495 }
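
/*
 * The reconciliation above, in outline (summary only, not driver
 * code):
 *
 *	move every entry on used_res_q onto old_res
 *	for each config table entry reported by the adapter:
 *		if it matches an entry on old_res:
 *			move it back to used_res_q (device still present)
 *		else:
 *			claim a free entry, initialize it, set add_to_ml
 *	for each old_res leftover with an attached sdev:
 *		set del_from_ml so the mid-layer device gets removed
 *	return any remaining old_res entries to free_res_q
 */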
7496
7497 /**
7498  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7499  * @ipr_cmd:    ipr command struct
7500  *
7501  * This function sends a Query IOA Configuration command
7502  * to the adapter to retrieve the IOA configuration table.
7503  *
7504  * Return value:
7505  *      IPR_RC_JOB_RETURN
7506  **/
7507 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7508 {
7509         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7510         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7511         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7512         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7513
7514         ENTER;
7515         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7516                 ioa_cfg->dual_raid = 1;
7517         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7518                  ucode_vpd->major_release, ucode_vpd->card_type,
7519                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7520         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7521         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7522
7523         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7524         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7525         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7526         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7527
7528         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7529                        IPR_IOADL_FLAGS_READ_LAST);
7530
7531         ipr_cmd->job_step = ipr_init_res_table;
7532
7533         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7534
7535         LEAVE;
7536         return IPR_RC_JOB_RETURN;
7537 }
7538
7539 /**
7540  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7541  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry flags (byte 1 of the INQUIRY CDB)
 * @page:       page code to request
 * @dma_addr:   DMA address of the inquiry buffer
 * @xfer_len:   size of the inquiry buffer
7542  *
7543  * This utility function sends an inquiry to the adapter.
7544  *
7545  * Return value:
7546  *      none
7547  **/
7548 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7549                               dma_addr_t dma_addr, u8 xfer_len)
7550 {
7551         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7552
7553         ENTER;
7554         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7555         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7556
7557         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7558         ioarcb->cmd_pkt.cdb[1] = flags;
7559         ioarcb->cmd_pkt.cdb[2] = page;
7560         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7561
7562         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7563
7564         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7565         LEAVE;
7566 }
7567
7568 /**
7569  * ipr_inquiry_page_supported - Is the given inquiry page supported
7570  * @page0:              inquiry page 0 buffer
7571  * @page:               page code.
7572  *
7573  * This function determines if the specified inquiry page is supported.
7574  *
7575  * Return value:
7576  *      1 if page is supported / 0 if not
7577  **/
7578 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7579 {
7580         int i;
7581
7582         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7583                 if (page0->page[i] == page)
7584                         return 1;
7585
7586         return 0;
7587 }
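
/*
 * Usage sketch: inquiry page 0 lists the VPD page codes the adapter
 * supports, so later inquiries are gated on it, as in
 * ipr_ioafp_cap_inquiry() below:
 *
 *	if (ipr_inquiry_page_supported(page0, 0xD0))
 *		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, cap_dma, cap_len);
 *
 * where cap_dma/cap_len are hypothetical stand-ins for the real DMA
 * address and length used below.
 */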
7588
7589 /**
7590  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7591  * @ipr_cmd:    ipr command struct
7592  *
7593  * This function sends a Page 0xD0 inquiry to the adapter
7594  * to retrieve adapter capabilities.
7595  *
7596  * Return value:
7597  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7598  **/
7599 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7600 {
7601         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7602         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7603         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7604
7605         ENTER;
7606         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7607         memset(cap, 0, sizeof(*cap));
7608
7609         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7610                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7611                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7612                                   sizeof(struct ipr_inquiry_cap));
7613                 return IPR_RC_JOB_RETURN;
7614         }
7615
7616         LEAVE;
7617         return IPR_RC_JOB_CONTINUE;
7618 }
7619
7620 /**
7621  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7622  * @ipr_cmd:    ipr command struct
7623  *
7624  * This function sends a Page 3 inquiry to the adapter
7625  * to retrieve software VPD information.
7626  *
7627  * Return value:
7628  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7629  **/
7630 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7631 {
7632         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7633
7634         ENTER;
7635
7636         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7637
7638         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7639                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7640                           sizeof(struct ipr_inquiry_page3));
7641
7642         LEAVE;
7643         return IPR_RC_JOB_RETURN;
7644 }
7645
7646 /**
7647  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7648  * @ipr_cmd:    ipr command struct
7649  *
7650  * This function sends a Page 0 inquiry to the adapter
7651  * to retrieve supported inquiry pages.
7652  *
7653  * Return value:
7654  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7655  **/
7656 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7657 {
7658         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7659         char type[5];
7660
7661         ENTER;
7662
7663         /* Grab the type out of the VPD and store it away */
7664         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7665         type[4] = '\0';
7666         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7667
7668         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7669
7670         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7671                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7672                           sizeof(struct ipr_inquiry_page0));
7673
7674         LEAVE;
7675         return IPR_RC_JOB_RETURN;
7676 }
7677
7678 /**
7679  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7680  * @ipr_cmd:    ipr command struct
7681  *
7682  * This function sends a standard inquiry to the adapter.
7683  *
7684  * Return value:
7685  *      IPR_RC_JOB_RETURN
7686  **/
7687 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7688 {
7689         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7690
7691         ENTER;
7692         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7693
7694         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7695                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7696                           sizeof(struct ipr_ioa_vpd));
7697
7698         LEAVE;
7699         return IPR_RC_JOB_RETURN;
7700 }
7701
7702 /**
7703  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7704  * @ipr_cmd:    ipr command struct
7705  *
7706  * This function sends an Identify Host Request Response Queue
7707  * command to establish the HRRQ with the adapter.
7708  *
7709  * Return value:
7710  *      IPR_RC_JOB_RETURN
7711  **/
7712 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7713 {
7714         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7715         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7716         struct ipr_hrr_queue *hrrq;
7717
7718         ENTER;
7719         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7720         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7721
7722         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7723                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7724
7725                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7726                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7727
7728                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7729                 if (ioa_cfg->sis64)
7730                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7731
7732                 if (ioa_cfg->nvectors == 1)
7733                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7734                 else
7735                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7736
7737                 ioarcb->cmd_pkt.cdb[2] =
7738                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7739                 ioarcb->cmd_pkt.cdb[3] =
7740                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7741                 ioarcb->cmd_pkt.cdb[4] =
7742                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7743                 ioarcb->cmd_pkt.cdb[5] =
7744                         ((u64) hrrq->host_rrq_dma) & 0xff;
7745                 ioarcb->cmd_pkt.cdb[7] =
7746                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7747                 ioarcb->cmd_pkt.cdb[8] =
7748                         (sizeof(u32) * hrrq->size) & 0xff;
7749
7750                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7751                         ioarcb->cmd_pkt.cdb[9] =
7752                                         ioa_cfg->identify_hrrq_index;
7753
7754                 if (ioa_cfg->sis64) {
7755                         ioarcb->cmd_pkt.cdb[10] =
7756                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7757                         ioarcb->cmd_pkt.cdb[11] =
7758                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7759                         ioarcb->cmd_pkt.cdb[12] =
7760                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7761                         ioarcb->cmd_pkt.cdb[13] =
7762                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7763                 }
7764
7765                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7766                         ioarcb->cmd_pkt.cdb[14] =
7767                                         ioa_cfg->identify_hrrq_index;
7768
7769                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7770                            IPR_INTERNAL_TIMEOUT);
7771
7772                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7773                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7774
7775                 LEAVE;
7776                 return IPR_RC_JOB_RETURN;
7777         }
7778
7779         LEAVE;
7780         return IPR_RC_JOB_CONTINUE;
7781 }
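
/*
 * For reference, the Identify HRRQ CDB packing above: the host RRQ DMA
 * address is split big-endian across the CDB, low 32 bits in bytes 2-5
 * and, on SIS-64, the high 32 bits in bytes 10-13, with the queue
 * length in bytes 7-8:
 *
 *	cdb[2..5]   = dma >> 24, >> 16, >> 8, >> 0	(bits 31..0)
 *	cdb[10..13] = dma >> 56, >> 48, >> 40, >> 32	(bits 63..32)
 *	cdb[7..8]   = (sizeof(u32) * hrrq->size) >> 8 and & 0xff
 */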
7782
7783 /**
7784  * ipr_reset_timer_done - Adapter reset timer function
7785  * @ipr_cmd:    ipr command struct
7786  *
7787  * Description: This function is used in adapter reset processing
7788  * for timing events. If the reset_cmd pointer in the IOA
7789  * config struct does not point to this command, we are doing
7790  * nested resets and fail_all_ops will take care of freeing the
7791  * command block.
7792  *
7793  * Return value:
7794  *      none
7795  **/
7796 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7797 {
7798         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7799         unsigned long lock_flags = 0;
7800
7801         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7802
7803         if (ioa_cfg->reset_cmd == ipr_cmd) {
7804                 list_del(&ipr_cmd->queue);
7805                 ipr_cmd->done(ipr_cmd);
7806         }
7807
7808         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7809 }
7810
7811 /**
7812  * ipr_reset_start_timer - Start a timer for adapter reset job
7813  * @ipr_cmd:    ipr command struct
7814  * @timeout:    timeout value
7815  *
7816  * Description: This function is used in adapter reset processing
7817  * for timing events. If the reset_cmd pointer in the IOA
7818  * config struct does not point to this command, we are doing
7819  * nested resets and fail_all_ops will take care of freeing the
7820  * command block.
7821  *
7822  * Return value:
7823  *      none
7824  **/
7825 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7826                                   unsigned long timeout)
7827 {
7828
7829         ENTER;
7830         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7831         ipr_cmd->done = ipr_reset_ioa_job;
7832
7833         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7834         ipr_cmd->timer.expires = jiffies + timeout;
7835         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7836         add_timer(&ipr_cmd->timer);
7837 }
7838
7839 /**
7840  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7841  * @ioa_cfg:    ioa cfg struct
7842  *
7843  * Return value:
7844  *      nothing
7845  **/
7846 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7847 {
7848         struct ipr_hrr_queue *hrrq;
7849
7850         for_each_hrrq(hrrq, ioa_cfg) {
7851                 spin_lock(&hrrq->_lock);
7852                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7853
7854                 /* Initialize Host RRQ pointers */
7855                 hrrq->hrrq_start = hrrq->host_rrq;
7856                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7857                 hrrq->hrrq_curr = hrrq->hrrq_start;
7858                 hrrq->toggle_bit = 1;
7859                 spin_unlock(&hrrq->_lock);
7860         }
7861         wmb();
7862
7863         ioa_cfg->identify_hrrq_index = 0;
7864         if (ioa_cfg->hrrq_num == 1)
7865                 atomic_set(&ioa_cfg->hrrq_index, 0);
7866         else
7867                 atomic_set(&ioa_cfg->hrrq_index, 1);
7868
7869         /* Zero out config table */
7870         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7871 }
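
/*
 * Sketch of the HRRQ convention initialized above (this mirrors the
 * consumer side elsewhere in this file): the queue is circular and the
 * adapter inverts the toggle bit on each pass, so an entry is new when
 * its toggle bit matches hrrq->toggle_bit, and the host flips its copy
 * when the cursor wraps:
 *
 *	if (++hrrq->hrrq_curr > hrrq->hrrq_end) {
 *		hrrq->hrrq_curr = hrrq->hrrq_start;
 *		hrrq->toggle_bit ^= 1u;
 *	}
 */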
7872
7873 /**
7874  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7875  * @ipr_cmd:    ipr command struct
7876  *
7877  * Return value:
7878  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7879  **/
7880 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7881 {
7882         unsigned long stage, stage_time;
7883         u32 feedback;
7884         volatile u32 int_reg;
7885         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7886         u64 maskval = 0;
7887
7888         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7889         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7890         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7891
7892         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7893
7894         /* sanity check the stage_time value */
7895         if (stage_time == 0)
7896                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7897         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7898                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7899         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7900                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7901
7902         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7903                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7904                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7905                 stage_time = ioa_cfg->transop_timeout;
7906                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7907         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7908                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7909                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7910                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7911                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7912                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7913                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7914                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7915                         return IPR_RC_JOB_CONTINUE;
7916                 }
7917         }
7918
7919         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7920         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7921         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7922         ipr_cmd->done = ipr_reset_ioa_job;
7923         add_timer(&ipr_cmd->timer);
7924
7925         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7926
7927         return IPR_RC_JOB_RETURN;
7928 }
7929
7930 /**
7931  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7932  * @ipr_cmd:    ipr command struct
7933  *
7934  * This function reinitializes some control blocks and
7935  * enables destructive diagnostics on the adapter.
7936  *
7937  * Return value:
7938  *      IPR_RC_JOB_RETURN
7939  **/
7940 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7941 {
7942         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7943         volatile u32 int_reg;
7944         volatile u64 maskval;
7945         int i;
7946
7947         ENTER;
7948         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7949         ipr_init_ioa_mem(ioa_cfg);
7950
7951         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7952                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7953                 ioa_cfg->hrrq[i].allow_interrupts = 1;
7954                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7955         }
7956         wmb();
7957         if (ioa_cfg->sis64) {
7958                 /* Set the adapter to the correct endian mode. */
7959                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7960                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7961         }
7962
7963         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7964
7965         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7966                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7967                        ioa_cfg->regs.clr_interrupt_mask_reg32);
7968                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7969                 return IPR_RC_JOB_CONTINUE;
7970         }
7971
7972         /* Enable destructive diagnostics on IOA */
7973         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7974
7975         if (ioa_cfg->sis64) {
7976                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7977                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7978                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7979         } else
7980                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7981
7982         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7983
7984         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7985
7986         if (ioa_cfg->sis64) {
7987                 ipr_cmd->job_step = ipr_reset_next_stage;
7988                 return IPR_RC_JOB_CONTINUE;
7989         }
7990
7991         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7992         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7993         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7994         ipr_cmd->done = ipr_reset_ioa_job;
7995         add_timer(&ipr_cmd->timer);
7996         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7997
7998         LEAVE;
7999         return IPR_RC_JOB_RETURN;
8000 }
8001
8002 /**
8003  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8004  * @ipr_cmd:    ipr command struct
8005  *
8006  * This function is invoked when an adapter dump has run out
8007  * of processing time.
8008  *
8009  * Return value:
8010  *      IPR_RC_JOB_CONTINUE
8011  **/
8012 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8013 {
8014         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8015
8016         if (ioa_cfg->sdt_state == GET_DUMP)
8017                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8018         else if (ioa_cfg->sdt_state == READ_DUMP)
8019                 ioa_cfg->sdt_state = ABORT_DUMP;
8020
8021         ioa_cfg->dump_timeout = 1;
8022         ipr_cmd->job_step = ipr_reset_alert;
8023
8024         return IPR_RC_JOB_CONTINUE;
8025 }
8026
8027 /**
8028  * ipr_unit_check_no_data - Log a unit check/no data error log
8029  * @ioa_cfg:            ioa config struct
8030  *
8031  * Logs an error indicating the adapter unit checked, but for some
8032  * reason, we were unable to fetch the unit check buffer.
8033  *
8034  * Return value:
8035  *      nothing
8036  **/
8037 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8038 {
8039         ioa_cfg->errors_logged++;
8040         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8041 }
8042
8043 /**
8044  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8045  * @ioa_cfg:            ioa config struct
8046  *
8047  * Fetches the unit check buffer from the adapter by clocking the data
8048  * through the mailbox register.
8049  *
8050  * Return value:
8051  *      nothing
8052  **/
8053 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8054 {
8055         unsigned long mailbox;
8056         struct ipr_hostrcb *hostrcb;
8057         struct ipr_uc_sdt sdt;
8058         int rc, length;
8059         u32 ioasc;
8060
8061         mailbox = readl(ioa_cfg->ioa_mailbox);
8062
8063         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8064                 ipr_unit_check_no_data(ioa_cfg);
8065                 return;
8066         }
8067
8068         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8069         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8070                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8071
8072         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8073             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8074             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8075                 ipr_unit_check_no_data(ioa_cfg);
8076                 return;
8077         }
8078
8079         /* Find length of the first sdt entry (UC buffer) */
8080         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8081                 length = be32_to_cpu(sdt.entry[0].end_token);
8082         else
8083                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8084                           be32_to_cpu(sdt.entry[0].start_token)) &
8085                           IPR_FMT2_MBX_ADDR_MASK;
8086
8087         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8088                              struct ipr_hostrcb, queue);
8089         list_del(&hostrcb->queue);
8090         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8091
8092         rc = ipr_get_ldump_data_section(ioa_cfg,
8093                                         be32_to_cpu(sdt.entry[0].start_token),
8094                                         (__be32 *)&hostrcb->hcam,
8095                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8096
8097         if (!rc) {
8098                 ipr_handle_log_data(ioa_cfg, hostrcb);
8099                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8100                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8101                     ioa_cfg->sdt_state == GET_DUMP)
8102                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8103         } else
8104                 ipr_unit_check_no_data(ioa_cfg);
8105
8106         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8107 }
8108
8109 /**
8110  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8111  * @ipr_cmd:    ipr command struct
8112  *
8113  * Description: This function fetches the unit check buffer from the adapter.
8114  *
8115  * Return value:
8116  *      IPR_RC_JOB_RETURN
8117  **/
8118 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8119 {
8120         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8121
8122         ENTER;
8123         ioa_cfg->ioa_unit_checked = 0;
8124         ipr_get_unit_check_buffer(ioa_cfg);
8125         ipr_cmd->job_step = ipr_reset_alert;
8126         ipr_reset_start_timer(ipr_cmd, 0);
8127
8128         LEAVE;
8129         return IPR_RC_JOB_RETURN;
8130 }
8131
8132 /**
8133  * ipr_reset_restore_cfg_space - Restore PCI config space.
8134  * @ipr_cmd:    ipr command struct
8135  *
8136  * Description: This function restores the saved PCI config space of
8137  * the adapter, fails all outstanding ops back to the callers, and
8138  * fetches the dump/unit check if applicable to this reset.
8139  *
8140  * Return value:
8141  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8142  **/
8143 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8144 {
8145         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8146         u32 int_reg;
8147
8148         ENTER;
8149         ioa_cfg->pdev->state_saved = true;
8150         pci_restore_state(ioa_cfg->pdev);
8151
8152         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8153                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8154                 return IPR_RC_JOB_CONTINUE;
8155         }
8156
8157         ipr_fail_all_ops(ioa_cfg);
8158
8159         if (ioa_cfg->sis64) {
8160                 /* Set the adapter to the correct endian mode. */
8161                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8162                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8163         }
8164
8165         if (ioa_cfg->ioa_unit_checked) {
8166                 if (ioa_cfg->sis64) {
8167                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8168                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8169                         return IPR_RC_JOB_RETURN;
8170                 } else {
8171                         ioa_cfg->ioa_unit_checked = 0;
8172                         ipr_get_unit_check_buffer(ioa_cfg);
8173                         ipr_cmd->job_step = ipr_reset_alert;
8174                         ipr_reset_start_timer(ipr_cmd, 0);
8175                         return IPR_RC_JOB_RETURN;
8176                 }
8177         }
8178
8179         if (ioa_cfg->in_ioa_bringdown) {
8180                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8181         } else {
8182                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8183
8184                 if (GET_DUMP == ioa_cfg->sdt_state) {
8185                         ioa_cfg->sdt_state = READ_DUMP;
8186                         ioa_cfg->dump_timeout = 0;
8187                         if (ioa_cfg->sis64)
8188                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8189                         else
8190                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8191                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8192                         schedule_work(&ioa_cfg->work_q);
8193                         return IPR_RC_JOB_RETURN;
8194                 }
8195         }
8196
8197         LEAVE;
8198         return IPR_RC_JOB_CONTINUE;
8199 }
8200
8201 /**
8202  * ipr_reset_bist_done - BIST has completed on the adapter.
8203  * @ipr_cmd:    ipr command struct
8204  *
8205  * Description: Unblock config space and resume the reset process.
8206  *
8207  * Return value:
8208  *      IPR_RC_JOB_CONTINUE
8209  **/
8210 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8211 {
8212         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8213
8214         ENTER;
8215         if (ioa_cfg->cfg_locked)
8216                 pci_cfg_access_unlock(ioa_cfg->pdev);
8217         ioa_cfg->cfg_locked = 0;
8218         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8219         LEAVE;
8220         return IPR_RC_JOB_CONTINUE;
8221 }
8222
8223 /**
8224  * ipr_reset_start_bist - Run BIST on the adapter.
8225  * @ipr_cmd:    ipr command struct
8226  *
8227  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8228  *
8229  * Return value:
8230  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8231  **/
8232 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8233 {
8234         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8235         int rc = PCIBIOS_SUCCESSFUL;
8236
8237         ENTER;
8238         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8239                 writel(IPR_UPROCI_SIS64_START_BIST,
8240                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8241         else
8242                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8243
8244         if (rc == PCIBIOS_SUCCESSFUL) {
8245                 ipr_cmd->job_step = ipr_reset_bist_done;
8246                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8247                 rc = IPR_RC_JOB_RETURN;
8248         } else {
8249                 if (ioa_cfg->cfg_locked)
8250                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8251                 ioa_cfg->cfg_locked = 0;
8252                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8253                 rc = IPR_RC_JOB_CONTINUE;
8254         }
8255
8256         LEAVE;
8257         return rc;
8258 }
8259
8260 /**
8261  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8262  * @ipr_cmd:    ipr command struct
8263  *
8264  * Description: This clears PCI reset to the adapter and delays two seconds.
8265  *
8266  * Return value:
8267  *      IPR_RC_JOB_RETURN
8268  **/
8269 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8270 {
8271         ENTER;
8272         pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8273         ipr_cmd->job_step = ipr_reset_bist_done;
8274         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8275         LEAVE;
8276         return IPR_RC_JOB_RETURN;
8277 }
8278
8279 /**
8280  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8281  * @ipr_cmd:    ipr command struct
8282  *
8283  * Description: This asserts PCI reset to the adapter.
8284  *
8285  * Return value:
8286  *      IPR_RC_JOB_RETURN
8287  **/
8288 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8289 {
8290         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8291         struct pci_dev *pdev = ioa_cfg->pdev;
8292
8293         ENTER;
8294         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8295         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8296         ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8297         LEAVE;
8298         return IPR_RC_JOB_RETURN;
8299 }
8300
8301 /**
8302  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8303  * @ipr_cmd:    ipr command struct
8304  *
8305  * Description: This attempts to block config access to the IOA.
8306  *
8307  * Return value:
8308  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8309  **/
8310 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8311 {
8312         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8313         int rc = IPR_RC_JOB_CONTINUE;
8314
8315         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8316                 ioa_cfg->cfg_locked = 1;
8317                 ipr_cmd->job_step = ioa_cfg->reset;
8318         } else {
8319                 if (ipr_cmd->u.time_left) {
8320                         rc = IPR_RC_JOB_RETURN;
8321                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8322                         ipr_reset_start_timer(ipr_cmd,
8323                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8324                 } else {
8325                         ipr_cmd->job_step = ioa_cfg->reset;
8326                         dev_err(&ioa_cfg->pdev->dev,
8327                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8328                 }
8329         }
8330
8331         return rc;
8332 }
8333
8334 /**
8335  * ipr_reset_block_config_access - Block config access to the IOA
8336  * @ipr_cmd:    ipr command struct
8337  *
8338  * Description: This attempts to block config access to the IOA
8339  *
8340  * Return value:
8341  *      IPR_RC_JOB_CONTINUE
8342  **/
8343 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8344 {
8345         ipr_cmd->ioa_cfg->cfg_locked = 0;
8346         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8347         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8348         return IPR_RC_JOB_CONTINUE;
8349 }
8350
8351 /**
8352  * ipr_reset_allowed - Query whether or not IOA can be reset
8353  * @ioa_cfg:    ioa config struct
8354  *
8355  * Return value:
8356  *      0 if reset not allowed / non-zero if reset is allowed
8357  **/
8358 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8359 {
8360         volatile u32 temp_reg;
8361
8362         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8363         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8364 }
8365
8366 /**
8367  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8368  * @ipr_cmd:    ipr command struct
8369  *
8370  * Description: This function waits for adapter permission to run BIST,
8371  * then runs BIST. If the adapter does not give permission after a
8372  * reasonable time, we will reset the adapter anyway. The impact of
8373  * resetting the adapter without warning the adapter is the risk of
8374  * losing the persistent error log on the adapter. If the adapter is
8375  * reset while it is writing to the flash on the adapter, the flash
8376  * segment will have bad ECC and be zeroed.
8377  *
8378  * Return value:
8379  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8380  **/
8381 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8382 {
8383         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8384         int rc = IPR_RC_JOB_RETURN;
8385
8386         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8387                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8388                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8389         } else {
8390                 ipr_cmd->job_step = ipr_reset_block_config_access;
8391                 rc = IPR_RC_JOB_CONTINUE;
8392         }
8393
8394         return rc;
8395 }
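
/*
 * The polling budget implied above: u.time_left starts at
 * IPR_WAIT_FOR_RESET_TIMEOUT and is reduced by
 * IPR_CHECK_FOR_RESET_TIMEOUT per retry, so the adapter is polled
 * roughly IPR_WAIT_FOR_RESET_TIMEOUT / IPR_CHECK_FOR_RESET_TIMEOUT
 * times before the driver stops waiting and resets it anyway.
 */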
8396
8397 /**
8398  * ipr_reset_alert - Alert the adapter of a pending reset
8399  * @ipr_cmd:    ipr command struct
8400  *
8401  * Description: This function alerts the adapter that it will be reset.
8402  * If memory space is not currently enabled, proceed directly
8403  * to running BIST on the adapter. The timer must always be started
8404  * so we guarantee we do not run BIST from ipr_isr.
8405  *
8406  * Return value:
8407  *      IPR_RC_JOB_RETURN
8408  **/
8409 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8410 {
8411         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8412         u16 cmd_reg;
8413         int rc;
8414
8415         ENTER;
8416         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8417
8418         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8419                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8420                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8421                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8422         } else {
8423                 ipr_cmd->job_step = ipr_reset_block_config_access;
8424         }
8425
8426         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8427         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8428
8429         LEAVE;
8430         return IPR_RC_JOB_RETURN;
8431 }
8432
8433 /**
8434  * ipr_reset_ucode_download_done - Microcode download completion
8435  * @ipr_cmd:    ipr command struct
8436  *
8437  * Description: This function unmaps the microcode download buffer.
8438  *
8439  * Return value:
8440  *      IPR_RC_JOB_CONTINUE
8441  **/
8442 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8443 {
8444         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8445         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8446
8447         pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8448                      sglist->num_sg, DMA_TO_DEVICE);
8449
8450         ipr_cmd->job_step = ipr_reset_alert;
8451         return IPR_RC_JOB_CONTINUE;
8452 }
8453
8454 /**
8455  * ipr_reset_ucode_download - Download microcode to the adapter
8456  * @ipr_cmd:    ipr command struct
8457  *
8458  * Description: This function checks to see if there is microcode
8459  * to download to the adapter. If there is, a download is performed.
8460  *
8461  * Return value:
8462  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8463  **/
8464 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8465 {
8466         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8467         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8468
8469         ENTER;
8470         ipr_cmd->job_step = ipr_reset_alert;
8471
8472         if (!sglist)
8473                 return IPR_RC_JOB_CONTINUE;
8474
8475         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8476         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8477         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8478         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8479         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8480         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8481         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8482
8483         if (ioa_cfg->sis64)
8484                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8485         else
8486                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8487         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8488
8489         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8490                    IPR_WRITE_BUFFER_TIMEOUT);
8491
8492         LEAVE;
8493         return IPR_RC_JOB_RETURN;
8494 }
8495
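/*
 * The three CDB assignments above pack sglist->buffer_len into the
 * WRITE BUFFER parameter list length field, a 24-bit big-endian value
 * in CDB bytes 6-8 (so a single download tops out at 16MB - 1 bytes).
 * For illustration, a buffer_len of 0x012345 would yield:
 *
 *        cdb[6] = (0x012345 & 0xff0000) >> 16;        yields 0x01
 *        cdb[7] = (0x012345 & 0x00ff00) >> 8;         yields 0x23
 *        cdb[8] =  0x012345 & 0x0000ff;               yields 0x45
 */
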
8496 /**
8497  * ipr_reset_shutdown_ioa - Shutdown the adapter
8498  * @ipr_cmd:    ipr command struct
8499  *
8500  * Description: This function issues an adapter shutdown of the
8501  * specified type to the specified adapter as part of the
8502  * adapter reset job.
8503  *
8504  * Return value:
8505  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8506  **/
8507 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8508 {
8509         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8510         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8511         unsigned long timeout;
8512         int rc = IPR_RC_JOB_CONTINUE;
8513
8514         ENTER;
8515         if (shutdown_type != IPR_SHUTDOWN_NONE &&
8516                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8517                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8518                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8519                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8520                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8521
8522                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8523                         timeout = IPR_SHUTDOWN_TIMEOUT;
8524                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8525                         timeout = IPR_INTERNAL_TIMEOUT;
8526                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8527                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8528                 else
8529                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8530
8531                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8532
8533                 rc = IPR_RC_JOB_RETURN;
8534                 ipr_cmd->job_step = ipr_reset_ucode_download;
8535         } else
8536                 ipr_cmd->job_step = ipr_reset_alert;
8537
8538         LEAVE;
8539         return rc;
8540 }
8541
8542 /**
8543  * ipr_reset_ioa_job - Adapter reset job
8544  * @ipr_cmd:    ipr command struct
8545  *
8546  * Description: This function is the job router for the adapter reset job.
8547  *
8548  * Return value:
8549  *      none
8550  **/
8551 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8552 {
8553         u32 rc, ioasc;
8554         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8555
8556         do {
8557                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8558
8559                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8560                         /*
8561                          * We are doing nested adapter resets and this is
8562                          * not the current reset job.
8563                          */
8564                         list_add_tail(&ipr_cmd->queue,
8565                                         &ipr_cmd->hrrq->hrrq_free_q);
8566                         return;
8567                 }
8568
8569                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8570                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8571                         if (rc == IPR_RC_JOB_RETURN)
8572                                 return;
8573                 }
8574
8575                 ipr_reinit_ipr_cmnd(ipr_cmd);
8576                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8577                 rc = ipr_cmd->job_step(ipr_cmd);
8578         } while (rc == IPR_RC_JOB_CONTINUE);
8579 }
8580
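/*
 * The loop above implements a simple state machine: each job step
 * returns IPR_RC_JOB_CONTINUE when the next job_step can run
 * immediately in this context, or IPR_RC_JOB_RETURN when it has queued
 * asynchronous work (an IOA command or a timer) whose completion will
 * re-enter ipr_reset_ioa_job(). A purely synchronous step would look
 * roughly like this (hypothetical step names, for illustration only):
 *
 *        static int ipr_reset_example_step(struct ipr_cmnd *ipr_cmd)
 *        {
 *                ipr_cmd->job_step = ipr_reset_example_next_step;
 *                return IPR_RC_JOB_CONTINUE;
 *        }
 */
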
8581 /**
8582  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8583  * @ioa_cfg:            ioa config struct
8584  * @job_step:           first job step of reset job
8585  * @shutdown_type:      shutdown type
8586  *
8587  * Description: This function will initiate the reset of the given adapter
8588  * starting at the selected job step.
8589  * If the caller needs to wait on the completion of the reset,
8590  * the caller must sleep on the reset_wait_q.
8591  *
8592  * Return value:
8593  *      none
8594  **/
8595 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8596                                     int (*job_step) (struct ipr_cmnd *),
8597                                     enum ipr_shutdown_type shutdown_type)
8598 {
8599         struct ipr_cmnd *ipr_cmd;
8600         int i;
8601
8602         ioa_cfg->in_reset_reload = 1;
8603         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8604                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8605                 ioa_cfg->hrrq[i].allow_cmds = 0;
8606                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8607         }
8608         wmb();
8609         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8610                 scsi_block_requests(ioa_cfg->host);
8611
8612         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8613         ioa_cfg->reset_cmd = ipr_cmd;
8614         ipr_cmd->job_step = job_step;
8615         ipr_cmd->u.shutdown_type = shutdown_type;
8616
8617         ipr_reset_ioa_job(ipr_cmd);
8618 }
8619
8620 /**
8621  * ipr_initiate_ioa_reset - Initiate an adapter reset
8622  * @ioa_cfg:            ioa config struct
8623  * @shutdown_type:      shutdown type
8624  *
8625  * Description: This function will initiate the reset of the given adapter.
8626  * If the caller needs to wait on the completion of the reset,
8627  * the caller must sleep on the reset_wait_q.
8628  *
8629  * Return value:
8630  *      none
8631  **/
8632 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8633                                    enum ipr_shutdown_type shutdown_type)
8634 {
8635         int i;
8636
8637         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8638                 return;
8639
8640         if (ioa_cfg->in_reset_reload) {
8641                 if (ioa_cfg->sdt_state == GET_DUMP)
8642                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8643                 else if (ioa_cfg->sdt_state == READ_DUMP)
8644                         ioa_cfg->sdt_state = ABORT_DUMP;
8645         }
8646
8647         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8648                 dev_err(&ioa_cfg->pdev->dev,
8649                         "IOA taken offline - error recovery failed\n");
8650
8651                 ioa_cfg->reset_retries = 0;
8652                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8653                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8654                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8655                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8656                 }
8657                 wmb();
8658
8659                 if (ioa_cfg->in_ioa_bringdown) {
8660                         ioa_cfg->reset_cmd = NULL;
8661                         ioa_cfg->in_reset_reload = 0;
8662                         ipr_fail_all_ops(ioa_cfg);
8663                         wake_up_all(&ioa_cfg->reset_wait_q);
8664
8665                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8666                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8667                                 scsi_unblock_requests(ioa_cfg->host);
8668                                 spin_lock_irq(ioa_cfg->host->host_lock);
8669                         }
8670                         return;
8671                 } else {
8672                         ioa_cfg->in_ioa_bringdown = 1;
8673                         shutdown_type = IPR_SHUTDOWN_NONE;
8674                 }
8675         }
8676
8677         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8678                                 shutdown_type);
8679 }
8680
8681 /**
8682  * ipr_reset_freeze - Hold off all I/O activity
8683  * @ipr_cmd:    ipr command struct
8684  *
8685  * Description: If the PCI slot is frozen, hold off all I/O
8686  * activity; then, as soon as the slot is available again,
8687  * initiate an adapter reset.
8688  */
8689 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8690 {
8691         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8692         int i;
8693
8694         /* Disallow new interrupts, avoid loop */
8695         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8696                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8697                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8698                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8699         }
8700         wmb();
8701         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8702         ipr_cmd->done = ipr_reset_ioa_job;
8703         return IPR_RC_JOB_RETURN;
8704 }
8705
8706 /**
8707  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8708  * @pdev:       PCI device struct
8709  *
8710  * Description: This routine is called to tell us that the PCI bus
8711  * is down. Can't do anything here, except put the device driver
8712  * into a holding pattern, waiting for the PCI bus to come back.
8713  */
8714 static void ipr_pci_frozen(struct pci_dev *pdev)
8715 {
8716         unsigned long flags = 0;
8717         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8718
8719         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8720         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8721         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8722 }
8723
8724 /**
8725  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8726  * @pdev:       PCI device struct
8727  *
8728  * Description: This routine is called by the pci error recovery
8729  * code after the PCI slot has been reset, just before we
8730  * should resume normal operations.
8731  */
8732 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8733 {
8734         unsigned long flags = 0;
8735         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8736
8737         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8738         if (ioa_cfg->needs_warm_reset)
8739                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8740         else
8741                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8742                                         IPR_SHUTDOWN_NONE);
8743         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8744         return PCI_ERS_RESULT_RECOVERED;
8745 }
8746
8747 /**
8748  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8749  * @pdev:       PCI device struct
8750  *
8751  * Description: This routine is called when the PCI bus has
8752  * permanently failed.
8753  */
8754 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8755 {
8756         unsigned long flags = 0;
8757         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8758         int i;
8759
8760         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8761         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8762                 ioa_cfg->sdt_state = ABORT_DUMP;
8763         ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8764         ioa_cfg->in_ioa_bringdown = 1;
8765         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8766                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8767                 ioa_cfg->hrrq[i].allow_cmds = 0;
8768                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8769         }
8770         wmb();
8771         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8772         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8773 }
8774
8775 /**
8776  * ipr_pci_error_detected - Called when a PCI error is detected.
8777  * @pdev:       PCI device struct
8778  * @state:      PCI channel state
8779  *
8780  * Description: Called when a PCI error is detected.
8781  *
8782  * Return value:
8783  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8784  */
8785 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8786                                                pci_channel_state_t state)
8787 {
8788         switch (state) {
8789         case pci_channel_io_frozen:
8790                 ipr_pci_frozen(pdev);
8791                 return PCI_ERS_RESULT_NEED_RESET;
8792         case pci_channel_io_perm_failure:
8793                 ipr_pci_perm_failure(pdev);
8794                 return PCI_ERS_RESULT_DISCONNECT;
8796         default:
8797                 break;
8798         }
8799         return PCI_ERS_RESULT_NEED_RESET;
8800 }
8801
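/*
 * Together, the routines above cover the PCI error recovery (EEH)
 * flow: a frozen slot parks the driver via ipr_reset_freeze(), a
 * successful slot reset restarts the adapter, and a permanent failure
 * takes the IOA offline. They are wired into the PCI core through a
 * struct pci_error_handlers, roughly:
 *
 *        static const struct pci_error_handlers ipr_err_handler = {
 *                .error_detected = ipr_pci_error_detected,
 *                .slot_reset     = ipr_pci_slot_reset,
 *        };
 */
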
8802 /**
8803  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8804  * @ioa_cfg:    ioa cfg struct
8805  *
8806  * Description: This is the second phase of adapter initialization.
8807  * This function takes care of initializing the adapter to the point
8808  * where it can accept new commands.
8809  *
8810  * Return value:
8811  *      0 on success / -EIO on failure
8812  **/
8813 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8814 {
8815         int rc = 0;
8816         unsigned long host_lock_flags = 0;
8817
8818         ENTER;
8819         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8820         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8821         if (ioa_cfg->needs_hard_reset) {
8822                 ioa_cfg->needs_hard_reset = 0;
8823                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8824         } else
8825                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8826                                         IPR_SHUTDOWN_NONE);
8827         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8828         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8829         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8830
8831         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8832                 rc = -EIO;
8833         } else if (ipr_invalid_adapter(ioa_cfg)) {
8834                 if (!ipr_testmode)
8835                         rc = -EIO;
8836
8837                 dev_err(&ioa_cfg->pdev->dev,
8838                         "Adapter not supported in this hardware configuration.\n");
8839         }
8840
8841         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8842
8843         LEAVE;
8844         return rc;
8845 }
8846
8847 /**
8848  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8849  * @ioa_cfg:    ioa config struct
8850  *
8851  * Return value:
8852  *      none
8853  **/
8854 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8855 {
8856         int i;
8857
8858         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8859                 if (ioa_cfg->ipr_cmnd_list[i])
8860                         pci_pool_free(ioa_cfg->ipr_cmd_pool,
8861                                       ioa_cfg->ipr_cmnd_list[i],
8862                                       ioa_cfg->ipr_cmnd_list_dma[i]);
8863
8864                 ioa_cfg->ipr_cmnd_list[i] = NULL;
8865         }
8866
8867         if (ioa_cfg->ipr_cmd_pool)
8868                 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8869
8870         kfree(ioa_cfg->ipr_cmnd_list);
8871         kfree(ioa_cfg->ipr_cmnd_list_dma);
8872         ioa_cfg->ipr_cmnd_list = NULL;
8873         ioa_cfg->ipr_cmnd_list_dma = NULL;
8874         ioa_cfg->ipr_cmd_pool = NULL;
8875 }
8876
8877 /**
8878  * ipr_free_mem - Frees memory allocated for an adapter
8879  * @ioa_cfg:    ioa cfg struct
8880  *
8881  * Return value:
8882  *      nothing
8883  **/
8884 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8885 {
8886         int i;
8887
8888         kfree(ioa_cfg->res_entries);
8889         pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8890                             ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8891         ipr_free_cmd_blks(ioa_cfg);
8892
8893         for (i = 0; i < ioa_cfg->hrrq_num; i++)
8894                 pci_free_consistent(ioa_cfg->pdev,
8895                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
8896                                         ioa_cfg->hrrq[i].host_rrq,
8897                                         ioa_cfg->hrrq[i].host_rrq_dma);
8898
8899         pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8900                             ioa_cfg->u.cfg_table,
8901                             ioa_cfg->cfg_table_dma);
8902
8903         for (i = 0; i < IPR_NUM_HCAMS; i++) {
8904                 pci_free_consistent(ioa_cfg->pdev,
8905                                     sizeof(struct ipr_hostrcb),
8906                                     ioa_cfg->hostrcb[i],
8907                                     ioa_cfg->hostrcb_dma[i]);
8908         }
8909
8910         ipr_free_dump(ioa_cfg);
8911         kfree(ioa_cfg->trace);
8912 }
8913
8914 /**
8915  * ipr_free_all_resources - Free all allocated resources for an adapter.
8916  * @ioa_cfg:    ioa config struct
8917  *
8918  * This function frees all allocated resources for the
8919  * specified adapter.
8920  *
8921  * Return value:
8922  *      none
8923  **/
8924 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8925 {
8926         struct pci_dev *pdev = ioa_cfg->pdev;
8927
8928         ENTER;
8929         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8930             ioa_cfg->intr_flag == IPR_USE_MSIX) {
8931                 int i;
8932                 for (i = 0; i < ioa_cfg->nvectors; i++)
8933                         free_irq(ioa_cfg->vectors_info[i].vec,
8934                                 &ioa_cfg->hrrq[i]);
8935         } else
8936                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8937
8938         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8939                 pci_disable_msi(pdev);
8940                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8941         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8942                 pci_disable_msix(pdev);
8943                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8944         }
8945
8946         iounmap(ioa_cfg->hdw_dma_regs);
8947         pci_release_regions(pdev);
8948         ipr_free_mem(ioa_cfg);
8949         scsi_host_put(ioa_cfg->host);
8950         pci_disable_device(pdev);
8951         LEAVE;
8952 }
8953
8954 /**
8955  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8956  * @ioa_cfg:    ioa config struct
8957  *
8958  * Return value:
8959  *      0 on success / -ENOMEM on allocation failure
8960  **/
8961 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8962 {
8963         struct ipr_cmnd *ipr_cmd;
8964         struct ipr_ioarcb *ioarcb;
8965         dma_addr_t dma_addr;
8966         int i, entries_each_hrrq, hrrq_id = 0;
8967
8968         ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8969                                                 sizeof(struct ipr_cmnd), 512, 0);
8970
8971         if (!ioa_cfg->ipr_cmd_pool)
8972                 return -ENOMEM;
8973
8974         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8975         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8976
8977         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8978                 ipr_free_cmd_blks(ioa_cfg);
8979                 return -ENOMEM;
8980         }
8981
8982         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8983                 if (ioa_cfg->hrrq_num > 1) {
8984                         if (i == 0) {
8985                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8986                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
8987                                 ioa_cfg->hrrq[i].max_cmd_id =
8988                                         (entries_each_hrrq - 1);
8989                         } else {
8990                                 entries_each_hrrq =
8991                                         IPR_NUM_BASE_CMD_BLKS/
8992                                         (ioa_cfg->hrrq_num - 1);
8993                                 ioa_cfg->hrrq[i].min_cmd_id =
8994                                         IPR_NUM_INTERNAL_CMD_BLKS +
8995                                         (i - 1) * entries_each_hrrq;
8996                                 ioa_cfg->hrrq[i].max_cmd_id =
8997                                         (IPR_NUM_INTERNAL_CMD_BLKS +
8998                                         i * entries_each_hrrq - 1);
8999                         }
9000                 } else {
9001                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9002                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9003                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9004                 }
9005                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9006         }
9007
9008         BUG_ON(ioa_cfg->hrrq_num == 0);
9009
9010         i = IPR_NUM_CMD_BLKS -
9011                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9012         if (i > 0) {
9013                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9014                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9015         }
9016
9017         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9018                 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9019
9020                 if (!ipr_cmd) {
9021                         ipr_free_cmd_blks(ioa_cfg);
9022                         return -ENOMEM;
9023                 }
9024
9025                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9026                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9027                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9028
9029                 ioarcb = &ipr_cmd->ioarcb;
9030                 ipr_cmd->dma_addr = dma_addr;
9031                 if (ioa_cfg->sis64)
9032                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9033                 else
9034                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9035
9036                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9037                 if (ioa_cfg->sis64) {
9038                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9039                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9040                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9041                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9042                 } else {
9043                         ioarcb->write_ioadl_addr =
9044                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9045                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9046                         ioarcb->ioasa_host_pci_addr =
9047                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9048                 }
9049                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9050                 ipr_cmd->cmd_index = i;
9051                 ipr_cmd->ioa_cfg = ioa_cfg;
9052                 ipr_cmd->sense_buffer_dma = dma_addr +
9053                         offsetof(struct ipr_cmnd, sense_buffer);
9054
9055                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9056                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9057                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9058                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9059                         hrrq_id++;
9060         }
9061
9062         return 0;
9063 }
9064
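/*
 * Worked example of the partitioning above, using hypothetical values
 * IPR_NUM_INTERNAL_CMD_BLKS = 5 and IPR_NUM_BASE_CMD_BLKS = 100 (so
 * IPR_NUM_CMD_BLKS = 105) with hrrq_num = 4: hrrq[0] is reserved for
 * internal commands and the base pool is split across the rest, with
 * the division remainder folded into the last queue:
 *
 *        hrrq[0]: cmd_ids  0 -   4   (5 internal blocks)
 *        hrrq[1]: cmd_ids  5 -  37   (100 / 3 = 33 blocks)
 *        hrrq[2]: cmd_ids 38 -  70   (33 blocks)
 *        hrrq[3]: cmd_ids 71 - 104   (33 blocks + 1 left over)
 */
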
9065 /**
9066  * ipr_alloc_mem - Allocate memory for an adapter
9067  * @ioa_cfg:    ioa config struct
9068  *
9069  * Return value:
9070  *      0 on success / non-zero for error
9071  **/
9072 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9073 {
9074         struct pci_dev *pdev = ioa_cfg->pdev;
9075         int i, rc = -ENOMEM;
9076
9077         ENTER;
9078         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9079                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9080
9081         if (!ioa_cfg->res_entries)
9082                 goto out;
9083
9084         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9085                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9086                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9087         }
9088
9089         ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9090                                                 sizeof(struct ipr_misc_cbs),
9091                                                 &ioa_cfg->vpd_cbs_dma);
9092
9093         if (!ioa_cfg->vpd_cbs)
9094                 goto out_free_res_entries;
9095
9096         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9097                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9098                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9099                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9100                 if (i == 0)
9101                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9102                 else
9103                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9104         }
9105
9106         if (ipr_alloc_cmd_blks(ioa_cfg))
9107                 goto out_free_vpd_cbs;
9108
9109         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9110                 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9111                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9112                                         &ioa_cfg->hrrq[i].host_rrq_dma);
9113
9114                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9115                         while (--i >= 0)
9116                                 pci_free_consistent(pdev,
9117                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9118                                         ioa_cfg->hrrq[i].host_rrq,
9119                                         ioa_cfg->hrrq[i].host_rrq_dma);
9120                         goto out_ipr_free_cmd_blocks;
9121                 }
9122                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9123         }
9124
9125         ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9126                                                     ioa_cfg->cfg_table_size,
9127                                                     &ioa_cfg->cfg_table_dma);
9128
9129         if (!ioa_cfg->u.cfg_table)
9130                 goto out_free_host_rrq;
9131
9132         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9133                 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9134                                                            sizeof(struct ipr_hostrcb),
9135                                                            &ioa_cfg->hostrcb_dma[i]);
9136
9137                 if (!ioa_cfg->hostrcb[i])
9138                         goto out_free_hostrcb_dma;
9139
9140                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9141                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9142                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9143                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9144         }
9145
9146         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9147                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9148
9149         if (!ioa_cfg->trace)
9150                 goto out_free_hostrcb_dma;
9151
9152         rc = 0;
9153 out:
9154         LEAVE;
9155         return rc;
9156
9157 out_free_hostrcb_dma:
9158         while (i-- > 0) {
9159                 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9160                                     ioa_cfg->hostrcb[i],
9161                                     ioa_cfg->hostrcb_dma[i]);
9162         }
9163         pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9164                             ioa_cfg->u.cfg_table,
9165                             ioa_cfg->cfg_table_dma);
9166 out_free_host_rrq:
9167         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9168                 pci_free_consistent(pdev,
9169                                 sizeof(u32) * ioa_cfg->hrrq[i].size,
9170                                 ioa_cfg->hrrq[i].host_rrq,
9171                                 ioa_cfg->hrrq[i].host_rrq_dma);
9172         }
9173 out_ipr_free_cmd_blocks:
9174         ipr_free_cmd_blks(ioa_cfg);
9175 out_free_vpd_cbs:
9176         pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9177                             ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9178 out_free_res_entries:
9179         kfree(ioa_cfg->res_entries);
9180         goto out;
9181 }
9182
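/*
 * Note the error path above: each failure label releases only what was
 * successfully allocated before the jump, unwinding in exact reverse
 * order of allocation. In condensed form the pattern is:
 *
 *        if (!(a = alloc_a()))
 *                goto out;
 *        if (!(b = alloc_b()))
 *                goto free_a;
 *        return 0;
 * free_a:
 *        free_a(a);
 * out:
 *        return -ENOMEM;
 */
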
9183 /**
9184  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9185  * @ioa_cfg:    ioa config struct
9186  *
9187  * Return value:
9188  *      none
9189  **/
9190 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9191 {
9192         int i;
9193
9194         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9195                 ioa_cfg->bus_attr[i].bus = i;
9196                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9197                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9198                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9199                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9200                 else
9201                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9202         }
9203 }
9204
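/*
 * ipr_max_speed is a module parameter indexing ipr_max_bus_speeds[];
 * anything past the end of that table falls back to the U160 rate.
 * With the stock table the default of 1 selects U160, so a user who
 * wants U320 on every bus would presumably load the driver as:
 *
 *        modprobe ipr ipr_max_speed=2
 */
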
9205 /**
9206  * ipr_init_ioa_cfg - Initialize IOA config struct
9207  * @ioa_cfg:    ioa config struct
9208  * @host:               scsi host struct
9209  * @pdev:               PCI dev struct
9210  *
9211  * Return value:
9212  *      none
9213  **/
9214 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9215                              struct Scsi_Host *host, struct pci_dev *pdev)
9216 {
9217         const struct ipr_interrupt_offsets *p;
9218         struct ipr_interrupts *t;
9219         void __iomem *base;
9220
9221         ioa_cfg->host = host;
9222         ioa_cfg->pdev = pdev;
9223         ioa_cfg->log_level = ipr_log_level;
9224         ioa_cfg->doorbell = IPR_DOORBELL;
9225         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9226         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9227         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9228         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9229         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9230         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9231
9232         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9233         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9234         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9235         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9236         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9237         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9238         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9239         ioa_cfg->sdt_state = INACTIVE;
9240
9241         ipr_initialize_bus_attr(ioa_cfg);
9242         ioa_cfg->max_devs_supported = ipr_max_devs;
9243
9244         if (ioa_cfg->sis64) {
9245                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9246                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9247                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9248                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9249         } else {
9250                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9251                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9252                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9253                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9254         }
9255         host->max_channel = IPR_MAX_BUS_TO_SCAN;
9256         host->unique_id = host->host_no;
9257         host->max_cmd_len = IPR_MAX_CDB_LEN;
9258         host->can_queue = ioa_cfg->max_cmds;
9259         pci_set_drvdata(pdev, ioa_cfg);
9260
9261         p = &ioa_cfg->chip_cfg->regs;
9262         t = &ioa_cfg->regs;
9263         base = ioa_cfg->hdw_dma_regs;
9264
9265         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9266         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9267         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9268         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9269         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9270         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9271         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9272         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9273         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9274         t->ioarrin_reg = base + p->ioarrin_reg;
9275         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9276         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9277         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9278         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9279         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9280         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9281
9282         if (ioa_cfg->sis64) {
9283                 t->init_feedback_reg = base + p->init_feedback_reg;
9284                 t->dump_addr_reg = base + p->dump_addr_reg;
9285                 t->dump_data_reg = base + p->dump_data_reg;
9286                 t->endian_swap_reg = base + p->endian_swap_reg;
9287         }
9288 }
9289
9290 /**
9291  * ipr_get_chip_info - Find adapter chip information
9292  * @dev_id:             PCI device id struct
9293  *
9294  * Return value:
9295  *      ptr to chip information on success / NULL on failure
9296  **/
9297 static const struct ipr_chip_t *
9298 ipr_get_chip_info(const struct pci_device_id *dev_id)
9299 {
9300         int i;
9301
9302         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9303                 if (ipr_chip[i].vendor == dev_id->vendor &&
9304                     ipr_chip[i].device == dev_id->device)
9305                         return &ipr_chip[i];
9306         return NULL;
9307 }
9308
9309 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9310 {
9311         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9312         int i, err, vectors;
9313
9314         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9315                 entries[i].entry = i;
9316
9317         vectors = ipr_number_of_msix;
9318
9319         while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9320                 vectors = err;
9321
9322         if (err < 0) {
9323                 pci_disable_msix(ioa_cfg->pdev);
9324                 return err;
9325         }
9326
9327         if (!err) {
9328                 for (i = 0; i < vectors; i++)
9329                         ioa_cfg->vectors_info[i].vec = entries[i].vector;
9330                 ioa_cfg->nvectors = vectors;
9331         }
9332
9333         return err;
9334 }
9335
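/*
 * pci_enable_msix() here follows the old tri-state convention: 0 on
 * success, a negative errno on failure, and a positive count of the
 * vectors the platform could actually provide. The retry loop above
 * leans on the positive case to degrade gracefully, e.g.:
 *
 *        err = pci_enable_msix(pdev, entries, 16);   returns 8
 *        err = pci_enable_msix(pdev, entries, 8);    returns 0
 *
 * ipr_enable_msi() below plays the same game with
 * pci_enable_msi_block().
 */
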
9336 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9337 {
9338         int i, err, vectors;
9339
9340         vectors = ipr_number_of_msix;
9341
9342         while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9343                 vectors = err;
9344
9345         if (err < 0) {
9346                 pci_disable_msi(ioa_cfg->pdev);
9347                 return err;
9348         }
9349
9350         if (!err) {
9351                 for (i = 0; i < vectors; i++)
9352                         ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9353                 ioa_cfg->nvectors = vectors;
9354         }
9355
9356         return err;
9357 }
9358
9359 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9360 {
9361         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9362
9363         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9364                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9365                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9366                 ioa_cfg->vectors_info[vec_idx].
9367                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9368         }
9369 }
9370
9371 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9372 {
9373         int i, rc;
9374
9375         for (i = 1; i < ioa_cfg->nvectors; i++) {
9376                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9377                         ipr_isr_mhrrq,
9378                         0,
9379                         ioa_cfg->vectors_info[i].desc,
9380                         &ioa_cfg->hrrq[i]);
9381                 if (rc) {
9382                         while (--i >= 0)
9383                                 free_irq(ioa_cfg->vectors_info[i].vec,
9384                                         &ioa_cfg->hrrq[i]);
9385                         return rc;
9386                 }
9387         }
9388         return 0;
9389 }
9390
9391 /**
9392  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9393  * @irq:                interrupt number
 * @devp:               pointer to the ioa config struct
9394  *
9395  * Description: Simply set the msi_received flag to 1 indicating that
9396  * Message Signaled Interrupts are supported.
9397  *
9398  * Return value:
9399  *      IRQ_HANDLED
9400  **/
9401 static irqreturn_t ipr_test_intr(int irq, void *devp)
9402 {
9403         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9404         unsigned long lock_flags = 0;
9405         irqreturn_t rc = IRQ_HANDLED;
9406
9407         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9408         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9409
9410         ioa_cfg->msi_received = 1;
9411         wake_up(&ioa_cfg->msi_wait_q);
9412
9413         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9414         return rc;
9415 }
9416
9417 /**
9418  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9419  * @pdev:               PCI device struct
9420  *
9421  * Description: The return value from pci_enable_msi() cannot always be
9422  * trusted.  This routine sets up and initiates a test interrupt to determine
9423  * if the interrupt is received via the ipr_test_intr() service routine.
9424  * If the test fails, the driver will fall back to LSI.
9425  *
9426  * Return value:
9427  *      0 on success / non-zero on failure
9428  **/
9429 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9430 {
9431         int rc;
9432         volatile u32 int_reg;
9433         unsigned long lock_flags = 0;
9434
9435         ENTER;
9436
9437         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9438         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9439         ioa_cfg->msi_received = 0;
9440         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9441         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9442         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9443         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9444
9445         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9446                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9447         else
9448                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9449         if (rc) {
9450                 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9451                 return rc;
9452         } else if (ipr_debug)
9453                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9454
9455         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9456         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9457         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9458         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9459         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9460
9461         if (!ioa_cfg->msi_received) {
9462                 /* MSI test failed */
9463                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9464                 rc = -EOPNOTSUPP;
9465         } else if (ipr_debug)
9466                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9467
9468         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9469
9470         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9471                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9472         else
9473                 free_irq(pdev->irq, ioa_cfg);
9474
9475         LEAVE;
9476
9477         return rc;
9478 }
9479
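/*
 * The test above works by provoking a diagnostic interrupt: writing
 * IPR_PCII_IO_DEBUG_ACKNOWLEDGE makes the adapter raise an interrupt,
 * and the probe thread then sleeps for up to one second (HZ jiffies)
 * on msi_wait_q waiting for ipr_test_intr() to mark it received:
 *
 *        wait_event_timeout(ioa_cfg->msi_wait_q,
 *                           ioa_cfg->msi_received, HZ);
 *
 * If the flag never shows up, MSI delivery is assumed broken and the
 * driver falls back to legacy (LSI) interrupts.
 */
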
9480 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9481  * @pdev:               PCI device struct
9482  * @dev_id:             PCI device id struct
9483  *
9484  * Return value:
9485  *      0 on success / non-zero on failure
9486  **/
9487 static int ipr_probe_ioa(struct pci_dev *pdev,
9488                          const struct pci_device_id *dev_id)
9489 {
9490         struct ipr_ioa_cfg *ioa_cfg;
9491         struct Scsi_Host *host;
9492         unsigned long ipr_regs_pci;
9493         void __iomem *ipr_regs;
9494         int rc = PCIBIOS_SUCCESSFUL;
9495         volatile u32 mask, uproc, interrupts;
9496         unsigned long lock_flags;
9497
9498         ENTER;
9499
9500         if ((rc = pci_enable_device(pdev))) {
9501                 dev_err(&pdev->dev, "Cannot enable adapter\n");
9502                 goto out;
9503         }
9504
9505         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9506
9507         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9508
9509         if (!host) {
9510                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9511                 rc = -ENOMEM;
9512                 goto out_disable;
9513         }
9514
9515         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9516         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9517         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9518
9519         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9520
9521         if (!ioa_cfg->ipr_chip) {
9522                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9523                         dev_id->vendor, dev_id->device);
9524                 rc = -ENODEV;
                goto out_scsi_host_put;
9525         }
9526
9527         /* set SIS 32 or SIS 64 */
9528         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9529         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9530         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9531         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9532
9533         if (ipr_transop_timeout)
9534                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9535         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9536                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9537         else
9538                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9539
9540         ioa_cfg->revid = pdev->revision;
9541
9542         ipr_regs_pci = pci_resource_start(pdev, 0);
9543
9544         rc = pci_request_regions(pdev, IPR_NAME);
9545         if (rc < 0) {
9546                 dev_err(&pdev->dev,
9547                         "Couldn't register memory range of registers\n");
9548                 goto out_scsi_host_put;
9549         }
9550
9551         ipr_regs = pci_ioremap_bar(pdev, 0);
9552
9553         if (!ipr_regs) {
9554                 dev_err(&pdev->dev,
9555                         "Couldn't map memory range of registers\n");
9556                 rc = -ENOMEM;
9557                 goto out_release_regions;
9558         }
9559
9560         ioa_cfg->hdw_dma_regs = ipr_regs;
9561         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9562         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9563
9564         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9565
9566         pci_set_master(pdev);
9567
9568         if (ioa_cfg->sis64) {
9569                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9570                 if (rc < 0) {
9571                         dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9572                         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9573                 }
9574
9575         } else
9576                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9577
9578         if (rc < 0) {
9579                 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9580                 goto cleanup_nomem;
9581         }
9582
9583         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9584                                    ioa_cfg->chip_cfg->cache_line_size);
9585
9586         if (rc != PCIBIOS_SUCCESSFUL) {
9587                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9588                 rc = -EIO;
9589                 goto cleanup_nomem;
9590         }
9591
9592         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9593                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9594                         IPR_MAX_MSIX_VECTORS);
9595                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9596         }
9597
9598         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9599                         ipr_enable_msix(ioa_cfg) == 0)
9600                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9601         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9602                         ipr_enable_msi(ioa_cfg) == 0)
9603                 ioa_cfg->intr_flag = IPR_USE_MSI;
9604         else {
9605                 ioa_cfg->intr_flag = IPR_USE_LSI;
9606                 ioa_cfg->nvectors = 1;
9607                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9608         }
9609
9610         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9611             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9612                 rc = ipr_test_msi(ioa_cfg, pdev);
9613                 if (rc == -EOPNOTSUPP) {
9614                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9615                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9616                                 pci_disable_msi(pdev);
9617                         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9618                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9619                                 pci_disable_msix(pdev);
9620                         }
9621
9622                         ioa_cfg->intr_flag = IPR_USE_LSI;
9623                         ioa_cfg->nvectors = 1;
9624                 }
9625                 else if (rc)
9626                         goto out_msi_disable;
9627                 else {
9628                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9629                                 dev_info(&pdev->dev,
9630                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
9631                                         ioa_cfg->nvectors, pdev->irq);
9632                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9633                                 dev_info(&pdev->dev,
9634                                         "Request for %d MSIXs succeeded.\n",
9635                                         ioa_cfg->nvectors);
9636                 }
9637         }
9638
9639         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9640                                 (unsigned int)num_online_cpus(),
9641                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9642
9643         /* Save away PCI config space for use following IOA reset */
9644         rc = pci_save_state(pdev);
9645
9646         if (rc != PCIBIOS_SUCCESSFUL) {
9647                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9648                 rc = -EIO;
9649                 goto out_msi_disable;
9650         }
9651
9652         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9653                 goto out_msi_disable;
9654
9655         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9656                 goto out_msi_disable;
9657
9658         if (ioa_cfg->sis64)
9659                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9660                                 + ((sizeof(struct ipr_config_table_entry64)
9661                                 * ioa_cfg->max_devs_supported)));
9662         else
9663                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9664                                 + ((sizeof(struct ipr_config_table_entry)
9665                                 * ioa_cfg->max_devs_supported)));
9666
9667         rc = ipr_alloc_mem(ioa_cfg);
9668         if (rc < 0) {
9669                 dev_err(&pdev->dev,
9670                         "Couldn't allocate enough memory for device driver!\n");
9671                 goto out_msi_disable;
9672         }
9673
9674         /*
9675          * If HRRQ updated interrupt is not masked, or reset alert is set,
9676          * the card is in an unknown state and needs a hard reset
9677          */
9678         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9679         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9680         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9681         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9682                 ioa_cfg->needs_hard_reset = 1;
9683         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9684                 ioa_cfg->needs_hard_reset = 1;
9685         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9686                 ioa_cfg->ioa_unit_checked = 1;
9687
9688         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9689         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9690         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9691
9692         if (ioa_cfg->intr_flag == IPR_USE_MSI
9693                         || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9694                 name_msi_vectors(ioa_cfg);
9695                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9696                         0,
9697                         ioa_cfg->vectors_info[0].desc,
9698                         &ioa_cfg->hrrq[0]);
9699                 if (!rc)
9700                         rc = ipr_request_other_msi_irqs(ioa_cfg);
9701         } else {
9702                 rc = request_irq(pdev->irq, ipr_isr,
9703                          IRQF_SHARED,
9704                          IPR_NAME, &ioa_cfg->hrrq[0]);
9705         }
9706         if (rc) {
9707                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9708                         pdev->irq, rc);
9709                 goto cleanup_nolog;
9710         }
9711
9712         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9713             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9714                 ioa_cfg->needs_warm_reset = 1;
9715                 ioa_cfg->reset = ipr_reset_slot_reset;
9716         } else
9717                 ioa_cfg->reset = ipr_reset_start_bist;
9718
9719         spin_lock(&ipr_driver_lock);
9720         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9721         spin_unlock(&ipr_driver_lock);
9722
9723         LEAVE;
9724 out:
9725         return rc;
9726
9727 cleanup_nolog:
9728         ipr_free_mem(ioa_cfg);
9729 out_msi_disable:
9730         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9731                 pci_disable_msi(pdev);
9732         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9733                 pci_disable_msix(pdev);
9734 cleanup_nomem:
9735         iounmap(ipr_regs);
9736 out_release_regions:
9737         pci_release_regions(pdev);
9738 out_scsi_host_put:
9739         scsi_host_put(host);
9740 out_disable:
9741         pci_disable_device(pdev);
9742         goto out;
9743 }
9744
9745 /**
9746  * ipr_scan_vsets - Scans for VSET devices
9747  * @ioa_cfg:    ioa config struct
9748  *
9749  * Description: Since the VSET resources do not follow SAM in that we can have
9750  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9751  *
9752  * Return value:
9753  *      none
9754  **/
9755 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9756 {
9757         int target, lun;
9758
9759         for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9760                 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9761                         scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9762 }
9763
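/*
 * Example of why the exhaustive sweep above is needed: a VSET target
 * might expose only LUNs 2 and 5 with no LUN 0, so the midlayer's
 * normal scan (which probes LUN 0 and then issues REPORT LUNS) would
 * find nothing; calling scsi_add_device() on every target/LUN position
 * of IPR_VSET_BUS sidesteps that.
 */
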
9764 /**
9765  * ipr_initiate_ioa_bringdown - Bring down an adapter
9766  * @ioa_cfg:            ioa config struct
9767  * @shutdown_type:      shutdown type
9768  *
9769  * Description: This function will initiate bringing down the adapter.
9770  * This consists of issuing an IOA shutdown to the adapter
9771  * to flush the cache, and running BIST.
9772  * If the caller needs to wait on the completion of the reset,
9773  * the caller must sleep on the reset_wait_q.
9774  *
9775  * Return value:
9776  *      none
9777  **/
9778 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9779                                        enum ipr_shutdown_type shutdown_type)
9780 {
9781         ENTER;
9782         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9783                 ioa_cfg->sdt_state = ABORT_DUMP;
9784         ioa_cfg->reset_retries = 0;
9785         ioa_cfg->in_ioa_bringdown = 1;
9786         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9787         LEAVE;
9788 }
9789
9790 /**
9791  * __ipr_remove - Remove a single adapter
9792  * @pdev:       pci device struct
9793  *
9794  * Adapter hot plug remove entry point.
9795  *
9796  * Return value:
9797  *      none
9798  **/
9799 static void __ipr_remove(struct pci_dev *pdev)
9800 {
9801         unsigned long host_lock_flags = 0;
9802         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9803         int i;
9804         ENTER;
9805
9806         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9807         while (ioa_cfg->in_reset_reload) {
9808                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9809                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9810                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9811         }
9812
9813         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9814                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9815                 ioa_cfg->hrrq[i].removing_ioa = 1;
9816                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9817         }
9818         wmb();
9819         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9820
9821         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9822         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9823         flush_work(&ioa_cfg->work_q);
9824         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9825         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9826
9827         spin_lock(&ipr_driver_lock);
9828         list_del(&ioa_cfg->queue);
9829         spin_unlock(&ipr_driver_lock);
9830
9831         if (ioa_cfg->sdt_state == ABORT_DUMP)
9832                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9833         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9834
9835         ipr_free_all_resources(ioa_cfg);
9836
9837         LEAVE;
9838 }
9839
9840 /**
9841  * ipr_remove - IOA hot plug remove entry point
9842  * @pdev:       pci device struct
9843  *
9844  * Adapter hot plug remove entry point.
9845  *
9846  * Return value:
9847  *      none
9848  **/
9849 static void ipr_remove(struct pci_dev *pdev)
9850 {
9851         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9852
9853         ENTER;
9854
9855         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9856                               &ipr_trace_attr);
9857         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9858                              &ipr_dump_attr);
9859         scsi_remove_host(ioa_cfg->host);
9860
9861         __ipr_remove(pdev);
9862
9863         LEAVE;
9864 }
9865
9866 /**
9867  * ipr_probe - Adapter hot plug add entry point
9868  * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
 *
9869  * Return value:
9870  *      0 on success / non-zero on failure
9871  **/
9872 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9873 {
9874         struct ipr_ioa_cfg *ioa_cfg;
9875         int rc, i;
9876
	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

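	/*
	 * On SIS-64 adapters with multiple MSI-X vectors, service the
	 * secondary HRRQs via blk_iopoll; hrrq[0] stays on the regular
	 * interrupt path.
	 */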
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It issues
 * a shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
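	/* Quiesce iopoll on the secondary HRRQs before the bringdown. */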
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

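	/* As in __ipr_remove(), wait out any reset/reload in flight. */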
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

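/*
 * Devices are matched on vendor/device and subvendor/subdevice IDs;
 * the final field is driver_data, carrying IPR_USE_* quirk flags.
 */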
static const struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

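		/*
		 * Send a "prepare for normal shutdown" IOA command so
		 * the adapter can flush its write cache before power
		 * is removed.
		 */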
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}

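/*
 * Reboot notifier; registered in ipr_init() so that ipr_halt() runs on
 * restart, halt, and power-off.
 */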
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

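	/* Hook reboot/halt/power-off so adapters get a shutdown prepare. */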
	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);