/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.25"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
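	/*
	 * Note: because MV_MAX_Q_DEPTH is a power of two, a queue index
	 * can wrap with a cheap mask rather than a modulo, e.g.
	 * (31 + 1) & MV_MAX_Q_DEPTH_MASK == 0; mv_inc_q_index() below
	 * depends on exactly this property.
	 */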
	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary.  Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
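	/*
	 * Worked out, the per-port DMA chunk is exactly one 4KB page:
	 * 32 CRQBs * 32B = 1024B, 32 CRPBs * 8B = 256B, and
	 * 176 ePRDs * 16B = 2816B; 1024 + 256 + 2816 = 4096B.
	 */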
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,
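	/*
	 * Example: on a dual-HC (8 port) chip, port 6 is HC 1
	 * (6 >> MV_PORT_HC_SHIFT) hard port 2 (6 & MV_PORT_MASK).
	 */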
	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_50XX		= (1 << 5),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};

#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
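/*
 * Note on the two BASE_LO masks above: the in/out pointer registers double
 * as the low 32 bits of each queue's base address, so the CRQB ring must be
 * 1KB aligned and the CRPB ring 256B aligned.  The bits below each mask
 * carry the ring index, shifted by EDMA_REQ_Q_PTR_SHIFT (5, as CRQBs are
 * 32B) or EDMA_RSP_Q_PTR_SHIFT (3, as CRPBs are 8B).
 */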
enum {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	u32			sg_addr;
	u32			sg_addr_hi;
	u16			ctrl_flags;
	u16			ata_cmd[11];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	u16			id;
	u16			flags;
	u32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	u32			addr;
	u32			flags_size;
	u32			addr_hi;
	u32			reserved;
};
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned		req_producer;	/* cp of req_in_ptr */
	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
	{}			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
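/*
 * Worked example for the helpers above: port 5 is hard port 1 on HC 1,
 * so mv_port_base() computes base + 0x20000 + (1 * 0x10000) + 0x2000 +
 * (1 * 0x2000) = base + 0x34000.
 */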
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long host_flags)
{
	return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with an
 * assert.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
}
/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with an
 * assert.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg))
			break;
		udelay(100);
	}

	if (EDMA_EN & reg) {
		printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
		/* FIXME: Consider doing a reset here to recover */
	}
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devices */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);
	return (u32) ofs;
}
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
}
/**
 * mv_host_stop - Host specific cleanup/stop routine.
 * @host_set: host data structure
 *
 * Disable ints, cleanup host memory, call general purpose
 * host_stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_stop(struct ata_host_set *host_set)
{
	struct mv_host_priv *hpriv = host_set->private_data;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI)
		pci_disable_msi(pdev);

	kfree(hpriv);
	ata_host_stop(host_set);
}
static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	/* free from the base of the chunk: the CRQB table comes first */
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crqb, pp->crqb_dma);
}
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc = -ENOMEM;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem)
		goto err_out_pp;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		goto err_out_priv;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
		 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	pp->req_producer = pp->rsp_consumer = 0;

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
err_out_pp:
	kfree(pp);
err_out:
	return rc;
}
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host_set lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
		pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
		pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
		if (ata_sg_is_last(sg, qc))
			pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		i++;
	}
}
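/*
 * Note: the "(addr >> 16) >> 16" in mv_fill_sg() is a portable way to take
 * the high 32 bits of a dma_addr_t.  When dma_addr_t is only 32 bits wide,
 * a single ">> 32" would be undefined behavior in C; two 16-bit shifts
 * simply yield zero.
 */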
static inline unsigned mv_inc_q_index(unsigned *index)
{
	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
	return *index;
}
static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
	*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
}
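/*
 * Shape of the packed command word built above: the register value occupies
 * bits 7:0, the shadow register address sits at bits 8 and up (the ATA_REG_*
 * offsets are small), CRQB_CMD_CS sets bit 12, and CRQB_CMD_LAST (bit 15)
 * marks the final entry in the CRQB's ata_cmd[] list.
 */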
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	u16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		return;
	}

	/* the req producer index should be the same as we remember it */
	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
		flags |= CRQB_FLAG_READ;
	}
	assert(MV_MAX_Q_DEPTH > qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	pp->crqb[pp->req_producer].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[pp->req_producer].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
		return;
	}
	mv_fill_sg(qc);
}
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* the req producer index should be the same as we remember it */
	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);
	/* until we do queuing, the queue should be empty at this point */
	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
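/*
 * Note on the producer/consumer protocol used in mv_qc_issue() above:
 * software owns the request in pointer and hardware owns the out pointer.
 * Rewriting the in pointer with the bumped index (shifted into place per
 * EDMA_REQ_Q_PTR_SHIFT) is what tells the EDMA that a new CRQB is ready.
 */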
/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We assert indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 out_ptr;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* the response consumer index should be the same as we remember it */
	assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->rsp_consumer);

	/* increment our consumer index... */
	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->rsp_consumer);

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
}
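/*
 * Note on the return in mv_get_crpb_status() above: the ATA status byte
 * lives in bits 15:8 of the CRPB flags word (CRPB_FLAG_STATUS_SHIFT == 8),
 * so the shift recovers what a read of the Status register would have
 * returned for the completed command.
 */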
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		serr = scr_read(ap, SCR_ERROR);
		scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (EDMA_ERR_FATAL & edma_err_cause) {
		mv_stop_and_reset(ap);
	}
}
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host_set: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;
	u8 ata_status = 0;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		ap = host_set->ports[port];
		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
			/* new CRPB on the queue; just one at a time until NCQ
			 */
			ata_status = mv_get_crpb_status(ap);
			handled++;
		} else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
			/* received ATA IRQ; read the status reg to clear INTRQ
			 */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
			handled++;
		}

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap);
			err_mask |= AC_ERR_OTHER;
			handled++;
		}

		if (handled && ap) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (NULL != qc) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				ata_qc_complete(qc, err_mask);
			}
		}
	}
	VPRINTK("EXIT\n");
}
/**
 * mv_interrupt - top-level interrupt handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 * @regs: unused
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host_set lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
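/*
 * Example for the offset math above: the 5xxx PHY registers live in
 * per-port 0x100 blocks starting 0x100 past the HC base, so hard port 2
 * resolves to hc_mmio + 0x300.
 */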
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	return (u32) ofs;
}
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *re-enable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 12) | (1 << 7);
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *mmio = ap->host_set->mmio_base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap);
}
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* proceed to init communications via the scr_control reg */
	scr_write_flush(ap, SCR_CONTROL, 0x301);
	mdelay(1);
	scr_write_flush(ap, SCR_CONTROL, 0x300);
	timeout = jiffies + (HZ * 1);
	do {
		mdelay(10);
		if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	mv_scr_write(ap, SCR_ERROR, mv_scr_read(ap, SCR_ERROR));

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (sata_dev_present(ap)) {
		ata_port_probe(ap);
	} else {
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, scr_read(ap, SCR_STATUS));
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_present(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host_set lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n", ap->id);
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
			 to_pci_dev(ap->host_set->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
	       &qc->scsicmd->cmnd);

	mv_err_intr(ap);
	mv_stop_and_reset(ap);

	if (!qc) {
		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
		       ap->id);
	} else {
		/* hack alert!  We cannot use the supplied completion
		 * function from inside the ->eh_strategy_handler() thread.
		 * libata is the only user of ->eh_strategy_handler() in
		 * any kernel, so the default scsi_done() assumes it is
		 * not being called from the SCSI EH.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		qc->scsidone = scsi_finish_command;
		ata_qc_complete(qc, AC_ERR_OTHER);
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}
}
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
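	/*
	 * The shadow block exposes each taskfile register on its own
	 * 32-bit word, hence the "sizeof(u32) *" spacing above: with
	 * ATA_REG_DATA == 0 and ATA_REG_NSECT == 2, for example, data
	 * sits at shd_base + 0x0 and nsect at shd_base + 0x8.
	 */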
	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 * mv_init_host - Perform some early initialization of the host.
 * @pdev: host PCI device
 * @probe_ent: early data struct representing the host
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(probe_ent->host_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 12);
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));
done:
	return rc;
}
/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @probe_ent: early data struct representing the host
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		   "%u slots %u ports %s mode IRQ via %s\n",
		   (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc) {
		return rc;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_init_host(pdev, probe_ent, board_idx);
	if (rc) {
		goto err_out_hpriv;
	}

	/* Enable interrupts */
	if (pci_enable_msi(pdev) == 0) {
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	}

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0) {
		rc = -ENODEV;		/* No devices discovered */
		goto err_out_dev_add;
	}

	kfree(probe_ent);
	return 0;

err_out_dev_add:
	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
		pci_disable_msi(pdev);
	}
err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}
	return rc;
}
static int __init mv_init(void)
{
	return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(mv_init);
module_exit(mv_exit);