2 * linux/arch/alpha/kernel/sys_marvel.c
7 #include <linux/kernel.h>
8 #include <linux/types.h>
10 #include <linux/sched.h>
11 #include <linux/pci.h>
12 #include <linux/init.h>
13 #include <linux/bitops.h>
15 #include <asm/ptrace.h>
16 #include <asm/system.h>
19 #include <asm/mmu_context.h>
21 #include <asm/pgtable.h>
22 #include <asm/core_marvel.h>
23 #include <asm/hwrpb.h>
24 #include <asm/tlbflush.h>
32 #include "machvec_impl.h"
34 #if NR_IRQS < MARVEL_NR_IRQS
35 # error NR_IRQS < MARVEL_NR_IRQS !!!
/*
 * Decode an IO7 device interrupt vector into a Marvel irq number.
 * The low 16 bits of the SCB vector encode the IO7 interrupt source;
 * the result is biased past the 16 legacy irqs and merged with the
 * source PE id so that each IO7 gets a distinct irq range.
 *
 * NOTE(review): this excerpt is fragmentary -- the declarations of
 * 'irq'/'pid' and the final dispatch (presumably handle_irq()) are on
 * missing lines; 'pid' is presumably extracted from the vector's PE
 * field -- confirm against the full source.
 */
43 io7_device_interrupt(unsigned long vector)
49 * Vector is 0x800 + (interrupt)
51 * where (interrupt) is:
53 * ...16|15 14|13 4|3 0
54 * -----+-----+--------+---
59 * 0x0800 - 0x0ff0 - 0x0800 + (LSI id << 4)
60 * 0x1000 - 0x2ff0 - 0x1000 + (MSI_DAT<8:0> << 4)
/* strip the 0x800 bias and the <3:0> zero field to get the source id */
63 irq = ((vector & 0xffff) - 0x800) >> 4;
65 irq += 16; /* offset for legacy */
66 irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* not too many bits */
67 irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */
/*
 * Map a Marvel irq number back to the IO7 CSR that controls it.
 *
 * The PE id is recovered from the high bits of the irq, used to look up
 * the owning io7, and the remaining bits (minus the 16-irq legacy bias)
 * select either an LSI or an MSI control register.  On success the
 * owning io7 is also returned through *pio7 when the caller wants it.
 *
 * NOTE(review): fragmentary excerpt -- the error-path returns (after the
 * printk diagnostics) and the final 'return ctl;' are on missing lines.
 */
72 static volatile unsigned long *
73 io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
75 volatile unsigned long *ctl;
/* high bits of the irq carry the PE (pid) of the owning IO7 */
79 pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT;
81 if (!(io7 = marvel_find_io7(pid))) {
83 "%s for nonexistent io7 -- vec %x, pid %d\n",
88 irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* isolate the vector */
89 irq -= 16; /* subtract legacy bias */
93 "%s for invalid irq -- pid %d adjusted irq %x\n",
/* irqs 0-0x7f are LSIs; 0x80 and up are MSIs, 32 data values per CTL */
98 ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */
99 if (irq >= 0x80) /* MSI */
100 ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr;
102 if (pio7) *pio7 = io7;
/*
 * irq_chip unmask hook: enable an IO7 LSI/MSI interrupt by writing its
 * control CSR under the io7's irq_lock.
 *
 * NOTE(review): fragmentary excerpt -- the actual enable write is on a
 * missing line; presumably it sets bit 24 of *ctl (the mirror image of
 * io7_disable_irq's '*ctl &= ~(1UL << 24)') -- confirm against the full
 * source.  The early return after the failed-lookup printk is also
 * missing here.
 */
107 io7_enable_irq(unsigned int irq)
109 volatile unsigned long *ctl;
112 ctl = io7_get_irq_ctl(irq, &io7);
114 printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
119 spin_lock(&io7->irq_lock);
123 spin_unlock(&io7->irq_lock);
/*
 * irq_chip mask hook: disable an IO7 LSI/MSI interrupt by clearing the
 * enable bit (bit 24) in its control CSR, under the io7's irq_lock.
 *
 * NOTE(review): fragmentary excerpt -- the early return after the
 * failed-lookup printk is on a missing line.
 */
127 io7_disable_irq(unsigned int irq)
129 volatile unsigned long *ctl;
132 ctl = io7_get_irq_ctl(irq, &io7);
134 printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
139 spin_lock(&io7->irq_lock);
/* bit 24 is the interrupt-enable bit in the LSI/MSI CTL CSR */
140 *ctl &= ~(1UL << 24);
143 spin_unlock(&io7->irq_lock);
/* Do-nothing irq_chip callback used for the legacy irqs (no hardware to poke). */
147 marvel_irq_noop(unsigned int irq)
/*
 * Value-returning no-op for irq_chip callbacks that require a result
 * (presumably returns 0 -- body is on missing lines, confirm).
 */
153 marvel_irq_noop_return(unsigned int irq)
/* irq_chip for the 16 legacy irqs: all operations are no-ops. */
158 static struct irq_chip marvel_legacy_irq_type = {
160 .mask = marvel_irq_noop,
161 .unmask = marvel_irq_noop,
/* irq_chip for IO7 level-sensitive interrupts (LSIs); masking acks them. */
164 static struct irq_chip io7_lsi_irq_type = {
166 .unmask = io7_enable_irq,
167 .mask = io7_disable_irq,
168 .mask_ack = io7_disable_irq,
/* irq_chip for IO7 message-signaled interrupts (MSIs); ack is a no-op. */
171 static struct irq_chip io7_msi_irq_type = {
173 .unmask = io7_enable_irq,
174 .mask = io7_disable_irq,
175 .ack = marvel_irq_noop,
/*
 * Retarget one of the IO7 error interrupts (HLT/HPI/CRD/STV/HEI) at the
 * CPU with PE id 'where' by rewriting the 9-bit target-pid field at
 * bit 24 of the given control CSR.
 *
 * NOTE(review): fragmentary excerpt -- the read of *csr into 'val', the
 * write-back, and the mb()/read-back flush are on missing lines.
 */
179 io7_redirect_irq(struct io7 *io7,
180 volatile unsigned long *csr,
186 val &= ~(0x1ffUL << 24); /* clear the target pid */
187 val |= ((unsigned long)where << 24); /* set the new target pid */
/*
 * Retarget LSI 'which' on this IO7 at the CPU with PE id 'where'.
 * LSI_CTL keeps its 9-bit target-pid field at bit 14 (unlike the error
 * CSRs handled by io7_redirect_irq, which keep it at bit 24).
 * The trailing CSR read (original line 208) flushes the write.
 */
195 io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where)
200 * LSI_CTL has target PID @ 14
202 val = io7->csrs->PO7_LSI_CTL[which].csr;
203 val &= ~(0x1ffUL << 14); /* clear the target pid */
204 val |= ((unsigned long)where << 14); /* set the new target pid */
206 io7->csrs->PO7_LSI_CTL[which].csr = val;
/* read back to flush the posted write */
208 io7->csrs->PO7_LSI_CTL[which].csr;
/*
 * Retarget MSI group 'which' on this IO7 at the CPU with PE id 'where'.
 * Identical in shape to io7_redirect_one_lsi: MSI_CTL also keeps its
 * target-pid field at bit 14; the trailing read flushes the write.
 */
212 io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where)
217 * MSI_CTL has target PID @ 14
219 val = io7->csrs->PO7_MSI_CTL[which].csr;
220 val &= ~(0x1ffUL << 14); /* clear the target pid */
221 val |= ((unsigned long)where << 14); /* set the new target pid */
223 io7->csrs->PO7_MSI_CTL[which].csr = val;
/* read back to flush the posted write */
225 io7->csrs->PO7_MSI_CTL[which].csr;
/*
 * Initialize LSI 'which': write a fresh CTL value containing only the
 * target pid (bit 14 field) -- i.e. targeted at 'where' but with the
 * enable bit clear, leaving the interrupt disabled.
 */
229 init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where)
232 * LSI_CTL has target PID @ 14
234 io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14);
/* read back to flush the posted write */
236 io7->csrs->PO7_LSI_CTL[which].csr;
/*
 * Initialize MSI group 'which': same pattern as init_one_io7_lsi --
 * target the CPU at PE 'where', enable bit clear (disabled).
 */
240 init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
243 * MSI_CTL has target PID @ 14
245 io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14);
/* read back to flush the posted write */
247 io7->csrs->PO7_MSI_CTL[which].csr;
/*
 * Set up all interrupts for one IO7: route the error irqs to the boot
 * CPU, register irq_chips/handlers for the 128 LSI and 512 MSI slots in
 * this IO7's irq range, and program the implemented sources (disabled,
 * targeted at the boot CPU).  Secondaries re-target their local IO7 in
 * marvel_smp_callin().
 */
251 init_io7_irqs(struct io7 *io7,
252 struct irq_chip *lsi_ops,
253 struct irq_chip *msi_ops)
/* first irq of this IO7's range: PE id in the high bits, legacy bias of 16 */
255 long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
258 printk("Initializing interrupts for IO7 at PE %u - base %lx\n",
262 * Where should interrupts from this IO7 go?
264 * They really should be sent to the local CPU to avoid having to
265 * traverse the mesh, but if it's not an SMP kernel, they have to
266 * go to the boot CPU. Send them all to the boot CPU for now,
267 * as each secondary starts, it can redirect it's local device
270 printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid);
272 spin_lock(&io7->irq_lock);
274 /* set up the error irqs */
275 io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
276 io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid);
277 io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid);
278 io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid);
279 io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid);
281 /* Set up the lsi irqs. */
282 for (i = 0; i < 128; ++i) {
283 irq_to_desc(base + i)->status |= IRQ_LEVEL;
284 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
287 /* Disable the implemented irqs in hardware. */
288 for (i = 0; i < 0x60; ++i)
289 init_one_io7_lsi(io7, i, boot_cpuid);
/* LSIs 0x74/0x75 are implemented outside the contiguous 0-0x5f range */
291 init_one_io7_lsi(io7, 0x74, boot_cpuid);
292 init_one_io7_lsi(io7, 0x75, boot_cpuid);
295 /* Set up the msi irqs. */
296 for (i = 128; i < (128 + 512); ++i) {
297 irq_to_desc(base + i)->status |= IRQ_LEVEL;
298 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
/* only 16 MSI_CTL registers to program (each covers 32 data values) */
301 for (i = 0; i < 16; ++i)
302 init_one_io7_msi(io7, i, boot_cpuid);
304 spin_unlock(&io7->irq_lock);
/*
 * Platform init_irq hook: claim irqs 0-15 for the (no-op) legacy chip,
 * then walk every IO7 in the system and initialize its interrupts.
 */
308 marvel_init_irq(void)
311 struct io7 *io7 = NULL;
313 /* Reserve the legacy irqs. */
314 for (i = 0; i < 16; ++i) {
315 set_irq_chip_and_handler(i, &marvel_legacy_irq_type,
319 /* Init the io7 irqs. */
320 for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
321 init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type);
/*
 * pci_map_irq hook: compute the Marvel irq for a PCI device.
 *
 * If the device has MSI enabled, the irq is derived from the MSI data
 * value (offset by 0x80 into the MSI range); however, MSI is then
 * forcibly disabled and the device falls back to the LSI from its
 * interrupt-line register.  Either way the result is biased past the
 * legacy irqs and merged with the owning IO7's PE id.
 *
 * NOTE(review): fragmentary excerpt -- declarations of irq/intline/
 * msg_ctl/msg_dat, the 'irq = intline' assignment, and the final
 * 'return irq' are on missing lines.
 */
325 marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
327 struct pci_controller *hose = dev->sysdata;
328 struct io7_port *io7_port = hose->sysdata;
329 struct io7 *io7 = io7_port->io7;
330 int msi_loc, msi_data_off;
336 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
339 msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
342 pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);
344 if (msg_ctl & PCI_MSI_FLAGS_ENABLE) {
/* data register offset depends on 32- vs 64-bit address capability */
345 msi_data_off = PCI_MSI_DATA_32;
346 if (msg_ctl & PCI_MSI_FLAGS_64BIT)
347 msi_data_off = PCI_MSI_DATA_64;
348 pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat);
350 irq = msg_dat & 0x1ff; /* we use msg_data<8:0> */
351 irq += 0x80; /* offset for lsi */
354 printk("PCI:%d:%d:%d (hose %d) is using MSI\n",
356 PCI_SLOT(dev->devfn),
357 PCI_FUNC(dev->devfn),
/* number of messages is 2^QSIZE (field at bits 6:4 of the control word) */
359 printk(" %d message(s) from 0x%04x\n",
360 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
362 printk(" reporting on %d IRQ(s) from %d (0x%x)\n",
363 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
364 (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT),
365 (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT));
/* force MSI off and fall back to the legacy interrupt line */
369 pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS,
370 msg_ctl & ~PCI_MSI_FLAGS_ENABLE);
371 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
374 printk(" forcing LSI interrupt on irq %d [0x%x]\n", irq, irq);
378 irq += 16; /* offset for legacy */
379 irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */
/*
 * Platform init_pci hook: install the Marvel error handlers, locate and
 * initialize a VGA console device if present, and clear any latched
 * errors on every IO7.  (Missing lines presumably include the common
 * PCI bus probe -- confirm against the full source.)
 */
385 marvel_init_pci(void)
389 marvel_register_error_handlers();
393 locate_and_init_vga(NULL);
395 /* Clear any io7 errors. */
396 for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
397 io7_clear_errors(io7);
/* Platform init_rtc hook (body on missing lines; presumably standard RTC irq setup). */
401 marvel_init_rtc(void)
/*
 * Argument bundle for the cross-CPU RTC helpers below: the rtc_time to
 * fill/read plus a retval field (declared on a missing line) for the
 * helper's result.
 */
406 struct marvel_rtc_time {
407 struct rtc_time *time;
/*
 * smp_call_function_single callback: read the RTC on the CPU this runs
 * on (the boot CPU, which owns the RTC) into mrt->time.
 */
413 smp_get_rtc_time(void *data)
415 struct marvel_rtc_time *mrt = data;
416 mrt->retval = __get_rtc_time(mrt->time);
/*
 * smp_call_function_single callback: write mrt->time to the RTC on the
 * CPU this runs on (the boot CPU, which owns the RTC).
 */
420 smp_set_rtc_time(void *data)
422 struct marvel_rtc_time *mrt = data;
423 mrt->retval = __set_rtc_time(mrt->time);
/*
 * rtc_get_time hook: the RTC is only accessible from the boot CPU, so
 * bounce the read there via smp_call_function_single when called on a
 * secondary; read directly otherwise.  (The 'return mrt.retval' in the
 * cross-call branch is on a missing line.)
 */
428 marvel_get_rtc_time(struct rtc_time *time)
431 struct marvel_rtc_time mrt;
433 if (smp_processor_id() != boot_cpuid) {
435 smp_call_function_single(boot_cpuid, smp_get_rtc_time, &mrt, 1);
439 return __get_rtc_time(time);
/*
 * rtc_set_time hook: mirror of marvel_get_rtc_time -- bounce the write
 * to the boot CPU when called from a secondary, write directly
 * otherwise.  (The 'return mrt.retval' in the cross-call branch is on a
 * missing line.)
 */
443 marvel_set_rtc_time(struct rtc_time *time)
446 struct marvel_rtc_time mrt;
448 if (smp_processor_id() != boot_cpuid) {
450 smp_call_function_single(boot_cpuid, smp_set_rtc_time, &mrt, 1);
454 return __set_rtc_time(time);
/*
 * Secondary-CPU callin hook: if this CPU has a local IO7 (PE ids match),
 * re-target all of that IO7's interrupts -- error irqs, implemented
 * LSIs, and MSIs -- from the boot CPU to this CPU, avoiding mesh
 * traversal for local device interrupts (see the comment in
 * init_io7_irqs).  The early return for CPUs without a local IO7 is on
 * a missing line.
 */
458 marvel_smp_callin(void)
460 int cpuid = hard_smp_processor_id();
461 struct io7 *io7 = marvel_find_io7(cpuid);
468 * There is a local IO7 - redirect all of its interrupts here.
470 printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid);
472 /* Redirect the error IRQS here. */
473 io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid);
474 io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid);
475 io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid);
476 io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid);
477 io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid);
/* same implemented-LSI set as init_io7_irqs: 0-0x5f plus 0x74/0x75 */
479 /* Redirect the implemented LSIs here. */
480 for (i = 0; i < 0x60; ++i)
481 io7_redirect_one_lsi(io7, i, cpuid);
483 io7_redirect_one_lsi(io7, 0x74, cpuid);
484 io7_redirect_one_lsi(io7, 0x75, cpuid);
486 /* Redirect the MSIs here. */
487 for (i = 0; i < 16; ++i)
488 io7_redirect_one_msi(io7, i, cpuid);
/*
 * Machine vector for Marvel/EV7 systems: wires the platform hooks
 * defined in this file (irq setup and mapping, PCI init, RTC access,
 * SMP callin) into the generic Alpha machine-vector dispatch.  Fields
 * not visible in this excerpt (DMA window setup, etc.) are on missing
 * lines.
 */
494 struct alpha_machine_vector marvel_ev7_mv __initmv = {
495 .vector_name = "MARVEL/EV7",
/* RTC lives on the boot CPU; these hooks bounce access there as needed */
498 .rtc_get_time = marvel_get_rtc_time,
499 .rtc_set_time = marvel_set_rtc_time,
501 .machine_check = marvel_machine_check,
502 .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
503 .min_io_address = DEFAULT_IO_BASE,
504 .min_mem_address = DEFAULT_MEM_BASE,
505 .pci_dac_offset = IO7_DAC_OFFSET,
507 .nr_irqs = MARVEL_NR_IRQS,
508 .device_interrupt = io7_device_interrupt,
510 .agp_info = marvel_agp_info,
512 .smp_callin = marvel_smp_callin,
513 .init_arch = marvel_init_arch,
514 .init_irq = marvel_init_irq,
515 .init_rtc = marvel_init_rtc,
516 .init_pci = marvel_init_pci,
517 .kill_arch = marvel_kill_arch,
518 .pci_map_irq = marvel_map_irq,
519 .pci_swizzle = common_swizzle,
/* NUMA topology hooks (Marvel is a mesh of PEs, each possibly with memory) */
521 .pa_to_nid = marvel_pa_to_nid,
522 .cpuid_to_nid = marvel_cpuid_to_nid,
523 .node_mem_start = marvel_node_mem_start,
524 .node_mem_size = marvel_node_mem_size,