Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 12 Oct 2007 02:40:14 +0000 (19:40 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 12 Oct 2007 02:40:14 +0000 (19:40 -0700)
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (867 commits)
  [SKY2]: status polling loop (post merge)
  [NET]: Fix NAPI completion handling in some drivers.
  [TCP]: Limit processing lost_retrans loop to work-to-do cases
  [TCP]: Fix lost_retrans loop vs fastpath problems
  [TCP]: No need to re-count fackets_out/sacked_out at RTO
  [TCP]: Extract tcp_match_queue_to_sack from sacktag code
  [TCP]: Kill almost unused variable pcount from sacktag
  [TCP]: Fix mark_head_lost to ignore R-bit when trying to mark L
  [TCP]: Add bytes_acked (ABC) clearing to FRTO too
  [IPv6]: Update setsockopt(IPV6_MULTICAST_IF) to support RFC 3493, try2
  [NETFILTER]: x_tables: add missing ip6t_modulename aliases
  [NETFILTER]: nf_conntrack_tcp: fix connection reopening
  [QETH]: fix qeth_main.c
  [NETLINK]: fib_frontend build fixes
  [IPv6]: Export userland ND options through netlink (RDNSS support)
  [9P]: build fix with !CONFIG_SYSCTL
  [NET]: Fix dev_put() and dev_hold() comments
  [NET]: make netlink user -> kernel interface synchronious
  [NET]: unify netlink kernel socket recognition
  [NET]: cleanup 3rd argument in netlink_sendskb
  ...

Fix up conflicts manually in Documentation/feature-removal-schedule.txt
and my new least favourite crap, the "mod_devicetable" support in the
files include/linux/mod_devicetable.h and scripts/mod/file2alias.c.

(The latter files seem to be explicitly _designed_ to get conflicts when
different subsystems work with them - they have an absolutely horrid
lack of subsystem separation!)

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
12 files changed:
Documentation/feature-removal-schedule.txt
MAINTAINERS
arch/mips/configs/bigsur_defconfig
arch/mips/configs/sb1250-swarm_defconfig
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
drivers/block/aoe/aoecmd.c
drivers/media/dvb/dvb-core/dvb_net.c
fs/compat_ioctl.c
include/linux/mod_devicetable.h
include/linux/pci_ids.h
scripts/mod/file2alias.c

index 675f75601ae632a694fe06a0e8658b5039bf586f,64831def40abe9b17043af9726762b7c2de3e251..63df2262d41a2625da0816a0383aaaece00963bc
@@@ -307,10 -307,11 +307,23 @@@ Who:    Stephen Hemminger <shemminger@l
  
  ---------------------------
  
 +What: i386/x86_64 bzImage symlinks
 +When: April 2008
 +
 +Why:  The i386/x86_64 merge provides a symlink to the old bzImage
 +      location so that not-yet-updated user space tools, e.g. package
 +      scripts, do not break.
 +Who:  Thomas Gleixner <tglx@linutronix.de>
++
++---------------------------
++
+ What: shaper network driver
+ When: January 2008
+ Files:        drivers/net/shaper.c, include/linux/if_shaper.h
+ Why:  This driver has been marked obsolete for many years.
+       It was only designed to work on lower-speed links and has design
+       flaws that lead to machine crashes. The qdisc infrastructure in
+       2.4 or later kernels provides richer features and is more robust.
+ Who:  Stephen Hemminger <shemminger@linux-foundation.org>
++
++---------------------------
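
Aside on the shaper removal above: the stock qdisc layer already covers its use case. Something along the lines of "tc qdisc add dev eth0 root tbf rate 128kbit burst 4kb latency 50ms" (device name and numbers illustrative) gives equivalent egress shaping via the token bucket filter, without the crash-prone in-driver queueing.
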
diff --cc MAINTAINERS
Simple merge
Simple merge
index dd2b97fc00b2ad8c76fe2f7bce0a2164e4866ae6,0000000000000000000000000000000000000000..4f681bcdb1fc3b1afac4676359ff1116dc3454bf
mode 100644,000000..100644
--- /dev/null
@@@ -1,343 -1,0 +1,341 @@@
- EXPORT_SYMBOL(do_softirq);
 +/*
 + *    linux/arch/i386/kernel/irq.c
 + *
 + *    Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 + *
 + * This file contains the lowest level x86-specific interrupt
 + * entry, irq-stacks and irq statistics code. All the remaining
 + * irq logic is done by the generic kernel/irq/ code and
 + * by the x86-specific irq controller code. (e.g. i8259.c and
 + * io_apic.c.)
 + */
 +
 +#include <linux/module.h>
 +#include <linux/seq_file.h>
 +#include <linux/interrupt.h>
 +#include <linux/kernel_stat.h>
 +#include <linux/notifier.h>
 +#include <linux/cpu.h>
 +#include <linux/delay.h>
 +
 +#include <asm/apic.h>
 +#include <asm/uaccess.h>
 +
 +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 +EXPORT_PER_CPU_SYMBOL(irq_stat);
 +
 +DEFINE_PER_CPU(struct pt_regs *, irq_regs);
 +EXPORT_PER_CPU_SYMBOL(irq_regs);
 +
 +/*
 + * 'what should we do if we get a hw irq event on an illegal vector'.
 + * each architecture has to answer this themselves.
 + */
 +void ack_bad_irq(unsigned int irq)
 +{
 +      printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
 +
 +#ifdef CONFIG_X86_LOCAL_APIC
 +      /*
 +       * Currently unexpected vectors happen only on SMP and APIC.
 +       * We _must_ ack these because every local APIC has only N
 +       * irq slots per priority level, and a 'hanging, unacked' IRQ
 +       * holds up an irq slot - in excessive cases (when multiple
 +       * unexpected vectors occur) that might lock up the APIC
 +       * completely.
 +       * But only ack when the APIC is enabled -AK
 +       */
 +      if (cpu_has_apic)
 +              ack_APIC_irq();
 +#endif
 +}
 +
 +#ifdef CONFIG_4KSTACKS
 +/*
 + * per-CPU IRQ handling contexts (thread information and stack)
 + */
 +union irq_ctx {
 +      struct thread_info      tinfo;
 +      u32                     stack[THREAD_SIZE/sizeof(u32)];
 +};
 +
 +static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 +static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 +#endif
 +
 +/*
 + * do_IRQ handles all normal device IRQ's (the special
 + * SMP cross-CPU interrupts have their own specific
 + * handlers).
 + */
 +fastcall unsigned int do_IRQ(struct pt_regs *regs)
 +{
 +      struct pt_regs *old_regs;
 +      /* high bit used in ret_from_ code */
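 +      /* (entry.S pushes the vector one's-complemented, so orig_eax is
 +         negative here for hardware interrupts, unlike syscall numbers) */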
 +      int irq = ~regs->orig_eax;
 +      struct irq_desc *desc = irq_desc + irq;
 +#ifdef CONFIG_4KSTACKS
 +      union irq_ctx *curctx, *irqctx;
 +      u32 *isp;
 +#endif
 +
 +      if (unlikely((unsigned)irq >= NR_IRQS)) {
 +              printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
 +                                      __FUNCTION__, irq);
 +              BUG();
 +      }
 +
 +      old_regs = set_irq_regs(regs);
 +      irq_enter();
 +#ifdef CONFIG_DEBUG_STACKOVERFLOW
 +      /* Debugging check for stack overflow: is there less than 1KB free? */
 +      {
 +              long esp;
 +
 +              __asm__ __volatile__("andl %%esp,%0" :
 +                                      "=r" (esp) : "0" (THREAD_SIZE - 1));
 +              if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
 +                      printk("do_IRQ: stack overflow: %ld\n",
 +                              esp - sizeof(struct thread_info));
 +                      dump_stack();
 +              }
 +      }
 +#endif
 +
 +#ifdef CONFIG_4KSTACKS
 +
 +      curctx = (union irq_ctx *) current_thread_info();
 +      irqctx = hardirq_ctx[smp_processor_id()];
 +
 +      /*
 +       * this is where we switch to the IRQ stack. However, if we are
 +       * already using the IRQ stack (because we interrupted a hardirq
 +       * handler) we can't do that and just have to keep using the
 +       * current stack (which is the irq stack already after all)
 +       */
 +      if (curctx != irqctx) {
 +              int arg1, arg2, ebx;
 +
 +              /* build the stack frame on the IRQ stack */
 +              isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
 +              irqctx->tinfo.task = curctx->tinfo.task;
 +              irqctx->tinfo.previous_esp = current_stack_pointer;
 +
 +              /*
 +               * Copy the softirq bits in preempt_count so that the
 +               * softirq checks work in the hardirq context.
 +               */
 +              irqctx->tinfo.preempt_count =
 +                      (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
 +                      (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
 +
 +              asm volatile(
 +                      "       xchgl  %%ebx,%%esp      \n"
 +                      "       call   *%%edi           \n"
 +                      "       movl   %%ebx,%%esp      \n"
 +                      : "=a" (arg1), "=d" (arg2), "=b" (ebx)
 +                      :  "0" (irq),   "1" (desc),  "2" (isp),
 +                         "D" (desc->handle_irq)
 +                      : "memory", "cc"
 +              );
 +      } else
 +#endif
 +              desc->handle_irq(irq, desc);
 +
 +      irq_exit();
 +      set_irq_regs(old_regs);
 +      return 1;
 +}
 +
 +#ifdef CONFIG_4KSTACKS
 +
 +static char softirq_stack[NR_CPUS * THREAD_SIZE]
 +              __attribute__((__section__(".bss.page_aligned")));
 +
 +static char hardirq_stack[NR_CPUS * THREAD_SIZE]
 +              __attribute__((__section__(".bss.page_aligned")));
 +
 +/*
 + * allocate per-cpu stacks for hardirq and for softirq processing
 + */
 +void irq_ctx_init(int cpu)
 +{
 +      union irq_ctx *irqctx;
 +
 +      if (hardirq_ctx[cpu])
 +              return;
 +
 +      irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
 +      irqctx->tinfo.task              = NULL;
 +      irqctx->tinfo.exec_domain       = NULL;
 +      irqctx->tinfo.cpu               = cpu;
 +      irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
 +      irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
 +
 +      hardirq_ctx[cpu] = irqctx;
 +
 +      irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
 +      irqctx->tinfo.task              = NULL;
 +      irqctx->tinfo.exec_domain       = NULL;
 +      irqctx->tinfo.cpu               = cpu;
 +      irqctx->tinfo.preempt_count     = 0;
 +      irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
 +
 +      softirq_ctx[cpu] = irqctx;
 +
 +      printk("CPU %u irqstacks, hard=%p soft=%p\n",
 +              cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
 +}
 +
 +void irq_ctx_exit(int cpu)
 +{
 +      hardirq_ctx[cpu] = NULL;
 +}
 +
 +extern asmlinkage void __do_softirq(void);
 +
 +asmlinkage void do_softirq(void)
 +{
 +      unsigned long flags;
 +      struct thread_info *curctx;
 +      union irq_ctx *irqctx;
 +      u32 *isp;
 +
 +      if (in_interrupt())
 +              return;
 +
 +      local_irq_save(flags);
 +
 +      if (local_softirq_pending()) {
 +              curctx = current_thread_info();
 +              irqctx = softirq_ctx[smp_processor_id()];
 +              irqctx->tinfo.task = curctx->task;
 +              irqctx->tinfo.previous_esp = current_stack_pointer;
 +
 +              /* build the stack frame on the softirq stack */
 +              isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
 +
 +              asm volatile(
 +                      "       xchgl   %%ebx,%%esp     \n"
 +                      "       call    __do_softirq    \n"
 +                      "       movl    %%ebx,%%esp     \n"
 +                      : "=b"(isp)
 +                      : "0"(isp)
 +                      : "memory", "cc", "edx", "ecx", "eax"
 +              );
 +              /*
 +               * Shouldn't happen, we returned above if in_interrupt():
 +               */
 +              WARN_ON_ONCE(softirq_count());
 +      }
 +
 +      local_irq_restore(flags);
 +}
 +#endif
 +
 +/*
 + * Interrupt statistics:
 + */
 +
 +atomic_t irq_err_count;
 +
 +/*
 + * /proc/interrupts printing:
 + */
 +
 +int show_interrupts(struct seq_file *p, void *v)
 +{
 +      int i = *(loff_t *) v, j;
 +      struct irqaction * action;
 +      unsigned long flags;
 +
 +      if (i == 0) {
 +              seq_printf(p, "           ");
 +              for_each_online_cpu(j)
 +                      seq_printf(p, "CPU%-8d",j);
 +              seq_putc(p, '\n');
 +      }
 +
 +      if (i < NR_IRQS) {
 +              spin_lock_irqsave(&irq_desc[i].lock, flags);
 +              action = irq_desc[i].action;
 +              if (!action)
 +                      goto skip;
 +              seq_printf(p, "%3d: ",i);
 +#ifndef CONFIG_SMP
 +              seq_printf(p, "%10u ", kstat_irqs(i));
 +#else
 +              for_each_online_cpu(j)
 +                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 +#endif
 +              seq_printf(p, " %8s", irq_desc[i].chip->name);
 +              seq_printf(p, "-%-8s", irq_desc[i].name);
 +              seq_printf(p, "  %s", action->name);
 +
 +              for (action=action->next; action; action = action->next)
 +                      seq_printf(p, ", %s", action->name);
 +
 +              seq_putc(p, '\n');
 +skip:
 +              spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 +      } else if (i == NR_IRQS) {
 +              seq_printf(p, "NMI: ");
 +              for_each_online_cpu(j)
 +                      seq_printf(p, "%10u ", nmi_count(j));
 +              seq_putc(p, '\n');
 +#ifdef CONFIG_X86_LOCAL_APIC
 +              seq_printf(p, "LOC: ");
 +              for_each_online_cpu(j)
 +                      seq_printf(p, "%10u ",
 +                              per_cpu(irq_stat,j).apic_timer_irqs);
 +              seq_putc(p, '\n');
 +#endif
 +              seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 +#if defined(CONFIG_X86_IO_APIC)
 +              seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
 +#endif
 +      }
 +      return 0;
 +}
 +
 +#ifdef CONFIG_HOTPLUG_CPU
 +#include <mach_apic.h>
 +
 +void fixup_irqs(cpumask_t map)
 +{
 +      unsigned int irq;
 +      static int warned;
 +
 +      for (irq = 0; irq < NR_IRQS; irq++) {
 +              cpumask_t mask;
 +              if (irq == 2)
 +                      continue;
 +
 +              cpus_and(mask, irq_desc[irq].affinity, map);
 +              if (any_online_cpu(mask) == NR_CPUS) {
 +                      printk("Breaking affinity for irq %i\n", irq);
 +                      mask = map;
 +              }
 +              if (irq_desc[irq].chip->set_affinity)
 +                      irq_desc[irq].chip->set_affinity(irq, mask);
 +              else if (irq_desc[irq].action && !(warned++))
 +                      printk("Cannot set affinity for irq %i\n", irq);
 +      }
 +
 +#if 0
 +      barrier();
 +      /* Ingo Molnar says: "after the IO-APIC masks have been redirected
 +         [note the nop - the interrupt-enable boundary on x86 is two
 +         instructions from sti] - to flush out pending hardirqs and
 +         IPIs. After this point nothing is supposed to reach this CPU." */
 +      __asm__ __volatile__("sti; nop; cli");
 +      barrier();
 +#else
 +      /* That doesn't seem sufficient.  Give it 1ms. */
 +      local_irq_enable();
 +      mdelay(1);
 +      local_irq_disable();
 +#endif
 +}
 +#endif
 +
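
Aside on the preempt_count splice in do_IRQ above: only the SOFTIRQ bits of the interrupted context are imported into the IRQ stack's thread_info, so in_softirq() still answers correctly on the IRQ stack while its own hardirq accounting is preserved. A minimal user-space sketch, with the mask values assumed from the 2.6 preempt_count layout:

	#include <stdio.h>

	#define SOFTIRQ_MASK   0x0000ff00UL  /* bits 8-15: softirq count (assumed) */
	#define HARDIRQ_OFFSET 0x00010000UL  /* first hardirq bit (assumed) */

	int main(void)
	{
		unsigned long cur = 0x00000100UL;   /* interrupted task: in a softirq */
		unsigned long irq = HARDIRQ_OFFSET; /* IRQ stack, as set by irq_ctx_init() */

		/* the splice: keep the IRQ stack's own counts, import only
		 * the interrupted context's softirq bits */
		unsigned long merged = (irq & ~SOFTIRQ_MASK) | (cur & SOFTIRQ_MASK);

		printf("merged preempt_count = %#lx\n", merged); /* prints 0x10100 */
		return 0;
	}
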
index 39cb3fa83ebbe3e68af877a0f43074189b1c027a,0000000000000000000000000000000000000000..bd11e42b22bfa6b66ebf2dcf897845a1baaa25f9
mode 100644,000000..100644
--- /dev/null
@@@ -1,213 -1,0 +1,212 @@@
- EXPORT_SYMBOL(do_softirq);
 +/*
 + *    linux/arch/x86_64/kernel/irq.c
 + *
 + *    Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 + *
 + * This file contains the lowest level x86_64-specific interrupt
 + * entry and irq statistics code. All the remaining irq logic is
 + * done by the generic kernel/irq/ code and in the
 + * x86_64-specific irq controller code. (e.g. i8259.c and
 + * io_apic.c.)
 + */
 +
 +#include <linux/kernel_stat.h>
 +#include <linux/interrupt.h>
 +#include <linux/seq_file.h>
 +#include <linux/module.h>
 +#include <linux/delay.h>
 +#include <asm/uaccess.h>
 +#include <asm/io_apic.h>
 +#include <asm/idle.h>
 +#include <asm/smp.h>
 +
 +atomic_t irq_err_count;
 +
 +#ifdef CONFIG_DEBUG_STACKOVERFLOW
 +/*
 + * Probabilistic stack overflow check:
 + *
 + * Only check the stack in process context, because everything else
 + * runs on the big interrupt stacks. Checking reliably is too expensive,
 + * so we just check from interrupts.
 + */
 +static inline void stack_overflow_check(struct pt_regs *regs)
 +{
 +      u64 curbase = (u64)task_stack_page(current);
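 +      /* initialised one period in the past so the rate limit below
 +         does not suppress the very first warning */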
 +      static unsigned long warned = -60*HZ;
 +
 +      if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
 +          regs->rsp <  curbase + sizeof(struct thread_info) + 128 &&
 +          time_after(jiffies, warned + 60*HZ)) {
 +              printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
 +                     current->comm, curbase, regs->rsp);
 +              show_stack(NULL,NULL);
 +              warned = jiffies;
 +      }
 +}
 +#endif
 +
 +/*
 + * Generic, controller-independent functions:
 + */
 +
 +int show_interrupts(struct seq_file *p, void *v)
 +{
 +      int i = *(loff_t *) v, j;
 +      struct irqaction * action;
 +      unsigned long flags;
 +
 +      if (i == 0) {
 +              seq_printf(p, "           ");
 +              for_each_online_cpu(j)
 +                      seq_printf(p, "CPU%-8d",j);
 +              seq_putc(p, '\n');
 +      }
 +
 +      if (i < NR_IRQS) {
 +              spin_lock_irqsave(&irq_desc[i].lock, flags);
 +              action = irq_desc[i].action;
 +              if (!action) 
 +                      goto skip;
 +              seq_printf(p, "%3d: ",i);
 +#ifndef CONFIG_SMP
 +              seq_printf(p, "%10u ", kstat_irqs(i));
 +#else
 +              for_each_online_cpu(j)
 +                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 +#endif
 +              seq_printf(p, " %8s", irq_desc[i].chip->name);
 +              seq_printf(p, "-%-8s", irq_desc[i].name);
 +
 +              seq_printf(p, "  %s", action->name);
 +              for (action=action->next; action; action = action->next)
 +                      seq_printf(p, ", %s", action->name);
 +              seq_putc(p, '\n');
 +skip:
 +              spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 +      } else if (i == NR_IRQS) {
 +              seq_printf(p, "NMI: ");
 +              for_each_online_cpu(j)
 +                      seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
 +              seq_putc(p, '\n');
 +              seq_printf(p, "LOC: ");
 +              for_each_online_cpu(j)
 +                      seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
 +              seq_putc(p, '\n');
 +              seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 +      }
 +      return 0;
 +}
 +
 +/*
 + * do_IRQ handles all normal device IRQ's (the special
 + * SMP cross-CPU interrupts have their own specific
 + * handlers).
 + */
 +asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 +{
 +      struct pt_regs *old_regs = set_irq_regs(regs);
 +
 +      /* high bit used in ret_from_ code  */
 +      unsigned vector = ~regs->orig_rax;
 +      unsigned irq;
 +
 +      exit_idle();
 +      irq_enter();
 +      irq = __get_cpu_var(vector_irq)[vector];
 +
 +#ifdef CONFIG_DEBUG_STACKOVERFLOW
 +      stack_overflow_check(regs);
 +#endif
 +
 +      if (likely(irq < NR_IRQS))
 +              generic_handle_irq(irq);
 +      else {
 +              if (!disable_apic)
 +                      ack_APIC_irq();
 +
 +              if (printk_ratelimit())
 +                      printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
 +                              __func__, smp_processor_id(), vector);
 +      }
 +
 +      irq_exit();
 +
 +      set_irq_regs(old_regs);
 +      return 1;
 +}
 +
 +#ifdef CONFIG_HOTPLUG_CPU
 +void fixup_irqs(cpumask_t map)
 +{
 +      unsigned int irq;
 +      static int warned;
 +
 +      for (irq = 0; irq < NR_IRQS; irq++) {
 +              cpumask_t mask;
 +              int break_affinity = 0;
 +              int set_affinity = 1;
 +
 +              if (irq == 2)
 +                      continue;
 +
 +              /* interrupts are disabled at this point */
 +              spin_lock(&irq_desc[irq].lock);
 +
 +              if (!irq_has_action(irq) ||
 +                  cpus_equal(irq_desc[irq].affinity, map)) {
 +                      spin_unlock(&irq_desc[irq].lock);
 +                      continue;
 +              }
 +
 +              cpus_and(mask, irq_desc[irq].affinity, map);
 +              if (cpus_empty(mask)) {
 +                      break_affinity = 1;
 +                      mask = map;
 +              }
 +
 +              if (irq_desc[irq].chip->mask)
 +                      irq_desc[irq].chip->mask(irq);
 +
 +              if (irq_desc[irq].chip->set_affinity)
 +                      irq_desc[irq].chip->set_affinity(irq, mask);
 +              else if (!(warned++))
 +                      set_affinity = 0;
 +
 +              if (irq_desc[irq].chip->unmask)
 +                      irq_desc[irq].chip->unmask(irq);
 +
 +              spin_unlock(&irq_desc[irq].lock);
 +
 +              if (break_affinity && set_affinity)
 +                      printk("Broke affinity for irq %i\n", irq);
 +              else if (!set_affinity)
 +                      printk("Cannot set affinity for irq %i\n", irq);
 +      }
 +
 +      /* That doesn't seem sufficient.  Give it 1ms. */
 +      local_irq_enable();
 +      mdelay(1);
 +      local_irq_disable();
 +}
 +#endif
 +
 +extern void call_softirq(void);
 +
 +asmlinkage void do_softirq(void)
 +{
 +      __u32 pending;
 +      unsigned long flags;
 +
 +      if (in_interrupt())
 +              return;
 +
 +      local_irq_save(flags);
 +      pending = local_softirq_pending();
 +      /* Switch to interrupt stack */
 +      if (pending) {
 +              call_softirq();
 +              WARN_ON_ONCE(softirq_count());
 +      }
 +      local_irq_restore(flags);
 +}
Simple merge
Simple merge
Simple merge
index e47e5951058be53593f68e9c8d955418a0e82ccf,0c522e6b0917ad84a2fc5f26423cc8efb4419702..74523d999f7a9a99f4e5890102e653c49f094ade
@@@ -340,15 -340,19 +340,30 @@@ struct parisc_device_id 
  #define PA_HVERSION_ANY_ID    0xffff
  #define PA_SVERSION_ANY_ID    0xffffffff
  
 +/* SDIO */
 +
 +#define SDIO_ANY_ID (~0)
 +
 +struct sdio_device_id {
 +      __u8    class;                  /* Standard interface or SDIO_ANY_ID */
 +      __u16   vendor;                 /* Vendor or SDIO_ANY_ID */
 +      __u16   device;                 /* Device ID or SDIO_ANY_ID */
 +      kernel_ulong_t driver_data;     /* Data private to the driver */
 +};
 +
+ /* SSB core, see drivers/ssb/ */
+ struct ssb_device_id {
+       __u16   vendor;
+       __u16   coreid;
+       __u8    revision;
+ };
+ #define SSB_DEVICE(_vendor, _coreid, _revision)  \
+       { .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
+ #define SSB_DEVTABLE_END  \
+       { 0, },
+ #define SSB_ANY_VENDOR                0xFFFF
+ #define SSB_ANY_ID            0xFFFF
+ #define SSB_ANY_REV           0xFF
  #endif /* LINUX_MOD_DEVICETABLE_H */
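
Driver-side usage note: a driver feeds the new sdio_device_id through MODULE_DEVICE_TABLE(sdio, ...), which emits the __mod_sdio_device_table symbol that file2alias.c learns to parse below. A hypothetical table (the vendor/device values are made up):

	static const struct sdio_device_id my_sdio_ids[] = {
		{ .class = (__u8)SDIO_ANY_ID, .vendor = 0x1234, .device = 0x5678 },
		{ /* all-zero terminator */ },
	};
	MODULE_DEVICE_TABLE(sdio, my_sdio_ids);
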
Simple merge
index 1e5d4d6931955bbf9432b3309c13b0ef0b8192c5,895ba3ac62082bbeb48d46fc6390a9a888a82821..36e3754db53a5b0951fb0a6c180f111ae2f9c54a
@@@ -484,19 -484,18 +484,33 @@@ static int do_parisc_entry(const char *
        return 1;
  }
  
 +/* Looks like: sdio:cNvNdN. */
 +static int do_sdio_entry(const char *filename,
 +                      struct sdio_device_id *id, char *alias)
 +{
 +      id->class = TO_NATIVE(id->class);
 +      id->vendor = TO_NATIVE(id->vendor);
 +      id->device = TO_NATIVE(id->device);
 +
 +      strcpy(alias, "sdio:");
 +      ADD(alias, "c", id->class != (__u8)SDIO_ANY_ID, id->class);
 +      ADD(alias, "v", id->vendor != (__u16)SDIO_ANY_ID, id->vendor);
 +      ADD(alias, "d", id->device != (__u16)SDIO_ANY_ID, id->device);
++      return 1;
++}
++
+ /* Looks like: ssb:vNidNrevN. */
+ static int do_ssb_entry(const char *filename,
+                       struct ssb_device_id *id, char *alias)
+ {
+       id->vendor = TO_NATIVE(id->vendor);
+       id->coreid = TO_NATIVE(id->coreid);
+       id->revision = TO_NATIVE(id->revision);
  
+       strcpy(alias, "ssb:");
+       ADD(alias, "v", id->vendor != SSB_ANY_VENDOR, id->vendor);
+       ADD(alias, "id", id->coreid != SSB_ANY_ID, id->coreid);
+       ADD(alias, "rev", id->revision != SSB_ANY_REV, id->revision);
        return 1;
  }
  
@@@ -615,10 -614,10 +629,14 @@@ void handle_moddevtable(struct module *
                do_table(symval, sym->st_size,
                         sizeof(struct parisc_device_id), "parisc",
                         do_parisc_entry, mod);
 +      else if (sym_is(symname, "__mod_sdio_device_table"))
 +              do_table(symval, sym->st_size,
 +                       sizeof(struct sdio_device_id), "sdio",
 +                       do_sdio_entry, mod);
+       else if (sym_is(symname, "__mod_ssb_device_table"))
+               do_table(symval, sym->st_size,
+                        sizeof(struct ssb_device_id), "ssb",
+                        do_ssb_entry, mod);
  }
  
  /* Now add out buffered information to the generated C source */