Merge branch 'master' into for-next
author Jiri Kosina <jkosina@suse.cz>
Wed, 4 Aug 2010 13:14:38 +0000 (15:14 +0200)
committer Jiri Kosina <jkosina@suse.cz>
Wed, 4 Aug 2010 13:14:38 +0000 (15:14 +0200)
16 files changed:
Documentation/feature-removal-schedule.txt
arch/arm/mach-imx/dma-v1.c
arch/powerpc/platforms/ps3/htab.c
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
drivers/gpu/drm/drm_edid.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/net/gianfar.c
drivers/net/ll_temac_main.c
drivers/net/wireless/hostap/hostap_hw.c
drivers/serial/cpm_uart/cpm_uart_core.c
drivers/usb/class/cdc-acm.c
include/linux/netdevice.h
kernel/debug/debug_core.c
kernel/debug/gdbstub.c

index 1a0fc32bc2050abbe9ecefca7e62407690bd0e87,1571c0c83dba0b0876ea52e4ff85ff69a56dab87..40a9c323931905a99447f90084584eb7eb420128
@@@ -93,7 -93,7 +93,7 @@@ Why:  Broken design for runtime control 
        inputs.  This framework was never widely used, and most attempts to
        use it were broken.  Drivers should instead be exposing domain-specific
        interfaces either to kernel or to userspace.
 -Who:  Pavel Machek <pavel@suse.cz>
 +Who:  Pavel Machek <pavel@ucw.cz>
  
  ---------------------------
  
@@@ -647,3 -647,10 +647,10 @@@ Who:     Stefan Richter <stefanr@s5r6.in-be
  
  ----------------------------
  
+ What: The acpi_sleep=s4_nonvs command line option
+ When: 2.6.37
+ Files:        arch/x86/kernel/acpi/sleep.c
+ Why:  superseded by acpi_sleep=nonvs
+ Who:  Rafael J. Wysocki <rjw@sisk.pl>
+ ----------------------------
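For reference, the replacement is a drop-in change on the kernel command line; a minimal bootloader example (the kernel image path and root device are placeholders):

    # deprecated form, still accepted with a warning until 2.6.37:
    #   kernel /boot/vmlinuz root=/dev/sda1 acpi_sleep=s4_nonvs
    # preferred replacement:
    kernel /boot/vmlinuz root=/dev/sda1 acpi_sleep=nonvs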
index 0000000000000000000000000000000000000000,fd1d9197d06ef66f9b5e5a3d95d3a825510f47a5..3e8c47c63bac6bb3556c5b74e90c9def1b66e4c5
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,863 +1,862 @@@
 -              printk(KERN_ERR "imxdma%d: imx_dma_setup_sg epty sg list\n",
+ /*
+  *  linux/arch/arm/plat-mxc/dma-v1.c
+  *
+  *  i.MX DMA registration and IRQ dispatching
+  *
+  * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
+  * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
+  * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation; either version 2
+  * of the License, or (at your option) any later version.
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+  * MA 02110-1301, USA.
+  */
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
+ #include <linux/errno.h>
+ #include <linux/clk.h>
+ #include <linux/scatterlist.h>
+ #include <linux/io.h>
+ #include <asm/system.h>
+ #include <asm/irq.h>
+ #include <mach/hardware.h>
+ #include <mach/dma-v1.h>
+ #define DMA_DCR     0x00              /* Control Register */
+ #define DMA_DISR    0x04              /* Interrupt status Register */
+ #define DMA_DIMR    0x08              /* Interrupt mask Register */
+ #define DMA_DBTOSR  0x0c              /* Burst timeout status Register */
+ #define DMA_DRTOSR  0x10              /* Request timeout Register */
+ #define DMA_DSESR   0x14              /* Transfer Error Status Register */
+ #define DMA_DBOSR   0x18              /* Buffer overflow status Register */
+ #define DMA_DBTOCR  0x1c              /* Burst timeout control Register */
+ #define DMA_WSRA    0x40              /* W-Size Register A */
+ #define DMA_XSRA    0x44              /* X-Size Register A */
+ #define DMA_YSRA    0x48              /* Y-Size Register A */
+ #define DMA_WSRB    0x4c              /* W-Size Register B */
+ #define DMA_XSRB    0x50              /* X-Size Register B */
+ #define DMA_YSRB    0x54              /* Y-Size Register B */
+ #define DMA_SAR(x)  (0x80 + ((x) << 6))       /* Source Address Registers */
+ #define DMA_DAR(x)  (0x84 + ((x) << 6))       /* Destination Address Registers */
+ #define DMA_CNTR(x) (0x88 + ((x) << 6))       /* Count Registers */
+ #define DMA_CCR(x)  (0x8c + ((x) << 6))       /* Control Registers */
+ #define DMA_RSSR(x) (0x90 + ((x) << 6))       /* Request source select Registers */
+ #define DMA_BLR(x)  (0x94 + ((x) << 6))       /* Burst length Registers */
+ #define DMA_RTOR(x) (0x98 + ((x) << 6))       /* Request timeout Registers */
+ #define DMA_BUCR(x) (0x98 + ((x) << 6))       /* Bus Utilization Registers */
+ #define DMA_CCNR(x) (0x9C + ((x) << 6))       /* Channel counter Registers */
+ #define DCR_DRST           (1<<1)
+ #define DCR_DEN            (1<<0)
+ #define DBTOCR_EN          (1<<15)
+ #define DBTOCR_CNT(x)      ((x) & 0x7fff)
+ #define CNTR_CNT(x)        ((x) & 0xffffff)
+ #define CCR_ACRPT          (1<<14)
+ #define CCR_DMOD_LINEAR    (0x0 << 12)
+ #define CCR_DMOD_2D        (0x1 << 12)
+ #define CCR_DMOD_FIFO      (0x2 << 12)
+ #define CCR_DMOD_EOBFIFO   (0x3 << 12)
+ #define CCR_SMOD_LINEAR    (0x0 << 10)
+ #define CCR_SMOD_2D        (0x1 << 10)
+ #define CCR_SMOD_FIFO      (0x2 << 10)
+ #define CCR_SMOD_EOBFIFO   (0x3 << 10)
+ #define CCR_MDIR_DEC       (1<<9)
+ #define CCR_MSEL_B         (1<<8)
+ #define CCR_DSIZ_32        (0x0 << 6)
+ #define CCR_DSIZ_8         (0x1 << 6)
+ #define CCR_DSIZ_16        (0x2 << 6)
+ #define CCR_SSIZ_32        (0x0 << 4)
+ #define CCR_SSIZ_8         (0x1 << 4)
+ #define CCR_SSIZ_16        (0x2 << 4)
+ #define CCR_REN            (1<<3)
+ #define CCR_RPT            (1<<2)
+ #define CCR_FRC            (1<<1)
+ #define CCR_CEN            (1<<0)
+ #define RTOR_EN            (1<<15)
+ #define RTOR_CLK           (1<<14)
+ #define RTOR_PSC           (1<<13)
+ /*
+  * struct imx_dma_channel - i.MX specific DMA extension
+  * @name: name specified by DMA client
+  * @irq_handler: client callback for end of transfer
+  * @err_handler: client callback for error condition
+  * @data: clients context data for callbacks
+  * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
+  * @sg: pointer to the actual read/written chunk for scatter-gather emulation
+  * @resbytes: total residual number of bytes to transfer
+  *            (it can be lower or same as sum of SG mapped chunk sizes)
+  * @sgcount: number of chunks to be read/written
+  *
+  * Structure is used for IMX DMA processing. It would probably be good
+  * to use @struct dma_struct in the future for external interfacing and
+  * use @struct imx_dma_channel only as an extension to it.
+  */
+ struct imx_dma_channel {
+       const char *name;
+       void (*irq_handler) (int, void *);
+       void (*err_handler) (int, void *, int errcode);
+       void (*prog_handler) (int, void *, struct scatterlist *);
+       void *data;
+       unsigned int dma_mode;
+       struct scatterlist *sg;
+       unsigned int resbytes;
+       int dma_num;
+       int in_use;
+       u32 ccr_from_device;
+       u32 ccr_to_device;
+       struct timer_list watchdog;
+       int hw_chaining;
+ };
+ static void __iomem *imx_dmav1_baseaddr;
+ static void imx_dmav1_writel(unsigned val, unsigned offset)
+ {
+       __raw_writel(val, imx_dmav1_baseaddr + offset);
+ }
+ static unsigned imx_dmav1_readl(unsigned offset)
+ {
+       return __raw_readl(imx_dmav1_baseaddr + offset);
+ }
+ static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
+ static struct clk *dma_clk;
+ static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
+ {
+       if (cpu_is_mx27())
+               return imxdma->hw_chaining;
+       else
+               return 0;
+ }
+ /*
+  * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
+  */
+ static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       unsigned long now;
+       if (!imxdma->name) {
+               printk(KERN_CRIT "%s: called for  not allocated channel %d\n",
+                      __func__, channel);
+               return 0;
+       }
+       now = min(imxdma->resbytes, sg->length);
+       if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
+               imxdma->resbytes -= now;
+       if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
+               imx_dmav1_writel(sg->dma_address, DMA_DAR(channel));
+       else
+               imx_dmav1_writel(sg->dma_address, DMA_SAR(channel));
+       imx_dmav1_writel(now, DMA_CNTR(channel));
+       pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
+               "size 0x%08x\n", channel,
+                imx_dmav1_readl(DMA_DAR(channel)),
+                imx_dmav1_readl(DMA_SAR(channel)),
+                imx_dmav1_readl(DMA_CNTR(channel)));
+       return now;
+ }
+ /**
+  * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from
+  * device transfer
+  *
+  * @channel: i.MX DMA channel number
+  * @dma_address: the DMA/physical memory address of the linear data block
+  *            to transfer
+  * @dma_length: length of the data block in bytes
+  * @dev_addr: physical device port address
+  * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
+  *           or %DMA_MODE_WRITE from memory to the device
+  *
+  * Return value: if incorrect parameters are provided -%EINVAL.
+  *            Zero indicates success.
+  */
+ int
+ imx_dma_setup_single(int channel, dma_addr_t dma_address,
+                    unsigned int dma_length, unsigned int dev_addr,
+                    unsigned int dmamode)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       imxdma->sg = NULL;
+       imxdma->dma_mode = dmamode;
+       if (!dma_address) {
+               printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
+                      channel);
+               return -EINVAL;
+       }
+       if (!dma_length) {
+               printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
+                      channel);
+               return -EINVAL;
+       }
+       if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
+               pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
+                       "dev_addr=0x%08x for read\n",
+                       channel, __func__, (unsigned int)dma_address,
+                       dma_length, dev_addr);
+               imx_dmav1_writel(dev_addr, DMA_SAR(channel));
+               imx_dmav1_writel(dma_address, DMA_DAR(channel));
+               imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
+       } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
+               pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
+                       "dev_addr=0x%08x for write\n",
+                       channel, __func__, (unsigned int)dma_address,
+                       dma_length, dev_addr);
+               imx_dmav1_writel(dma_address, DMA_SAR(channel));
+               imx_dmav1_writel(dev_addr, DMA_DAR(channel));
+               imx_dmav1_writel(imxdma->ccr_to_device,
+                               DMA_CCR(channel));
+       } else {
+               printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
+                      channel);
+               return -EINVAL;
+       }
+       imx_dmav1_writel(dma_length, DMA_CNTR(channel));
+       return 0;
+ }
+ EXPORT_SYMBOL(imx_dma_setup_single);
+ /**
+  * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
+  * @channel: i.MX DMA channel number
+  * @sg: pointer to the scatter-gather list/vector
+  * @sgcount: scatter-gather list chunk count
+  * @dma_length: total length of the transfer request in bytes
+  * @dev_addr: physical device port address
+  * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
+  *           or %DMA_MODE_WRITE from memory to the device
+  *
+  * The function sets up DMA channel state and registers to be ready for
+  * transfer specified by provided parameters. The scatter-gather emulation
+  * is set up according to the parameters.
+  *
+  * The full preparation of the transfer requires setup of more register
+  * by the caller before imx_dma_enable() can be called.
+  *
+  * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes
+  *
+  * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx
+  *
+  * %CCR(channel) has to specify transfer parameters, the next settings is
+  * typical for linear or simple scatter-gather transfers if %DMA_MODE_READ is
+  * specified
+  *
+  * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
+  *
+  * The typical setup for %DMA_MODE_WRITE is specified by next options
+  * combination
+  *
+  * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
+  *
+  * Be careful here and do not mistakenly mix source and target device
+  * port sizes constants, they are really different:
+  * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
+  * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
+  *
+  * Return value: if incorrect parameters are provided -%EINVAL.
+  * Zero indicates success.
+  */
+ int
+ imx_dma_setup_sg(int channel,
+                struct scatterlist *sg, unsigned int sgcount,
+                unsigned int dma_length, unsigned int dev_addr,
+                unsigned int dmamode)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       if (imxdma->in_use)
+               return -EBUSY;
+       imxdma->sg = sg;
+       imxdma->dma_mode = dmamode;
+       imxdma->resbytes = dma_length;
+       if (!sg || !sgcount) {
 - * This function tries to find a free channel in the specified priority group
++              printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
+                      channel);
+               return -EINVAL;
+       }
+       if (!sg->length) {
+               printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
+                      channel);
+               return -EINVAL;
+       }
+       if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
+               pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
+                       "dev_addr=0x%08x for read\n",
+                       channel, __func__, sg, sgcount, dma_length, dev_addr);
+               imx_dmav1_writel(dev_addr, DMA_SAR(channel));
+               imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
+       } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
+               pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
+                       "dev_addr=0x%08x for write\n",
+                       channel, __func__, sg, sgcount, dma_length, dev_addr);
+               imx_dmav1_writel(dev_addr, DMA_DAR(channel));
+               imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
+       } else {
+               printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
+                      channel);
+               return -EINVAL;
+       }
+       imx_dma_sg_next(channel, sg);
+       return 0;
+ }
+ EXPORT_SYMBOL(imx_dma_setup_sg);
+ int
+ imx_dma_config_channel(int channel, unsigned int config_port,
+       unsigned int config_mem, unsigned int dmareq, int hw_chaining)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       u32 dreq = 0;
+       imxdma->hw_chaining = 0;
+       if (hw_chaining) {
+               imxdma->hw_chaining = 1;
+               if (!imx_dma_hw_chain(imxdma))
+                       return -EINVAL;
+       }
+       if (dmareq)
+               dreq = CCR_REN;
+       imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
+       imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;
+       imx_dmav1_writel(dmareq, DMA_RSSR(channel));
+       return 0;
+ }
+ EXPORT_SYMBOL(imx_dma_config_channel);
+ void imx_dma_config_burstlen(int channel, unsigned int burstlen)
+ {
+       imx_dmav1_writel(burstlen, DMA_BLR(channel));
+ }
+ EXPORT_SYMBOL(imx_dma_config_burstlen);
+ /**
+  * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification
+  * handlers
+  * @channel: i.MX DMA channel number
+  * @irq_handler: the pointer to the function called if the transfer
+  *            ends successfully
+  * @err_handler: the pointer to the function called if the premature
+  *            end caused by error occurs
+  * @data: user specified value to be passed to the handlers
+  */
+ int
+ imx_dma_setup_handlers(int channel,
+                      void (*irq_handler) (int, void *),
+                      void (*err_handler) (int, void *, int),
+                      void *data)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       unsigned long flags;
+       if (!imxdma->name) {
+               printk(KERN_CRIT "%s: called for  not allocated channel %d\n",
+                      __func__, channel);
+               return -ENODEV;
+       }
+       local_irq_save(flags);
+       imx_dmav1_writel(1 << channel, DMA_DISR);
+       imxdma->irq_handler = irq_handler;
+       imxdma->err_handler = err_handler;
+       imxdma->data = data;
+       local_irq_restore(flags);
+       return 0;
+ }
+ EXPORT_SYMBOL(imx_dma_setup_handlers);
+ /**
+  * imx_dma_setup_progression_handler - setup i.MX DMA channel progression
+  * handlers
+  * @channel: i.MX DMA channel number
+  * @prog_handler: the pointer to the function called if the transfer progresses
+  */
+ int
+ imx_dma_setup_progression_handler(int channel,
+                       void (*prog_handler) (int, void*, struct scatterlist*))
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       unsigned long flags;
+       if (!imxdma->name) {
+               printk(KERN_CRIT "%s: called for  not allocated channel %d\n",
+                      __func__, channel);
+               return -ENODEV;
+       }
+       local_irq_save(flags);
+       imxdma->prog_handler = prog_handler;
+       local_irq_restore(flags);
+       return 0;
+ }
+ EXPORT_SYMBOL(imx_dma_setup_progression_handler);
+ /**
+  * imx_dma_enable - function to start i.MX DMA channel operation
+  * @channel: i.MX DMA channel number
+  *
+  * The channel has to be allocated by driver through imx_dma_request()
+  * or imx_dma_request_by_prio() function.
+  * The transfer parameters has to be set to the channel registers through
+  * call of the imx_dma_setup_single() or imx_dma_setup_sg() function
+  * and registers %BLR(channel), %RSSR(channel) and %CCR(channel) has to
+  * be set prior this function call by the channel user.
+  */
+ void imx_dma_enable(int channel)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       unsigned long flags;
+       pr_debug("imxdma%d: imx_dma_enable\n", channel);
+       if (!imxdma->name) {
+               printk(KERN_CRIT "%s: called for  not allocated channel %d\n",
+                      __func__, channel);
+               return;
+       }
+       if (imxdma->in_use)
+               return;
+       local_irq_save(flags);
+       imx_dmav1_writel(1 << channel, DMA_DISR);
+       imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
+       imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
+               CCR_ACRPT, DMA_CCR(channel));
+ #ifdef CONFIG_ARCH_MX2
+       if ((cpu_is_mx21() || cpu_is_mx27()) &&
+                       imxdma->sg && imx_dma_hw_chain(imxdma)) {
+               imxdma->sg = sg_next(imxdma->sg);
+               if (imxdma->sg) {
+                       u32 tmp;
+                       imx_dma_sg_next(channel, imxdma->sg);
+                       tmp = imx_dmav1_readl(DMA_CCR(channel));
+                       imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
+                               DMA_CCR(channel));
+               }
+       }
+ #endif
+       imxdma->in_use = 1;
+       local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(imx_dma_enable);
+ /**
+  * imx_dma_disable - stop, finish i.MX DMA channel operation
+  * @channel: i.MX DMA channel number
+  */
+ void imx_dma_disable(int channel)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       unsigned long flags;
+       pr_debug("imxdma%d: imx_dma_disable\n", channel);
+       if (imx_dma_hw_chain(imxdma))
+               del_timer(&imxdma->watchdog);
+       local_irq_save(flags);
+       imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
+       imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
+                       DMA_CCR(channel));
+       imx_dmav1_writel(1 << channel, DMA_DISR);
+       imxdma->in_use = 0;
+       local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(imx_dma_disable);
+ #ifdef CONFIG_ARCH_MX2
+ static void imx_dma_watchdog(unsigned long chno)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
+       imx_dmav1_writel(0, DMA_CCR(chno));
+       imxdma->in_use = 0;
+       imxdma->sg = NULL;
+       if (imxdma->err_handler)
+               imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
+ }
+ #endif
+ static irqreturn_t dma_err_handler(int irq, void *dev_id)
+ {
+       int i, disr;
+       struct imx_dma_channel *imxdma;
+       unsigned int err_mask;
+       int errcode;
+       disr = imx_dmav1_readl(DMA_DISR);
+       err_mask = imx_dmav1_readl(DMA_DBTOSR) |
+                  imx_dmav1_readl(DMA_DRTOSR) |
+                  imx_dmav1_readl(DMA_DSESR)  |
+                  imx_dmav1_readl(DMA_DBOSR);
+       if (!err_mask)
+               return IRQ_HANDLED;
+       imx_dmav1_writel(disr & err_mask, DMA_DISR);
+       for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+               if (!(err_mask & (1 << i)))
+                       continue;
+               imxdma = &imx_dma_channels[i];
+               errcode = 0;
+               if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
+                       imx_dmav1_writel(1 << i, DMA_DBTOSR);
+                       errcode |= IMX_DMA_ERR_BURST;
+               }
+               if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
+                       imx_dmav1_writel(1 << i, DMA_DRTOSR);
+                       errcode |= IMX_DMA_ERR_REQUEST;
+               }
+               if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
+                       imx_dmav1_writel(1 << i, DMA_DSESR);
+                       errcode |= IMX_DMA_ERR_TRANSFER;
+               }
+               if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
+                       imx_dmav1_writel(1 << i, DMA_DBOSR);
+                       errcode |= IMX_DMA_ERR_BUFFER;
+               }
+               if (imxdma->name && imxdma->err_handler) {
+                       imxdma->err_handler(i, imxdma->data, errcode);
+                       continue;
+               }
+               imx_dma_channels[i].sg = NULL;
+               printk(KERN_WARNING
+                      "DMA timeout on channel %d (%s) -%s%s%s%s\n",
+                      i, imxdma->name,
+                      errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
+                      errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
+                      errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
+                      errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
+       }
+       return IRQ_HANDLED;
+ }
+ static void dma_irq_handle_channel(int chno)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
+       if (!imxdma->name) {
+               /*
+                * IRQ for an unregistered DMA channel:
+                * let's clear the interrupts and disable it.
+                */
+               printk(KERN_WARNING
+                      "spurious IRQ for DMA channel %d\n", chno);
+               return;
+       }
+       if (imxdma->sg) {
+               u32 tmp;
+               struct scatterlist *current_sg = imxdma->sg;
+               imxdma->sg = sg_next(imxdma->sg);
+               if (imxdma->sg) {
+                       imx_dma_sg_next(chno, imxdma->sg);
+                       tmp = imx_dmav1_readl(DMA_CCR(chno));
+                       if (imx_dma_hw_chain(imxdma)) {
+                               /* FIXME: The timeout should probably be
+                                * configurable
+                                */
+                               mod_timer(&imxdma->watchdog,
+                                       jiffies + msecs_to_jiffies(500));
+                               tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
+                               imx_dmav1_writel(tmp, DMA_CCR(chno));
+                       } else {
+                               imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
+                               tmp |= CCR_CEN;
+                       }
+                       imx_dmav1_writel(tmp, DMA_CCR(chno));
+                       if (imxdma->prog_handler)
+                               imxdma->prog_handler(chno, imxdma->data,
+                                               current_sg);
+                       return;
+               }
+               if (imx_dma_hw_chain(imxdma)) {
+                       del_timer(&imxdma->watchdog);
+                       return;
+               }
+       }
+       imx_dmav1_writel(0, DMA_CCR(chno));
+       imxdma->in_use = 0;
+       if (imxdma->irq_handler)
+               imxdma->irq_handler(chno, imxdma->data);
+ }
+ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
+ {
+       int i, disr;
+ #ifdef CONFIG_ARCH_MX2
+       if (cpu_is_mx21() || cpu_is_mx27())
+               dma_err_handler(irq, dev_id);
+ #endif
+       disr = imx_dmav1_readl(DMA_DISR);
+       pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
+                    disr);
+       imx_dmav1_writel(disr, DMA_DISR);
+       for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+               if (disr & (1 << i))
+                       dma_irq_handle_channel(i);
+       }
+       return IRQ_HANDLED;
+ }
+ /**
+  * imx_dma_request - request/allocate specified channel number
+  * @channel: i.MX DMA channel number
+  * @name: the driver/caller own non-%NULL identification
+  */
+ int imx_dma_request(int channel, const char *name)
+ {
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       unsigned long flags;
+       int ret = 0;
+       /* basic sanity checks */
+       if (!name)
+               return -EINVAL;
+       if (channel >= IMX_DMA_CHANNELS) {
+               printk(KERN_CRIT "%s: called for  non-existed channel %d\n",
+                      __func__, channel);
+               return -EINVAL;
+       }
+       local_irq_save(flags);
+       if (imxdma->name) {
+               local_irq_restore(flags);
+               return -EBUSY;
+       }
+       memset(imxdma, 0, sizeof(imxdma));
+       imxdma->name = name;
+       local_irq_restore(flags); /* request_irq() can block */
+ #ifdef CONFIG_ARCH_MX2
+       if (cpu_is_mx21() || cpu_is_mx27()) {
+               ret = request_irq(MX2x_INT_DMACH0 + channel,
+                               dma_irq_handler, 0, "DMA", NULL);
+               if (ret) {
+                       imxdma->name = NULL;
+                       pr_crit("Can't register IRQ %d for DMA channel %d\n",
+                                       MX2x_INT_DMACH0 + channel, channel);
+                       return ret;
+               }
+               init_timer(&imxdma->watchdog);
+               imxdma->watchdog.function = &imx_dma_watchdog;
+               imxdma->watchdog.data = channel;
+       }
+ #endif
+       return ret;
+ }
+ EXPORT_SYMBOL(imx_dma_request);
+ /**
+  * imx_dma_free - release previously acquired channel
+  * @channel: i.MX DMA channel number
+  */
+ void imx_dma_free(int channel)
+ {
+       unsigned long flags;
+       struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
+       if (!imxdma->name) {
+               printk(KERN_CRIT
+                      "%s: trying to free free channel %d\n",
+                      __func__, channel);
+               return;
+       }
+       local_irq_save(flags);
+       /* Disable interrupts */
+       imx_dma_disable(channel);
+       imxdma->name = NULL;
+ #ifdef CONFIG_ARCH_MX2
+       if (cpu_is_mx21() || cpu_is_mx27())
+               free_irq(MX2x_INT_DMACH0 + channel, NULL);
+ #endif
+       local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(imx_dma_free);
+ /**
+  * imx_dma_request_by_prio - find and request a free channel best
+  * suiting the requested priority
+  * @channel: i.MX DMA channel number
+  * @name: the driver/caller own non-%NULL identification
+  *
+  * This function tries to find a free channel in the specified priority group
+  * first; if none is free there, it looks for a free channel in the
+  * higher and then in the lower priority groups.
+  *
+  * Return value: If there is no free channel to allocate, -%ENODEV is returned.
+  *               On successful allocation channel is returned.
+  */
+ int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
+ {
+       int i;
+       int best;
+       switch (prio) {
+       case (DMA_PRIO_HIGH):
+               best = 8;
+               break;
+       case (DMA_PRIO_MEDIUM):
+               best = 4;
+               break;
+       case (DMA_PRIO_LOW):
+       default:
+               best = 0;
+               break;
+       }
+       for (i = best; i < IMX_DMA_CHANNELS; i++)
+               if (!imx_dma_request(i, name))
+                       return i;
+       for (i = best - 1; i >= 0; i--)
+               if (!imx_dma_request(i, name))
+                       return i;
+       printk(KERN_ERR "%s: no free DMA channel found\n", __func__);
+       return -ENODEV;
+ }
+ EXPORT_SYMBOL(imx_dma_request_by_prio);
+ static int __init imx_dma_init(void)
+ {
+       int ret = 0;
+       int i;
+ #ifdef CONFIG_ARCH_MX1
+       if (cpu_is_mx1())
+               imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
+       else
+ #endif
+ #ifdef CONFIG_MACH_MX21
+       if (cpu_is_mx21())
+               imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
+       else
+ #endif
+ #ifdef CONFIG_MACH_MX27
+       if (cpu_is_mx27())
+               imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
+       else
+ #endif
+               BUG();
+       dma_clk = clk_get(NULL, "dma");
+       clk_enable(dma_clk);
+       /* reset DMA module */
+       imx_dmav1_writel(DCR_DRST, DMA_DCR);
+ #ifdef CONFIG_ARCH_MX1
+       if (cpu_is_mx1()) {
+               ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL);
+               if (ret) {
+                       pr_crit("Wow!  Can't register IRQ for DMA\n");
+                       return ret;
+               }
+               ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL);
+               if (ret) {
+                       pr_crit("Wow!  Can't register ERRIRQ for DMA\n");
+                       free_irq(MX1_DMA_INT, NULL);
+                       return ret;
+               }
+       }
+ #endif
+       /* enable DMA module */
+       imx_dmav1_writel(DCR_DEN, DMA_DCR);
+       /* clear all interrupts */
+       imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
+       /* disable interrupts */
+       imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
+       for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+               imx_dma_channels[i].sg = NULL;
+               imx_dma_channels[i].dma_num = i;
+       }
+       return ret;
+ }
+ arch_initcall(imx_dma_init);
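A minimal sketch of how a client driver might drive the API added above. The request line, the device FIFO address and the IMX_DMA_* config flags from <mach/dma-v1.h> are assumptions for illustration, not part of this merge:

#include <linux/dma-mapping.h>
#include <mach/dma-v1.h>

#define EXAMPLE_DEV_FIFO_ADDR	0x10014000	/* hypothetical device FIFO (physical) */
#define EXAMPLE_DMA_REQ		21		/* hypothetical DMA request line */

static void example_dma_done(int channel, void *data)
{
	/* transfer finished: complete a waiting context, queue the next buffer, ... */
}

static void example_dma_error(int channel, void *data, int errcode)
{
	pr_err("example: DMA error on channel %d, errcode 0x%x\n", channel, errcode);
}

static int example_start_read(dma_addr_t buf, unsigned int len)
{
	int channel, ret;

	channel = imx_dma_request_by_prio("example", DMA_PRIO_HIGH);
	if (channel < 0)
		return channel;

	/* 16-bit FIFO on the device port, 32-bit linear memory side, no HW chaining */
	ret = imx_dma_config_channel(channel,
				     IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
				     IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				     EXAMPLE_DMA_REQ, 0);
	if (ret)
		goto out_free;

	imx_dma_config_burstlen(channel, 16);	/* burst length in bytes, 0 means 64 */
	imx_dma_setup_handlers(channel, example_dma_done, example_dma_error, NULL);

	ret = imx_dma_setup_single(channel, buf, len,
				   EXAMPLE_DEV_FIFO_ADDR, DMA_MODE_READ);
	if (ret)
		goto out_free;

	imx_dma_enable(channel);	/* example_dma_done() fires on completion */
	return channel;

out_free:
	imx_dma_free(channel);
	return ret;
}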
index 15a2510ee63954a2f8ace1383b720d3845fac375,2c0ed87f20244c88e4feea8c79aca1a29c4115a0..3124cf791ebb5504d842c494c345b7754ddbb851
@@@ -19,7 -19,7 +19,7 @@@
   */
  
  #include <linux/kernel.h>
- #include <linux/lmb.h>
+ #include <linux/memblock.h>
  
  #include <asm/machdep.h>
  #include <asm/prom.h>
@@@ -136,7 -136,7 +136,7 @@@ static long ps3_hpte_updatepp(unsigned 
         * As lv1_read_htab_entries() does not give us the RPN, we can
         * not synthesize the new hpte_r value here, and therefore can
         * not update the hpte with lv1_insert_htab_entry(), so we
 -       * insted invalidate it and ask the caller to update it via
 +       * instead invalidate it and ask the caller to update it via
         * ps3_hpte_insert() by returning a -1 value.
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
index f51cc55aced60f3cb062d6afd5a7ed207e68e4a5,fcc3c61fdecc0a8e73859868f5f863c9fdb315e3..33cec152070df4f9c5dc30d47a6274524d097225
@@@ -2,7 -2,7 +2,7 @@@
   * sleep.c - x86-specific ACPI sleep support.
   *
   *  Copyright (C) 2001-2003 Patrick Mochel
 - *  Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
 + *  Copyright (C) 2001-2003 Pavel Machek <pavel@ucw.cz>
   */
  
  #include <linux/acpi.h>
@@@ -157,9 -157,14 +157,14 @@@ static int __init acpi_sleep_setup(cha
  #ifdef CONFIG_HIBERNATION
                if (strncmp(str, "s4_nohwsig", 10) == 0)
                        acpi_no_s4_hw_signature();
-               if (strncmp(str, "s4_nonvs", 8) == 0)
-                       acpi_s4_no_nvs();
+               if (strncmp(str, "s4_nonvs", 8) == 0) {
+                       pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, "
+                                       "please use acpi_sleep=nonvs instead");
+                       acpi_nvs_nosave();
+               }
  #endif
+               if (strncmp(str, "nonvs", 5) == 0)
+                       acpi_nvs_nosave();
                if (strncmp(str, "old_ordering", 12) == 0)
                        acpi_old_suspend_ordering();
                str = strchr(str, ',');
index 0bcc5aeda9982bb79a2f069ab0f34b4970a531af,a96489ee6cabf04a53664300f87141c4bbffc293..980508c79082fadaacbc0339c0fe196d6a6dd5ce
@@@ -460,7 -460,7 +460,7 @@@ static void lapic_timer_broadcast(cons
  }
  
  /*
 - * Setup the local APIC timer for this CPU. Copy the initilized values
 + * Setup the local APIC timer for this CPU. Copy the initialized values
   * of the boot CPU and register the clock event in the framework.
   */
  static void __cpuinit setup_APIC_timer(void)
@@@ -921,7 -921,7 +921,7 @@@ void disable_local_APIC(void
        unsigned int value;
  
        /* APIC hasn't been mapped yet */
-       if (!apic_phys)
+       if (!x2apic_mode && !apic_phys)
                return;
  
        clear_local_APIC();
index 0af9aa20fce1069d5dfa167e1a4703f743b400b9,3e90cce3dc8bfe50fa245e8febe2aaf2625df25e..1376f4144b3f4dbf0460556f7ed7a035b2b43a9c
@@@ -9,7 -9,7 +9,7 @@@
   *  Based on the powernow-k7.c module written by Dave Jones.
   *  (C) 2003 Dave Jones on behalf of SuSE Labs
   *  (C) 2004 Dominik Brodowski <linux@brodo.de>
 - *  (C) 2004 Pavel Machek <pavel@suse.cz>
 + *  (C) 2004 Pavel Machek <pavel@ucw.cz>
   *  Licensed under the terms of the GNU GPL License version 2.
   *  Based upon datasheets & sample CPUs kindly provided by AMD.
   *
@@@ -1023,13 -1023,12 +1023,12 @@@ static int get_transition_latency(struc
        }
        if (max_latency == 0) {
                /*
-                * Fam 11h always returns 0 as transition latency.
-                * This is intended and means "very fast". While cpufreq core
-                * and governors currently can handle that gracefully, better
-                * set it to 1 to avoid problems in the future.
-                * For all others it's a BIOS bug.
+                * Fam 11h and later may return 0 as transition latency. This
+                * is intended and means "very fast". While cpufreq core and
+                * governors currently can handle that gracefully, better set it
+                * to 1 to avoid problems in the future.
                 */
-               if (boot_cpu_data.x86 != 0x11)
+               if (boot_cpu_data.x86 < 0x11)
                        printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
                                "latency\n");
                max_latency = 1;
index da06476f2df4932d202080f6473219919caa943a,f87bf104df7a4cc79b2991759c88e00246cf64d4..9585e531ac6bdc51c9cf2cfbd2e4a41bb7dadff6
@@@ -864,8 -864,8 +864,8 @@@ drm_mode_std(struct drm_connector *conn
                mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
                                    false);
                mode->hdisplay = 1366;
-               mode->vsync_start = mode->vsync_start - 1;
-               mode->vsync_end = mode->vsync_end - 1;
+               mode->hsync_start = mode->hsync_start - 1;
+               mode->hsync_end = mode->hsync_end - 1;
                return mode;
        }
  
@@@ -929,11 -929,13 +929,11 @@@ drm_mode_do_interlace_quirk(struct drm_
                { 1440,  576 },
                { 2880,  576 },
        };
 -      static const int n_sizes =
 -              sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
  
        if (!(pt->misc & DRM_EDID_PT_INTERLACED))
                return;
  
 -      for (i = 0; i < n_sizes; i++) {
 +      for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
                if ((mode->hdisplay == cea_interlaced[i].w) &&
                    (mode->vdisplay == cea_interlaced[i].h / 2)) {
                        mode->vdisplay *= 2;
@@@ -1373,6 -1375,7 +1373,6 @@@ static const struct 
        { 1920, 1440, 60, 0 },
        { 1920, 1440, 75, 0 },
  };
 -static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
  
  static int
  drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
        for (i = 0; i < 6; i++) {
                for (j = 7; j > 0; j--) {
                        m = (i * 8) + (7 - j);
 -                      if (m >= num_est3_modes)
 +                      if (m >= ARRAY_SIZE(est3_modes))
                                break;
                        if (est[i] & (1 << j)) {
                                mode = drm_mode_find_dmt(connector->dev,
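The two hunks above replace open-coded sizeof(array)/sizeof(array[0]) counts with ARRAY_SIZE(); a minimal illustration of the idiom (the table below is made up):

#include <linux/kernel.h>	/* ARRAY_SIZE(), pr_info() */

static const struct {
	int w, h;
} example_modes[] = {
	{ 1440,  480 },
	{ 1920, 1080 },
};

static void example_walk_modes(void)
{
	int i;

	/* ARRAY_SIZE() stays correct as entries are added or removed */
	for (i = 0; i < ARRAY_SIZE(example_modes); i++)
		pr_info("%dx%d\n", example_modes[i].w, example_modes[i].h);
}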
index 646a2a5711f2e4ce287137685eb8023b316215d9,7065cb3105538f62cca0edcf8a2f951cddba210a..86b93f2ecca3ffbfeffb85b49f42ff47f87ec24f
@@@ -40,10 -40,10 +40,10 @@@ static int destroy_qp(struct c4iw_rdev 
         */
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
-                         pci_unmap_addr(&wq->rq, mapping));
+                         dma_unmap_addr(&wq->rq, mapping));
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->sq.memsize, wq->sq.queue,
-                         pci_unmap_addr(&wq->sq, mapping));
+                         dma_unmap_addr(&wq->sq, mapping));
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
        kfree(wq->rq.sw_rq);
        kfree(wq->sq.sw_sq);
@@@ -99,7 -99,7 +99,7 @@@ static int create_qp(struct c4iw_rdev *
        if (!wq->sq.queue)
                goto err5;
        memset(wq->sq.queue, 0, wq->sq.memsize);
-       pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+       dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
  
        wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                                          wq->rq.memsize, &(wq->rq.dma_addr),
                wq->rq.queue,
                (unsigned long long)virt_to_phys(wq->rq.queue));
        memset(wq->rq.queue, 0, wq->rq.memsize);
-       pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+       dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
  
        wq->db = rdev->lldi.db_reg;
        wq->gts = rdev->lldi.gts_reg;
  err7:
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
-                         pci_unmap_addr(&wq->rq, mapping));
+                         dma_unmap_addr(&wq->rq, mapping));
  err6:
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->sq.memsize, wq->sq.queue,
-                         pci_unmap_addr(&wq->sq, mapping));
+                         dma_unmap_addr(&wq->sq, mapping));
  err5:
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
  err4:
@@@ -905,7 -905,7 +905,7 @@@ static void __flush_qp(struct c4iw_qp *
        atomic_inc(&qhp->refcnt);
        spin_unlock_irqrestore(&qhp->lock, *flag);
  
 -      /* locking heirarchy: cq lock first, then qp lock. */
 +      /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, *flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&rchp->cq);
        if (flushed)
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
  
 -      /* locking heirarchy: cq lock first, then qp lock. */
 +      /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, *flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&schp->cq);
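The pci_unmap_addr*() helpers used here give way to the generic dma_unmap_addr*() family from <linux/dma-mapping.h>; a minimal sketch of that pattern with a made-up ring structure:

#include <linux/dma-mapping.h>

struct example_ring {
	void *queue;
	size_t memsize;
	dma_addr_t dma_addr;
	/* expands to nothing on platforms that need no unmap bookkeeping */
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

static int example_ring_alloc(struct device *dev, struct example_ring *r)
{
	r->queue = dma_alloc_coherent(dev, r->memsize, &r->dma_addr, GFP_KERNEL);
	if (!r->queue)
		return -ENOMEM;
	dma_unmap_addr_set(r, mapping, r->dma_addr);
	return 0;
}

static void example_ring_free(struct device *dev, struct example_ring *r)
{
	dma_free_coherent(dev, r->memsize, r->queue,
			  dma_unmap_addr(r, mapping));
}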
diff --combined drivers/net/gianfar.c
index ba190376e030958e999a0d69f417d10c41aa4033,28b53d1cd4f168701fa4ce41a7c5e0edbaa6f783..efd4c70753dbe1b616b6436be72a87d4ef667cd5
@@@ -381,10 -381,14 +381,14 @@@ static void gfar_init_mac(struct net_de
        /* Insert receive time stamps into padding alignment bytes */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
                rctrl &= ~RCTRL_PAL_MASK;
-               rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
+               rctrl |= RCTRL_PADDING(8);
                priv->padding = 8;
        }
  
+       /* Enable HW time stamping if requested from user space */
+       if (priv->hwts_rx_en)
+               rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
        /* keep vlan related bits if it's enabled */
        if (priv->vlgrp) {
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@@ -747,7 -751,8 +751,8 @@@ static int gfar_of_init(struct of_devic
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+                       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+                       FSL_GIANFAR_DEV_HAS_TIMER;
  
        ctype = of_get_property(np, "phy-connection-type", NULL);
  
@@@ -805,12 -810,20 +810,20 @@@ static int gfar_hwtstamp_ioctl(struct n
  
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
-               priv->hwts_rx_en = 0;
+               if (priv->hwts_rx_en) {
+                       stop_gfar(netdev);
+                       priv->hwts_rx_en = 0;
+                       startup_gfar(netdev);
+               }
                break;
        default:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
-               priv->hwts_rx_en = 1;
+               if (!priv->hwts_rx_en) {
+                       stop_gfar(netdev);
+                       priv->hwts_rx_en = 1;
+                       startup_gfar(netdev);
+               }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        }
@@@ -903,7 -916,7 +916,7 @@@ static void gfar_init_filer_table(struc
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
  
 -      /* cur_filer_idx indicated the fisrt non-masked rule */
 +      /* cur_filer_idx indicated the first non-masked rule */
        priv->cur_filer_idx = rqfar;
  
        /* Rest are masked rules */
@@@ -2642,6 -2655,10 +2655,10 @@@ int gfar_clean_rx_ring(struct gfar_priv
                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
                                priv->rx_buffer_size, DMA_FROM_DEVICE);
  
+               if (unlikely(!(bdp->status & RXBD_ERR) &&
+                               bdp->length > priv->rx_buffer_size))
+                       bdp->status = RXBD_LARGE;
                /* We drop the frame if we failed to allocate a new buffer */
                if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
                                 bdp->status & RXBD_ERR)) {
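The gfar_hwtstamp_ioctl() changes above are driven from user space through SIOCSHWTSTAMP; a minimal sketch of such a request, assuming the interface is named eth0 and sock is an already-open AF_INET socket:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_enable_rx_timestamps(int sock)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* time stamp all received frames */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)&cfg;

	/* the driver may rewrite cfg.rx_filter to what it actually enabled */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}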
index 315eb4cdb58efc7af4e1be6a0ab74abed9575468,6474c4973d3ad977542ba53529ed9f0165cd21d6..dc6bf042579dde0ccd32e2ccdb8ef67995975a03
@@@ -449,7 -449,7 +449,7 @@@ static u32 temac_setoptions(struct net_
        return (0);
  }
  
 -/* Initilize temac */
 +/* Initialize temac */
  static void temac_device_reset(struct net_device *ndev)
  {
        struct temac_local *lp = netdev_priv(ndev);
@@@ -964,7 -964,7 +964,7 @@@ temac_of_probe(struct of_device *op, co
        np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
-               goto nodev;
+               goto err_iounmap;
        }
  
        /* Setup the DMA register accesses, could be DCR or memory mapped */
                        dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
                } else {
                        dev_err(&op->dev, "unable to map DMA registers\n");
-                       goto nodev;
+                       goto err_iounmap;
                }
        }
  
        if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
                dev_err(&op->dev, "could not determine irqs\n");
                rc = -ENOMEM;
-               goto nodev;
+               goto err_iounmap_2;
        }
  
        of_node_put(np); /* Finished with the DMA node; drop the reference */
        if ((!addr) || (size != 6)) {
                dev_err(&op->dev, "could not find MAC address\n");
                rc = -ENODEV;
-               goto nodev;
+               goto err_iounmap_2;
        }
        temac_set_mac_address(ndev, (void *)addr);
  
        rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
        if (rc) {
                dev_err(lp->dev, "Error creating sysfs files\n");
-               goto nodev;
+               goto err_iounmap_2;
        }
  
        rc = register_netdev(lp->ndev);
  
   err_register_ndev:
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
+  err_iounmap_2:
+       if (lp->sdma_regs)
+               iounmap(lp->sdma_regs);
+  err_iounmap:
+       iounmap(lp->regs);
   nodev:
        free_netdev(ndev);
        ndev = NULL;
@@@ -1044,6 -1049,9 +1049,9 @@@ static int __devexit temac_of_remove(st
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
        dev_set_drvdata(&op->dev, NULL);
+       iounmap(lp->regs);
+       if (lp->sdma_regs)
+               iounmap(lp->sdma_regs);
        free_netdev(ndev);
        return 0;
  }
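The error-path fix above follows the usual ladder-of-gotos unwinding order; a generic sketch of the shape, where the example_map_*() and example_register_device() helpers are placeholders:

#include <linux/io.h>

static int example_probe(void)
{
	void __iomem *regs, *sdma_regs;
	int rc;

	regs = example_map_registers();			/* first resource */
	if (!regs)
		return -ENOMEM;

	sdma_regs = example_map_dma_registers();	/* second resource */
	if (!sdma_regs) {
		rc = -ENOMEM;
		goto err_unmap_regs;
	}

	rc = example_register_device();
	if (rc)
		goto err_unmap_sdma;

	return 0;

	/* unwind strictly in reverse order of acquisition */
err_unmap_sdma:
	iounmap(sdma_regs);
err_unmap_regs:
	iounmap(regs);
	return rc;
}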
index 30c2976b36244e68840bd385b2a546e4d162b00d,2f999fc94f60ca9b36bb11d70fd29096c85eaf7e..e9d9d622a9b0a230a6ea0ce365b54e106230571d
@@@ -1896,7 -1896,7 +1896,7 @@@ fail
  /* Some SMP systems have reported number of odd errors with hostap_pci. fid
   * register has changed values between consecutive reads for an unknown reason.
   * This should really not happen, so more debugging is needed. This test
 - * version is a big slower, but it will detect most of such register changes
 + * version is a bit slower, but it will detect most of such register changes
   * and will try to get the correct fid eventually. */
  #define EXTRA_FID_READ_TESTS
  
@@@ -2621,6 -2621,18 +2621,18 @@@ static irqreturn_t prism2_interrupt(in
        iface = netdev_priv(dev);
        local = iface->local;
  
+       /* Detect early interrupt before driver is fully configured */
+       spin_lock(&local->irq_init_lock);
+       if (!dev->base_addr) {
+               if (net_ratelimit()) {
+                       printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
+                              dev->name);
+               }
+               spin_unlock(&local->irq_init_lock);
+               return IRQ_HANDLED;
+       }
+       spin_unlock(&local->irq_init_lock);
        prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
  
        if (local->func->card_present && !local->func->card_present(local)) {
@@@ -3138,6 -3150,7 +3150,7 @@@ prism2_init_local_data(struct prism2_he
        spin_lock_init(&local->cmdlock);
        spin_lock_init(&local->baplock);
        spin_lock_init(&local->lock);
+       spin_lock_init(&local->irq_init_lock);
        mutex_init(&local->rid_bap_mtx);
  
        if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
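The new irq_init_lock closes a window where the (possibly shared) interrupt line fires before the device is fully configured; a generic sketch of the same guard with hypothetical names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t init_lock;	/* initialised with spin_lock_init() in probe */
	bool ready;		/* set under init_lock once setup is complete */
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	/* drop early interrupts instead of touching half-initialised state */
	spin_lock(&priv->init_lock);
	if (!priv->ready) {
		spin_unlock(&priv->init_lock);
		return IRQ_HANDLED;
	}
	spin_unlock(&priv->init_lock);

	/* ... normal interrupt handling ... */
	return IRQ_HANDLED;
}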
index 9259e849f463306a4f328d211cceacae605c8ff1,cd6cf575902e4aab43a2dc232c7358db93ff8a82..6016179db533dc1b23100d2491a24adaff9bca05
@@@ -852,7 -852,7 +852,7 @@@ static void cpm_uart_init_smc(struct ua
         */
        cpm_set_smc_fcr(up);
  
 -      /* Using idle charater time requires some additional tuning.  */
 +      /* Using idle character time requires some additional tuning.  */
        out_be16(&up->smc_mrblr, pinfo->rx_fifosize);
        out_be16(&up->smc_maxidl, pinfo->rx_fifosize);
        out_be16(&up->smc_brklen, 0);
@@@ -930,6 -930,83 +930,83 @@@ static void cpm_uart_config_port(struc
        }
  }
  
+ #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE)
+ /*
+  * Write a string to the serial port
+  * Note that this is called with interrupts already disabled
+  */
+ static void cpm_uart_early_write(struct uart_cpm_port *pinfo,
+               const char *string, u_int count)
+ {
+       unsigned int i;
+       cbd_t __iomem *bdp, *bdbase;
+       unsigned char *cpm_outp_addr;
+       /* Get the address of the host memory buffer.
+        */
+       bdp = pinfo->tx_cur;
+       bdbase = pinfo->tx_bd_base;
+       /*
+        * Now, do each character.  This is not as bad as it looks
+        * since this is a holding FIFO and not a transmitting FIFO.
+        * We could add the complexity of filling the entire transmit
+        * buffer, but we would just wait longer between accesses......
+        */
+       for (i = 0; i < count; i++, string++) {
+               /* Wait for transmitter fifo to empty.
+                * Ready indicates output is ready, and xmt is doing
+                * that, not that it is ready for us to send.
+                */
+               while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+                       ;
+               /* Send the character out.
+                * If the buffer address is in the CPM DPRAM, don't
+                * convert it.
+                */
+               cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
+                                       pinfo);
+               *cpm_outp_addr = *string;
+               out_be16(&bdp->cbd_datlen, 1);
+               setbits16(&bdp->cbd_sc, BD_SC_READY);
+               if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+                       bdp = bdbase;
+               else
+                       bdp++;
+               /* if a LF, also do CR... */
+               if (*string == 10) {
+                       while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+                               ;
+                       cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
+                                               pinfo);
+                       *cpm_outp_addr = 13;
+                       out_be16(&bdp->cbd_datlen, 1);
+                       setbits16(&bdp->cbd_sc, BD_SC_READY);
+                       if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+                               bdp = bdbase;
+                       else
+                               bdp++;
+               }
+       }
+       /*
+        * Finally, Wait for transmitter & holding register to empty
+        *  and restore the IER
+        */
+       while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+               ;
+       pinfo->tx_cur = bdp;
+ }
+ #endif
  #ifdef CONFIG_CONSOLE_POLL
  /* Serial polling routines for writing and reading from the uart while
   * in an interrupt or debug context.
@@@ -999,7 -1076,7 +1076,7 @@@ static void cpm_put_poll_char(struct ua
        static char ch[2];
  
        ch[0] = (char)c;
-       cpm_uart_early_write(pinfo->port.line, ch, 1);
+       cpm_uart_early_write(pinfo, ch, 1);
  }
  #endif /* CONFIG_CONSOLE_POLL */
  
@@@ -1130,9 -1207,6 +1207,6 @@@ static void cpm_uart_console_write(stru
                                   u_int count)
  {
        struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
-       unsigned int i;
-       cbd_t __iomem *bdp, *bdbase;
-       unsigned char *cp;
        unsigned long flags;
        int nolock = oops_in_progress;
  
                spin_lock_irqsave(&pinfo->port.lock, flags);
        }
  
-       /* Get the address of the host memory buffer.
-        */
-       bdp = pinfo->tx_cur;
-       bdbase = pinfo->tx_bd_base;
-       /*
-        * Now, do each character.  This is not as bad as it looks
-        * since this is a holding FIFO and not a transmitting FIFO.
-        * We could add the complexity of filling the entire transmit
-        * buffer, but we would just wait longer between accesses......
-        */
-       for (i = 0; i < count; i++, s++) {
-               /* Wait for transmitter fifo to empty.
-                * Ready indicates output is ready, and xmt is doing
-                * that, not that it is ready for us to send.
-                */
-               while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
-                       ;
-               /* Send the character out.
-                * If the buffer address is in the CPM DPRAM, don't
-                * convert it.
-                */
-               cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
-               *cp = *s;
-               out_be16(&bdp->cbd_datlen, 1);
-               setbits16(&bdp->cbd_sc, BD_SC_READY);
-               if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
-                       bdp = bdbase;
-               else
-                       bdp++;
-               /* if a LF, also do CR... */
-               if (*s == 10) {
-                       while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
-                               ;
-                       cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
-                       *cp = 13;
-                       out_be16(&bdp->cbd_datlen, 1);
-                       setbits16(&bdp->cbd_sc, BD_SC_READY);
-                       if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
-                               bdp = bdbase;
-                       else
-                               bdp++;
-               }
-       }
-       /*
-        * Finally, Wait for transmitter & holding register to empty
-        *  and restore the IER
-        */
-       while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
-               ;
-       pinfo->tx_cur = bdp;
+       cpm_uart_early_write(pinfo, s, count);
  
        if (unlikely(nolock)) {
                local_irq_restore(flags);
index 8413a567c12de324d99baf0829679fc89a783991,162c95a088ed5efc2a747a1d739a44e30656de01..89d260d6b03194bf95f39f62fe305a6f0268e0f0
@@@ -2,7 -2,7 +2,7 @@@
   * cdc-acm.c
   *
   * Copyright (c) 1999 Armin Fuerst    <fuerst@in.tum.de>
 - * Copyright (c) 1999 Pavel Machek    <pavel@suse.cz>
 + * Copyright (c) 1999 Pavel Machek    <pavel@ucw.cz>
   * Copyright (c) 1999 Johannes Erdfelt        <johannes@erdfelt.com>
   * Copyright (c) 2000 Vojtech Pavlik  <vojtech@suse.cz>
   * Copyright (c) 2004 Oliver Neukum   <oliver@neukum.name>
@@@ -1596,6 -1596,7 +1596,7 @@@ static const struct usb_device_id acm_i
        { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
        { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
        { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
+       { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
  
        /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
  
index 7930ff69ee6b65e915e7fb7803ab215b7c5d3679,b21e4054c12cb250f621b3baa9e1774d354bebcb..50d09aa6d21cab8509261d4667ffd44e44ef3be8
@@@ -775,7 -775,7 +775,7 @@@ struct net_device 
        /*
         * This is the first field of the "visible" part of this structure
         * (i.e. as seen by users in the "Space.c" file).  It is the name
 -       * the interface.
 +       * of the interface.
         */
        char                    name[IFNAMSIZ];
  
@@@ -1656,6 -1656,9 +1656,9 @@@ static inline int netif_is_multiqueue(c
        return (dev->num_tx_queues > 1);
  }
  
+ extern void netif_set_real_num_tx_queues(struct net_device *dev,
+                                        unsigned int txq);
  /* Use this variant when it is known for sure that it
   * is executing from hardware interrupt context or with hardware interrupts
   * disabled.
@@@ -2329,7 -2332,7 +2332,7 @@@ do {                                                            
  #endif
  
  #if defined(VERBOSE_DEBUG)
- #define netif_vdbg    netdev_dbg
+ #define netif_vdbg    netif_dbg
  #else
  #define netif_vdbg(priv, type, dev, format, args...)          \
  ({                                                            \
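netif_set_real_num_tx_queues(), declared above, lets a multiqueue driver tell the stack how many TX queues it actually uses once the hardware has been probed; a minimal sketch, where EXAMPLE_MAX_TX_QUEUES and hw_tx_queues are made up and the usual netdev_ops setup is omitted:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define EXAMPLE_MAX_TX_QUEUES	8	/* worst case, allocated up front */

static int example_register(unsigned int hw_tx_queues)
{
	struct net_device *dev;
	int rc;

	dev = alloc_etherdev_mq(0, EXAMPLE_MAX_TX_QUEUES);
	if (!dev)
		return -ENOMEM;

	/* trim to what the hardware really provides before registration */
	netif_set_real_num_tx_queues(dev,
			min_t(unsigned int, hw_tx_queues, EXAMPLE_MAX_TX_QUEUES));

	rc = register_netdev(dev);
	if (rc)
		free_netdev(dev);
	return rc;
}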
index 568efbce80f733b1f159936661c8f142cee8201f,8bc5eeffec8a953f844d853cc9abe30bbb4d77c4..51d14fe876485447b5cb7738d7fa780f24c9d7d2
@@@ -6,7 -6,7 +6,7 @@@
   * Copyright (C) 2000-2001 VERITAS Software Corporation.
   * Copyright (C) 2002-2004 Timesys Corporation
   * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 - * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
 + * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
   * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
   * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
   * Copyright (C) 2005-2009 Wind River Systems, Inc.
@@@ -605,13 -605,13 +605,13 @@@ cpu_master_loop
                if (dbg_kdb_mode) {
                        kgdb_connected = 1;
                        error = kdb_stub(ks);
+                       kgdb_connected = 0;
                } else {
                        error = gdb_serial_stub(ks);
                }
  
                if (error == DBG_PASS_EVENT) {
                        dbg_kdb_mode = !dbg_kdb_mode;
-                       kgdb_connected = 0;
                } else if (error == DBG_SWITCH_CPU_EVENT) {
                        dbg_cpu_switch(cpu, dbg_switch_cpu);
                        goto cpu_loop;
diff --combined kernel/debug/gdbstub.c
index 4e584721bcbbb0c39cc11c66ec78478e84184e50,e8fd6868682d1c3f9d5736aa7a9f9148be49e75e..6e81fd59566b75f896ef39715921a8a961857f24
@@@ -6,7 -6,7 +6,7 @@@
   * Copyright (C) 2000-2001 VERITAS Software Corporation.
   * Copyright (C) 2002-2004 Timesys Corporation
   * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 - * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
 + * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
   * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
   * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
   * Copyright (C) 2005-2009 Wind River Systems, Inc.
@@@ -621,10 -621,8 +621,8 @@@ static void gdb_cmd_query(struct kgdb_s
        switch (remcom_in_buffer[1]) {
        case 's':
        case 'f':
-               if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
-                       error_packet(remcom_out_buffer, -EINVAL);
+               if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10))
                        break;
-               }
  
                i = 0;
                remcom_out_buffer[0] = 'm';
                pack_threadid(remcom_out_buffer + 2, thref);
                break;
        case 'T':
-               if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
-                       error_packet(remcom_out_buffer, -EINVAL);
+               if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16))
                        break;
-               }
                ks->threadid = 0;
                ptr = remcom_in_buffer + 17;
                kgdb_hex2long(&ptr, &ks->threadid);