Merge branch 'pm-opp' into pm-cpufreq
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 18 Dec 2014 00:43:16 +0000 (01:43 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 18 Dec 2014 00:43:16 +0000 (01:43 +0100)
164 files changed:
Documentation/cpu-freq/intel-pstate.txt
Documentation/kernel-parameters.txt
Documentation/networking/timestamping.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/exynos5250-snow.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/configs/exynos_defconfig
arch/arm/include/asm/thread_info.h
arch/arm/kernel/traps.c
arch/arm/kvm/mmu.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-tegra/irq.c
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xscale.S
arch/arm64/kvm/sys_regs.c
arch/ia64/kvm/kvm-ia64.c
arch/mips/Kconfig
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/uaccess.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/bmips_vec.S
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/rtlx.c
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/loongson/common/Makefile
arch/mips/mm/tlbex.c
arch/mips/mti-sead3/sead3-leds.c
arch/mips/netlogic/xlp/Makefile
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/kernel/eeh_sysfs.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/vdso32/getcpu.S
arch/powerpc/platforms/powernv/opal-hmi.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/xmon/xmon.c
arch/sparc/include/asm/dma-mapping.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/cpu/scattered.c
arch/x86/kvm/mmu.c
drivers/atm/solos-pci.c
drivers/clk/at91/clk-usb.c
drivers/clk/clk-divider.c
drivers/clk/pxa/clk-pxa27x.c
drivers/clk/qcom/mmcc-apq8084.c
drivers/clk/rockchip/clk.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/arm_big_little.h
drivers/cpufreq/arm_big_little_dt.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/exynos5440-cpufreq.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/ls1x-cpufreq.c [new file with mode: 0644]
drivers/cpufreq/pcc-cpufreq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/hwmon/g762.c
drivers/iio/accel/bmc150-accel.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/men_z188_adc.c
drivers/iio/gyro/bmg160.c
drivers/input/joystick/xpad.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/net/dsa/bcm_sf2.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/vxlan.c
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/xen-netback/xenbus.c
drivers/pci/msi.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/ufs/ufshcd-pltfrm.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/spi/spi-dw.c
drivers/spi/spi-sirf.c
drivers/spi/spi.c
drivers/staging/rtl8188eu/core/rtw_cmd.c
drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
drivers/staging/rtl8188eu/core/rtw_wlan_util.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/thermal/cpu_cooling.c
drivers/thermal/samsung/exynos_thermal_common.c
drivers/thermal/st/st_thermal.c
drivers/tty/serial/of_serial.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/ep0.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan.c
drivers/usb/serial/ssu100.c
drivers/usb/storage/unusual_uas.h
fs/aio.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/lzo.c
fs/btrfs/zlib.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfsd.h
include/dt-bindings/clock/qcom,mmcc-apq8084.h
include/linux/clk-provider.h
include/linux/cpufreq.h
include/linux/iio/events.h
include/linux/kvm_host.h
include/linux/pci.h
include/net/inet_common.h
include/sound/pcm.h
include/uapi/sound/asound.h
net/bridge/br_netlink.c
net/core/rtnetlink.c
net/ipv4/af_inet.c
net/ipv4/ip_vti.c
net/ipv4/ping.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_udp_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/tcp_ipv6.c
net/netfilter/nf_conntrack_core.c
net/packet/af_packet.c
net/sunrpc/svcsock.c
sound/core/pcm.c
sound/core/pcm_misc.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_priv.h
sound/pci/hda/patch_realtek.c
sound/usb/mixer_quirks.c
sound/usb/quirks.c
virt/kvm/arm/vgic.c
virt/kvm/kvm_main.c

index a69ffe1d54d5e17d4f4f2df80f16ddb4f824d728..765d7fc0e692182a6c9952f4a023eed553883612 100644 (file)
@@ -1,17 +1,28 @@
 Intel P-state driver
 --------------------
 
-This driver implements a scaling driver with an internal governor for
-Intel Core processors.  The driver follows the same model as the
-Transmeta scaling driver (longrun.c) and implements the setpolicy()
-instead of target().  Scaling drivers that implement setpolicy() are
-assumed to implement internal governors by the cpufreq core. All the
-logic for selecting the current P state is contained within the
-driver; no external governor is used by the cpufreq core.
-
-Intel SandyBridge+ processors are supported.
-
-New sysfs files for controlling P state selection have been added to
+This driver provides an interface to control the P state selection for
+SandyBridge+ Intel processors.  The driver can operate in two different
+modes, depending on the processor model: legacy mode and Hardware P state
+(HWP) mode.
+
+In legacy mode the driver implements a scaling driver with an internal
+governor for Intel Core processors.  The driver follows the same model
+as the Transmeta scaling driver (longrun.c) and implements the
+setpolicy() instead of target().  Scaling drivers that implement
+setpolicy() are assumed to implement internal governors by the cpufreq
+core. All the logic for selecting the current P state is contained
+within the driver; no external governor is used by the cpufreq core.
+
+In HWP mode, P state selection is implemented in the processor
+itself. The driver provides the interfaces between the cpufreq core and
+the processor to control P state selection based on user preferences
+and to report frequency to the cpufreq core.  In this mode the
+internal governor code is disabled.
+
+In addition to the interfaces provided by the cpufreq core for
+controlling frequency, the driver provides sysfs files for
+controlling P state selection. These files have been added to
 /sys/devices/system/cpu/intel_pstate/
 
       max_perf_pct: limits the maximum P state that will be requested by
@@ -33,7 +44,9 @@ frequency is fiction for Intel Core processors. Even if the scaling
 driver selects a single P state the actual frequency the processor
 will run at is selected by the processor itself.
 
-New debugfs files have also been added to /sys/kernel/debug/pstate_snb/
+For legacy mode, debugfs files have also been added to allow tuning of
+the internal governor algorithm. These files are located at
+/sys/kernel/debug/pstate_snb/.  These files are NOT present in HWP mode.
 
       deadband
       d_gain_pct
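
As a usage illustration (not part of the patch itself), a minimal user-space C
sketch that caps the P states requested by the driver through the max_perf_pct
file documented above; the 80% cap is an arbitrary example value:

/*
 * Illustration only: limit intel_pstate to 80% of maximum performance
 * via the sysfs interface described above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/max_perf_pct", "w");

	if (!f) {
		perror("max_perf_pct");
		return 1;
	}
	fprintf(f, "80\n");	/* percentage of available performance */
	fclose(f);
	return 0;
}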
index 479f33204a3727a51ece7ba2cc2b747e27788e36..d006bb22f3cac5fd6d6b47f6a4c5a675beedef03 100644 (file)
@@ -1446,6 +1446,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                       disable
                         Do not enable intel_pstate as the default
                         scaling driver for the supported processors
+                      force
+                        Enable intel_pstate on systems that prohibit it by default
+                        in favor of acpi-cpufreq. Forcing the intel_pstate driver
+                        instead of acpi-cpufreq may disable platform features, such
+                        as thermal controls and power capping, that rely on ACPI
+                        P-States information being indicated to OSPM and therefore
+                        should be used with caution. This option does not work with
+                        processors that aren't supported by the intel_pstate driver
+                        or on platforms that use pcc-cpufreq instead of acpi-cpufreq.
+                      no_hwp
+                        Do not enable hardware P state control (HWP)
+                        if available.
 
        intremap=       [X86-64, Intel-IOMMU]
                        on      enable Interrupt Remapping (default)
index 412f45ca2d73e3cd31a487e5fe13b73fc552d9f5..1d6d02d6ba52b531642436e8a6d8c0640af5467d 100644 (file)
@@ -136,7 +136,7 @@ SOF_TIMESTAMPING_OPT_ID:
 
   This option is implemented only for transmit timestamps. There, the
   timestamp is always looped along with a struct sock_extended_err.
-  The option modifies field ee_info to pass an id that is unique
+  The option modifies field ee_data to pass an id that is unique
   among all possibly concurrently outstanding timestamp requests for
   that socket. In practice, it is a monotonically increasing u32
   (that wraps).
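
As an illustration of the corrected field (a sketch, not part of the patch; it
assumes an IPv4 socket with SOF_TIMESTAMPING_OPT_ID and transmit timestamping
already enabled), the id is read back from ee_data while draining the socket's
error queue:

/*
 * Sketch only: read the SOF_TIMESTAMPING_OPT_ID counter from ee_data.
 * Assumes an IPv4 socket with transmit timestamping already enabled.
 */
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void print_tx_timestamp_ids(int fd)
{
	char control[512];
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		struct sock_extended_err *serr;

		if (cm->cmsg_level != SOL_IP || cm->cmsg_type != IP_RECVERR)
			continue;
		serr = (struct sock_extended_err *)CMSG_DATA(cm);
		if (serr->ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
			printf("tx timestamp id %u\n", serr->ee_data);
	}
}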
index 0ff630de8a6d37cba17212946536090fbb2b0e10..888d8bd8b5b785abc6a97c8367dcde91740079cd 100644 (file)
@@ -4837,6 +4837,12 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
 S:     Supported
 F:     drivers/idle/intel_idle.c
 
+INTEL PSTATE DRIVER
+M:     Kristen Carlson Accardi <kristen@linux.intel.com>
+L:     linux-pm@vger.kernel.org
+S:     Supported
+F:     drivers/cpufreq/intel_pstate.c
+
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
 M:     Maik Broemme <mbroemme@plusserver.de>
 L:     linux-fbdev@vger.kernel.org
index 2fd5c4e5c139b60d28344cc62d117bd9df7f7d09..ce70361f766e783d43cc9b6dab00d1cd34304a6f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
index e51fcef884a43d629ab12132e549cc8a0b731405..60429ad1c5d8451c1e481e0190ad5d312dc54522 100644 (file)
        num-cs = <1>;
 };
 
+&usbdrd_dwc3 {
+       dr_mode = "host";
+};
+
 #include "cros-ec-keyboard.dtsi"
index f21b9aa00fbb214f4dee8b0b1980884f5411c70f..d55c1a2eb798966340325afbb5c3009c8bcef28c 100644 (file)
                #size-cells = <1>;
                ranges;
 
-               dwc3 {
+               usbdrd_dwc3: dwc3 {
                        compatible = "synopsys,dwc3";
                        reg = <0x12000000 0x10000>;
                        interrupts = <0 72 0>;
index 72058b8a6f4d4ccce4a8e5a740f82ba0321ef561..e21ef830a48365a06db80d0127fa5a3f55f17f71 100644 (file)
@@ -142,11 +142,13 @@ CONFIG_MMC_DW_IDMAC=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_MAX77802=y
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_DMADEVICES=y
 CONFIG_PL330_DMA=y
 CONFIG_COMMON_CLK_MAX77686=y
+CONFIG_COMMON_CLK_MAX77802=y
 CONFIG_COMMON_CLK_S2MPS11=y
 CONFIG_EXYNOS_IOMMU=y
 CONFIG_IIO=y
index fc44d3761f9e7d36eb8ff4911ff0120a63e7584f..ce73ab6354149f8c490319bdeb6acdbc92cd784c 100644 (file)
@@ -44,16 +44,6 @@ struct cpu_context_save {
        __u32   extra[2];               /* Xscale 'acc' register, etc */
 };
 
-struct arm_restart_block {
-       union {
-               /* For user cache flushing */
-               struct {
-                       unsigned long start;
-                       unsigned long end;
-               } cache;
-       };
-};
-
 /*
  * low level task data that entry.S needs immediate access to.
  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -79,7 +69,6 @@ struct thread_info {
        unsigned long           thumbee_state;  /* ThumbEE Handler Base register */
 #endif
        struct restart_block    restart_block;
-       struct arm_restart_block        arm_restart_block;
 };
 
 #define INIT_THREAD_INFO(tsk)                                          \
index 0c8b10801d36ad6a25806892ea283c7b93d28f61..9f5d81881eb6da9bdd599a7321cf83fbdcb0a0e2 100644 (file)
@@ -533,8 +533,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
        return regs->ARM_r0;
 }
 
-static long do_cache_op_restart(struct restart_block *);
-
 static inline int
 __do_cache_op(unsigned long start, unsigned long end)
 {
@@ -543,24 +541,8 @@ __do_cache_op(unsigned long start, unsigned long end)
        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);
 
-               if (signal_pending(current)) {
-                       struct thread_info *ti = current_thread_info();
-
-                       ti->restart_block = (struct restart_block) {
-                               .fn     = do_cache_op_restart,
-                       };
-
-                       ti->arm_restart_block = (struct arm_restart_block) {
-                               {
-                                       .cache = {
-                                               .start  = start,
-                                               .end    = end,
-                                       },
-                               },
-                       };
-
-                       return -ERESTART_RESTARTBLOCK;
-               }
+               if (fatal_signal_pending(current))
+                       return 0;
 
                ret = flush_cache_user_range(start, start + chunk);
                if (ret)
@@ -573,15 +555,6 @@ __do_cache_op(unsigned long start, unsigned long end)
        return 0;
 }
 
-static long do_cache_op_restart(struct restart_block *unused)
-{
-       struct arm_restart_block *restart_block;
-
-       restart_block = &current_thread_info()->arm_restart_block;
-       return __do_cache_op(restart_block->cache.start,
-                            restart_block->cache.end);
-}
-
 static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
index 57a403a5c22bf9e174ec88a0377b4ab07c3b0a29..8664ff17cbbeaf531b03174e1524cc00a6e86849 100644 (file)
@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
        pgd = pgdp + pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
-               unmap_puds(kvm, pgd, addr, next);
+               if (!pgd_none(*pgd))
+                       unmap_puds(kvm, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
 }
 
@@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
        return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+       return !pfn_valid(pfn);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
@@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (is_error_pfn(pfn))
                return -EFAULT;
 
-       if (kvm_is_mmio_pfn(pfn))
+       if (kvm_is_device_pfn(pfn))
                mem_type = PAGE_S2_DEVICE;
 
        spin_lock(&kvm->mmu_lock);
index 2bdc3233abe2bcc78c527bf8efe4b0032a5880dc..044b51185fccb2e68c1f89c4efb3822704d28488 100644 (file)
@@ -400,6 +400,8 @@ int __init coherency_init(void)
                 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
                armada_375_380_coherency_init(np);
 
+       of_node_put(np);
+
        return 0;
 }
 
index da7be13aecce3cd8d12de9b64c10b6e3facf252b..ab95f5391a2b631e5cace17bbb176766e7d410bf 100644 (file)
@@ -99,42 +99,42 @@ static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
 
 static void tegra_mask(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR);
 }
 
 static void tegra_unmask(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET);
 }
 
 static void tegra_ack(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
 }
 
 static void tegra_eoi(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
 }
 
 static int tegra_retrigger(struct irq_data *d)
 {
-       if (d->irq < FIRST_LEGACY_IRQ)
+       if (d->hwirq < FIRST_LEGACY_IRQ)
                return 0;
 
-       tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET);
+       tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET);
 
        return 1;
 }
@@ -142,7 +142,7 @@ static int tegra_retrigger(struct irq_data *d)
 #ifdef CONFIG_PM_SLEEP
 static int tegra_set_wake(struct irq_data *d, unsigned int enable)
 {
-       u32 irq = d->irq;
+       u32 irq = d->hwirq;
        u32 index, mask;
 
        if (irq < FIRST_LEGACY_IRQ ||
index b3a947863ac7bb7e38d47b7a640d698b55a34bbc..22ac2a6fbfe373b432f43b1041ca9cf42e189837 100644 (file)
@@ -270,7 +270,6 @@ __v7_pj4b_setup:
 /* Auxiliary Debug Modes Control 1 Register */
 #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
 #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
 #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
 
 /* Auxiliary Debug Modes Control 2 Register */
@@ -293,7 +292,6 @@ __v7_pj4b_setup:
        /* Auxiliary Debug Modes Control 1 Register */
        mrc     p15, 1, r0, c15, c1, 1
        orr     r0, r0, #PJ4B_CLEAN_LINE
-       orr     r0, r0, #PJ4B_BCK_OFF_STREX
        orr     r0, r0, #PJ4B_INTER_PARITY
        bic     r0, r0, #PJ4B_STATIC_BP
        mcr     p15, 1, r0, c15, c1, 1
index 23259f104c66fd367d4663cbd4adafd240ffa50d..afa2b3c4df4a267e5a609c13e6e7d6461be85616 100644 (file)
@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
        mrc     p15, 0, r5, c15, c1, 0  @ CP access reg
        mrc     p15, 0, r6, c13, c0, 0  @ PID
        mrc     p15, 0, r7, c3, c0, 0   @ domain ID
-       mrc     p15, 0, r8, c1, c1, 0   @ auxiliary control reg
+       mrc     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mrc     p15, 0, r9, c1, c0, 0   @ control reg
        bic     r4, r4, #2              @ clear frequency change bit
        stmia   r0, {r4 - r9}           @ store cp regs
@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
        mcr     p15, 0, r6, c13, c0, 0  @ PID
        mcr     p15, 0, r7, c3, c0, 0   @ domain ID
        mcr     p15, 0, r1, c2, c0, 0   @ translation table base addr
-       mcr     p15, 0, r8, c1, c1, 0   @ auxiliary control reg
+       mcr     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mov     r0, r9                  @ control register
        b       cpu_resume_mmu
 ENDPROC(cpu_xscale_do_resume)
index 4cc3b719208e0a8238930d44b409b2f7c2beb9f5..3d7c2df89946cc1d1606a4b3401115f10e44ab71 100644 (file)
@@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        /* VBAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
          NULL, reset_val, VBAR_EL1, 0 },
+
+       /* ICC_SRE_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
+         trap_raz_wi },
+
        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
@@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+
+       /* ICC_SRE */
+       { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 };
 
index ec6b9acb6bea8733a5f17bd7afe4a0a249ff4de9..dbe46f43884df183a69a2da387a941f55dbb371d 100644 (file)
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
        for (i = 0; i < npages; i++) {
                pfn = gfn_to_pfn(kvm, base_gfn + i);
-               if (!kvm_is_mmio_pfn(pfn)) {
+               if (!kvm_is_reserved_pfn(pfn)) {
                        kvm_set_pmt_entry(kvm, base_gfn + i,
                                        pfn << PAGE_SHIFT,
                                _PAGE_AR_RWX | _PAGE_MA_WB);
index f43aa536c517437bc6778d6adb1d9e0212effc8c..9536ef912f594651be7e403264f3eb30c3355384 100644 (file)
@@ -2101,9 +2101,17 @@ config 64BIT_PHYS_ADDR
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool 64BIT_PHYS_ADDR
 
+choice
+       prompt "SmartMIPS or microMIPS ASE support"
+
+config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS
+       bool "None"
+       help
+         Select this if you want neither microMIPS nor SmartMIPS support
+
 config CPU_HAS_SMARTMIPS
        depends on SYS_SUPPORTS_SMARTMIPS
-       bool "Support for the SmartMIPS ASE"
+       bool "SmartMIPS"
        help
          SmartMIPS is an extension of the MIPS32 architecture aimed at
          increased security at both hardware and software level for
@@ -2115,11 +2123,13 @@ config CPU_HAS_SMARTMIPS
 
 config CPU_MICROMIPS
        depends on SYS_SUPPORTS_MICROMIPS
-       bool "Build kernel using microMIPS ISA"
+       bool "microMIPS"
        help
          When this option is enabled the kernel will be built using the
          microMIPS ISA
 
+endchoice
+
 config CPU_HAS_MSA
        bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
        depends on CPU_SUPPORTS_MSA
index b46cd220a018d72deb84c6b8d5ea5f75d9e22a79..22a135ac91de3830e885342b834feb47ab109eff 100644 (file)
 #define MIPS_CONF6_SYND                (_ULCAST_(1) << 13)
 /* proAptiv FTLB on/off bit */
 #define MIPS_CONF6_FTLBEN      (_ULCAST_(1) << 15)
+/* FTLB probability bits */
+#define MIPS_CONF6_FTLBP_SHIFT (16)
 
 #define MIPS_CONF7_WII         (_ULCAST_(1) << 31)
 
index 4520adc8699b9c00835a4340c8da217e90dcc305..cd6e0afc683366e598eadbaf8c572e0434fdb9bf 100644 (file)
@@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+#ifdef CONFIG_EVA
+       protected_cachee_op(Hit_Writeback_Inv_D, addr);
+#else
        protected_cache_op(Hit_Writeback_Inv_D, addr);
+#endif
 }
 
 static inline void protected_writeback_scache_line(unsigned long addr)
index 133678ab4eb88cbf213d6516c21be76d06e23a22..22a5624e2fd2dcecf4b5592097e0c18620012426 100644 (file)
@@ -1422,7 +1422,7 @@ static inline long __strnlen_user(const char __user *s, long n)
 }
 
 /*
- * strlen_user: - Get the size of a string in user space.
+ * strnlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
  * Context: User context only. This function may sleep.
@@ -1431,9 +1431,7 @@ static inline long __strnlen_user(const char __user *s, long n)
  *
  * Returns the size of the string INCLUDING the terminating NUL.
  * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
+ * If the string is too long, returns a value greater than @n.
  */
 static inline long strnlen_user(const char __user *s, long n)
 {
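
As a sketch of the return convention just documented (not part of the patch;
the helper name is hypothetical), callers must treat both 0 (fault) and a
value greater than the limit (over-long string) as failures:

/*
 * Sketch only, hypothetical helper: strnlen_user() returns 0 on a fault
 * and a value greater than the limit when the string is too long, so
 * both cases are rejected before copying.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

static int copy_name_from_user(char *dst, const char __user *src, long limit)
{
	long len = strnlen_user(src, limit);	/* includes the NUL */

	if (!len || len > limit)
		return -EFAULT;
	return copy_from_user(dst, src, len) ? -EFAULT : 0;
}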
index 9dc58568f230096244b5db31f01b59a106b1cb3c..d001bb1ad177e7b6e2df2fbb7e42e894784b2e91 100644 (file)
 #define __NR_seccomp                   (__NR_Linux + 316)
 #define __NR_getrandom                 (__NR_Linux + 317)
 #define __NR_memfd_create              (__NR_Linux + 318)
-#define __NR_memfd_create              (__NR_Linux + 319)
+#define __NR_bpf                       (__NR_Linux + 319)
 
 /*
  * Offset of the last N32 flavoured syscall
index 290c23b516789ba16193f7b72fa61eafbf7907b2..86495072a922f31e0214cad3b6ac71c10aaf5fa8 100644 (file)
@@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end:
 END(bmips_reset_nmi_vec)
 
        .set    pop
-       .previous
 
 /***********************************************************************
  * CPU1 warm restart vector (used for second and subsequent boots).
@@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01)
        jr      ra
 
 END(bmips_enable_xks01)
-
-       .previous
index e6e97d2a5c9e68cccde81ab0f181184d1e27fd13..0384b05ab5a02413cbcb11a163375029f285255f 100644 (file)
@@ -229,6 +229,7 @@ LEAF(mips_cps_core_init)
         nop
 
        .set    push
+       .set    mips32r2
        .set    mt
 
        /* Only allow 1 TC per VPE to execute... */
@@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes)
         nop
 
        .set    push
+       .set    mips32r2
        .set    mt
 
 1:     /* Enter VPE configuration state */
index d5a4f380b019bb8c8a4f5ca1ce0229c45689cf4f..dc49cf30c2db46f9e0f2caef74548459c71f6714 100644 (file)
@@ -193,6 +193,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
 static char unknown_isa[] = KERN_ERR \
        "Unsupported ISA type, c0.config0: %d.";
 
+static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
+{
+
+       unsigned int probability = c->tlbsize / c->tlbsizevtlb;
+
+       /*
+        * 0 = All TLBWR instructions go to FTLB
+        * 1 = 15:1: For every 16 TLBWR instructions, 15 go to the
+        * FTLB and 1 goes to the VTLB.
+        * 2 = 7:1: As above with 7:1 ratio.
+        * 3 = 3:1: As above with 3:1 ratio.
+        *
+        * Use the linear midpoint as the probability threshold.
+        */
+       if (probability >= 12)
+               return 1;
+       else if (probability >= 6)
+               return 2;
+       else
+               /*
+                * So FTLB is less than 4 times bigger than VTLB.
+                * A 3:1 ratio can still be useful though.
+                */
+               return 3;
+}
+
 static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 {
        unsigned int config6;
@@ -203,9 +229,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
        case CPU_P5600:
                /* proAptiv & related cores use Config6 to enable the FTLB */
                config6 = read_c0_config6();
+               /* Clear the old probability value */
+               config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
                if (enable)
                        /* Enable FTLB */
-                       write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+                       write_c0_config6(config6 |
+                                        (calculate_ftlb_probability(c)
+                                         << MIPS_CONF6_FTLBP_SHIFT)
+                                        | MIPS_CONF6_FTLBEN);
                else
                        /* Disable FTLB */
                        write_c0_config6(config6 &  ~MIPS_CONF6_FTLBEN);
index 31b1b763cb298841eee156c61752687c4056809d..c5c4fd54d797221256e147a8a0be5278a8806df5 100644 (file)
@@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep)
        int ret = 0;
 
        if (index >= RTLX_CHANNELS) {
-               pr_debug(KERN_DEBUG "rtlx_open index out of range\n");
+               pr_debug("rtlx_open index out of range\n");
                return -ENOSYS;
        }
 
        if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
-               pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
+               pr_debug("rtlx_open channel %d already opened\n", index);
                ret = -EBUSY;
                goto out_fail;
        }
index d21ec57b6e952046db450d48929161b76b0710e3..f3b635f86c39c085ac67126929d4a7cec89e9702 100644 (file)
@@ -485,7 +485,7 @@ static void __init bootmem_init(void)
  * NOTE: historically plat_mem_setup did the entire platform initialization.
  *      This was rather impractical because it meant plat_mem_setup had to
  * get away without any kind of memory allocator.  To keep old code from
- * breaking plat_setup was just renamed to plat_setup and a second platform
+ * breaking plat_setup was just renamed to plat_mem_setup and a second platform
  * initialization hook for anything else was introduced.
  */
 
@@ -493,7 +493,7 @@ static int usermem __initdata;
 
 static int __init early_parse_mem(char *p)
 {
-       unsigned long start, size;
+       phys_t start, size;
 
        /*
         * If a user specifies memory size, we
index 1d57605e4615288a604403de1a7071d7112b20c0..16f1e4f2bf3c3c08896161106529b4a0c551dd9e 100644 (file)
@@ -658,13 +658,13 @@ static int signal_setup(void)
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
-               save_fp_context = copy_fp_from_sigcontext;
-               restore_fp_context = copy_fp_to_sigcontext;
+               save_fp_context = copy_fp_to_sigcontext;
+               restore_fp_context = copy_fp_from_sigcontext;
        }
 #endif /* CONFIG_SMP */
 #else
-       save_fp_context = copy_fp_from_sigcontext;;
-       restore_fp_context = copy_fp_to_sigcontext;
+       save_fp_context = copy_fp_to_sigcontext;
+       restore_fp_context = copy_fp_from_sigcontext;
 #endif
 
        return 0;
index 0bb9cc9dc621f705dd77b139b0985c01213f9c8e..d87e03330b29ae0dd5e27e9e84536dcc96af6f5c 100644 (file)
@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
 # Serial port support
 #
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_SERIAL_8250) += serial.o
+loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
+obj-y += $(loongson-serial-m) $(loongson-serial-y)
 obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
 obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
 
index b5f228e7eae6144565e34c74bf6f86e5d973a760..e3328a96e80909b758a8d619b6a0f8398399d2da 100644 (file)
@@ -1872,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
        uasm_l_smp_pgtable_change(l, *p);
 #endif
        iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
-       if (!m4kc_tlbp_war())
+       if (!m4kc_tlbp_war()) {
                build_tlb_probe_entry(p);
+               if (cpu_has_htw) {
+                       /* race condition happens, leaving */
+                       uasm_i_ehb(p);
+                       uasm_i_mfc0(p, wr.r3, C0_INDEX);
+                       uasm_il_bltz(p, r, wr.r3, label_leave);
+                       uasm_i_nop(p);
+               }
+       }
        return wr;
 }
 
index 20102a6d41410fbba6854edab6ee1199d50cc7fa..c427c57781865e13d52dc75ea2bb2f8db5db9ad7 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/platform_device.h>
 
@@ -76,8 +76,4 @@ static int __init led_init(void)
        return platform_device_register(&fled_device);
 }
 
-module_init(led_init);
-
-MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LED probe driver for SEAD-3");
+device_initcall(led_init);
index be358a8050c57c14377c1bcc44cd45b332b622b1..6b43af0a34d9dd39c4afd08526342bbdd514b068 100644 (file)
@@ -1,6 +1,10 @@
 obj-y                          += setup.o nlm_hal.o cop2-ex.o dt.o
 obj-$(CONFIG_SMP)              += wakeup.o
-obj-$(CONFIG_USB)              += usb-init.o
-obj-$(CONFIG_USB)              += usb-init-xlp2.o
-obj-$(CONFIG_SATA_AHCI)                += ahci-init.o
-obj-$(CONFIG_SATA_AHCI)                += ahci-init-xlp2.o
+ifdef CONFIG_USB
+obj-y                          += usb-init.o
+obj-y                          += usb-init-xlp2.o
+endif
+ifdef CONFIG_SATA_AHCI
+obj-y                          += ahci-init.o
+obj-y                          += ahci-init-xlp2.o
+endif
index 4ca90a39d6d01af63da46c73d19b816b0979e538..725247beebecda3493e9e477f7fd4ec29911b558 100644 (file)
@@ -159,8 +159,6 @@ struct pci_dn {
 
        int     pci_ext_config_space;   /* for pci devices */
 
-       bool    force_32bit_msi;
-
        struct  pci_dev *pcidev;        /* back-pointer to the pci device */
 #ifdef CONFIG_EEH
        struct eeh_dev *edev;           /* eeh device */
index f19b1e5cb06096e2bd9b68b8cd620669c8943cac..1ceecdda810b04722b88329d52b866c3c540ad0e 100644 (file)
@@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev,
                return -ENODEV;
 
        state = eeh_ops->get_state(edev->pe, NULL);
-       return sprintf(buf, "%0x08x %0x08x\n",
+       return sprintf(buf, "0x%08x 0x%08x\n",
                       state, edev->pe->state);
 }
 
index 155013da27e05cb801ba961b102d41f3edbfb48d..b15194e2c5fc55ca934dba97fe4863b2c273baa5 100644 (file)
@@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif
-
-static void quirk_radeon_32bit_msi(struct pci_dev *dev)
-{
-       struct pci_dn *pdn = pci_get_pdn(dev);
-
-       if (pdn)
-               pdn->force_32bit_msi = true;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
index 23eb9a9441bdad612481a7d1b2fcf12dd17a7ed3..c62be60c727485cce5108fcf4770b28f42129eee 100644 (file)
@@ -30,8 +30,8 @@
 V_FUNCTION_BEGIN(__kernel_getcpu)
   .cfi_startproc
        mfspr   r5,SPRN_SPRG_VDSO_READ
-       cmpdi   cr0,r3,0
-       cmpdi   cr1,r4,0
+       cmpwi   cr0,r3,0
+       cmpwi   cr1,r4,0
        clrlwi  r6,r5,16
        rlwinm  r7,r5,16,31-15,31-0
        beq     cr0,1f
index 5e1ed1575aabe23c245edcdff06433cfb0a62327..b322bfb51343f65fdfe76d265cdcb76928011d21 100644 (file)
@@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
        };
 
        /* Print things out */
-       if (hmi_evt->version != OpalHMIEvt_V1) {
+       if (hmi_evt->version < OpalHMIEvt_V1) {
                pr_err("HMI Interrupt, Unknown event version %d !\n",
                        hmi_evt->version);
                return;
index 468a0f23c7f2b5f756c1b553315793c03492c0d6..3ba435ec3dcd584e5f466b78eae18379a69d482b 100644 (file)
@@ -1509,7 +1509,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                                  unsigned int is_64, struct msi_msg *msg)
 {
        struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
-       struct pci_dn *pdn = pci_get_pdn(dev);
        unsigned int xive_num = hwirq - phb->msi_base;
        __be32 data;
        int rc;
@@ -1523,7 +1522,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                return -ENXIO;
 
        /* Force 32-bit MSI on some broken devices */
-       if (pdn && pdn->force_32bit_msi)
+       if (dev->no_64bit_msi)
                is_64 = 0;
 
        /* Assign XIVE to PE */
@@ -1997,7 +1996,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
        if (is_kdump_kernel()) {
                pr_info("  Issue PHB reset ...\n");
                ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
-               ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET);
+               ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
        }
 
        /* Configure M64 window */
index b2187d0068b876e6909376c81d390cbf7b8bad00..4b20f2c6b3b24ba950d3ea10014e0b1fffbfc0b6 100644 (file)
@@ -50,7 +50,6 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 {
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
-       struct pci_dn *pdn = pci_get_pdn(pdev);
        struct msi_desc *entry;
        struct msi_msg msg;
        int hwirq;
@@ -60,7 +59,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
        if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
                return -ENODEV;
 
-       if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+       if (pdev->no_64bit_msi && !phb->msi32_support)
                return -ENODEV;
 
        list_for_each_entry(entry, &pdev->msi_list, list) {
index 8ab5add4ac824f43c6a6b299b24ed15bf0deafb2..8b909e94fd9a10bbee407c2e1a04df7320e93a71 100644 (file)
@@ -420,7 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
         */
 again:
        if (type == PCI_CAP_ID_MSI) {
-               if (pdn->force_32bit_msi) {
+               if (pdev->no_64bit_msi) {
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
                        if (rc < 0) {
                                /*
index b988b5addf864a581ff8c36e177379c32ba92518..c8efbb37d6e076ab123a3d5d8066f58edd36acd8 100644 (file)
@@ -293,10 +293,10 @@ static inline void disable_surveillance(void)
        args.token = rtas_token("set-indicator");
        if (args.token == RTAS_UNKNOWN_SERVICE)
                return;
-       args.nargs = 3;
-       args.nret = 1;
+       args.nargs = cpu_to_be32(3);
+       args.nret = cpu_to_be32(1);
        args.rets = &args.args[3];
-       args.args[0] = SURVEILLANCE_TOKEN;
+       args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
        args.args[1] = 0;
        args.args[2] = 0;
        enter_rtas(__pa(&args));
index 5b1b52a04ad6283fb67308d9bf84b08494870140..7e064c68c5ec8a0ab538a15947d5c44b2db0a322 100644 (file)
@@ -12,6 +12,14 @@ int dma_supported(struct device *dev, u64 mask);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+                                 enum dma_data_direction dir)
+{
+       /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
+        * routine can be a nop.
+        */
+}
+
 extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops *leon_dma_ops;
 extern struct dma_map_ops pci32_dma_ops;
index 0bb1335313b249ca2c3c12d7758275eb7d674c00..aede2c347bde307d9b74aa4ff4887b2b0b05eac6 100644 (file)
 #define X86_FEATURE_DTHERM     ( 7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_HWP                ( 7*32+ 10) /* "hwp" Intel HWP */
+#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
+#define X86_FEATURE_HWP_EPP    ( 7*32+13) /* Intel HWP_EPP */
+#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
index e21331ce368fc60a4ae7fd727448a4ca30d7bdf2..62838e54947dee12f3bd41b8936dcc7a0af6278f 100644 (file)
 #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
 #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
 
+/* Hardware P state interface */
+#define MSR_PPERF                      0x0000064e
+#define MSR_PERF_LIMIT_REASONS         0x0000064f
+#define MSR_PM_ENABLE                  0x00000770
+#define MSR_HWP_CAPABILITIES           0x00000771
+#define MSR_HWP_REQUEST_PKG            0x00000772
+#define MSR_HWP_INTERRUPT              0x00000773
+#define MSR_HWP_REQUEST                0x00000774
+#define MSR_HWP_STATUS                 0x00000777
+
+/* CPUID.6.EAX */
+#define HWP_BASE_BIT                   (1<<7)
+#define HWP_NOTIFICATIONS_BIT          (1<<8)
+#define HWP_ACTIVITY_WINDOW_BIT                (1<<9)
+#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10)
+#define HWP_PACKAGE_LEVEL_REQUEST_BIT  (1<<11)
+
+/* IA32_HWP_CAPABILITIES */
+#define HWP_HIGHEST_PERF(x)            (x & 0xff)
+#define HWP_GUARANTEED_PERF(x)         ((x & (0xff << 8)) >>8)
+#define HWP_MOSTEFFICIENT_PERF(x)      ((x & (0xff << 16)) >>16)
+#define HWP_LOWEST_PERF(x)             ((x & (0xff << 24)) >>24)
+
+/* IA32_HWP_REQUEST */
+#define HWP_MIN_PERF(x)                (x & 0xff)
+#define HWP_MAX_PERF(x)                ((x & 0xff) << 8)
+#define HWP_DESIRED_PERF(x)            ((x & 0xff) << 16)
+#define HWP_ENERGY_PERF_PREFERENCE(x)  ((x & 0xff) << 24)
+#define HWP_ACTIVITY_WINDOW(x)         ((x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x)         ((x & 0x1) << 42)
+
+/* IA32_HWP_STATUS */
+#define HWP_GUARANTEED_CHANGE(x)       (x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM(x)    (x & 0x4)
+
+/* IA32_HWP_INTERRUPT */
+#define HWP_CHANGE_TO_GUARANTEED_INT(x)        (x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM_INT(x)        (x & 0x2)
+
 #define MSR_AMD64_MC0_MASK             0xc0010044
 
 #define MSR_IA32_MCx_CTL(x)            (MSR_IA32_MC0_CTL + 4*(x))
 
 #define MSR_IA32_TEMPERATURE_TARGET    0x000001a2
 
+#define MSR_MISC_PWR_MGMT              0x000001aa
+
 #define MSR_IA32_ENERGY_PERF_BIAS      0x000001b0
 #define ENERGY_PERF_BIAS_PERFORMANCE   0
 #define ENERGY_PERF_BIAS_NORMAL                6
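
To illustrate how the new request macros compose (a sketch only, not part of
the patch; the function name is hypothetical), a full-range HWP request can be
built from the bounds reported by the capabilities MSR:

/*
 * Sketch only: build an IA32_HWP_REQUEST value from the macros added
 * above, using the bounds reported by IA32_HWP_CAPABILITIES.
 */
#include <asm/msr.h>
#include <linux/types.h>

static void hwp_request_full_range(void)
{
	u64 cap, req;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	req = HWP_MIN_PERF(HWP_LOWEST_PERF(cap)) |
	      HWP_MAX_PERF(HWP_HIGHEST_PERF(cap)) |
	      HWP_DESIRED_PERF(0);	/* 0 lets the hardware choose */
	wrmsrl(MSR_HWP_REQUEST, req);
}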
index 4a8013d5594760c3edb240da23309631a9466203..60639093d536a98806d21f9f675ffba6526c1322 100644 (file)
@@ -36,6 +36,11 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
                { X86_FEATURE_PTS,              CR_EAX, 6, 0x00000006, 0 },
+               { X86_FEATURE_HWP,              CR_EAX, 7, 0x00000006, 0 },
+               { X86_FEATURE_HWP_NOITFY,       CR_EAX, 8, 0x00000006, 0 },
+               { X86_FEATURE_HWP_ACT_WINDOW,   CR_EAX, 9, 0x00000006, 0 },
+               { X86_FEATURE_HWP_EPP,          CR_EAX,10, 0x00000006, 0 },
+               { X86_FEATURE_HWP_PKG_REQ,      CR_EAX,11, 0x00000006, 0 },
                { X86_FEATURE_APERFMPERF,       CR_ECX, 0, 0x00000006, 0 },
                { X86_FEATURE_EPB,              CR_ECX, 3, 0x00000006, 0 },
                { X86_FEATURE_HW_PSTATE,        CR_EDX, 7, 0x80000007, 0 },
index ac1c4de3a48491d9b0cf939897e9af57238b3f71..978f402006eef21ee569720a0d573a6a48e12c97 100644 (file)
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
         * kvm mmu, before reclaiming the page, we should
         * unmap it from mmu first.
         */
-       WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+       WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
 
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-                       kvm_is_mmio_pfn(pfn));
+                       kvm_is_reserved_pfn(pfn));
 
        if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
         * here.
         */
-       if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            level == PT_PAGE_TABLE_LEVEL &&
            PageTransCompound(pfn_to_page(pfn)) &&
            !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
index 7652e8dc188f93036e03a23a99ac7aee3b543811..21b0bc6a9c969ea677630a827f69c45545a9e78a 100644 (file)
@@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
        card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
        if (!card->config_regs) {
                dev_warn(&dev->dev, "Failed to ioremap config registers\n");
+               err = -ENOMEM;
                goto out_release_regions;
        }
        card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
        if (!card->buffers) {
                dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
+               err = -ENOMEM;
                goto out_unmap_config;
        }
 
index 24b5b020753a9e4a66a5d3db7c8f7ad8bee0b928..a23ac0c724f014643e66bc2485f7c79cef523920 100644 (file)
@@ -52,29 +52,26 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
 
        tmp = pmc_read(pmc, AT91_PMC_USB);
        usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
-       return parent_rate / (usbdiv + 1);
+
+       return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
 }
 
 static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
                                          unsigned long *parent_rate)
 {
        unsigned long div;
-       unsigned long bestrate;
-       unsigned long tmp;
+
+       if (!rate)
+               return -EINVAL;
 
        if (rate >= *parent_rate)
                return *parent_rate;
 
-       div = *parent_rate / rate;
-       if (div >= SAM9X5_USB_MAX_DIV)
-               return *parent_rate / (SAM9X5_USB_MAX_DIV + 1);
-
-       bestrate = *parent_rate / div;
-       tmp = *parent_rate / (div + 1);
-       if (bestrate - rate > rate - tmp)
-               bestrate = tmp;
+       div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+       if (div > SAM9X5_USB_MAX_DIV + 1)
+               div = SAM9X5_USB_MAX_DIV + 1;
 
-       return bestrate;
+       return DIV_ROUND_CLOSEST(*parent_rate, div);
 }
 
 static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -106,9 +103,13 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
        u32 tmp;
        struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
        struct at91_pmc *pmc = usb->pmc;
-       unsigned long div = parent_rate / rate;
+       unsigned long div;
+
+       if (!rate)
+               return -EINVAL;
 
-       if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV)
+       div = DIV_ROUND_CLOSEST(parent_rate, rate);
+       if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
                return -EINVAL;
 
        tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
@@ -253,7 +254,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
 
                tmp_parent_rate = rate * usb->divisors[i];
                tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate);
-               tmprate = tmp_parent_rate / usb->divisors[i];
+               tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
                if (tmprate < rate)
                        tmpdiff = rate - tmprate;
                else
@@ -281,10 +282,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
        struct at91_pmc *pmc = usb->pmc;
        unsigned long div;
 
-       if (!rate || parent_rate % rate)
+       if (!rate)
                return -EINVAL;
 
-       div = parent_rate / rate;
+       div = DIV_ROUND_CLOSEST(parent_rate, rate);
 
        for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
                if (usb->divisors[i] == div) {
index 18a9de29df0e0c31dadd3de0b2bdb2485fab2733..c0a842b335c520c6c28f08308a1b62a743038dd3 100644 (file)
@@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        if (!rate)
                rate = 1;
 
+       /* if read only, just return current value */
+       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+               bestdiv = readl(divider->reg) >> divider->shift;
+               bestdiv &= div_mask(divider);
+               bestdiv = _get_div(divider, bestdiv);
+               return bestdiv;
+       }
+
        maxdiv = _get_maxdiv(divider);
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
@@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_divider_ops);
 
-const struct clk_ops clk_divider_ro_ops = {
-       .recalc_rate = clk_divider_recalc_rate,
-};
-EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
-
 static struct clk *_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
@@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        }
 
        init.name = name;
-       if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
-               init.ops = &clk_divider_ro_ops;
-       else
-               init.ops = &clk_divider_ops;
+       init.ops = &clk_divider_ops;
        init.flags = flags | CLK_IS_BASIC;
        init.parent_names = (parent_name ? &parent_name: NULL);
        init.num_parents = (parent_name ? 1 : 0);
index b345cc791e5defdeeb57d0b8df4d566bd41aef2c..88b9fe13fa444b2a81a3bd8a2588b035357d0048 100644 (file)
@@ -322,7 +322,7 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
        unsigned long ccsr = CCSR;
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
-       a = cccr & CCCR_A_BIT;
+       a = cccr & (1 << CCCR_A_BIT);
        l  = ccsr & CCSR_L_MASK;
 
        if (osc_forced || a)
@@ -341,7 +341,7 @@ static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
        unsigned long ccsr = CCSR;
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
-       a = cccr & CCCR_A_BIT;
+       a = cccr & (1 << CCCR_A_BIT);
        if (osc_forced)
                return PXA_MEM_13Mhz;
        if (a)
index dab988ab8cf12740ac931c5f5efaa39b90887ec3..157139a5c1ca956d76d1be30dfb6687f82d01816 100644 (file)
@@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = {
        [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
        [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
        [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
-       [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+       [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
        [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
        [MAPLE_CLK_SRC] = &maple_clk_src.clkr,
        [VDP_CLK_SRC] = &vdp_clk_src.clkr,
index 1e68bff481b8e32ec440959002a2467287c269da..880a266f01431b3b9e7040565d3a3e81f0716a8b 100644 (file)
@@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
                div->width = div_width;
                div->lock = lock;
                div->table = div_table;
-               div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
-                                               ? &clk_divider_ro_ops
-                                               : &clk_divider_ops;
+               div_ops = &clk_divider_ops;
        }
 
        clk = clk_register_composite(NULL, name, parent_names, num_parents,
index 3489f8f5fadabee1b8db494c7b5dbd996bd70e33..29b2ef5a68b9318b3791c37e8e1b94c12b6553d6 100644 (file)
@@ -63,7 +63,6 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
 
 config CPU_FREQ_DEFAULT_GOV_POWERSAVE
        bool "powersave"
-       depends on EXPERT
        select CPU_FREQ_GOV_POWERSAVE
        help
          Use the CPUFreq governor 'powersave' as default. This sets
@@ -183,6 +182,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
          If in doubt, say N.
 
+comment "CPU frequency scaling drivers"
+
 config CPUFREQ_DT
        tristate "Generic DT based cpufreq driver"
        depends on HAVE_CLK && OF
@@ -196,19 +197,19 @@ config CPUFREQ_DT
 
          If in doubt, say N.
 
-menu "x86 CPU frequency scaling drivers"
-depends on X86
+if X86
 source "drivers/cpufreq/Kconfig.x86"
-endmenu
+endif
 
-menu "ARM CPU frequency scaling drivers"
-depends on ARM || ARM64
+if ARM || ARM64
 source "drivers/cpufreq/Kconfig.arm"
-endmenu
+endif
 
-menu "AVR32 CPU frequency scaling drivers"
-depends on AVR32
+if PPC32 || PPC64
+source "drivers/cpufreq/Kconfig.powerpc"
+endif
 
+if AVR32
 config AVR32_AT32AP_CPUFREQ
        bool "CPU frequency driver for AT32AP"
        depends on PLATFORM_AT32AP
@@ -216,12 +217,9 @@ config AVR32_AT32AP_CPUFREQ
        help
          This enables the CPU frequency driver for AT32AP processors.
          If in doubt, say N.
+endif
 
-endmenu
-
-menu "CPUFreq processor drivers"
-depends on IA64
-
+if IA64
 config IA64_ACPI_CPUFREQ
        tristate "ACPI Processor P-States driver"
        depends on ACPI_PROCESSOR
@@ -232,12 +230,9 @@ config IA64_ACPI_CPUFREQ
        For details, take a look at <file:Documentation/cpu-freq/>.
 
        If in doubt, say N.
+endif
 
-endmenu
-
-menu "MIPS CPUFreq processor drivers"
-depends on MIPS
-
+if MIPS
 config LOONGSON2_CPUFREQ
        tristate "Loongson2 CPUFreq Driver"
        help
@@ -250,15 +245,18 @@ config LOONGSON2_CPUFREQ
 
          If in doubt, say N.
 
-endmenu
+config LOONGSON1_CPUFREQ
+       tristate "Loongson1 CPUFreq Driver"
+       help
+         This option adds a CPUFreq driver for loongson1 processors which
+         support software configurable cpu frequency.
 
-menu "PowerPC CPU frequency scaling drivers"
-depends on PPC32 || PPC64
-source "drivers/cpufreq/Kconfig.powerpc"
-endmenu
+         For details, take a look at <file:Documentation/cpu-freq/>.
 
-menu "SPARC CPU frequency scaling drivers"
-depends on SPARC64
+         If in doubt, say N.
+endif
+
+if SPARC64
 config SPARC_US3_CPUFREQ
        tristate "UltraSPARC-III CPU Frequency driver"
        help
@@ -276,10 +274,9 @@ config SPARC_US2E_CPUFREQ
          For details, take a look at <file:Documentation/cpu-freq>.
 
          If in doubt, say N.
-endmenu
+endif
 
-menu "SH CPU Frequency scaling"
-depends on SUPERH
+if SUPERH
 config SH_CPU_FREQ
        tristate "SuperH CPU Frequency driver"
        help
@@ -293,7 +290,7 @@ config SH_CPU_FREQ
          For details, take a look at <file:Documentation/cpu-freq>.
 
          If unsure, say N.
-endmenu
+endif
 
 endif
 endmenu
index 83a75dc84761a3c4e9a4e385c66d19ab17706b31..0f9a2c3c0e0d3eb1699a6a19ebee0532872dbccc 100644 (file)
@@ -247,3 +247,11 @@ config ARM_TEGRA_CPUFREQ
        default y
        help
          This adds the CPUFreq driver support for TEGRA SOCs.
+
+config ARM_PXA2xx_CPUFREQ
+       tristate "Intel PXA2xx CPUfreq driver"
+       depends on PXA27x || PXA25x
+       help
+         This adds the CPUFreq driver support for Intel PXA2xx SOCs.
+
+         If in doubt, say N.
index 40c53dc1937ec6a4fc22e61f497939ba2ada54bd..b3ca7b0b2c33aa1ae65b639da68a7c6d328a980b 100644 (file)
@@ -61,8 +61,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)               += imx6q-cpufreq.o
 obj-$(CONFIG_ARM_INTEGRATOR)           += integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)     += kirkwood-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)    += omap-cpufreq.o
-obj-$(CONFIG_PXA25x)                   += pxa2xx-cpufreq.o
-obj-$(CONFIG_PXA27x)                   += pxa2xx-cpufreq.o
+obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)       += pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)                   += pxa3xx-cpufreq.o
 obj-$(CONFIG_ARM_S3C24XX_CPUFREQ)      += s3c24xx-cpufreq.o
 obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
@@ -98,6 +97,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3)               += cris-artpec3-cpufreq.o
 obj-$(CONFIG_ETRAXFS)                  += cris-etraxfs-cpufreq.o
 obj-$(CONFIG_IA64_ACPI_CPUFREQ)                += ia64-acpi-cpufreq.o
 obj-$(CONFIG_LOONGSON2_CPUFREQ)                += loongson2_cpufreq.o
+obj-$(CONFIG_LOONGSON1_CPUFREQ)                += ls1x-cpufreq.o
 obj-$(CONFIG_SH_CPU_FREQ)              += sh-cpufreq.o
 obj-$(CONFIG_SPARC_US2E_CPUFREQ)       += sparc-us2e-cpufreq.o
 obj-$(CONFIG_SPARC_US3_CPUFREQ)                += sparc-us3-cpufreq.o
index a46c223c2506283fa0636b3c24013c3a8a76d094..e1a6ba66a7f5568fc2fbb5541e5c8ddb460f9ead 100644 (file)
@@ -289,6 +289,8 @@ static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
 
        clk_put(clk[cluster]);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+       if (arm_bL_ops->free_opp_table)
+               arm_bL_ops->free_opp_table(cpu_dev);
        dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
 }
 
@@ -337,7 +339,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
        if (ret) {
                dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
                                __func__, cpu_dev->id, ret);
-               goto out;
+               goto free_opp_table;
        }
 
        name[12] = cluster + '0';
@@ -354,6 +356,9 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
        ret = PTR_ERR(clk[cluster]);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
 
+free_opp_table:
+       if (arm_bL_ops->free_opp_table)
+               arm_bL_ops->free_opp_table(cpu_dev);
 out:
        dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
                        cluster);
index 70f18fc12d4ad2a1a2c14f00ae1844e5378cdaee..a211f7db9d32d20bbc6c5a471399ee778941735d 100644 (file)
 
 struct cpufreq_arm_bL_ops {
        char name[CPUFREQ_NAME_LEN];
-       int (*get_transition_latency)(struct device *cpu_dev);
 
        /*
         * This must set opp table for cpu_dev in a similar way as done by
         * of_init_opp_table().
         */
        int (*init_opp_table)(struct device *cpu_dev);
+
+       /* Optional */
+       int (*get_transition_latency)(struct device *cpu_dev);
+       void (*free_opp_table)(struct device *cpu_dev);
 };
 
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
index 4550f6976768ac83706b76e1fb29ea18a124abe5..ef0b3f1324d59421c810f50f8ee77228b7c98e0a 100644 (file)
@@ -82,6 +82,7 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = {
        .name   = "dt-bl",
        .get_transition_latency = dt_get_transition_latency,
        .init_opp_table = dt_init_opp_table,
+       .free_opp_table = of_free_opp_table,
 };
 
 static int generic_bL_probe(struct platform_device *pdev)
index f657c571b18e4e6baaa52250640aac4e1fa26267..9bc2720628a4c874cae6a5ad71409ef1083baac8 100644 (file)
@@ -58,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
        old_freq = clk_get_rate(cpu_clk) / 1000;
 
        if (!IS_ERR(cpu_reg)) {
+               unsigned long opp_freq;
+
                rcu_read_lock();
                opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
                if (IS_ERR(opp)) {
@@ -67,13 +69,16 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
                        return PTR_ERR(opp);
                }
                volt = dev_pm_opp_get_voltage(opp);
+               opp_freq = dev_pm_opp_get_freq(opp);
                rcu_read_unlock();
                tol = volt * priv->voltage_tolerance / 100;
                volt_old = regulator_get_voltage(cpu_reg);
+               dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
+                       opp_freq / 1000, volt);
        }
 
        dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
-               old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+               old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
                new_freq / 1000, volt ? volt / 1000 : -1);
 
        /* scaling up?  scale voltage before frequency */
@@ -89,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
        ret = clk_set_rate(cpu_clk, freq_exact);
        if (ret) {
                dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
-               if (!IS_ERR(cpu_reg))
+               if (!IS_ERR(cpu_reg) && volt_old > 0)
                        regulator_set_voltage_tol(cpu_reg, volt_old, tol);
                return ret;
        }
@@ -181,7 +186,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 {
        struct cpufreq_dt_platform_data *pd;
        struct cpufreq_frequency_table *freq_table;
-       struct thermal_cooling_device *cdev;
        struct device_node *np;
        struct private_data *priv;
        struct device *cpu_dev;
@@ -210,7 +214,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
-               goto out_put_node;
+               goto out_free_opp;
        }
 
        of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -264,20 +268,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                goto out_free_priv;
        }
 
-       /*
-        * For now, just loading the cooling device;
-        * thermal DT code takes care of matching them.
-        */
-       if (of_find_property(np, "#cooling-cells", NULL)) {
-               cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
-               if (IS_ERR(cdev))
-                       dev_err(cpu_dev,
-                               "running cpufreq without cooling device: %ld\n",
-                               PTR_ERR(cdev));
-               else
-                       priv->cdev = cdev;
-       }
-
        priv->cpu_dev = cpu_dev;
        priv->cpu_reg = cpu_reg;
        policy->driver_data = priv;
@@ -287,7 +277,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        if (ret) {
                dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
                        ret);
-               goto out_cooling_unregister;
+               goto out_free_cpufreq_table;
        }
 
        policy->cpuinfo.transition_latency = transition_latency;
@@ -300,12 +290,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 
        return 0;
 
-out_cooling_unregister:
-       cpufreq_cooling_unregister(priv->cdev);
+out_free_cpufreq_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_free_priv:
        kfree(priv);
-out_put_node:
+out_free_opp:
+       of_free_opp_table(cpu_dev);
        of_node_put(np);
 out_put_reg_clk:
        clk_put(cpu_clk);
@@ -319,8 +309,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
 {
        struct private_data *priv = policy->driver_data;
 
-       cpufreq_cooling_unregister(priv->cdev);
+       if (priv->cdev)
+               cpufreq_cooling_unregister(priv->cdev);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+       of_free_opp_table(priv->cpu_dev);
        clk_put(policy->clk);
        if (!IS_ERR(priv->cpu_reg))
                regulator_put(priv->cpu_reg);
@@ -329,6 +321,33 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
+static void cpufreq_ready(struct cpufreq_policy *policy)
+{
+       struct private_data *priv = policy->driver_data;
+       struct device_node *np = of_node_get(priv->cpu_dev->of_node);
+
+       if (WARN_ON(!np))
+               return;
+
+       /*
+        * For now, just loading the cooling device;
+        * thermal DT code takes care of matching them.
+        */
+       if (of_find_property(np, "#cooling-cells", NULL)) {
+               priv->cdev = of_cpufreq_cooling_register(np,
+                                                        policy->related_cpus);
+               if (IS_ERR(priv->cdev)) {
+                       dev_err(priv->cpu_dev,
+                               "running cpufreq without cooling device: %ld\n",
+                               PTR_ERR(priv->cdev));
+
+                       priv->cdev = NULL;
+               }
+       }
+
+       of_node_put(np);
+}
+
 static struct cpufreq_driver dt_cpufreq_driver = {
        .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .verify = cpufreq_generic_frequency_table_verify,
@@ -336,6 +355,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
        .get = cpufreq_generic_get,
        .init = cpufreq_init,
        .exit = cpufreq_exit,
+       .ready = cpufreq_ready,
        .name = "cpufreq-dt",
        .attr = cpufreq_generic_attr,
 };
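Aside: the set_target() hunk above keeps the usual DVFS ordering: raise the supply before the clock when scaling up, and lower it only after the clock when scaling down. A minimal user-space sketch of that ordering; the set_voltage()/set_rate() helpers and the example voltages are stand-ins for illustration, not the kernel regulator/clk API.

#include <stdio.h>

static unsigned long cur_hz = 800000000;	/* assumed current CPU clock, Hz */
static long cur_uv = 1100000;			/* assumed current CPU supply, uV */

static void set_voltage(long uv)	{ cur_uv = uv; printf("supply -> %ld uV\n", uv); }
static void set_rate(unsigned long hz)	{ cur_hz = hz; printf("clock  -> %lu Hz\n", hz); }

/* Mirror the ordering in set_target(): voltage first when scaling up,
 * voltage last when scaling down. */
static void transition(unsigned long new_hz, long new_uv)
{
	unsigned long old_hz = cur_hz;

	if (new_hz > old_hz)
		set_voltage(new_uv);

	set_rate(new_hz);

	if (new_hz < old_hz)
		set_voltage(new_uv);
}

int main(void)
{
	transition(1000000000, 1250000);	/* 800 MHz -> 1 GHz: voltage, then clock */
	transition(600000000, 1000000);		/* 1 GHz -> 600 MHz: clock, then voltage */
	return 0;
}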
index 4473eba1d6b0b6084f632a8cb25e7c7cd815170b..a09a29c312a9cbeef8cb43a2862c07f13c78a24e 100644 (file)
@@ -535,7 +535,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 static ssize_t store_##file_name                                       \
 (struct cpufreq_policy *policy, const char *buf, size_t count)         \
 {                                                                      \
-       int ret;                                                        \
+       int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
@@ -546,8 +546,10 @@ static ssize_t store_##file_name                                   \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
+       temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);          \
-       policy->user_policy.object = policy->object;                    \
+       if (!ret)                                                       \
+               policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
 }
@@ -898,46 +900,31 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
        struct freq_attr **drv_attr;
        int ret = 0;
 
-       /* prepare interface data */
-       ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-                                  &dev->kobj, "cpufreq");
-       if (ret)
-               return ret;
-
        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
-                       goto err_out_kobj_put;
+                       return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
-                       goto err_out_kobj_put;
+                       return ret;
        }
 
        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
-               goto err_out_kobj_put;
+               return ret;
 
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
-                       goto err_out_kobj_put;
+                       return ret;
        }
 
-       ret = cpufreq_add_dev_symlink(policy);
-       if (ret)
-               goto err_out_kobj_put;
-
-       return ret;
-
-err_out_kobj_put:
-       kobject_put(&policy->kobj);
-       wait_for_completion(&policy->kobj_unregister);
-       return ret;
+       return cpufreq_add_dev_symlink(policy);
 }
 
 static void cpufreq_init_policy(struct cpufreq_policy *policy)
@@ -1196,6 +1183,8 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
                goto err_set_policy_cpu;
        }
 
+       down_write(&policy->rwsem);
+
        /* related cpus should atleast have policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
@@ -1208,9 +1197,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        if (!recover_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;
+
+               /* prepare interface data */
+               ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+                                          &dev->kobj, "cpufreq");
+               if (ret) {
+                       pr_err("%s: failed to init policy->kobj: %d\n",
+                              __func__, ret);
+                       goto err_init_policy_kobj;
+               }
        }
 
-       down_write(&policy->rwsem);
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1288,8 +1285,13 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        up_write(&policy->rwsem);
 
        kobject_uevent(&policy->kobj, KOBJ_ADD);
+
        up_read(&cpufreq_rwsem);
 
+       /* Callback for handling stuff after policy is ready */
+       if (cpufreq_driver->ready)
+               cpufreq_driver->ready(policy);
+
        pr_debug("initialization complete\n");
 
        return 0;
@@ -1301,6 +1303,11 @@ err_get_freq:
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+       if (!recover_policy) {
+               kobject_put(&policy->kobj);
+               wait_for_completion(&policy->kobj_unregister);
+       }
+err_init_policy_kobj:
        up_write(&policy->rwsem);
 
        if (cpufreq_driver->exit)
index f33f25b483ca6ce0ac87cfda264ba0f1a1eff8d6..27a57ed9eb2ccd3e29276a5486809084ece84855 100644 (file)
@@ -371,7 +371,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(dvfs_info->dev,
                        "failed to init cpufreq table: %d\n", ret);
-               goto err_put_node;
+               goto err_free_opp;
        }
        dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
        exynos_sort_descend_freq_table();
@@ -423,6 +423,8 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
 err_free_table:
        dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+err_free_opp:
+       of_free_opp_table(dvfs_info->dev);
 err_put_node:
        of_node_put(np);
        dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -433,6 +435,7 @@ static int exynos_cpufreq_remove(struct platform_device *pdev)
 {
        cpufreq_unregister_driver(&exynos_driver);
        dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+       of_free_opp_table(dvfs_info->dev);
        return 0;
 }
 
index c2d30765bf3d209618bc3b9049cff85f5d5c1745..5da1d131f7700ad00ff69196ce412fd62e7cd259 100644 (file)
@@ -31,6 +31,7 @@ static struct clk *step_clk;
 static struct clk *pll2_pfd2_396m_clk;
 
 static struct device *cpu_dev;
+static bool free_opp;
 static struct cpufreq_frequency_table *freq_table;
 static unsigned int transition_latency;
 
@@ -207,11 +208,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
                        goto put_reg;
                }
 
+               /* Because we have added the OPPs here, we must free them */
+               free_opp = true;
+
                num = dev_pm_opp_get_opp_count(cpu_dev);
                if (num < 0) {
                        ret = num;
                        dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
-                       goto put_reg;
+                       goto out_free_opp;
                }
        }
 
@@ -306,6 +310,9 @@ soc_opp_out:
 
 free_freq_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_opp:
+       if (free_opp)
+               of_free_opp_table(cpu_dev);
 put_reg:
        if (!IS_ERR(arm_reg))
                regulator_put(arm_reg);
@@ -332,6 +339,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
 {
        cpufreq_unregister_driver(&imx6q_cpufreq_driver);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+       if (free_opp)
+               of_free_opp_table(cpu_dev);
        regulator_put(arm_reg);
        if (!IS_ERR(pu_reg))
                regulator_put(pu_reg);
index 27bb6d3877ed6cc64ea1fc5bde5c1094a6ab34b0..742eefba12c2101d8bc8e035958d7bb59596ef93 100644 (file)
@@ -137,6 +137,7 @@ struct cpu_defaults {
 
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
+static int hwp_active;
 
 struct perf_limits {
        int no_turbo;
@@ -198,7 +199,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
 
        pid->integral += fp_error;
 
-       /* limit the integral term */
+       /*
+        * We limit the integral here so that it will never
+        * get higher than 30.  This prevents it from becoming
+        * too large an input over long periods of time and allows
+        * it to get factored out sooner.
+        *
+        * The value of 30 was chosen through experimentation.
+        */
        integral_limit = int_tofp(30);
        if (pid->integral > integral_limit)
                pid->integral = integral_limit;
@@ -244,6 +252,34 @@ static inline void update_turbo_state(void)
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
+#define PCT_TO_HWP(x) ((x) * 255 / 100)
+static void intel_pstate_hwp_set(void)
+{
+       int min, max, cpu;
+       u64 value, freq;
+
+       get_online_cpus();
+
+       for_each_online_cpu(cpu) {
+               rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+               min = PCT_TO_HWP(limits.min_perf_pct);
+               value &= ~HWP_MIN_PERF(~0L);
+               value |= HWP_MIN_PERF(min);
+
+               max = PCT_TO_HWP(limits.max_perf_pct);
+               if (limits.no_turbo) {
+                       rdmsrl(MSR_HWP_CAPABILITIES, freq);
+                       max = HWP_GUARANTEED_PERF(freq);
+               }
+
+               value &= ~HWP_MAX_PERF(~0L);
+               value |= HWP_MAX_PERF(max);
+               wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+       }
+
+       put_online_cpus();
+}
+
 /************************** debugfs begin ************************/
 static int pid_param_set(void *data, u64 val)
 {
@@ -279,6 +315,8 @@ static void __init intel_pstate_debug_expose_params(void)
        struct dentry *debugfs_parent;
        int i = 0;
 
+       if (hwp_active)
+               return;
        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
@@ -329,8 +367,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                return -EPERM;
        }
+
        limits.no_turbo = clamp_t(int, input, 0, 1);
 
+       if (hwp_active)
+               intel_pstate_hwp_set();
+
        return count;
 }
 
@@ -348,6 +390,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
+       if (hwp_active)
+               intel_pstate_hwp_set();
        return count;
 }
 
@@ -363,6 +407,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
        limits.min_perf_pct = clamp_t(int, input, 0 , 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
+       if (hwp_active)
+               intel_pstate_hwp_set();
        return count;
 }
 
@@ -395,8 +441,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
        rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
        BUG_ON(rc);
 }
-
 /************************** sysfs end ************************/
+
+static void intel_pstate_hwp_enable(void)
+{
+       hwp_active++;
+       pr_info("intel_pstate HWP enabled\n");
+
+       wrmsrl(MSR_PM_ENABLE, 0x1);
+}
+
 static int byt_get_min_pstate(void)
 {
        u64 value;
@@ -569,6 +623,11 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        if (limits.no_turbo || limits.turbo_disabled)
                max_perf = cpu->pstate.max_pstate;
 
+       /*
+        * performance can be limited by user through sysfs, by cpufreq
+        * policy, or by cpu specific default values determined through
+        * experimentation.
+        */
        max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
        *max = clamp_t(int, max_perf_adj,
                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
@@ -648,6 +707,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
        cpu->prev_mperf = mperf;
 }
 
+static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
+{
+       int delay;
+
+       delay = msecs_to_jiffies(50);
+       mod_timer_pinned(&cpu->timer, jiffies + delay);
+}
+
 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 {
        int delay;
@@ -662,11 +729,29 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
        u32 duration_us;
        u32 sample_time;
 
+       /*
+        * core_busy is the ratio of actual performance to max
+        * max_pstate is the max non turbo pstate available
+        * current_pstate was the pstate that was requested during
+        *      the last sample period.
+        *
+        * We normalize core_busy, which was our actual percent
+        * performance to what we requested during the last sample
+        * period. The result will be a percentage of busy at a
+        * specified pstate.
+        */
        core_busy = cpu->sample.core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
+       /*
+        * Since we have a deferred timer, it will not fire unless
+        * we are in C0.  So, determine if the actual elapsed time
+        * is significantly greater (3x) than our sample interval.  If it
+        * is, then we were idle for a long enough period of time
+        * to adjust our busyness.
+        */
        sample_time = pid_params.sample_rate_ms  * USEC_PER_MSEC;
        duration_us = (u32) ktime_us_delta(cpu->sample.time,
                                           cpu->last_sample_time);
@@ -694,6 +779,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
        intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
 }
 
+static void intel_hwp_timer_func(unsigned long __data)
+{
+       struct cpudata *cpu = (struct cpudata *) __data;
+
+       intel_pstate_sample(cpu);
+       intel_hwp_set_sample_time(cpu);
+}
+
 static void intel_pstate_timer_func(unsigned long __data)
 {
        struct cpudata *cpu = (struct cpudata *) __data;
@@ -730,6 +823,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x3f, core_params),
        ICPU(0x45, core_params),
        ICPU(0x46, core_params),
+       ICPU(0x47, core_params),
        ICPU(0x4c, byt_params),
        ICPU(0x4f, core_params),
        ICPU(0x56, core_params),
@@ -737,6 +831,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
+       ICPU(0x56, core_params),
+       {}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
        struct cpudata *cpu;
@@ -753,9 +852,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        intel_pstate_get_cpu_pstates(cpu);
 
        init_timer_deferrable(&cpu->timer);
-       cpu->timer.function = intel_pstate_timer_func;
        cpu->timer.data = (unsigned long)cpu;
        cpu->timer.expires = jiffies + HZ/100;
+
+       if (!hwp_active)
+               cpu->timer.function = intel_pstate_timer_func;
+       else
+               cpu->timer.function = intel_hwp_timer_func;
+
        intel_pstate_busy_pid_reset(cpu);
        intel_pstate_sample(cpu);
 
@@ -792,6 +896,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                limits.no_turbo = 0;
                return 0;
        }
+
        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
@@ -801,6 +906,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
+       if (hwp_active)
+               intel_pstate_hwp_set();
+
        return 0;
 }
 
@@ -823,6 +931,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
        pr_info("intel_pstate CPU %d exiting\n", cpu_num);
 
        del_timer_sync(&all_cpu_data[cpu_num]->timer);
+       if (hwp_active)
+               return;
+
        intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }
 
@@ -866,6 +977,8 @@ static struct cpufreq_driver intel_pstate_driver = {
 };
 
 static int __initdata no_load;
+static int __initdata no_hwp;
+static unsigned int force_load;
 
 static int intel_pstate_msrs_not_valid(void)
 {
@@ -943,15 +1056,46 @@ static bool intel_pstate_no_acpi_pss(void)
        return true;
 }
 
+static bool intel_pstate_has_acpi_ppc(void)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               struct acpi_processor *pr = per_cpu(processors, i);
+
+               if (!pr)
+                       continue;
+               if (acpi_has_method(pr->handle, "_PPC"))
+                       return true;
+       }
+       return false;
+}
+
+enum {
+       PSS,
+       PPC,
+};
+
 struct hw_vendor_info {
        u16  valid;
        char oem_id[ACPI_OEM_ID_SIZE];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+       int  oem_pwr_table;
 };
 
 /* Hardware vendor-specific info that has its own power management modes */
 static struct hw_vendor_info vendor_info[] = {
-       {1, "HP    ", "ProLiant"},
+       {1, "HP    ", "ProLiant", PSS},
+       {1, "ORACLE", "X4-2    ", PPC},
+       {1, "ORACLE", "X4-2L   ", PPC},
+       {1, "ORACLE", "X4-2B   ", PPC},
+       {1, "ORACLE", "X3-2    ", PPC},
+       {1, "ORACLE", "X3-2L   ", PPC},
+       {1, "ORACLE", "X3-2B   ", PPC},
+       {1, "ORACLE", "X4470M2 ", PPC},
+       {1, "ORACLE", "X4270M3 ", PPC},
+       {1, "ORACLE", "X4270M2 ", PPC},
+       {1, "ORACLE", "X4170M2 ", PPC},
        {0, "", ""},
 };
 
@@ -959,6 +1103,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
 {
        struct acpi_table_header hdr;
        struct hw_vendor_info *v_info;
+       const struct x86_cpu_id *id;
+       u64 misc_pwr;
+
+       id = x86_match_cpu(intel_pstate_cpu_oob_ids);
+       if (id) {
+               rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+               if (misc_pwr & (1 << 8))
+                       return true;
+       }
 
        if (acpi_disabled ||
            ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
@@ -966,15 +1119,22 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
 
        for (v_info = vendor_info; v_info->valid; v_info++) {
                if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
-                   !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
-                   intel_pstate_no_acpi_pss())
-                       return true;
+                       !strncmp(hdr.oem_table_id, v_info->oem_table_id,
+                                               ACPI_OEM_TABLE_ID_SIZE))
+                       switch (v_info->oem_pwr_table) {
+                       case PSS:
+                               return intel_pstate_no_acpi_pss();
+                       case PPC:
+                               return intel_pstate_has_acpi_ppc() &&
+                                       (!force_load);
+                       }
        }
 
        return false;
 }
 #else /* CONFIG_ACPI not enabled */
 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
+static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
 #endif /* CONFIG_ACPI */
 
 static int __init intel_pstate_init(void)
@@ -982,6 +1142,7 @@ static int __init intel_pstate_init(void)
        int cpu, rc = 0;
        const struct x86_cpu_id *id;
        struct cpu_defaults *cpu_info;
+       struct cpuinfo_x86 *c = &boot_cpu_data;
 
        if (no_load)
                return -ENODEV;
@@ -1011,6 +1172,9 @@ static int __init intel_pstate_init(void)
        if (!all_cpu_data)
                return -ENOMEM;
 
+       if (cpu_has(c, X86_FEATURE_HWP) && !no_hwp)
+               intel_pstate_hwp_enable();
+
        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
                goto out;
@@ -1041,6 +1205,10 @@ static int __init intel_pstate_setup(char *str)
 
        if (!strcmp(str, "disable"))
                no_load = 1;
+       if (!strcmp(str, "no_hwp"))
+               no_hwp = 1;
+       if (!strcmp(str, "force"))
+               force_load = 1;
        return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
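Aside: intel_pstate_hwp_set() above converts the sysfs percentage limits into 8-bit HWP performance values with PCT_TO_HWP() and packs them into MSR_HWP_REQUEST via HWP_MIN_PERF()/HWP_MAX_PERF(). A stand-alone sketch of that arithmetic; the field positions used below (min in bits 7:0, max in bits 15:8) are inferred from how the macros are used in this hunk and should be treated as an assumption for the example.

#include <stdio.h>
#include <stdint.h>

#define PCT_TO_HWP(x)	((x) * 255 / 100)
#define HWP_MIN_PERF(x)	((uint64_t)((x) & 0xff))	/* assumed: bits 7:0 */
#define HWP_MAX_PERF(x)	((uint64_t)((x) & 0xff) << 8)	/* assumed: bits 15:8 */

/* Rebuild an HWP request value from min/max percentages, as
 * intel_pstate_hwp_set() does for each online CPU. */
static uint64_t hwp_request(uint64_t value, int min_pct, int max_pct)
{
	value &= ~HWP_MIN_PERF(~0UL);
	value |= HWP_MIN_PERF(PCT_TO_HWP(min_pct));

	value &= ~HWP_MAX_PERF(~0UL);
	value |= HWP_MAX_PERF(PCT_TO_HWP(max_pct));

	return value;
}

int main(void)
{
	/* e.g. min_perf_pct = 40, max_perf_pct = 100 -> 0xff66 */
	printf("HWP_REQUEST = 0x%llx\n",
	       (unsigned long long)hwp_request(0, 40, 100));
	return 0;
}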
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
new file mode 100644 (file)
index 0000000..25fbd6a
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * CPU Frequency Scaling for Loongson 1 SoC
+ *
+ * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/mach-loongson1/cpufreq.h>
+#include <asm/mach-loongson1/loongson1.h>
+
+static struct {
+       struct device *dev;
+       struct clk *clk;        /* CPU clk */
+       struct clk *mux_clk;    /* MUX of CPU clk */
+       struct clk *pll_clk;    /* PLL clk */
+       struct clk *osc_clk;    /* OSC clk */
+       unsigned int max_freq;
+       unsigned int min_freq;
+} ls1x_cpufreq;
+
+static int ls1x_cpufreq_notifier(struct notifier_block *nb,
+                                unsigned long val, void *data)
+{
+       if (val == CPUFREQ_POSTCHANGE)
+               current_cpu_data.udelay_val = loops_per_jiffy;
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block ls1x_cpufreq_notifier_block = {
+       .notifier_call = ls1x_cpufreq_notifier
+};
+
+static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
+                              unsigned int index)
+{
+       unsigned int old_freq, new_freq;
+
+       old_freq = policy->cur;
+       new_freq = policy->freq_table[index].frequency;
+
+       /*
+        * The procedure of reconfiguring CPU clk is as below.
+        *
+        *  - Reparent CPU clk to OSC clk
+        *  - Reset CPU clock (very important)
+        *  - Reconfigure CPU DIV
+        *  - Reparent CPU clk back to CPU DIV clk
+        */
+
+       dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
+       clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
+       __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
+                    LS1X_CLK_PLL_DIV);
+       __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
+                    LS1X_CLK_PLL_DIV);
+       clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
+       clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
+
+       return 0;
+}
+
+static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
+{
+       struct cpufreq_frequency_table *freq_tbl;
+       unsigned int pll_freq, freq;
+       int steps, i, ret;
+
+       pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
+
+       steps = 1 << DIV_CPU_WIDTH;
+       freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
+       if (!freq_tbl) {
+               dev_err(ls1x_cpufreq.dev,
+                       "failed to alloc cpufreq_frequency_table\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < (steps - 1); i++) {
+               freq = pll_freq / (i + 1);
+               if ((freq < ls1x_cpufreq.min_freq) ||
+                   (freq > ls1x_cpufreq.max_freq))
+                       freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
+               else
+                       freq_tbl[i].frequency = freq;
+               dev_dbg(ls1x_cpufreq.dev,
+                       "cpufreq table: index %d: frequency %d\n", i,
+                       freq_tbl[i].frequency);
+       }
+       freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+       policy->clk = ls1x_cpufreq.clk;
+       ret = cpufreq_generic_init(policy, freq_tbl, 0);
+       if (ret)
+               kfree(freq_tbl);
+out:
+       return ret;
+}
+
+static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
+{
+       kfree(policy->freq_table);
+       return 0;
+}
+
+static struct cpufreq_driver ls1x_cpufreq_driver = {
+       .name           = "cpufreq-ls1x",
+       .flags          = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+       .verify         = cpufreq_generic_frequency_table_verify,
+       .target_index   = ls1x_cpufreq_target,
+       .get            = cpufreq_generic_get,
+       .init           = ls1x_cpufreq_init,
+       .exit           = ls1x_cpufreq_exit,
+       .attr           = cpufreq_generic_attr,
+};
+
+static int ls1x_cpufreq_remove(struct platform_device *pdev)
+{
+       cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
+                                   CPUFREQ_TRANSITION_NOTIFIER);
+       cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+
+       return 0;
+}
+
+static int ls1x_cpufreq_probe(struct platform_device *pdev)
+{
+       struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
+       struct clk *clk;
+       int ret;
+
+       if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
+               return -EINVAL;
+
+       ls1x_cpufreq.dev = &pdev->dev;
+
+       clk = devm_clk_get(&pdev->dev, pdata->clk_name);
+       if (IS_ERR(clk)) {
+               dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+                       pdata->clk_name);
+               ret = PTR_ERR(clk);
+               goto out;
+       }
+       ls1x_cpufreq.clk = clk;
+
+       clk = clk_get_parent(clk);
+       if (IS_ERR(clk)) {
+               dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
+                       __clk_get_name(ls1x_cpufreq.clk));
+               ret = PTR_ERR(clk);
+               goto out;
+       }
+       ls1x_cpufreq.mux_clk = clk;
+
+       clk = clk_get_parent(clk);
+       if (IS_ERR(clk)) {
+               dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
+                       __clk_get_name(ls1x_cpufreq.mux_clk));
+               ret = PTR_ERR(clk);
+               goto out;
+       }
+       ls1x_cpufreq.pll_clk = clk;
+
+       clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
+       if (IS_ERR(clk)) {
+               dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+                       pdata->osc_clk_name);
+               ret = PTR_ERR(clk);
+               goto out;
+       }
+       ls1x_cpufreq.osc_clk = clk;
+
+       ls1x_cpufreq.max_freq = pdata->max_freq;
+       ls1x_cpufreq.min_freq = pdata->min_freq;
+
+       ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
+       if (ret) {
+               dev_err(ls1x_cpufreq.dev,
+                       "failed to register cpufreq driver: %d\n", ret);
+               goto out;
+       }
+
+       ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+
+       if (!ret)
+               goto out;
+
+       dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
+               ret);
+
+       cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+out:
+       return ret;
+}
+
+static struct platform_driver ls1x_cpufreq_platdrv = {
+       .driver = {
+               .name   = "ls1x-cpufreq",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = ls1x_cpufreq_probe,
+       .remove         = ls1x_cpufreq_remove,
+};
+
+module_platform_driver(ls1x_cpufreq_platdrv);
+
+MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
+MODULE_LICENSE("GPL");
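Aside: ls1x_cpufreq_init() above derives the whole frequency table from the PLL rate and the CPU divider, invalidating entries outside the platform limits. A small sketch of that computation; the 504 MHz PLL, 4-bit divider width and 200-504 MHz limits are assumptions for illustration (on real hardware they come from the clock tree and platform data).

#include <stdio.h>

#define DIV_CPU_WIDTH	4	/* assumed divider field width */

int main(void)
{
	unsigned int pll_khz = 504000;				/* assumed PLL rate, kHz */
	unsigned int min_khz = 200000, max_khz = 504000;	/* assumed platform limits */
	int steps = 1 << DIV_CPU_WIDTH;
	int i;

	for (i = 0; i < steps - 1; i++) {
		unsigned int freq = pll_khz / (i + 1);	/* same formula as the driver */

		if (freq < min_khz || freq > max_khz)
			printf("index %2d: divider %2d -> invalid\n", i, i + 1);
		else
			printf("index %2d: divider %2d -> %u kHz\n", i, i + 1, freq);
	}
	return 0;
}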
index 4d2c8e861089a45a9fe12e1106e08e0fd7482195..2a0d58959acfe2861cdf9e826ef397958fd6fef2 100644 (file)
@@ -603,6 +603,13 @@ static void __exit pcc_cpufreq_exit(void)
        free_percpu(pcc_cpu_info);
 }
 
+static const struct acpi_device_id processor_device_ids[] = {
+       {ACPI_PROCESSOR_OBJECT_HID, },
+       {ACPI_PROCESSOR_DEVICE_HID, },
+       {},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
 MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
 MODULE_VERSION(PCC_VERSION);
 MODULE_DESCRIPTION("Processor Clocking Control interface driver");
index f0a1a56406ebde9bf7119b4a56280348805c533f..8bcdb981d540979f1ebcb2eb59bc33b989a904e5 100644 (file)
@@ -9408,6 +9408,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+           crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+               return true;
+
        /*
         * The relevant registers doen't exist on pre-ctg.
         * As the flip done interrupt doesn't trigger for mmio
index 5ad45bfff3feba593460ffd3f47984b3ef6f5b0e..4bcd9175732182dac168c60b0cd5a87730a527d1 100644 (file)
@@ -4450,6 +4450,7 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
         * vdd might still be enabled do to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
+       cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
index 300c4b3d4669426d5085d7e5d2e52e23321988d3..26baa9c05f6c49ac40e2a238a6079fa8e0e40920 100644 (file)
@@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector)
        }
 
        if (!radeon_connector->edid) {
+               /* don't fetch the edid from the vbios if ddc fails and runpm is
+                * enabled so we report disconnected.
+                */
+               if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+                       return;
+
                if (rdev->is_atom_bios) {
                        /* some laptops provide a hardcoded edid in rom for LCDs */
                        if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
@@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 radeon_lvds_detect(struct drm_connector *connector, bool force)
 {
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder = radeon_best_single_encoder(connector);
        enum drm_connector_status ret = connector_status_disconnected;
@@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
                /* check if panel is valid */
                if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
                        ret = connector_status_connected;
-
+               /* don't fetch the edid from the vbios if ddc fails and runpm is
+                * enabled so we report disconnected.
+                */
+               if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+                       ret = connector_status_disconnected;
        }
 
        /* check for edid as well */
@@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                        /* check if panel is valid */
                        if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
                                ret = connector_status_connected;
+                       /* don't fetch the edid from the vbios if ddc fails and runpm is
+                        * enabled so we report disconnected.
+                        */
+                       if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+                               ret = connector_status_disconnected;
                }
                /* eDP is always DP */
                radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
index 7784911d78ef6fc54d6aeea23950f4585d3c74c4..00fc59762e0df3bba0758d1f18e90328e5726635 100644 (file)
@@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
        if (rdev->flags & RADEON_IS_AGP)
                return false;
 
+       /*
+        * Older chips have a HW limitation, they can only generate 40 bits
+        * of address for "64-bit" MSIs which breaks on some platforms, notably
+        * IBM POWER servers, so we limit them
+        */
+       if (rdev->family < CHIP_BONAIRE) {
+               dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
+               rdev->pdev->no_64bit_msi = 1;
+       }
+
        /* force MSI on */
        if (radeon_msi == 1)
                return true;
index 6aac695b1688beaf2adc5336bd842bb7993d2d09..9b55e673b67caf1365c7452ce51a22a37510af02 100644 (file)
@@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id)
        if (ret)
                goto clock_dis;
 
-       data->hwmon_dev = devm_hwmon_device_register_with_groups(dev,
-                                                                client->name,
-                                                                data,
-                                                                g762_groups);
+       data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+                                                           data, g762_groups);
        if (IS_ERR(data->hwmon_dev)) {
                ret = PTR_ERR(data->hwmon_dev);
                goto clock_dis;
index 22c096ce39ad765c6a50d26ff80e77158fe3bbf6..513bd6d14293d80e5ce502080a092b5f970fe840 100644 (file)
@@ -44,6 +44,9 @@
 
 #define BMC150_ACCEL_REG_INT_STATUS_2          0x0B
 #define BMC150_ACCEL_ANY_MOTION_MASK           0x07
+#define BMC150_ACCEL_ANY_MOTION_BIT_X          BIT(0)
+#define BMC150_ACCEL_ANY_MOTION_BIT_Y          BIT(1)
+#define BMC150_ACCEL_ANY_MOTION_BIT_Z          BIT(2)
 #define BMC150_ACCEL_ANY_MOTION_BIT_SIGN       BIT(3)
 
 #define BMC150_ACCEL_REG_PMU_LPW               0x11
@@ -92,9 +95,9 @@
 #define BMC150_ACCEL_SLOPE_THRES_MASK          0xFF
 
 /* Slope duration in terms of number of samples */
-#define BMC150_ACCEL_DEF_SLOPE_DURATION        2
+#define BMC150_ACCEL_DEF_SLOPE_DURATION                1
 /* in terms of multiples of g's/LSB, based on range */
-#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD       5
+#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD       1
 
 #define BMC150_ACCEL_REG_XOUT_L                0x02
 
@@ -536,6 +539,9 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
        if (ret < 0) {
                dev_err(&data->client->dev,
                        "Failed: bmc150_accel_set_power_state for %d\n", on);
+               if (on)
+                       pm_runtime_put_noidle(&data->client->dev);
+
                return ret;
        }
 
@@ -811,6 +817,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
 
        ret =  bmc150_accel_setup_any_motion_interrupt(data, state);
        if (ret < 0) {
+               bmc150_accel_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -846,7 +853,7 @@ static const struct attribute_group bmc150_accel_attrs_group = {
 
 static const struct iio_event_spec bmc150_accel_event = {
                .type = IIO_EV_TYPE_ROC,
-               .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING,
+               .dir = IIO_EV_DIR_EITHER,
                .mask_separate = BIT(IIO_EV_INFO_VALUE) |
                                 BIT(IIO_EV_INFO_ENABLE) |
                                 BIT(IIO_EV_INFO_PERIOD)
@@ -1054,6 +1061,7 @@ static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig,
        else
                ret = bmc150_accel_setup_new_data_interrupt(data, state);
        if (ret < 0) {
+               bmc150_accel_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -1092,12 +1100,26 @@ static irqreturn_t bmc150_accel_event_handler(int irq, void *private)
        else
                dir = IIO_EV_DIR_RISING;
 
-       if (ret & BMC150_ACCEL_ANY_MOTION_MASK)
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+                                                       0,
+                                                       IIO_MOD_X,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
                                                        0,
-                                                       IIO_MOD_X_OR_Y_OR_Z,
+                                                       IIO_MOD_Y,
                                                        IIO_EV_TYPE_ROC,
-                                                       IIO_EV_DIR_EITHER),
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+                                                       0,
+                                                       IIO_MOD_Z,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
                                                        data->timestamp);
 ack_intr_status:
        if (!data->dready_trigger_on)
@@ -1354,10 +1376,14 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
 {
        struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bmc150_accel_data *data = iio_priv(indio_dev);
+       int ret;
 
        dev_dbg(&data->client->dev,  __func__);
+       ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+       if (ret < 0)
+               return -EAGAIN;
 
-       return bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+       return 0;
 }
 
 static int bmc150_accel_runtime_resume(struct device *dev)
index a23e58c4ed99b222b674ce2386c9c36a3809efbe..320aa72c0349ecabeae7ea4a0bdb59d2c84cd63d 100644 (file)
@@ -269,6 +269,8 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
                return ret;
        }
 
+       ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 |
+                KXCJK1013_REG_CTRL1_BIT_GSEL1);
        ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3);
        ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4);
 
index b58d6302521f4d651359715331e83a5a416583a0..d095efe1ba149caa57136ec1f27f1c6caac10cd8 100644 (file)
@@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev)
 
 static const struct mcb_device_id men_z188_ids[] = {
        { .device = 0xbc },
+       { }
 };
 MODULE_DEVICE_TABLE(mcb, men_z188_ids);
 
index 1f967e0d688e47a084f29e2b484621016ada7c3c..d2fa526740ca188e00926f42b733af91d4dcd9df 100644 (file)
@@ -67,6 +67,9 @@
 #define BMG160_REG_INT_EN_0            0x15
 #define BMG160_DATA_ENABLE_INT         BIT(7)
 
+#define BMG160_REG_INT_EN_1            0x16
+#define BMG160_INT1_BIT_OD             BIT(1)
+
 #define BMG160_REG_XOUT_L              0x02
 #define BMG160_AXIS_TO_REG(axis)       (BMG160_REG_XOUT_L + (axis * 2))
 
@@ -82,6 +85,9 @@
 
 #define BMG160_REG_INT_STATUS_2        0x0B
 #define BMG160_ANY_MOTION_MASK         0x07
+#define BMG160_ANY_MOTION_BIT_X                BIT(0)
+#define BMG160_ANY_MOTION_BIT_Y                BIT(1)
+#define BMG160_ANY_MOTION_BIT_Z                BIT(2)
 
 #define BMG160_REG_TEMP                0x08
 #define BMG160_TEMP_CENTER_VAL         23
@@ -222,6 +228,19 @@ static int bmg160_chip_init(struct bmg160_data *data)
        data->slope_thres = ret;
 
        /* Set default interrupt mode */
+       ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "Error reading reg_int_en_1\n");
+               return ret;
+       }
+       ret &= ~BMG160_INT1_BIT_OD;
+       ret = i2c_smbus_write_byte_data(data->client,
+                                       BMG160_REG_INT_EN_1, ret);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "Error writing reg_int_en_1\n");
+               return ret;
+       }
+
        ret = i2c_smbus_write_byte_data(data->client,
                                        BMG160_REG_INT_RST_LATCH,
                                        BMG160_INT_MODE_LATCH_INT |
@@ -250,6 +269,9 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
        if (ret < 0) {
                dev_err(&data->client->dev,
                        "Failed: bmg160_set_power_state for %d\n", on);
+               if (on)
+                       pm_runtime_put_noidle(&data->client->dev);
+
                return ret;
        }
 #endif
@@ -705,6 +727,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev,
 
        ret =  bmg160_setup_any_motion_interrupt(data, state);
        if (ret < 0) {
+               bmg160_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -743,7 +766,7 @@ static const struct attribute_group bmg160_attrs_group = {
 
 static const struct iio_event_spec bmg160_event = {
                .type = IIO_EV_TYPE_ROC,
-               .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING,
+               .dir = IIO_EV_DIR_EITHER,
                .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
                                       BIT(IIO_EV_INFO_ENABLE)
 };
@@ -871,6 +894,7 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
        else
                ret = bmg160_setup_new_data_interrupt(data, state);
        if (ret < 0) {
+               bmg160_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
        }
@@ -908,10 +932,24 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
        else
                dir = IIO_EV_DIR_FALLING;
 
-       if (ret & BMG160_ANY_MOTION_MASK)
+       if (ret & BMG160_ANY_MOTION_BIT_X)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
                                                        0,
-                                                       IIO_MOD_X_OR_Y_OR_Z,
+                                                       IIO_MOD_X,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMG160_ANY_MOTION_BIT_Y)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+                                                       0,
+                                                       IIO_MOD_Y,
+                                                       IIO_EV_TYPE_ROC,
+                                                       dir),
+                                                       data->timestamp);
+       if (ret & BMG160_ANY_MOTION_BIT_Z)
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+                                                       0,
+                                                       IIO_MOD_Z,
                                                        IIO_EV_TYPE_ROC,
                                                        dir),
                                                        data->timestamp);
@@ -1169,8 +1207,15 @@ static int bmg160_runtime_suspend(struct device *dev)
 {
        struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bmg160_data *data = iio_priv(indio_dev);
+       int ret;
+
+       ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "set mode failed\n");
+               return -EAGAIN;
+       }
 
-       return bmg160_set_mode(data, BMG160_MODE_SUSPEND);
+       return 0;
 }
 
 static int bmg160_runtime_resume(struct device *dev)
index 2ed7905a068fc9033e8998e547bd7d750b1fedb9..fc55f0d15b70118a3a5be5fc221f151475f014e3 100644 (file)
@@ -1179,9 +1179,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                }
 
                ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
-               usb_fill_bulk_urb(xpad->bulk_out, udev,
-                               usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress),
-                               xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);
+               if (usb_endpoint_is_bulk_out(ep_irq_in)) {
+                       usb_fill_bulk_urb(xpad->bulk_out, udev,
+                                         usb_sndbulkpipe(udev,
+                                                         ep_irq_in->bEndpointAddress),
+                                         xpad->bdata, XPAD_PKT_LEN,
+                                         xpad_bulk_out, xpad);
+               } else {
+                       usb_fill_int_urb(xpad->bulk_out, udev,
+                                        usb_sndintpipe(udev,
+                                                       ep_irq_in->bEndpointAddress),
+                                        xpad->bdata, XPAD_PKT_LEN,
+                                        xpad_bulk_out, xpad, 0);
+               }
 
                /*
                 * Submit the int URB immediately rather than waiting for open
index 3fcb6b3cb0bdaea5ba0f17dfd6a228c6cbd126ad..f2b97802640755aacfcde04005b125717cb63818 100644 (file)
@@ -428,14 +428,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse,
        int x, y;
        u32 t;
 
-       if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev,
-                         !tp_dev,
-                         psmouse_fmt("Unexpected trackpoint message\n"))) {
-               if (etd->debug == 1)
-                       elantech_packet_dump(psmouse);
-               return;
-       }
-
        t = get_unaligned_le32(&packet[0]);
 
        switch (t & ~7U) {
@@ -793,7 +785,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        unsigned char packet_type = packet[3] & 0x03;
        bool sanity_check;
 
-       if ((packet[3] & 0x0f) == 0x06)
+       if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
                return PACKET_TRACKPOINT;
 
        /*
index 2a7a9174c702a44df3072a2f61c72a4ce16ecb05..f9472920d986368f7aa83eb7d0621489d774b050 100644 (file)
@@ -143,6 +143,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                (const char * const []){"LEN2001", NULL},
                1024, 5022, 2508, 4832
        },
+       {
+               (const char * const []){"LEN2006", NULL},
+               1264, 5675, 1171, 4688
+       },
        { }
 };
 
index 6ae3cdee0681a8008218fbcf25762280b64e48ce..cc4f9d80122ea618e7543f4885843359194770a7 100644 (file)
@@ -217,8 +217,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
        }
 
        ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
-                                            handle_level_irq, 0, 0,
-                                            IRQCHIP_SKIP_SET_WAKE);
+                                            handle_fasteoi_irq,
+                                            IRQ_NOREQUEST | IRQ_NOPROBE |
+                                            IRQ_NOAUTOEN, 0, 0);
        if (ret)
                goto err_domain_remove;
 
@@ -230,7 +231,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
                gc->unused = 0;
                gc->wake_enabled = ~0;
                gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
-               gc->chip_types[0].handler = handle_fasteoi_irq;
                gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
                gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
                gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
index b9f4fb808e49a4afefa0bf66c707dbc01d7c3fa2..5fb38a2ac2261ca06c5bb338ae044a9ed61dc361 100644 (file)
@@ -101,9 +101,9 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
        int parent_irq;
 
        parent_irq = irq_of_parse_and_map(dn, irq);
-       if (parent_irq < 0) {
+       if (!parent_irq) {
                pr_err("failed to map interrupt %d\n", irq);
-               return parent_irq;
+               return -EINVAL;
        }
 
        data->irq_map_mask |= be32_to_cpup(map_mask + irq);
index c15c840987d2808e82cf1b056c231005933c5f8b..14691a4cb84cdf82fb38eefc0081a07460efae7b 100644 (file)
@@ -135,9 +135,9 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
        __raw_writel(0xffffffff, data->base + CPU_CLEAR);
 
        data->parent_irq = irq_of_parse_and_map(np, 0);
-       if (data->parent_irq < 0) {
+       if (!data->parent_irq) {
                pr_err("failed to find parent interrupt\n");
-               ret = data->parent_irq;
+               ret = -EINVAL;
                goto out_unmap;
        }
 
index b9625968daacc0eb89c0f7371a3a4e70242f95ce..4f4c2a7888e5d74ee06ae58df8feaf5f1dea3123 100644 (file)
@@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+       reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
+       core_writel(priv, reg, CORE_WATCHDOG_CTRL);
+
+       do {
+               reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+               if (!(reg & SOFTWARE_RESET))
+                       break;
+
+               usleep_range(1000, 2000);
+       } while (timeout-- > 0);
+
+       if (timeout == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 {
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
                *base = of_iomap(dn, i);
                if (*base == NULL) {
                        pr_err("unable to find register: %s\n", reg_names[i]);
-                       return -ENODEV;
+                       ret = -ENOMEM;
+                       goto out_unmap;
                }
                base++;
        }
 
+       ret = bcm_sf2_sw_rst(priv);
+       if (ret) {
+               pr_err("unable to software reset switch: %d\n", ret);
+               goto out_unmap;
+       }
+
        /* Disable all interrupts and request them */
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
@@ -484,7 +514,8 @@ out_free_irq0:
 out_unmap:
        base = &priv->core;
        for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
-               iounmap(*base);
+               if (*base)
+                       iounmap(*base);
                base++;
        }
        return ret;
@@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
        return 0;
 }
 
-static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
-{
-       unsigned int timeout = 1000;
-       u32 reg;
-
-       reg = core_readl(priv, CORE_WATCHDOG_CTRL);
-       reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
-       core_writel(priv, reg, CORE_WATCHDOG_CTRL);
-
-       do {
-               reg = core_readl(priv, CORE_WATCHDOG_CTRL);
-               if (!(reg & SOFTWARE_RESET))
-                       break;
-
-               usleep_range(1000, 2000);
-       } while (timeout-- > 0);
-
-       if (timeout == 0)
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
index dbb41c1923e60cf1a152bf414ef3428c2d0f2167..77f8f836cbbe18a75d1ffa58fc61c077414eab5b 100644 (file)
@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
-               if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+               if (tnapi->prodring.rx_std &&
+                   tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
                        tg3_free_rings(tp);
                        return -ENOMEM;
                }
index 3e8475cae4f96739f6479e4dc84d90c1f811a9e5..597c463e384d0d5d9fb0f46a9b363e9c9165c6cf 100644 (file)
@@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
                return -EOPNOTSUPP;
 
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
 
+               if (nla_len(attr) < sizeof(mode))
+                       return -EINVAL;
+
                mode = nla_get_u16(attr);
                if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
                        return -EINVAL;
index a2d72a87cbde40465c16277e32d4a242a2873ee3..487cd9c4ac0d33a3ce07bb12fbe3f5db00e01586 100644 (file)
@@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
        /* igb_get_stats64() might access the rings on this vector,
         * we must wait a grace period before freeing it.
         */
-       kfree_rcu(q_vector, rcu);
+       if (q_vector)
+               kfree_rcu(q_vector, rcu);
 }
 
 /**
@@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter)
        adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
 
        for (i = 0; i < adapter->num_q_vectors; i++) {
-               napi_synchronize(&(adapter->q_vector[i]->napi));
-               napi_disable(&(adapter->q_vector[i]->napi));
+               if (adapter->q_vector[i]) {
+                       napi_synchronize(&adapter->q_vector[i]->napi);
+                       napi_disable(&adapter->q_vector[i]->napi);
+               }
        }
 
 
@@ -3717,7 +3720,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_free_tx_resources(adapter->tx_ring[i]);
+               if (adapter->tx_ring[i])
+                       igb_free_tx_resources(adapter->tx_ring[i]);
 }
 
 void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
@@ -3782,7 +3786,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               igb_clean_tx_ring(adapter->tx_ring[i]);
+               if (adapter->tx_ring[i])
+                       igb_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 /**
@@ -3819,7 +3824,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_free_rx_resources(adapter->rx_ring[i]);
+               if (adapter->rx_ring[i])
+                       igb_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -3874,7 +3880,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_clean_rx_ring(adapter->rx_ring[i]);
+               if (adapter->rx_ring[i])
+                       igb_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -7404,6 +7411,8 @@ static int igb_resume(struct device *dev)
        pci_restore_state(pdev);
        pci_save_state(pdev);
 
+       if (!pci_device_is_present(pdev))
+               return -ENODEV;
        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
index d2df4e3d1032496dbf294f4d7b0b741ddfaac6d8..cc51554c9e99a49e74c24ab7bf8f97848a06d7a3 100644 (file)
@@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                 * if SR-IOV and VMDQ are disabled - otherwise ensure
                 * that hardware VLAN filters remain enabled.
                 */
-               if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
-                                       IXGBE_FLAG_SRIOV_ENABLED)))
+               if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+                                     IXGBE_FLAG_SRIOV_ENABLED))
                        vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
@@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                return -EOPNOTSUPP;
 
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
                __u16 mode;
@@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
 
+               if (nla_len(attr) < sizeof(mode))
+                       return -EINVAL;
+
                mode = nla_get_u16(attr);
                if (mode == BRIDGE_MODE_VEPA) {
                        reg = 0;
@@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int i, err, pci_using_dac, expected_gts;
        unsigned int indices = MAX_TX_QUEUES;
        u8 part_str[IXGBE_PBANUM_LENGTH];
+       bool disable_dev = false;
 #ifdef IXGBE_FCOE
        u16 device_caps;
 #endif
@@ -8369,13 +8375,14 @@ err_sw_init:
        iounmap(adapter->io_addr);
        kfree(adapter->mac_table);
 err_ioremap:
+       disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
-       if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+       if (!adapter || disable_dev)
                pci_disable_device(pdev);
        return err;
 }
@@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 {
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
+       bool disable_dev;
 
        ixgbe_dbg_adapter_exit(adapter);
 
@@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
        e_dev_info("complete\n");
 
        kfree(adapter->mac_table);
+       disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
 
        pci_disable_pcie_error_reporting(pdev);
 
-       if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+       if (disable_dev)
                pci_disable_device(pdev);
 }
 
index 5d2498dcf536d5e96cc13b9274b00137d4508c5d..cd5cf6d957c7afa98d76ad54ba42beb1b395e3c8 100644 (file)
@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 
        switch (op) {
        case RES_OP_RESERVE:
-               count = get_param_l(&in_param);
+               count = get_param_l(&in_param) & 0xffffff;
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
index db56fa7ce8f91ae816b4733c501f60504c2ae228..5b0da398621668402f06c64845fe44b9938d69ff 100644 (file)
@@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
         */
        plat->maxmtu = JUMBO_LEN;
 
-       /* Set default value for multicast hash bins */
-       plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-       /* Set default value for unicast filter entries */
-       plat->unicast_filter_entries = 1;
-
        /*
         * Currently only the properties needed on SPEAr600
         * are provided. All other properties should be added
@@ -270,6 +264,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
                return PTR_ERR(addr);
 
        plat_dat = dev_get_platdata(&pdev->dev);
+
+       /* Set default value for multicast hash bins */
+       plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+       /* Set default value for unicast filter entries */
+       plat_dat->unicast_filter_entries = 1;
+
        if (pdev->dev.of_node) {
                if (!plat_dat)
                        plat_dat = devm_kzalloc(&pdev->dev,
index e1e335c339e30182ee9015a8c28f7f27cea580ba..be4649a49c5e8bb2bec68aca08d27a82f5563d40 100644 (file)
@@ -2306,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
        if (ipv6) {
                udp_conf.family = AF_INET6;
                udp_conf.use_udp6_tx_checksums =
-                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+                   !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
                udp_conf.use_udp6_rx_checksums =
-                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+                   !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
        } else {
                udp_conf.family = AF_INET;
                udp_conf.local_ip.s_addr = INADDR_ANY;
index 4f6e66892acc4658473aed57fc4c6cc72dd33fc4..b894a84e8393062a113102c8bbe96cf4b28f4f24 100644 (file)
@@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api {
  * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
  * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
  *     which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
@@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT       = BIT(10),
        IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = BIT(11),
        IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = BIT(12),
+       IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = BIT(18),
 };
 
 /* The default calibrate table size if not specified by firmware file */
index b62405865b25cd185c560731d54ab36304c54812..b6d2683da3a96dab9c53d0c6f291ab69de6e9e8f 100644 (file)
@@ -2448,9 +2448,15 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               /* Use aux roc framework (HS20) */
-               ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
-                                              vif, duration);
+               if (mvm->fw->ucode_capa.capa[0] &
+                   IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+                       /* Use aux roc framework (HS20) */
+                       ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
+                                                      vif, duration);
+                       goto out_unlock;
+               }
+               IWL_ERR(mvm, "hotspot not supported\n");
+               ret = -EINVAL;
                goto out_unlock;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* handle below */
index 61f5d36eca6aaa50c5b33da090736662cc27b77a..846a2e6e34d855d62726eda65b51ee427bc1a939 100644 (file)
@@ -2249,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev,
        /*like read eeprom and so on */
        rtlpriv->cfg->ops->read_eeprom_info(hw);
 
+       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+               err = -ENODEV;
+               goto fail3;
+       }
+       rtlpriv->cfg->ops->init_sw_leds(hw);
+
+       /*aspm */
+       rtl_pci_init_aspm(hw);
+
        /* Init mac80211 sw */
        err = rtl_init_core(hw);
        if (err) {
@@ -2264,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
                goto fail3;
        }
 
-       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
-               err = -ENODEV;
-               goto fail3;
-       }
-       rtlpriv->cfg->ops->init_sw_leds(hw);
-
-       /*aspm */
-       rtl_pci_init_aspm(hw);
-
        err = ieee80211_register_hw(hw);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 310d3163dc5b6a3f1e51a9a59ed999fe991a2f06..8ec8200002c7311025b3ae645c9956bf08c44a17 100644 (file)
@@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
                mac->opmode == NL80211_IFTYPE_ADHOC)
                macid = sta->aid + 1;
        if (wirelessmode == WIRELESS_MODE_N_5G ||
-           wirelessmode == WIRELESS_MODE_AC_5G)
-               ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ];
+           wirelessmode == WIRELESS_MODE_AC_5G ||
+           wirelessmode == WIRELESS_MODE_A)
+               ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
        else
                ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
 
index 4e56a27f9689a925ff3cf26118c6ee505eb963a4..fab0d4b42f58fca511dc447b62ea91607732ba81 100644 (file)
@@ -39,7 +39,7 @@ struct backend_info {
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
 static int read_xenbus_vif_flags(struct backend_info *be);
-static void backend_create_xenvif(struct backend_info *be);
+static int backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
 static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);
@@ -352,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev,
        be->state = XenbusStateInitWait;
 
        /* This kicks hotplug scripts, so do it immediately. */
-       backend_create_xenvif(be);
+       err = backend_create_xenvif(be);
+       if (err)
+               goto fail;
 
        return 0;
 
@@ -397,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev,
 }
 
 
-static void backend_create_xenvif(struct backend_info *be)
+static int backend_create_xenvif(struct backend_info *be)
 {
        int err;
        long handle;
        struct xenbus_device *dev = be->dev;
 
        if (be->vif != NULL)
-               return;
+               return 0;
 
        err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading handle");
-               return;
+               return (err < 0) ? err : -EINVAL;
        }
 
        be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
@@ -417,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be)
                err = PTR_ERR(be->vif);
                be->vif = NULL;
                xenbus_dev_fatal(dev, err, "creating interface");
-               return;
+               return err;
        }
 
        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+       return 0;
 }
 
 static void backend_disconnect(struct backend_info *be)
index 9fab30af0e75abdcec135707363951d7e9e26f8c..084587d7cd134ce0e8e20410368f5b60b9e88f74 100644 (file)
@@ -590,6 +590,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
        return entry;
 }
 
+static int msi_verify_entries(struct pci_dev *dev)
+{
+       struct msi_desc *entry;
+
+       list_for_each_entry(entry, &dev->msi_list, list) {
+               if (!dev->no_64bit_msi || !entry->msg.address_hi)
+                       continue;
+               dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
+                       " tried to assign one above 4G\n");
+               return -EIO;
+       }
+       return 0;
+}
+
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
@@ -627,6 +641,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
                return ret;
        }
 
+       ret = msi_verify_entries(dev);
+       if (ret) {
+               msi_mask_irq(entry, mask, ~mask);
+               free_msi_irqs(dev);
+               return ret;
+       }
+
        ret = populate_msi_sysfs(dev);
        if (ret) {
                msi_mask_irq(entry, mask, ~mask);
@@ -739,6 +760,11 @@ static int msix_capability_init(struct pci_dev *dev,
        if (ret)
                goto out_avail;
 
+       /* Check if all MSI entries honor device restrictions */
+       ret = msi_verify_entries(dev);
+       if (ret)
+               goto out_free;
+
        /*
         * Some devices require MSI-X to be enabled before we can touch the
         * MSI-X registers.  We need to mask all the vectors to prevent
index 79e5c94107a9cc44fe8269f55ab72e8150005e0b..72533c58c1f3bc0d6a18412a197651399abbf6a2 100644 (file)
@@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
        struct fc_frame_header *fh;
        struct fcoe_rcv_info *fr;
        struct fcoe_percpu_s *bg;
+       struct sk_buff *tmp_skb;
        unsigned short oxid;
 
        interface = container_of(ptype, struct bnx2fc_interface,
@@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
                goto err;
        }
 
+       tmp_skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!tmp_skb)
+               goto err;
+
+       skb = tmp_skb;
+
        if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
                printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
                goto err;
index 49014a143c6a9ab56ec81a56d3c7180156341d95..c1d04d4d3c6c140457c19e50865b29bd3287d54f 100644 (file)
@@ -202,6 +202,7 @@ static struct {
        {"IOMEGA", "Io20S         *F", NULL, BLIST_KEY},
        {"INSITE", "Floptical   F*8I", NULL, BLIST_KEY},
        {"INSITE", "I325VM", NULL, BLIST_KEY},
+       {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
        {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
        {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
        {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
index 8adf067ff019344eaf0c42b97007b4abac65e79e..1c3467b8256612b96bafaee3827312e21711de28 100644 (file)
@@ -102,7 +102,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
        clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
                        GFP_KERNEL);
        if (!clkfreq) {
-               dev_err(dev, "%s: no memory\n", "freq-table-hz");
                ret = -ENOMEM;
                goto out;
        }
@@ -112,19 +111,19 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
        if (ret && (ret != -EINVAL)) {
                dev_err(dev, "%s: error reading array %d\n",
                                "freq-table-hz", ret);
-               goto free_clkfreq;
+               return ret;
        }
 
        for (i = 0; i < sz; i += 2) {
                ret = of_property_read_string_index(np,
                                "clock-names", i/2, (const char **)&name);
                if (ret)
-                       goto free_clkfreq;
+                       goto out;
 
                clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
                if (!clki) {
                        ret = -ENOMEM;
-                       goto free_clkfreq;
+                       goto out;
                }
 
                clki->min_freq = clkfreq[i];
@@ -134,8 +133,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
                                clki->min_freq, clki->max_freq, clki->name);
                list_add_tail(&clki->list, &hba->clk_list_head);
        }
-free_clkfreq:
-       kfree(clkfreq);
 out:
        return ret;
 }
@@ -162,10 +159,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
        }
 
        vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
-       if (!vreg) {
-               dev_err(dev, "No memory for %s regulator\n", name);
-               goto out;
-       }
+       if (!vreg)
+               return -ENOMEM;
 
        vreg->name = kstrdup(name, GFP_KERNEL);
 
index 497c38a4a86615178e367e40666937e2d969b41f..605ca60e8a10da25bed98f9d2ac6fdb42b13176f 100644 (file)
@@ -744,6 +744,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
        if (!ufshcd_is_clkgating_allowed(hba))
                return;
        device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+       cancel_work_sync(&hba->clk_gating.ungate_work);
+       cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 }
 
 /* Must be called with host lock acquired */
@@ -2246,6 +2248,22 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
        return ret;
 }
 
+/**
+ * ufshcd_init_pwr_info - setting the POR (power on reset)
+ * values in hba power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_init_pwr_info(struct ufs_hba *hba)
+{
+       hba->pwr_info.gear_rx = UFS_PWM_G1;
+       hba->pwr_info.gear_tx = UFS_PWM_G1;
+       hba->pwr_info.lane_rx = 1;
+       hba->pwr_info.lane_tx = 1;
+       hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
+       hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
+       hba->pwr_info.hs_rate = 0;
+}
+
 /**
  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
  * @hba: per-adapter instance
@@ -2844,8 +2862,13 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
        hba = shost_priv(sdev->host);
        scsi_deactivate_tcq(sdev, hba->nutrs);
        /* Drop the reference as it won't be needed anymore */
-       if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+       if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
+               unsigned long flags;
+
+               spin_lock_irqsave(hba->host->host_lock, flags);
                hba->sdev_ufs_device = NULL;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
 }
 
 /**
@@ -4062,6 +4085,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
 {
        int ret = 0;
+       struct scsi_device *sdev_rpmb;
+       struct scsi_device *sdev_boot;
 
        hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -4070,56 +4095,33 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
                hba->sdev_ufs_device = NULL;
                goto out;
        }
+       scsi_device_put(hba->sdev_ufs_device);
 
-       hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
+       sdev_boot = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
-       if (IS_ERR(hba->sdev_boot)) {
-               ret = PTR_ERR(hba->sdev_boot);
-               hba->sdev_boot = NULL;
+       if (IS_ERR(sdev_boot)) {
+               ret = PTR_ERR(sdev_boot);
                goto remove_sdev_ufs_device;
        }
+       scsi_device_put(sdev_boot);
 
-       hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+       sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
-       if (IS_ERR(hba->sdev_rpmb)) {
-               ret = PTR_ERR(hba->sdev_rpmb);
-               hba->sdev_rpmb = NULL;
+       if (IS_ERR(sdev_rpmb)) {
+               ret = PTR_ERR(sdev_rpmb);
                goto remove_sdev_boot;
        }
+       scsi_device_put(sdev_rpmb);
        goto out;
 
 remove_sdev_boot:
-       scsi_remove_device(hba->sdev_boot);
+       scsi_remove_device(sdev_boot);
 remove_sdev_ufs_device:
        scsi_remove_device(hba->sdev_ufs_device);
 out:
        return ret;
 }
 
-/**
- * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
- *                          ufshcd_scsi_add_wlus()
- * @hba: per-adapter instance
- *
- */
-static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
-{
-       if (hba->sdev_ufs_device) {
-               scsi_remove_device(hba->sdev_ufs_device);
-               hba->sdev_ufs_device = NULL;
-       }
-
-       if (hba->sdev_boot) {
-               scsi_remove_device(hba->sdev_boot);
-               hba->sdev_boot = NULL;
-       }
-
-       if (hba->sdev_rpmb) {
-               scsi_remove_device(hba->sdev_rpmb);
-               hba->sdev_rpmb = NULL;
-       }
-}
-
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -4134,6 +4136,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       ufshcd_init_pwr_info(hba);
+
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
 
@@ -4264,12 +4268,18 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
 {
+       if (!vreg)
+               return 0;
+
        return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
 }
 
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg)
 {
+       if (!vreg)
+               return 0;
+
        return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }
 
@@ -4471,7 +4481,7 @@ out:
                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
                                clk_disable_unprepare(clki->clk);
                }
-       } else if (!ret && on) {
+       } else if (on) {
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.state = CLKS_ON;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4675,11 +4685,25 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 {
        unsigned char cmd[6] = { START_STOP };
        struct scsi_sense_hdr sshdr;
-       struct scsi_device *sdp = hba->sdev_ufs_device;
+       struct scsi_device *sdp;
+       unsigned long flags;
        int ret;
 
-       if (!sdp || !scsi_device_online(sdp))
-               return -ENODEV;
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       sdp = hba->sdev_ufs_device;
+       if (sdp) {
+               ret = scsi_device_get(sdp);
+               if (!ret && !scsi_device_online(sdp)) {
+                       ret = -ENODEV;
+                       scsi_device_put(sdp);
+               }
+       } else {
+               ret = -ENODEV;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (ret)
+               return ret;
 
        /*
         * If scsi commands fail, the scsi mid-layer schedules scsi error-
@@ -4718,6 +4742,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
        if (!ret)
                hba->curr_dev_pwr_mode = pwr_mode;
 out:
+       scsi_device_put(sdp);
        hba->host->eh_noresume = 0;
        return ret;
 }
@@ -5087,7 +5112,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
        int ret = 0;
 
        if (!hba || !hba->is_powered)
-               goto out;
+               return 0;
 
        if (pm_runtime_suspended(hba->dev)) {
                if (hba->rpm_lvl == hba->spm_lvl)
@@ -5231,7 +5256,6 @@ EXPORT_SYMBOL(ufshcd_shutdown);
 void ufshcd_remove(struct ufs_hba *hba)
 {
        scsi_remove_host(hba->host);
-       ufshcd_scsi_remove_wlus(hba);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
        ufshcd_hba_stop(hba);
index 58ecdff5065c27d2dc3b6c563f641675799435dc..4a574aa458557a14ecc15b0d96d69dc8fa1fc9d8 100644 (file)
@@ -392,8 +392,6 @@ struct ufs_hba {
         * "UFS device" W-LU.
         */
        struct scsi_device *sdev_ufs_device;
-       struct scsi_device *sdev_rpmb;
-       struct scsi_device *sdev_boot;
 
        enum ufs_dev_pwr_mode curr_dev_pwr_mode;
        enum uic_link_state uic_link_state;
index 72e12bad14b9c478a8025db3ef7d31601c083aa4..d0d5542efc06db7a74b46a6a7230a4ce65ba53d5 100644 (file)
@@ -376,9 +376,6 @@ static void pump_transfers(unsigned long data)
        chip = dws->cur_chip;
        spi = message->spi;
 
-       if (unlikely(!chip->clk_div))
-               chip->clk_div = dws->max_freq / chip->speed_hz;
-
        if (message->state == ERROR_STATE) {
                message->status = -EIO;
                goto early_exit;
@@ -419,7 +416,7 @@ static void pump_transfers(unsigned long data)
        if (transfer->speed_hz) {
                speed = chip->speed_hz;
 
-               if (transfer->speed_hz != speed) {
+               if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
                        speed = transfer->speed_hz;
 
                        /* clk_div doesn't support odd number */
@@ -581,7 +578,6 @@ static int dw_spi_setup(struct spi_device *spi)
                dev_err(&spi->dev, "No max speed HZ parameter\n");
                return -EINVAL;
        }
-       chip->speed_hz = spi->max_speed_hz;
 
        chip->tmode = 0; /* Tx & Rx */
        /* Default SPI mode is SCPOL = 0, SCPH = 0 */
index 39e2c0a55a2865acc6c50354cf90fa27263bc922..f63de781c72959c7c29b8fb2bb215f4e33b28f87 100644 (file)
@@ -562,9 +562,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 
        sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
        txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
-                                          sspi->word_width;
+                                          (sspi->word_width >> 1);
        rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
-                                          sspi->word_width;
+                                          (sspi->word_width >> 1);
 
        if (!(spi->mode & SPI_CS_HIGH))
                regval |= SIRFSOC_SPI_CS_IDLE_STAT;
index ebcb33df2eb22facb58cebc10277c3ab12925a42..50f20f243981e68b0d007ff714476d0a29a42a80 100644 (file)
@@ -615,13 +615,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
-                       sg_buf = page_address(vm_page) +
-                               ((size_t)buf & ~PAGE_MASK);
+                       sg_set_page(&sgt->sgl[i], vm_page,
+                                   min, offset_in_page(buf));
                } else {
                        sg_buf = buf;
+                       sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }
 
-               sg_set_buf(&sgt->sgl[i], sg_buf, min);
 
                buf += min;
                len -= min;
index 9935e66935af191e25d1ee4fee4ff70564228f20..eddef9cd2e1662087a4595cc48382b88c121832f 100644 (file)
@@ -275,11 +275,11 @@ u8 rtw_sitesurvey_cmd(struct adapter  *padapter, struct ndis_802_11_ssid *ssid,
        if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
                rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL)
                return _FAIL;
 
-       psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL);
+       psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
        if (psurveyPara == NULL) {
                kfree(ph2c);
                return _FAIL;
@@ -405,7 +405,7 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
        else
                RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
 
-       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd == NULL) {
                res = _FAIL;
                RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
@@ -755,13 +755,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
        u8      res = _SUCCESS;
 
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ph2c);
                res = _FAIL;
@@ -967,13 +967,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
        u8      res = _SUCCESS;
 
        if (enqueue) {
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
                if (ph2c == NULL) {
                        res = _FAIL;
                        goto exit;
                }
 
-               pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+               pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
                if (pdrvextra_cmd_parm == NULL) {
                        kfree(ph2c);
                        res = _FAIL;
@@ -1010,13 +1010,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
 
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ph2c == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ph2c);
                res = _FAIL;
@@ -1088,13 +1088,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
 
        u8      res = _SUCCESS;
 
-       ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (ppscmd == NULL) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
+       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
        if (pdrvextra_cmd_parm == NULL) {
                kfree(ppscmd);
                res = _FAIL;
index 5ba5099ec20d1af8fd4415b8dbeab8d3dd1fdcc8..70b1bc3e0e63333abaa5ee2a2f63e4778f3124c2 100644 (file)
@@ -4241,12 +4241,12 @@ void report_survey_event(struct adapter *padapter,
        pcmdpriv = &padapter->cmdpriv;
 
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd_obj == NULL)
                return;
 
        cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
-       pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+       pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
        if (pevtcmd == NULL) {
                kfree(pcmd_obj);
                return;
@@ -4339,12 +4339,12 @@ void report_join_res(struct adapter *padapter, int res)
        struct mlme_ext_info    *pmlmeinfo = &(pmlmeext->mlmext_info);
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
        if (pcmd_obj == NULL)
                return;
 
        cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
-       pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+       pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
        if (pevtcmd == NULL) {
                kfree(pcmd_obj);
                return;
@@ -4854,11 +4854,11 @@ void survey_timer_hdl(void *function_context)
                        pmlmeext->scan_abort = false;/* reset */
                }
 
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
                if (ph2c == NULL)
                        goto exit_survey_timer_hdl;
 
-               psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL);
+               psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
                if (psurveyPara == NULL) {
                        kfree(ph2c);
                        goto exit_survey_timer_hdl;
index 33ccbbbd8ed6903fb8dd5ff61c26b9c89ca0e94c..d300369977fae5e51834ffd7f06962ea97db6195 100644 (file)
@@ -935,7 +935,7 @@ int rtw_check_bcn_info(struct adapter  *Adapter, u8 *pframe, u32 packet_len)
                return true;
        }
 
-       bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_KERNEL);
+       bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
 
        subtype = GetFrameSubType(pframe) >> 4;
 
index 407a318b09dbe2837dc64573792e6f886cc88d62..2f87150a21b7e2c1b3f09dd5df10d3085c8fda83 100644 (file)
@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
        {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
        {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+       {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
        {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {}      /* Terminating entry */
 };
index 1ab0018271c5c622c0b7556fb92a3a1d9ad38e5b..ad09e51ffae4d097109241d9a19b97c97858109b 100644 (file)
@@ -50,15 +50,14 @@ struct cpufreq_cooling_device {
        unsigned int cpufreq_state;
        unsigned int cpufreq_val;
        struct cpumask allowed_cpus;
+       struct list_head node;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
 static unsigned int cpufreq_dev_count;
 
-/* notify_table passes value to the CPUFREQ_ADJUST callback function. */
-#define NOTIFY_INVALID NULL
-static struct cpufreq_cooling_device *notify_device;
+static LIST_HEAD(cpufreq_dev_list);
 
 /**
  * get_idr - function to get a unique id.
@@ -287,15 +286,12 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
 
        cpufreq_device->cpufreq_state = cooling_state;
        cpufreq_device->cpufreq_val = clip_freq;
-       notify_device = cpufreq_device;
 
        for_each_cpu(cpuid, mask) {
                if (is_cpufreq_valid(cpuid))
                        cpufreq_update_policy(cpuid);
        }
 
-       notify_device = NOTIFY_INVALID;
-
        return 0;
 }
 
@@ -316,21 +312,28 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 {
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;
+       struct cpufreq_cooling_device *cpufreq_dev;
 
-       if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
+       if (event != CPUFREQ_ADJUST)
                return 0;
 
-       if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
-               max_freq = notify_device->cpufreq_val;
-       else
-               return 0;
+       mutex_lock(&cooling_cpufreq_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (!cpumask_test_cpu(policy->cpu,
+                                       &cpufreq_dev->allowed_cpus))
+                       continue;
+
+               if (!cpufreq_dev->cpufreq_val)
+                       cpufreq_dev->cpufreq_val = get_cpu_frequency(
+                                       cpumask_any(&cpufreq_dev->allowed_cpus),
+                                       cpufreq_dev->cpufreq_state);
 
-       /* Never exceed user_policy.max */
-       if (max_freq > policy->user_policy.max)
-               max_freq = policy->user_policy.max;
+               max_freq = cpufreq_dev->cpufreq_val;
 
-       if (policy->max != max_freq)
-               cpufreq_verify_within_limits(policy, 0, max_freq);
+               if (policy->max != max_freq)
+                       cpufreq_verify_within_limits(policy, 0, max_freq);
+       }
+       mutex_unlock(&cooling_cpufreq_lock);
 
        return 0;
 }
@@ -486,6 +489,7 @@ __cpufreq_cooling_register(struct device_node *np,
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
        cpufreq_dev_count++;
+       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
 
        mutex_unlock(&cooling_cpufreq_lock);
 
@@ -549,6 +553,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
        cpufreq_dev = cdev->devdata;
        mutex_lock(&cooling_cpufreq_lock);
+       list_del(&cpufreq_dev->node);
        cpufreq_dev_count--;
 
        /* Unregister the notifier for the last cpufreq cooling device */
index 3f5ad25ddca811cf5a9c61509af9ac8c89550b28..b6be572704a4c7ff97055f1cb273ff3016399469 100644 (file)
@@ -417,13 +417,10 @@ void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
 
        th_zone = sensor_conf->pzone_data;
 
-       if (th_zone->therm_dev)
-               thermal_zone_device_unregister(th_zone->therm_dev);
+       thermal_zone_device_unregister(th_zone->therm_dev);
 
-       for (i = 0; i < th_zone->cool_dev_size; i++) {
-               if (th_zone->cool_dev[i])
-                       cpufreq_cooling_unregister(th_zone->cool_dev[i]);
-       }
+       for (i = 0; i < th_zone->cool_dev_size; ++i)
+               cpufreq_cooling_unregister(th_zone->cool_dev[i]);
 
        dev_info(sensor_conf->dev,
                "Exynos: Kernel Thermal management unregistered\n");
index 90163b384660247b343d9fc551dfe067128d6219..d1ec5804c0bb94cebeb22d5038b4861393047ba0 100644 (file)
@@ -275,6 +275,7 @@ int st_thermal_unregister(struct platform_device *pdev)
 }
 EXPORT_SYMBOL_GPL(st_thermal_unregister);
 
+#ifdef CONFIG_PM_SLEEP
 static int st_thermal_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -305,6 +306,8 @@ static int st_thermal_resume(struct device *dev)
 
        return 0;
 }
+#endif
+
 SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
 EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
 
index 56982da4a9e9f77ac4fd062f95c00724cc7ade34..bf355050eab695f50c6220589faf69a0b9e3b6b6 100644 (file)
@@ -240,32 +240,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int of_serial_suspend(struct device *dev)
-{
-       struct of_serial_info *info = dev_get_drvdata(dev);
-
-       serial8250_suspend_port(info->line);
-       if (info->clk)
-               clk_disable_unprepare(info->clk);
-
-       return 0;
-}
-
-static int of_serial_resume(struct device *dev)
-{
-       struct of_serial_info *info = dev_get_drvdata(dev);
-
-       if (info->clk)
-               clk_prepare_enable(info->clk);
-
-       serial8250_resume_port(info->line);
-
-       return 0;
-}
-#endif
-static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
-
 /*
  * A few common types, add more as needed.
  */
@@ -297,7 +271,6 @@ static struct platform_driver of_platform_serial_driver = {
                .name = "of_serial",
                .owner = THIS_MODULE,
                .of_match_table = of_platform_serial_table,
-               .pm = &of_serial_pm_ops,
        },
        .probe = of_platform_serial_probe,
        .remove = of_platform_serial_remove,
index 39b4081b632df2ce600501add8c011c7dbde75b3..96fafed92b76b0972401a13b4eb67a5b1dfdddb1 100644 (file)
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Microsoft Wireless Laser Mouse 6000 Receiver */
+       { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Microsoft LifeCam-VX700 v2.0 */
        { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 711b23019d541f1fcc589e93a22d80ef677ec207..df38e7ef49761ce87f3532c37c556a1399ab8199 100644 (file)
@@ -791,6 +791,10 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
 
        trb = dwc->ep0_trb;
 
+       r = next_request(&ep0->request_list);
+       if (!r)
+               return;
+
        status = DWC3_TRB_SIZE_TRBSTS(trb->size);
        if (status == DWC3_TRBSTS_SETUP_PENDING) {
                dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
@@ -801,10 +805,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
                return;
        }
 
-       r = next_request(&ep0->request_list);
-       if (!r)
-               return;
-
        ur = &r->request;
 
        length = trb->size & DWC3_TRB_SIZE_MASK;
index 696160d48ae8521651f4f313ee9998288c245ab4..388cfd83b6b667a8e40dffc6c61d9257839f2f81 100644 (file)
@@ -22,7 +22,6 @@
 
 
 #include <linux/slab.h>
-#include <linux/device.h>
 #include <asm/unaligned.h>
 
 #include "xhci.h"
@@ -1149,9 +1148,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
                 * is enabled, so also enable remote wake here.
                 */
-               if (hcd->self.root_hub->do_remote_wakeup
-                               && device_may_wakeup(hcd->self.controller)) {
-
+               if (hcd->self.root_hub->do_remote_wakeup) {
                        if (t1 & PORT_CONNECT) {
                                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
                                t2 &= ~PORT_WKCONN_E;
index 9a69b1f1b300889d56200ac35d6d1cf22195c588..142b601f95636fdff622bca8c4fb1a9aef87093b 100644 (file)
@@ -281,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
        if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                pdev->no_d3cold = true;
 
-       return xhci_suspend(xhci);
+       return xhci_suspend(xhci, do_wakeup);
 }
 
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
index 3d78b0cd674b4cd07485dfe493b3cd67d0253fdf..646300cbe5f75d34fabf3fd4d13d1100c52e97c0 100644 (file)
@@ -204,7 +204,15 @@ static int xhci_plat_suspend(struct device *dev)
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-       return xhci_suspend(xhci);
+       /*
+        * xhci_suspend() needs `do_wakeup` to know whether host is allowed
+        * to do wakeup during suspend. Since xhci_plat_suspend is currently
+        * only designed for system suspend, device_may_wakeup() is enough
+        * to determine whether host is allowed to do wakeup. Need to
+        * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
+        * when it also applies to runtime suspend.
+        */
+       return xhci_suspend(xhci, device_may_wakeup(dev));
 }
 
 static int xhci_plat_resume(struct device *dev)
index bc6fcbc16f61ec820ba93d5fb6700cfcbd0ae2a2..06433aec81d71511f0583a1099d42d9977f8da3b 100644 (file)
@@ -1067,9 +1067,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
-               /* Clear our internal halted state and restart the ring(s) */
+               /* Clear our internal halted state */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
-               ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
 }
 
@@ -1823,22 +1822,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                ep->stopped_td = td;
                return 0;
        } else {
-               if (trb_comp_code == COMP_STALL) {
-                       /* The transfer is completed from the driver's
-                        * perspective, but we need to issue a set dequeue
-                        * command for this stalled endpoint to move the dequeue
-                        * pointer past the TD.  We can't do that here because
-                        * the halt condition must be cleared first.  Let the
-                        * USB class driver clear the stall later.
-                        */
-                       ep->stopped_td = td;
-                       ep->stopped_stream = ep_ring->stream_id;
-               } else if (xhci_requires_manual_halt_cleanup(xhci,
-                                       ep_ctx, trb_comp_code)) {
-                       /* Other types of errors halt the endpoint, but the
-                        * class driver doesn't call usb_reset_endpoint() unless
-                        * the error is -EPIPE.  Clear the halted status in the
-                        * xHCI hardware manually.
+               if (trb_comp_code == COMP_STALL ||
+                   xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+                                                     trb_comp_code)) {
+                       /* Issue a reset endpoint command to clear the host side
+                        * halt, followed by a set dequeue command to move the
+                        * dequeue pointer past the TD.
+                        * The class driver clears the device side halt later.
                         */
                        xhci_cleanup_halted_endpoint(xhci,
                                        slot_id, ep_index, ep_ring->stream_id,
@@ -1958,9 +1948,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                else
                        td->urb->actual_length = 0;
 
-               xhci_cleanup_halted_endpoint(xhci,
-                       slot_id, ep_index, 0, td, event_trb);
-               return finish_td(xhci, td, event_trb, event, ep, status, true);
+               return finish_td(xhci, td, event_trb, event, ep, status, false);
        }
        /*
         * Did we transfer any data, despite the errors that might have
@@ -2519,17 +2507,8 @@ cleanup:
                if (ret) {
                        urb = td->urb;
                        urb_priv = urb->hcpriv;
-                       /* Leave the TD around for the reset endpoint function
-                        * to use(but only if it's not a control endpoint,
-                        * since we already queued the Set TR dequeue pointer
-                        * command for stalled control endpoints).
-                        */
-                       if (usb_endpoint_xfer_control(&urb->ep->desc) ||
-                               (trb_comp_code != COMP_STALL &&
-                                       trb_comp_code != COMP_BABBLE))
-                               xhci_urb_free_priv(xhci, urb_priv);
-                       else
-                               kfree(urb_priv);
+
+                       xhci_urb_free_priv(xhci, urb_priv);
 
                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
                        if ((urb->actual_length != urb->transfer_buffer_length &&
index 2a5d45b4cb15ef30d82294de6c5d8e015449a383..033b46c470bdff8120b1e903ee3debbb9b998218 100644 (file)
@@ -35,6 +35,8 @@
 #define DRIVER_AUTHOR "Sarah Sharp"
 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
 
+#define        PORT_WAKE_BITS  (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
+
 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
 static int link_quirk;
 module_param(link_quirk, int, S_IRUGO | S_IWUSR);
@@ -851,13 +853,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        xhci_set_cmd_ring_deq(xhci);
 }
 
+static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
+{
+       int port_index;
+       __le32 __iomem **port_array;
+       unsigned long flags;
+       u32 t1, t2;
+
+       spin_lock_irqsave(&xhci->lock, flags);
+
+       /* disable usb3 ports Wake bits */
+       port_index = xhci->num_usb3_ports;
+       port_array = xhci->usb3_ports;
+       while (port_index--) {
+               t1 = readl(port_array[port_index]);
+               t1 = xhci_port_state_to_neutral(t1);
+               t2 = t1 & ~PORT_WAKE_BITS;
+               if (t1 != t2)
+                       writel(t2, port_array[port_index]);
+       }
+
+       /* disable usb2 ports Wake bits */
+       port_index = xhci->num_usb2_ports;
+       port_array = xhci->usb2_ports;
+       while (port_index--) {
+               t1 = readl(port_array[port_index]);
+               t1 = xhci_port_state_to_neutral(t1);
+               t2 = t1 & ~PORT_WAKE_BITS;
+               if (t1 != t2)
+                       writel(t2, port_array[port_index]);
+       }
+
+       spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
 /*
  * Stop HC (not bus-specific)
  *
  * This is called when the machine transition into S3/S4 mode.
  *
  */
-int xhci_suspend(struct xhci_hcd *xhci)
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 {
        int                     rc = 0;
        unsigned int            delay = XHCI_MAX_HALT_USEC;
@@ -868,6 +904,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
                        xhci->shared_hcd->state != HC_STATE_SUSPENDED)
                return -EINVAL;
 
+       /* Clear root port wake on bits if wakeup not allowed. */
+       if (!do_wakeup)
+               xhci_disable_port_wake_on_bits(xhci);
+
        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2912,68 +2952,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
        }
 }
 
-/* Deal with stalled endpoints.  The core should have sent the control message
- * to clear the halt condition.  However, we need to make the xHCI hardware
- * reset its sequence number, since a device will expect a sequence number of
- * zero after the halt condition is cleared.
+/* Called when clearing a halted device. The core should have sent the control
+ * message to clear the device halt condition. The host side of the halt should
+ * already be cleared with a reset endpoint command issued when the STALL tx
+ * event was received.
+ *
  * Context: in_interrupt
  */
+
 void xhci_endpoint_reset(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep)
 {
        struct xhci_hcd *xhci;
-       struct usb_device *udev;
-       unsigned int ep_index;
-       unsigned long flags;
-       int ret;
-       struct xhci_virt_ep *virt_ep;
-       struct xhci_command *command;
 
        xhci = hcd_to_xhci(hcd);
-       udev = (struct usb_device *) ep->hcpriv;
-       /* Called with a root hub endpoint (or an endpoint that wasn't added
-        * with xhci_add_endpoint()
-        */
-       if (!ep->hcpriv)
-               return;
-       ep_index = xhci_get_endpoint_index(&ep->desc);
-       virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
-       if (!virt_ep->stopped_td) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                       "Endpoint 0x%x not halted, refusing to reset.",
-                       ep->desc.bEndpointAddress);
-               return;
-       }
-       if (usb_endpoint_xfer_control(&ep->desc)) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                               "Control endpoint stall already handled.");
-               return;
-       }
 
-       command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
-       if (!command)
-               return;
-
-       xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                       "Queueing reset endpoint command");
-       spin_lock_irqsave(&xhci->lock, flags);
-       ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
        /*
-        * Can't change the ring dequeue pointer until it's transitioned to the
-        * stopped state, which is only upon a successful reset endpoint
-        * command.  Better hope that last command worked!
+        * We might need to implement the config ep cmd per the xhci 4.8.1 note:
+        * The Reset Endpoint Command may only be issued to endpoints in the
+        * Halted state. If software wishes to reset the Data Toggle or Sequence
+        * Number of an endpoint that isn't in the Halted state, then software
+        * may issue a Configure Endpoint Command with the Drop and Add bits set
+        * for the target endpoint that is in the Stopped state.
         */
-       if (!ret) {
-               xhci_cleanup_stalled_ring(xhci, udev, ep_index);
-               kfree(virt_ep->stopped_td);
-               xhci_ring_cmd_db(xhci);
-       }
-       virt_ep->stopped_td = NULL;
-       virt_ep->stopped_stream = 0;
-       spin_unlock_irqrestore(&xhci->lock, flags);
 
-       if (ret)
-               xhci_warn(xhci, "FIXME allocate a new ring segment\n");
+       /* For now, just print a debug message to follow the situation */
+       xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
+                ep->desc.bEndpointAddress);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
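
The xhci_disable_port_wake_on_bits() hunk above follows a classic read-modify-write pattern: read PORTSC, neutralize the value so write-1-to-clear status bits are not cleared by the write-back, strip the wake-enable bits, and write only if something actually changed. A minimal standalone sketch of that pattern is shown below; the register layout, masks and helper names are illustrative stand-ins, not the real xHCI definitions.

/*
 * Sketch of the read-modify-write pattern used by
 * xhci_disable_port_wake_on_bits().  The FAKE_* masks are made up for the
 * illustration; the real PORTSC bit definitions live in xhci.h.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PORT_WAKE_BITS     0x0E000000u     /* hypothetical WKOC|WKDISC|WKCONN */
#define FAKE_PORT_RW1C_BITS     0x00FE0000u     /* hypothetical change/status bits */

static uint32_t port_state_to_neutral(uint32_t v)
{
        /* Writing 1 to an RW1C bit clears it, so mask those bits out first. */
        return v & ~FAKE_PORT_RW1C_BITS;
}

static void disable_port_wake(volatile uint32_t *portsc)
{
        uint32_t t1 = *portsc;
        uint32_t t2 = port_state_to_neutral(t1) & ~FAKE_PORT_WAKE_BITS;

        if (t1 != t2)           /* skip the write-back if nothing changes */
                *portsc = t2;
}

int main(void)
{
        uint32_t fake_portsc = 0x0E220001u;     /* wake bits plus some status bits */

        disable_port_wake(&fake_portsc);
        printf("portsc after: 0x%08" PRIx32 "\n", fake_portsc);
        return 0;
}
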
index df76d642e7190bd04854a10024c06714dd7e48d0..d745715a1e2f53648b1e1c2b1288f9c963bb42be 100644 (file)
@@ -1746,7 +1746,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
 void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *));
 
 #ifdef CONFIG_PM
-int xhci_suspend(struct xhci_hcd *xhci);
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
 #else
 #define        xhci_suspend    NULL
index cfd009dc401826cc8e052325249c791bf8a959d6..6c4eb3cf5efd599653641e5d96d20b05610a6ed5 100644 (file)
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
        { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+       { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
index 0dad8ce5a60946431e41f38683971dfc8f1dab13..1ebb351b9e9a59c9dbd90ffda56cae1769de764e 100644 (file)
@@ -470,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
index 6786b705ccf606ca47cb471d76b25c4b2981bf10..e52409c9be999f817cbdb627f4be8ec6f127cbe6 100644 (file)
 #define BAYER_CONTOUR_CABLE_PID        0x6001
 
 /*
- * The following are the values for the Matrix Orbital FTDI Range
- * Anything in this range will use an FT232RL.
+ * Matrix Orbital Intelligent USB displays.
+ * http://www.matrixorbital.com
  */
 #define MTXORB_VID                     0x1B3D
 #define MTXORB_FTDI_RANGE_0100_PID     0x0100
 #define MTXORB_FTDI_RANGE_01FD_PID     0x01FD
 #define MTXORB_FTDI_RANGE_01FE_PID     0x01FE
 #define MTXORB_FTDI_RANGE_01FF_PID     0x01FF
-
-
+#define MTXORB_FTDI_RANGE_4701_PID     0x4701
+#define MTXORB_FTDI_RANGE_9300_PID     0x9300
+#define MTXORB_FTDI_RANGE_9301_PID     0x9301
+#define MTXORB_FTDI_RANGE_9302_PID     0x9302
+#define MTXORB_FTDI_RANGE_9303_PID     0x9303
+#define MTXORB_FTDI_RANGE_9304_PID     0x9304
+#define MTXORB_FTDI_RANGE_9305_PID     0x9305
+#define MTXORB_FTDI_RANGE_9306_PID     0x9306
+#define MTXORB_FTDI_RANGE_9307_PID     0x9307
+#define MTXORB_FTDI_RANGE_9308_PID     0x9308
+#define MTXORB_FTDI_RANGE_9309_PID     0x9309
+#define MTXORB_FTDI_RANGE_930A_PID     0x930A
+#define MTXORB_FTDI_RANGE_930B_PID     0x930B
+#define MTXORB_FTDI_RANGE_930C_PID     0x930C
+#define MTXORB_FTDI_RANGE_930D_PID     0x930D
+#define MTXORB_FTDI_RANGE_930E_PID     0x930E
+#define MTXORB_FTDI_RANGE_930F_PID     0x930F
+#define MTXORB_FTDI_RANGE_9310_PID     0x9310
+#define MTXORB_FTDI_RANGE_9311_PID     0x9311
+#define MTXORB_FTDI_RANGE_9312_PID     0x9312
+#define MTXORB_FTDI_RANGE_9313_PID     0x9313
+#define MTXORB_FTDI_RANGE_9314_PID     0x9314
+#define MTXORB_FTDI_RANGE_9315_PID     0x9315
+#define MTXORB_FTDI_RANGE_9316_PID     0x9316
+#define MTXORB_FTDI_RANGE_9317_PID     0x9317
+#define MTXORB_FTDI_RANGE_9318_PID     0x9318
+#define MTXORB_FTDI_RANGE_9319_PID     0x9319
+#define MTXORB_FTDI_RANGE_931A_PID     0x931A
+#define MTXORB_FTDI_RANGE_931B_PID     0x931B
+#define MTXORB_FTDI_RANGE_931C_PID     0x931C
+#define MTXORB_FTDI_RANGE_931D_PID     0x931D
+#define MTXORB_FTDI_RANGE_931E_PID     0x931E
+#define MTXORB_FTDI_RANGE_931F_PID     0x931F
 
 /*
  * The Mobility Lab (TML)
index 93cb7cebda62760bcaae46f3710e7477ff59a507..077c714f1285171ee3b9e4c418e0df42f60cd42c 100644 (file)
@@ -311,24 +311,30 @@ static void       usa26_indat_callback(struct urb *urb)
                if ((data[0] & 0x80) == 0) {
                        /* no errors on individual bytes, only
                           possible overrun err */
-                       if (data[0] & RXERROR_OVERRUN)
-                               err = TTY_OVERRUN;
-                       else
-                               err = 0;
+                       if (data[0] & RXERROR_OVERRUN) {
+                               tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                       }
                        for (i = 1; i < urb->actual_length ; ++i)
-                               tty_insert_flip_char(&port->port, data[i], err);
+                               tty_insert_flip_char(&port->port, data[i],
+                                                               TTY_NORMAL);
                } else {
                        /* some bytes had errors, every byte has status */
                        dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
                        for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                               int stat = data[i], flag = 0;
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
+
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                flag);
                        }
@@ -649,14 +655,19 @@ static void       usa49_indat_callback(struct urb *urb)
                } else {
                        /* some bytes had errors, every byte has status */
                        for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                               int stat = data[i], flag = 0;
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
+
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                flag);
                        }
@@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb)
                         */
                        for (x = 0; x + 1 < len &&
                                    i + 1 < urb->actual_length; x += 2) {
-                               int stat = data[i], flag = 0;
+                               int stat = data[i];
+                               int flag = TTY_NORMAL;
 
-                               if (stat & RXERROR_OVERRUN)
-                                       flag |= TTY_OVERRUN;
-                               if (stat & RXERROR_FRAMING)
-                                       flag |= TTY_FRAME;
-                               if (stat & RXERROR_PARITY)
-                                       flag |= TTY_PARITY;
+                               if (stat & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                /* XXX should handle break (0x10) */
+                               if (stat & RXERROR_PARITY)
+                                       flag = TTY_PARITY;
+                               else if (stat & RXERROR_FRAMING)
+                                       flag = TTY_FRAME;
+
                                tty_insert_flip_char(&port->port, data[i+1],
                                                     flag);
                                i += 2;
@@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb)
                        if ((data[0] & 0x80) == 0) {
                                /* no errors on individual bytes, only
                                   possible overrun err*/
-                               if (data[0] & RXERROR_OVERRUN)
-                                       err = TTY_OVERRUN;
-                               else
-                                       err = 0;
+                               if (data[0] & RXERROR_OVERRUN) {
+                                       tty_insert_flip_char(&port->port, 0,
+                                                               TTY_OVERRUN);
+                               }
                                for (i = 1; i < urb->actual_length ; ++i)
                                        tty_insert_flip_char(&port->port,
-                                                       data[i], err);
+                                                       data[i], TTY_NORMAL);
                        }  else {
                        /* some bytes had errors, every byte has status */
                                dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
                                for (i = 0; i + 1 < urb->actual_length; i += 2) {
-                                       int stat = data[i], flag = 0;
-                                       if (stat & RXERROR_OVERRUN)
-                                               flag |= TTY_OVERRUN;
-                                       if (stat & RXERROR_FRAMING)
-                                               flag |= TTY_FRAME;
-                                       if (stat & RXERROR_PARITY)
-                                               flag |= TTY_PARITY;
+                                       int stat = data[i];
+                                       int flag = TTY_NORMAL;
+
+                                       if (stat & RXERROR_OVERRUN) {
+                                               tty_insert_flip_char(
+                                                               &port->port, 0,
+                                                               TTY_OVERRUN);
+                                       }
                                        /* XXX should handle break (0x10) */
+                                       if (stat & RXERROR_PARITY)
+                                               flag = TTY_PARITY;
+                                       else if (stat & RXERROR_FRAMING)
+                                               flag = TTY_FRAME;
+
                                        tty_insert_flip_char(&port->port,
                                                        data[i+1], flag);
                                }
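
All the keyspan hunks above switch to the same reporting scheme: a buffer overrun is pushed as a separate NUL character flagged TTY_OVERRUN (the overrun describes data that was lost, not the byte that did arrive), and each received byte then carries exactly one flag, with parity taking precedence over framing. A small standalone sketch of that scheme follows; the flag constants and the push_char() helper are stand-ins for the tty layer, not its real API.

/* Sketch of the per-byte error reporting used in the hunks above. */
#include <stdio.h>

enum { FLAG_NORMAL, FLAG_OVERRUN, FLAG_PARITY, FLAG_FRAME };

#define ERR_OVERRUN     0x01
#define ERR_PARITY      0x02
#define ERR_FRAMING     0x04

static void push_char(unsigned char ch, int flag)
{
        printf("char 0x%02x flag %d\n", ch, flag);
}

static void report_byte(unsigned char stat, unsigned char data)
{
        int flag = FLAG_NORMAL;

        /* Lost characters are reported separately from the byte we did get. */
        if (stat & ERR_OVERRUN)
                push_char(0, FLAG_OVERRUN);

        if (stat & ERR_PARITY)
                flag = FLAG_PARITY;
        else if (stat & ERR_FRAMING)
                flag = FLAG_FRAME;

        push_char(data, flag);
}

int main(void)
{
        report_byte(ERR_OVERRUN | ERR_FRAMING, 'A');    /* overrun + framing error */
        return 0;
}
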
index a7fe664b6b7d164e628c5b466e548efe7e10e7ec..70a098de429fc39934ef8808d3e6c5011f063352 100644 (file)
@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
                        if (*tty_flag == TTY_NORMAL)
                                *tty_flag = TTY_FRAME;
                }
-               if (lsr & UART_LSR_OE){
+               if (lsr & UART_LSR_OE) {
                        port->icount.overrun++;
-                       if (*tty_flag == TTY_NORMAL)
-                               *tty_flag = TTY_OVERRUN;
+                       tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
                }
        }
 
@@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb)
        if ((len >= 4) &&
            (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
            ((packet[2] == 0x00) || (packet[2] == 0x01))) {
-               if (packet[2] == 0x00) {
+               if (packet[2] == 0x00)
                        ssu100_update_lsr(port, packet[3], &flag);
-                       if (flag == TTY_OVERRUN)
-                               tty_insert_flip_char(&port->port, 0,
-                                               TTY_OVERRUN);
-               }
                if (packet[2] == 0x01)
                        ssu100_update_msr(port, packet[3]);
 
index 2fefaf923e4a2ecc6e3fd39b32f05631478806ed..18a283d6de1c8bd18663b57bbf7499510c49fa2d 100644 (file)
@@ -103,3 +103,10 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                "VL711",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
+               "Hitachi",
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
index 84a751005f5b8ad0f0f5cd6e3c5ec67df5f7440c..14b93159ef83a140483bb31a4a6c70286d209ed8 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt;
 static const struct file_operations aio_ring_fops;
 static const struct address_space_operations aio_ctx_aops;
 
+/* Backing dev info for the aio fs:
+ * no dirty page accounting or writeback happens.
+ */
+static struct backing_dev_info aio_fs_backing_dev_info = {
+       .name           = "aiofs",
+       .state          = 0,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
+};
+
 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 {
        struct qstr this = QSTR_INIT("[aio]", 5);
@@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 
        inode->i_mapping->a_ops = &aio_ctx_aops;
        inode->i_mapping->private_data = ctx;
+       inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
        inode->i_size = PAGE_SIZE * nr_pages;
 
        path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -220,6 +230,9 @@ static int __init aio_setup(void)
        if (IS_ERR(aio_mnt))
                panic("Failed to create aio fs mount.");
 
+       if (bdi_init(&aio_fs_backing_dev_info))
+               panic("Failed to init aio fs backing dev info.");
+
        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
@@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = {
        .mmap = aio_ring_mmap,
 };
 
-static int aio_set_page_dirty(struct page *page)
-{
-       return 0;
-}
-
 #if IS_ENABLED(CONFIG_MIGRATION)
 static int aio_migratepage(struct address_space *mapping, struct page *new,
                        struct page *old, enum migrate_mode mode)
@@ -357,7 +365,7 @@ out:
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
-       .set_page_dirty = aio_set_page_dirty,
+       .set_page_dirty = __set_page_dirty_no_writeback,
 #if IS_ENABLED(CONFIG_MIGRATION)
        .migratepage    = aio_migratepage,
 #endif
@@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx)
                pr_debug("pid(%d) page[%d]->count=%d\n",
                         current->pid, i, page_count(page));
                SetPageUptodate(page);
-               SetPageDirty(page);
                unlock_page(page);
 
                ctx->ring_pages[i] = page;
index d3220d31d3cbf0e653898d15816f5895045c06d5..dcd9be32ac579451597dcf61e97b54631a69aa68 100644 (file)
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                bytes = min(bytes, working_bytes);
                kaddr = kmap_atomic(page_out);
                memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-               if (*pg_index == (vcnt - 1) && *pg_offset == 0)
-                       memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
                kunmap_atomic(kaddr);
                flush_dcache_page(page_out);
 
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
        return 1;
 }
+
+/*
+ * When uncompressing data, we need to make sure to zero any parts of
+ * the biovec that were not filled in by the decompression code.  pg_index
+ * and pg_offset indicate the last page and the last offset of that page
+ * that have been filled in.  This will zero everything remaining in the
+ * biovec.
+ */
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+                                  unsigned long pg_index,
+                                  unsigned long pg_offset)
+{
+       while (pg_index < vcnt) {
+               struct page *page = bvec[pg_index].bv_page;
+               unsigned long off = bvec[pg_index].bv_offset;
+               unsigned long len = bvec[pg_index].bv_len;
+
+               if (pg_offset < off)
+                       pg_offset = off;
+               if (pg_offset < off + len) {
+                       unsigned long bytes = off + len - pg_offset;
+                       char *kaddr;
+
+                       kaddr = kmap_atomic(page);
+                       memset(kaddr + pg_offset, 0, bytes);
+                       kunmap_atomic(kaddr);
+               }
+               pg_index++;
+               pg_offset = 0;
+       }
+}
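
btrfs_clear_biovec_end() above zeroes everything in the biovec past the last byte the decompressor produced, so stale page contents never reach the reader. A simplified userspace sketch of the same loop follows, using plain buffers and ignoring bv_offset; the sizes and names are illustrative only.

/* Sketch: zero the tail of a set of fixed-size buffers from a given point. */
#include <stdio.h>
#include <string.h>

#define NPAGES  3
#define PAGE_SZ 8

static char pages[NPAGES][PAGE_SZ];

static void clear_tail(unsigned long pg_index, unsigned long pg_offset)
{
        while (pg_index < NPAGES) {
                if (pg_offset < PAGE_SZ)
                        memset(&pages[pg_index][pg_offset], 0,
                               PAGE_SZ - pg_offset);
                pg_index++;
                pg_offset = 0;  /* every later page is cleared in full */
        }
}

int main(void)
{
        memset(pages, 'x', sizeof(pages));      /* pretend stale data */
        clear_tail(1, 3);       /* decompression stopped at page 1, byte 3 */

        for (int i = 0; i < NPAGES; i++)
                printf("page %d: %.8s\n", i, pages[i]);
        return 0;
}
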
index 0c803b4fbf93dc8062e644952abb7f36ff0e8504..d181f70caae01471ca80e181818a833b41f057de 100644 (file)
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags);
-
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+                                  unsigned long pg_index,
+                                  unsigned long pg_offset);
 struct btrfs_compress_op {
        struct list_head *(*alloc_workspace)(void);
 
index 78285f30909edd09f19cc6eb48985d47eed3c565..617553cdb7d3b36b1b8ca6a87b28526c5a060b4d 100644 (file)
@@ -373,6 +373,8 @@ cont:
        }
 done:
        kunmap(pages_in[page_in_index]);
+       if (!ret)
+               btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
        return ret;
 }
 
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
                goto out;
        }
 
+       /*
+        * the caller is already checking against PAGE_SIZE, but let's
+        * move this check closer to the memcpy/memset
+        */
+       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
        kaddr = kmap_atomic(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);
+
+       /*
+        * btrfs_getblock already zeroes the tail of the page too, but
+        * this will cover anything missing from the decompressed
+        * data.
+        */
+       if (bytes < destlen)
+               memset(kaddr+bytes, 0, destlen-bytes);
        kunmap_atomic(kaddr);
 out:
        return ret;
index 759fa4e2de8fec28d3f6448456e1e12cec1add17..fb22fd8d8fb8fad73cb4d2b63d4d525da3eea876 100644 (file)
@@ -299,6 +299,8 @@ done:
        zlib_inflateEnd(&workspace->strm);
        if (data_in)
                kunmap(pages_in[page_in_index]);
+       if (!ret)
+               btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
        return ret;
 }
 
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        int wbits = MAX_WBITS;
-       unsigned long bytes_left = destlen;
+       unsigned long bytes_left;
        unsigned long total_out = 0;
+       unsigned long pg_offset = 0;
        char *kaddr;
 
+       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+       bytes_left = destlen;
+
        workspace->strm.next_in = data_in;
        workspace->strm.avail_in = srclen;
        workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
                unsigned long buf_start;
                unsigned long buf_offset;
                unsigned long bytes;
-               unsigned long pg_offset = 0;
 
                ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
                if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@ next:
                ret = 0;
 
        zlib_inflateEnd(&workspace->strm);
+
+       /*
+        * this should only happen if zlib returned fewer bytes than we
+        * expected.  btrfs_get_block is responsible for zeroing from the
+        * end of the inline extent (destlen) to the end of the page
+        */
+       if (pg_offset < destlen) {
+               kaddr = kmap_atomic(dest_page);
+               memset(kaddr + pg_offset, 0, destlen - pg_offset);
+               kunmap_atomic(kaddr);
+       }
        return ret;
 }
 
index ed2b1151b171f275b6a5ebeea1946860876fb391..7cbdf1b2e4abd7286b6033c66a9d999e735737c9 100644 (file)
@@ -774,8 +774,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
 {
        if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
                rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
-               dprintk("%s slot is busy\n", __func__);
-               return false;
+               /* Race breaker */
+               if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+                       dprintk("%s slot is busy\n", __func__);
+                       return false;
+               }
+               rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
        }
        return true;
 }
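
The nfsd41_cb_get_slot() change above adds a race breaker: after queueing the task because the slot looked busy, the busy bit is tested again, and if the holder released the slot in the meantime the just-queued task is woken so it cannot sleep forever. A minimal sketch of that double-check idiom using C11 atomics (not the sunrpc wait-queue API) is shown below.

/* Sketch of the test, queue, re-test "race breaker" idiom. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag slot_busy = ATOMIC_FLAG_INIT;

static void sleep_on_queue(void)   { puts("task queued to sleep"); }
static void wake_queued_task(void) { puts("task woken back up"); }

static bool try_get_slot(void)
{
        if (atomic_flag_test_and_set(&slot_busy)) {
                sleep_on_queue();
                /*
                 * Race breaker: the holder may have released the slot between
                 * the first test-and-set and queueing, so check again.
                 */
                if (atomic_flag_test_and_set(&slot_busy))
                        return false;           /* really busy, stay queued */
                wake_queued_task();             /* we own the slot after all */
        }
        return true;
}

int main(void)
{
        printf("first acquire:  %s\n", try_get_slot() ? "got slot" : "busy");
        printf("second acquire: %s\n", try_get_slot() ? "got slot" : "busy");
        return 0;
}
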
index 747f3b95bd11118aa477153fa97594127e2fbc1c..33a46a8dfaf73aaa65ecec7b4603b86ea4e49d83 100644 (file)
@@ -335,12 +335,15 @@ void              nfsd_lockd_shutdown(void);
        (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
-       (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL)
+#define NFSD4_2_SECURITY_ATTRS         FATTR4_WORD2_SECURITY_LABEL
 #else
-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0
+#define NFSD4_2_SECURITY_ATTRS         0
 #endif
 
+#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
+       (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
+       NFSD4_2_SECURITY_ATTRS)
+
 static inline u32 nfsd_suppattrs0(u32 minorversion)
 {
        return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
index a929f86d0dddd52816d6fdfc41905b8fab76da88..d72b5b35f15edd965b89de2c4759bd9bc964f14f 100644 (file)
@@ -60,7 +60,7 @@
 #define ESC1_CLK_SRC                   43
 #define HDMI_CLK_SRC                   44
 #define VSYNC_CLK_SRC                  45
-#define RBCPR_CLK_SRC                  46
+#define MMSS_RBCPR_CLK_SRC             46
 #define RBBMTIMER_CLK_SRC              47
 #define MAPLE_CLK_SRC                  48
 #define VDP_CLK_SRC                    49
index be21af149f119394c68bd8018a8de39f2db067ee..2839c639f0920942d1e835dd464598432e22e974 100644 (file)
@@ -352,7 +352,6 @@ struct clk_divider {
 #define CLK_DIVIDER_READ_ONLY          BIT(5)
 
 extern const struct clk_ops clk_divider_ops;
-extern const struct clk_ops clk_divider_ro_ops;
 struct clk *clk_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
index 503b085b7832f66a9207b2fdcb8a2f3e98a6bf3e..4d078cebafd2dd5f059c65bd75a683a81b80e15f 100644 (file)
@@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 
 
 struct cpufreq_driver {
-       char                    name[CPUFREQ_NAME_LEN];
-       u8                      flags;
-       void                    *driver_data;
+       char            name[CPUFREQ_NAME_LEN];
+       u8              flags;
+       void            *driver_data;
 
        /* needed by all drivers */
-       int     (*init)         (struct cpufreq_policy *policy);
-       int     (*verify)       (struct cpufreq_policy *policy);
+       int             (*init)(struct cpufreq_policy *policy);
+       int             (*verify)(struct cpufreq_policy *policy);
 
        /* define one out of two */
-       int     (*setpolicy)    (struct cpufreq_policy *policy);
+       int             (*setpolicy)(struct cpufreq_policy *policy);
 
        /*
         * On failure, should always restore frequency to policy->restore_freq
         * (i.e. old freq).
         */
-       int     (*target)       (struct cpufreq_policy *policy, /* Deprecated */
-                                unsigned int target_freq,
-                                unsigned int relation);
-       int     (*target_index) (struct cpufreq_policy *policy,
-                                unsigned int index);
+       int             (*target)(struct cpufreq_policy *policy,
+                                 unsigned int target_freq,
+                                 unsigned int relation);       /* Deprecated */
+       int             (*target_index)(struct cpufreq_policy *policy,
+                                       unsigned int index);
        /*
         * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
         * unset.
@@ -252,27 +252,31 @@ struct cpufreq_driver {
         * wish to switch to intermediate frequency for some target frequency.
         * In that case core will directly call ->target_index().
         */
-       unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
-                                        unsigned int index);
-       int     (*target_intermediate)(struct cpufreq_policy *policy,
-                                      unsigned int index);
+       unsigned int    (*get_intermediate)(struct cpufreq_policy *policy,
+                                           unsigned int index);
+       int             (*target_intermediate)(struct cpufreq_policy *policy,
+                                              unsigned int index);
 
        /* should be defined, if possible */
-       unsigned int    (*get)  (unsigned int cpu);
+       unsigned int    (*get)(unsigned int cpu);
 
        /* optional */
-       int     (*bios_limit)   (int cpu, unsigned int *limit);
+       int             (*bios_limit)(int cpu, unsigned int *limit);
+
+       int             (*exit)(struct cpufreq_policy *policy);
+       void            (*stop_cpu)(struct cpufreq_policy *policy);
+       int             (*suspend)(struct cpufreq_policy *policy);
+       int             (*resume)(struct cpufreq_policy *policy);
+
+       /* Will be called after the driver is fully initialized */
+       void            (*ready)(struct cpufreq_policy *policy);
 
-       int     (*exit)         (struct cpufreq_policy *policy);
-       void    (*stop_cpu)     (struct cpufreq_policy *policy);
-       int     (*suspend)      (struct cpufreq_policy *policy);
-       int     (*resume)       (struct cpufreq_policy *policy);
-       struct freq_attr        **attr;
+       struct freq_attr **attr;
 
        /* platform specific boost support code */
-       bool                    boost_supported;
-       bool                    boost_enabled;
-       int     (*set_boost)    (int state);
+       bool            boost_supported;
+       bool            boost_enabled;
+       int             (*set_boost)(int state);
 };
 
 /* flags */
index 8bbd7bc1043d9c4d26ffff38b35b8540093fdb71..03fa332ad2a8cec4e26c212b9333e56f0c6d6169 100644 (file)
@@ -72,7 +72,7 @@ struct iio_event_data {
 
 #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
 
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
 
 #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
 
index ea53b04993f22745028d402e0238a30c91595afb..a6059bdf7b03baa4955c069637f686b3d709d819 100644 (file)
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
        struct hlist_node link;
index 5be8db45e368b23eb4c0d7b16b92f9db49290998..4c8ac5fcc224e2ab4c6af62cd01e130b2bf7dbb1 100644 (file)
@@ -331,6 +331,7 @@ struct pci_dev {
        unsigned int    is_added:1;
        unsigned int    is_busmaster:1; /* device is busmaster */
        unsigned int    no_msi:1;       /* device may not use msi */
+       unsigned int    no_64bit_msi:1; /* device may only use 32-bit MSIs */
        unsigned int    block_cfg_access:1;     /* config space access is blocked */
        unsigned int    broken_parity_status:1; /* Device generates false positive parity */
        unsigned int    irq_reroute_variant:2;  /* device needs IRQ rerouting variant */
index fe7994c48b75685174134e7817bbb20ef375f890..b2828a06a5a63355f1aa2be27e74a472cc99fc62 100644 (file)
@@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
                         unsigned short type, unsigned char protocol,
                         struct net *net);
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
+                   int *addr_len);
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
index e862497f75568d11cd4deb4f5f5a06712f63d6de..8bb00a27e219e902c9bdc2ac52f0a7c3ed53f005 100644 (file)
@@ -184,6 +184,8 @@ struct snd_pcm_ops {
 #define SNDRV_PCM_FMTBIT_DSD_U8                _SNDRV_PCM_FMTBIT(DSD_U8)
 #define SNDRV_PCM_FMTBIT_DSD_U16_LE    _SNDRV_PCM_FMTBIT(DSD_U16_LE)
 #define SNDRV_PCM_FMTBIT_DSD_U32_LE    _SNDRV_PCM_FMTBIT(DSD_U32_LE)
+#define SNDRV_PCM_FMTBIT_DSD_U16_BE    _SNDRV_PCM_FMTBIT(DSD_U16_BE)
+#define SNDRV_PCM_FMTBIT_DSD_U32_BE    _SNDRV_PCM_FMTBIT(DSD_U32_BE)
 
 #ifdef SNDRV_LITTLE_ENDIAN
 #define SNDRV_PCM_FMTBIT_S16           SNDRV_PCM_FMTBIT_S16_LE
index 6ee586728df97a0fc335a3c314e0828ac970023f..941d32f007dc250afb73a8045044d68b2646eecb 100644 (file)
@@ -220,7 +220,9 @@ typedef int __bitwise snd_pcm_format_t;
 #define        SNDRV_PCM_FORMAT_DSD_U8         ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
 #define        SNDRV_PCM_FORMAT_DSD_U16_LE     ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
 #define        SNDRV_PCM_FORMAT_DSD_U32_LE     ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
-#define        SNDRV_PCM_FORMAT_LAST           SNDRV_PCM_FORMAT_DSD_U32_LE
+#define        SNDRV_PCM_FORMAT_DSD_U16_BE     ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
+#define        SNDRV_PCM_FORMAT_DSD_U32_BE     ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
+#define        SNDRV_PCM_FORMAT_LAST           SNDRV_PCM_FORMAT_DSD_U32_BE
 
 #ifdef SNDRV_LITTLE_ENDIAN
 #define        SNDRV_PCM_FORMAT_S16            SNDRV_PCM_FORMAT_S16_LE
index 2ff9706647f2cb9f7930d22d0a3092fb3fe0d225..e5ec470b851f1f55fe37ec4ecaf557d75483af26 100644 (file)
@@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_MODE]      = { .type = NLA_U8 },
        [IFLA_BRPORT_GUARD]     = { .type = NLA_U8 },
        [IFLA_BRPORT_PROTECT]   = { .type = NLA_U8 },
+       [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
        [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 };
index a6882686ca3a10fc3be7ced6299dc7385ffd239d..b9b7dfaf202b9be668bf29153593aa2c7dba86ee 100644 (file)
@@ -2685,13 +2685,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
        int idx = 0;
        u32 portid = NETLINK_CB(cb->skb).portid;
        u32 seq = cb->nlh->nlmsg_seq;
-       struct nlattr *extfilt;
        u32 filter_mask = 0;
 
-       extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
-                                 IFLA_EXT_MASK);
-       if (extfilt)
-               filter_mask = nla_get_u32(extfilt);
+       if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
+               struct nlattr *extfilt;
+
+               extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
+                                         IFLA_EXT_MASK);
+               if (extfilt) {
+                       if (nla_len(extfilt) < sizeof(filter_mask))
+                               return -EINVAL;
+
+                       filter_mask = nla_get_u32(extfilt);
+               }
+       }
 
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
@@ -2798,6 +2805,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_spec) {
                nla_for_each_nested(attr, br_spec, rem) {
                        if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                               if (nla_len(attr) < sizeof(flags))
+                                       return -EINVAL;
+
                                have_flags = true;
                                flags = nla_get_u16(attr);
                                break;
@@ -2868,6 +2878,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_spec) {
                nla_for_each_nested(attr, br_spec, rem) {
                        if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                               if (nla_len(attr) < sizeof(flags))
+                                       return -EINVAL;
+
                                have_flags = true;
                                flags = nla_get_u16(attr);
                                break;
index 8b7fe5b039068559a931b931529f02841cc13fe2..e67da4e6c3240bb20a7d8a03a3b579f7484f629e 100644 (file)
@@ -1386,6 +1386,17 @@ out:
        return pp;
 }
 
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+{
+       if (sk->sk_family == AF_INET)
+               return ip_recv_error(sk, msg, len, addr_len);
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6)
+               return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+#endif
+       return -EINVAL;
+}
+
 static int inet_gro_complete(struct sk_buff *skb, int nhoff)
 {
        __be16 newlen = htons(skb->len - nhoff);
index 3e861011e4a31e57b21c3fa507c178d3693a7970..1a7e979e80ba356f685ecfe020b98855f19db0a3 100644 (file)
@@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .validate       = vti_tunnel_validate,
        .newlink        = vti_newlink,
        .changelink     = vti_changelink,
+       .dellink        = ip_tunnel_dellink,
        .get_size       = vti_get_size,
        .fill_info      = vti_fill_info,
 };
index 57f7c98041394998fe390735aa5b8cd2602b3f90..5d740cccf69ec29d9778ddb0568c726266904ce9 100644 (file)
@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
                                             &ipv6_hdr(skb)->daddr))
                                continue;
 #endif
+               } else {
+                       continue;
                }
 
                if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
@@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (flags & MSG_OOB)
                goto out;
 
-       if (flags & MSG_ERRQUEUE) {
-               if (family == AF_INET) {
-                       return ip_recv_error(sk, msg, len, addr_len);
-#if IS_ENABLED(CONFIG_IPV6)
-               } else if (family == AF_INET6) {
-                       return pingv6_ops.ipv6_recv_error(sk, msg, len,
-                                                         addr_len);
-#endif
-               }
-       }
+       if (flags & MSG_ERRQUEUE)
+               return inet_recv_error(sk, msg, len, addr_len);
 
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
index 39ec0c379545afe07d6016c9180e9c408d22d4df..38c2bcb8dd5da4f2c8dede10942a1388fd3d9954 100644 (file)
@@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        u32 urg_hole = 0;
 
        if (unlikely(flags & MSG_ERRQUEUE))
-               return ip_recv_error(sk, msg, len, addr_len);
+               return inet_recv_error(sk, msg, len, addr_len);
 
        if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
            (sk->sk_state == TCP_ESTABLISHED))
index 9c7d7621466b1241f404a5ca11de809dcff2d02a..147be202429064d03b34405dba575c8d002b267c 100644 (file)
@@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
        if (th->rst)
                return;
 
-       if (skb_rtable(skb)->rt_type != RTN_LOCAL)
+       /* If sk is not NULL, it means we did a successful lookup and incoming
+        * route had to be correct. prequeue might have dropped our dst.
+        */
+       if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;
 
        /* Swap the send and the receive. */
index 4564e1fca3eb42ab23c8370069417a456cdc76eb..0e32d2e1bdbfecabd3ae7e8ddb33a99f51cd7b51 100644 (file)
@@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
 
                skb->protocol = gre_proto;
                /* WCCP version 1 and 2 protocol decoding.
-                * - Change protocol to IP
+                * - Change protocol to IPv6
                 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
                 */
                if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
-                       skb->protocol = htons(ETH_P_IP);
+                       skb->protocol = htons(ETH_P_IPV6);
                        if ((*(h + offset) & 0xF0) != 0x40)
                                offset += 4;
                }
index a071563a7e6e9c1f6d0fa9d8490c1d35ce89b3f7..01e12d0d8fcc9284403629fd17d7de010463d480 100644 (file)
@@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        int nhoff;
 
        if (unlikely(skb_shinfo(skb)->gso_type &
-                    ~(SKB_GSO_UDP |
+                    ~(SKB_GSO_TCPV4 |
+                      SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
index b04ed72c454247886d7d99ae5c9e36e949b48cd1..8db6c98fe21858f4b3f630af277a0137e438aa8d 100644 (file)
@@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
        uh->source = src_port;
 
        uh->len = htons(skb->len);
-       uh->check = 0;
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
                            | IPSKB_REROUTED);
        skb_dst_set(skb, dst);
 
-       udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr,
-                     &sk->sk_v6_daddr, skb->len);
+       udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
 
        __skb_push(skb, sizeof(*ip6h));
        skb_reset_network_header(skb);
index 31089d153fd332136fcb9f89305ad7ab5bfb214d..bcda14de7f84822a17b382a6256e49b665ed7a83 100644 (file)
@@ -905,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
        return vti6_tnl_create2(dev);
 }
 
+static void vti6_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       if (dev != ip6n->fb_tnl_dev)
+               unregister_netdevice_queue(dev, head);
+}
+
 static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
                           struct nlattr *data[])
 {
@@ -980,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
        .setup          = vti6_dev_setup,
        .validate       = vti6_validate,
        .newlink        = vti6_newlink,
+       .dellink        = vti6_dellink,
        .changelink     = vti6_changelink,
        .get_size       = vti6_get_size,
        .fill_info      = vti6_fill_info,
@@ -1020,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net)
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
        dev_net_set(ip6n->fb_tnl_dev, net);
+       ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;
 
        err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
        if (err < 0)
index ace29b60813cf8a1d7182ad2262cbcbd21810fa7..dc495ae2ead05aca0190bcab8c2d95b58beb2ac9 100644 (file)
@@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
        if (th->rst)
                return;
 
-       if (!ipv6_unicast_destination(skb))
+       /* If sk is not NULL, it means we did a successful lookup and incoming
+        * route had to be correct. prequeue might have dropped our dst.
+        */
+       if (!sk && !ipv6_unicast_destination(skb))
                return;
 
 #ifdef CONFIG_TCP_MD5SIG
index 2c699757bccf2fce427c5e825625c90328568967..5016a6929085ebdbf151a1f47582d36195885540 100644 (file)
@@ -611,16 +611,12 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
-
-       /* We have to check the DYING flag after unlink to prevent
-        * a race against nf_ct_get_next_corpse() possibly called from
-        * user context, else we insert an already 'dead' hash, blocking
-        * further use of that particular connection -JM.
-        */
-       nf_ct_del_from_dying_or_unconfirmed_list(ct);
+       /* We have to check the DYING flag inside the lock to prevent
+          a race against nf_ct_get_next_corpse() possibly called from
+          user context, else we insert an already 'dead' hash, blocking
+          further use of that particular connection -JM */
 
        if (unlikely(nf_ct_is_dying(ct))) {
-               nf_ct_add_to_dying_list(ct);
                nf_conntrack_double_unlock(hash, reply_hash);
                local_bh_enable();
                return NF_ACCEPT;
@@ -640,6 +636,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
 
+       nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
index 87d20f48ff06195766e8ecd20a3fcfae5ccae690..07c04a841ba09c5ee5b0bc7718d4e6c87ac561e1 100644 (file)
@@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync)
                __unregister_prot_hook(sk, sync);
 }
 
-static inline __pure struct page *pgv_to_page(void *addr)
+static inline struct page * __pure pgv_to_page(void *addr)
 {
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
index 3f959c681885ba41ccdf6300bb27a35745ec11be..f9c052d508f008c4fb74567ea8cbad13f305d497 100644 (file)
@@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
        xid = *p++;
        calldir = *p;
 
-       if (bc_xprt)
-               req = xprt_lookup_rqst(bc_xprt, xid);
-
-       if (!req) {
-               printk(KERN_NOTICE
-                       "%s: Got unrecognized reply: "
-                       "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
-                       __func__, ntohl(calldir),
-                       bc_xprt, ntohl(xid));
+       if (!bc_xprt)
                return -EAGAIN;
-       }
+       spin_lock_bh(&bc_xprt->transport_lock);
+       req = xprt_lookup_rqst(bc_xprt, xid);
+       if (!req)
+               goto unlock_notfound;
 
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
        /*
@@ -1040,11 +1035,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
        dst = &req->rq_private_buf.head[0];
        src = &rqstp->rq_arg.head[0];
        if (dst->iov_len < src->iov_len)
-               return -EAGAIN; /* whatever; just giving up. */
+               goto unlock_eagain; /* whatever; just giving up. */
        memcpy(dst->iov_base, src->iov_base, src->iov_len);
        xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
        rqstp->rq_arg.len = 0;
+       spin_unlock_bh(&bc_xprt->transport_lock);
        return 0;
+unlock_notfound:
+       printk(KERN_NOTICE
+               "%s: Got unrecognized reply: "
+               "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
+               __func__, ntohl(calldir),
+               bc_xprt, ntohl(xid));
+unlock_eagain:
+       spin_unlock_bh(&bc_xprt->transport_lock);
+       return -EAGAIN;
 }
 
 static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
index 42ded997b223b7ece3d8535000d4defeea2ba8f5..c6ff94ab1ad65a883e5b969437d05afb837e1a02 100644 (file)
@@ -216,6 +216,8 @@ static char *snd_pcm_format_names[] = {
        FORMAT(DSD_U8),
        FORMAT(DSD_U16_LE),
        FORMAT(DSD_U32_LE),
+       FORMAT(DSD_U16_BE),
+       FORMAT(DSD_U32_BE),
 };
 
 const char *snd_pcm_format_name(snd_pcm_format_t format)
index ae7a0feb3b76001f54555187c19343bce352f0c8..ebe8444de6c6ea8f44a5cacfb39b963939d9880d 100644 (file)
@@ -152,6 +152,14 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
                .width = 32, .phys = 32, .le = 1, .signd = 0,
                .silence = { 0x69, 0x69, 0x69, 0x69 },
        },
+       [SNDRV_PCM_FORMAT_DSD_U16_BE] = {
+               .width = 16, .phys = 16, .le = 0, .signd = 0,
+               .silence = { 0x69, 0x69 },
+       },
+       [SNDRV_PCM_FORMAT_DSD_U32_BE] = {
+               .width = 32, .phys = 32, .le = 0, .signd = 0,
+               .silence = { 0x69, 0x69, 0x69, 0x69 },
+       },
        /* FIXME: the following three formats are not defined properly yet */
        [SNDRV_PCM_FORMAT_MPEG] = {
                .le = -1, .signd = -1,
index 16660f312043a71fac284dd7948baab3491d05f3..48b6c5a3884f3b1ed729d542fd286ba2840a938b 100644 (file)
@@ -298,7 +298,8 @@ enum {
 
 /* quirks for ATI/AMD HDMI */
 #define AZX_DCAPS_PRESET_ATI_HDMI \
-       (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+       (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
+        AZX_DCAPS_NO_MSI64)
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
@@ -1486,6 +1487,7 @@ static int azx_first_init(struct azx *chip)
        struct snd_card *card = chip->card;
        int err;
        unsigned short gcap;
+       unsigned int dma_bits = 64;
 
 #if BITS_PER_LONG != 64
        /* Fix up base address on ULI M5461 */
@@ -1509,9 +1511,14 @@ static int azx_first_init(struct azx *chip)
                return -ENXIO;
        }
 
-       if (chip->msi)
+       if (chip->msi) {
+               if (chip->driver_caps & AZX_DCAPS_NO_MSI64) {
+                       dev_dbg(card->dev, "Disabling 64bit MSI\n");
+                       pci->no_64bit_msi = true;
+               }
                if (pci_enable_msi(pci) < 0)
                        chip->msi = 0;
+       }
 
        if (azx_acquire_irq(chip, 0) < 0)
                return -EBUSY;
@@ -1522,9 +1529,14 @@ static int azx_first_init(struct azx *chip)
        gcap = azx_readw(chip, GCAP);
        dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap);
 
+       /* AMD devices support 40 or 48bit DMA, take the safe one */
+       if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
+               dma_bits = 40;
+
        /* disable SB600 64bit support for safety */
        if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
                struct pci_dev *p_smbus;
+               dma_bits = 40;
                p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
                                         PCI_DEVICE_ID_ATI_SBX00_SMBUS,
                                         NULL);
@@ -1554,9 +1566,11 @@ static int azx_first_init(struct azx *chip)
        }
 
        /* allow 64bit DMA address if supported by H/W */
-       if ((gcap & AZX_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
-               pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
-       else {
+       if (!(gcap & AZX_GCAP_64OK))
+               dma_bits = 32;
+       if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) {
+               pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits));
+       } else {
                pci_set_dma_mask(pci, DMA_BIT_MASK(32));
                pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
        }
index 949cd437eeb264798aec5d9b2f5c5e61a87fc294..5016014e57f2f1dc65f68d5b71db8d12f065493f 100644 (file)
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
+#define AZX_DCAPS_NO_MSI64      (1 << 29)      /* Stick to 32-bit MSIs */
 
 /* HD Audio class code */
 #define PCI_CLASS_MULTIMEDIA_HD_AUDIO  0x0403
index 8fea1b86df25ebe5e66ea56bfb9babdae031a117..14f16be3f3747a3c72f1ba2938d2a1ca3f8007b2 100644 (file)
@@ -4818,7 +4818,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
index 7c83bab69deef832690c98c7babb00ded92d5784..8c9bf4b7aaf0e003db413347efe1ca2ae09053fe 100644 (file)
@@ -593,10 +593,10 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
        if (mixer->chip->shutdown)
                ret = -ENODEV;
        else
-               ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+               ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
                                  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                                  0, wIndex,
-                                 &tmp, sizeof(tmp), 1000);
+                                 &tmp, sizeof(tmp));
        up_read(&mixer->chip->shutdown_rwsem);
 
        if (ret < 0) {
index a5941f80fc5bc6321d26ce523953312acae2e09f..60dfe0d28771bbc244ae8b4e41b737d8281c6f04 100644 (file)
@@ -1193,12 +1193,12 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        /* iFi Audio micro/nano iDSD */
        case USB_ID(0x20b1, 0x3008):
                if (fp->altsetting == 2)
-                       return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+                       return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
        /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
        case USB_ID(0x20b1, 0x2009):
                if (fp->altsetting == 3)
-                       return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+                       return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
        default:
                break;
index 3aaca49de3257eed0bd905ac26112cacd49588dd..aacdb59f30dedcd780ee29e2903d928194a42a5c 100644 (file)
@@ -1933,7 +1933,7 @@ out:
 
 int kvm_vgic_create(struct kvm *kvm)
 {
-       int i, vcpu_lock_idx = -1, ret = 0;
+       int i, vcpu_lock_idx = -1, ret;
        struct kvm_vcpu *vcpu;
 
        mutex_lock(&kvm->lock);
@@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm)
         * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run while we create the vgic.
         */
+       ret = -EBUSY;
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!mutex_trylock(&vcpu->mutex))
                        goto out_unlock;
@@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm)
        }
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (vcpu->arch.has_run_once) {
-                       ret = -EBUSY;
+               if (vcpu->arch.has_run_once)
                        goto out_unlock;
-               }
        }
+       ret = 0;
 
        spin_lock_init(&kvm->arch.vgic.lock);
        kvm->arch.vgic.in_kernel = true;
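
Aside (not part of the diff): the restructuring above moves kvm_vgic_create() to a "preset the error, clear it on success" pattern: ret is set to -EBUSY once before the vcpu lock and has_run_once checks, every failure jumps to the unlock label, and ret only becomes 0 after all checks pass. A simplified, self-contained model of that control flow (plain booleans standing in for vcpu->mutex and vcpu->arch.has_run_once) might look like this:

/*
 * Simplified model of the error-path pattern used above; the boolean
 * arrays are hypothetical stand-ins for vcpu->mutex (trylock) and
 * vcpu->arch.has_run_once.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_VCPUS 4
#define EBUSY    16

static bool vcpu_locked[NR_VCPUS];      /* stand-in for vcpu->mutex */
static bool vcpu_has_run[NR_VCPUS];     /* stand-in for has_run_once */

static int vgic_create_model(void)
{
        int i, vcpu_lock_idx = -1, ret;

        ret = -EBUSY;                           /* any early exit reports -EBUSY */
        for (i = 0; i < NR_VCPUS; i++) {
                if (vcpu_locked[i])             /* trylock failed */
                        goto out_unlock;
                vcpu_locked[i] = true;
                vcpu_lock_idx = i;
        }
        for (i = 0; i < NR_VCPUS; i++) {
                if (vcpu_has_run[i])            /* a vcpu already ran: refuse */
                        goto out_unlock;
        }
        ret = 0;                                /* all checks passed */
        /* ...the real function initialises the vgic here... */
out_unlock:
        for (i = 0; i <= vcpu_lock_idx; i++)    /* release only what we took */
                vcpu_locked[i] = false;
        return ret;
}

int main(void)
{
        vcpu_has_run[2] = true;
        printf("%d\n", vgic_create_model());    /* prints -16 (-EBUSY) */
        return 0;
}
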
index 25ffac9e947d9d3e2d554e6c351dfa51811c0354..3cee7b167052b58e07c147abb65985865e39e0f9 100644 (file)
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn))
-               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+               return PageReserved(pfn_to_page(pfn));
 
        return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
        else if ((vma->vm_flags & VM_PFNMAP)) {
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
-               BUG_ON(!kvm_is_mmio_pfn(pfn));
+               BUG_ON(!kvm_is_reserved_pfn(pfn));
        } else {
                if (async && vma_is_valid(vma, write_fault))
                        *async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
        if (is_error_noslot_pfn(pfn))
                return KVM_ERR_PTR_BAD_PAGE;
 
-       if (kvm_is_mmio_pfn(pfn)) {
+       if (kvm_is_reserved_pfn(pfn)) {
                WARN_ON(1);
                return KVM_ERR_PTR_BAD_PAGE;
        }
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-       if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
                put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn)) {
+       if (!kvm_is_reserved_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn))
+       if (!kvm_is_reserved_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-       if (!kvm_is_mmio_pfn(pfn))
+       if (!kvm_is_reserved_pfn(pfn))
                get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);