Merge tag 'mfd-fixes-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Aug 2014 17:46:25 +0000 (10:46 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Aug 2014 17:46:25 +0000 (10:46 -0700)
Pull mfd fixes from Lee Jones:
 "Couple of simple fixes due for the 3.17 rcs

  (and a sneaky document addition that slipped from the previous
  pull-request)"

* tag 'mfd-fixes-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd:
  mfd: twl4030-power: Fix PM idle pin configuration to not conflict with regulators
  mfd: tc3589x: Add device tree bindings
  mfd: ab8500-core: Use 'ifdef' for config options
  mfd: htc-i2cpld: Fix %d confusingly prefixed with 0x in format string
  mfd: omap-usb-host: Fix %d confusingly prefixed with 0x in format string

293 files changed:
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
Documentation/devicetree/bindings/pci/designware-pcie.txt
Documentation/devicetree/bindings/pci/ti-pci.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
Documentation/dma-buf-sharing.txt
Documentation/this_cpu_ops.txt
Documentation/x86/tlb.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/exynos4412-odroid-common.dtsi
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
arch/arm/boot/dts/imx6sx-pinfunc.h
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/rk3066a-bqcurie2.dts
arch/arm/boot/dts/rk3188-radxarock.dts
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/tegra30-apalis.dtsi
arch/arm/boot/dts/tegra30-colibri.dtsi
arch/arm/boot/dts/vf610-twr.dts
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/Makefile
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/suspend-imx6.S
arch/arm/mach-shmobile/Kconfig
arch/arm64/Makefile
arch/arm64/configs/defconfig
arch/arm64/include/asm/sparsemem.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/head.S
arch/arm64/kernel/ptrace.c
arch/arm64/mm/init.c
arch/frv/include/asm/processor.h
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/entry.S
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/syscall_table.S
arch/mips/alchemy/devboards/db1200.c
arch/mips/bcm47xx/setup.c
arch/mips/cavium-octeon/setup.c
arch/mips/include/asm/eva.h [new file with mode: 0644]
arch/mips/include/asm/gic.h
arch/mips/include/asm/irq.h
arch/mips/include/asm/mach-malta/kernel-entry-init.h
arch/mips/include/asm/mach-netlogic/topology.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/syscall.h
arch/mips/kernel/cps-vec.S
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/scall64-o32.S
arch/mips/loongson/loongson-3/cop2-ex.c
arch/mips/loongson/loongson-3/numa.c
arch/mips/mm/cache.c
arch/mips/mti-malta/malta-memory.c
arch/mips/pmcs-msp71xx/msp_irq.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/compat_wrapper.c
arch/s390/kernel/ipl.c
arch/s390/kernel/setup.c
arch/s390/kernel/syscalls.S
arch/sh/Kconfig
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/entry_32.S
arch/x86/kvm/emulate.c
arch/x86/mm/tlb.c
drivers/ata/ahci_tegra.c
drivers/ata/ahci_xgene.c
drivers/ata/libata-core.c
drivers/ata/pata_samsung_cf.c
drivers/ata/pata_scc.c
drivers/bus/arm-ccn.c
drivers/dma-buf/fence.c
drivers/firmware/efi/vars.c
drivers/gpio/devres.c
drivers/gpio/gpio-lynxpoint.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/ast/ast_tables.h
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_dma.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_ib.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770_dma.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/gpu/drm/sti/Kconfig
drivers/gpu/drm/sti/sti_drm_drv.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_tvout.c
drivers/hid/hid-cherry.c
drivers/hid/hid-huion.c
drivers/hid/hid-kye.c
drivers/hid/hid-lg.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-dj.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-monterey.c
drivers/hid/hid-petalynx.c
drivers/hid/hid-picolcd_core.c
drivers/hid/hid-rmi.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sunplus.c
drivers/i2c/Kconfig
drivers/i2c/Makefile
drivers/i2c/busses/i2c-i801.c
drivers/i2c/i2c-acpi.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/isdn/hardware/eicon/xdi_msg.h
drivers/md/raid10.c
drivers/md/raid5.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/flexcan.c
drivers/net/can/sja1000/sja1000.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/macvlan.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/smsc.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/selftest.c
drivers/pci/host/Kconfig
drivers/pci/host/Makefile
drivers/pci/host/pci-dra7xx.c [new file with mode: 0644]
drivers/pci/host/pci-tegra.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-designware.h
drivers/pinctrl/nomadik/pinctrl-abx500.c
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-tegra-xusb.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-samsung.h
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
drivers/platform/x86/toshiba_acpi.c
drivers/pwm/core.c
drivers/s390/char/con3215.c
drivers/s390/char/sclp_tty.c
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/sh/Makefile
drivers/sh/intc/Kconfig
fs/aio.c
fs/btrfs/async-thread.c
fs/btrfs/async-thread.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/reada.c
fs/btrfs/scrub.c
fs/btrfs/sysfs.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/cifs/smb2maperror.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smbfsctl.h
fs/ext3/super.c
fs/isofs/inode.c
fs/isofs/isofs.h
fs/isofs/rock.c
fs/nfs/pagelist.c
fs/nfs/write.c
fs/udf/namei.c
include/drm/drm_pciids.h
include/linux/brcmphy.h
include/linux/ftrace.h
include/linux/gpio/consumer.h
include/linux/i2c.h
include/linux/nfs_page.h
include/linux/seqno-fence.h
include/uapi/asm-generic/unistd.h
include/uapi/drm/radeon_drm.h
kernel/events/core.c
kernel/kprobes.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
lib/Kconfig.debug
net/atm/lec.c
net/batman-adv/fragmentation.c
net/ipv6/ip6_fib.c
net/mac80211/chan.c
net/openvswitch/actions.c
net/packet/af_packet.c
net/packet/internal.h
net/sched/sch_cbq.c
net/sctp/associola.c
net/tipc/port.h
net/tipc/socket.c
scripts/kernel-doc
security/tomoyo/realpath.c
sound/core/info.c
sound/core/pcm_misc.c
sound/pci/ctxfi/ct20k1reg.h
sound/pci/hda/ca0132_regs.h
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/arizona.c
sound/soc/codecs/pcm512x.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl_esai.c
sound/soc/intel/sst-acpi.c
sound/soc/intel/sst-baytrail-ipc.c
sound/soc/intel/sst-baytrail-ipc.h
sound/soc/intel/sst-baytrail-pcm.c
sound/soc/pxa/pxa-ssp.c
sound/soc/soc-dapm.c
virt/kvm/assigned-dev.c
virt/kvm/iommu.c

index 1486497a24c1d41c9d64208b4178a8cb2e2c28d1..ce6a1a0720285bd9be4549d478ea49b4b985ee31 100644 (file)
@@ -4,11 +4,13 @@ Specifying interrupt information for devices
 1) Interrupt client nodes
 -------------------------
 
-Nodes that describe devices which generate interrupts must contain an either an
-"interrupts" property or an "interrupts-extended" property. These properties
-contain a list of interrupt specifiers, one per output interrupt. The format of
-the interrupt specifier is determined by the interrupt controller to which the
-interrupts are routed; see section 2 below for details.
+Nodes that describe devices which generate interrupts must contain an
+"interrupts" property, an "interrupts-extended" property, or both. If both are
+present, the latter should take precedence; the former may be provided simply
+for compatibility with software that does not recognize the latter. These
+properties contain a list of interrupt specifiers, one per output interrupt. The
+format of the interrupt specifier is determined by the interrupt controller to
+which the interrupts are routed; see section 2 below for details.
 
   Example:
        interrupt-parent = <&intc1>;
index d0d15ee42834089abfd2ffad2bad205b28e33d18..ed0d9b9fff2be5f5be5902554d0da70ed81591df 100644 (file)
@@ -2,6 +2,10 @@
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the core.
+- reg: Should contain the configuration address space.
+- reg-names: Must be "config" for the PCIe configuration space.
+    (The old way of getting the configuration address space from "ranges"
+    is deprecated and should be avoided.)
 - #address-cells: set to <3>
 - #size-cells: set to <2>
 - device_type: set to "pci"
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt
new file mode 100644 (file)
index 0000000..3d21791
--- /dev/null
@@ -0,0 +1,59 @@
+TI PCI Controllers
+
+PCIe Designware Controller
+ - compatible: Should be "ti,dra7-pcie"
+ - reg : Two register ranges as listed in the reg-names property
+ - reg-names : The first entry must be "ti-conf" for the TI specific registers
+              The second entry must be "rc-dbics" for the designware pcie
+              registers
+              The third entry must be "config" for the PCIe configuration space
+ - phys : list of PHY specifiers (used by generic PHY framework)
+ - phy-names : must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
+              number of PHYs as specified in *phys* property.
+ - ti,hwmods : Name of the hwmod associated to the pcie, "pcie<X>",
+              where <X> is the instance number of the pcie from the HW spec.
+ - interrupts : Two interrupt entries must be specified. The first one is for
+               main interrupt line and the second for MSI interrupt line.
+ - #address-cells,
+   #size-cells,
+   #interrupt-cells,
+   device_type,
+   ranges,
+   num-lanes,
+   interrupt-map-mask,
+   interrupt-map : as specified in ../designware-pcie.txt
+
+Example:
+axi {
+       compatible = "simple-bus";
+       #size-cells = <1>;
+       #address-cells = <1>;
+       ranges = <0x51000000 0x51000000 0x3000
+                 0x0        0x20000000 0x10000000>;
+       pcie@51000000 {
+               compatible = "ti,dra7-pcie";
+               reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>;
+               reg-names = "rc_dbics", "ti_conf", "config";
+               interrupts = <0 232 0x4>, <0 233 0x4>;
+               #address-cells = <3>;
+               #size-cells = <2>;
+               device_type = "pci";
+               ranges = <0x81000000 0 0          0x03000 0 0x00010000
+                         0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+               #interrupt-cells = <1>;
+               num-lanes = <1>;
+               ti,hwmods = "pcie1";
+               phys = <&pcie1_phy>;
+               phy-names = "pcie-phy0";
+               interrupt-map-mask = <0 0 0 7>;
+               interrupt-map = <0 0 0 1 &pcie_intc 1>,
+                               <0 0 0 2 &pcie_intc 2>,
+                               <0 0 0 3 &pcie_intc 3>,
+                               <0 0 0 4 &pcie_intc 4>;
+               pcie_intc: interrupt-controller {
+                       interrupt-controller;
+                       #address-cells = <0>;
+                       #interrupt-cells = <1>;
+               };
+       };
+};
index 0211c6d8a5229e17866eb90f751a015ffaf609a0..92fae82f35f2174ca64cb6de14825245870d9f5f 100644 (file)
@@ -62,7 +62,7 @@ Example:
                #gpio-cells = <2>;
                interrupt-controller;
                #interrupt-cells = <2>;
-               interrupts = <0 32 0x4>;
+               interrupts = <0 16 0x4>;
 
                pinctrl-names = "default";
                pinctrl-0 = <&gsbi5_uart_default>;
index 67a4087d53f9c731a1c0bf57e3ec48b7c1b90610..bb9753b635a3a5e8d8f794814a83584878b550e5 100644 (file)
@@ -56,10 +56,10 @@ The dma_buf buffer sharing API usage contains the following steps:
                                     size_t size, int flags,
                                     const char *exp_name)
 
-   If this succeeds, dma_buf_export allocates a dma_buf structure, and returns a
-   pointer to the same. It also associates an anonymous file with this buffer,
-   so it can be exported. On failure to allocate the dma_buf object, it returns
-   NULL.
+   If this succeeds, dma_buf_export_named allocates a dma_buf structure, and
+   returns a pointer to the same. It also associates an anonymous file with this
+   buffer, so it can be exported. On failure to allocate the dma_buf object,
+   it returns NULL.
 
    'exp_name' is the name of exporter - to facilitate information while
    debugging.
@@ -76,7 +76,7 @@ The dma_buf buffer sharing API usage contains the following steps:
    drivers and/or processes.
 
    Interface:
-      int dma_buf_fd(struct dma_buf *dmabuf)
+      int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 
    This API installs an fd for the anonymous file associated with this buffer;
    returns either 'fd', or error.
@@ -157,7 +157,9 @@ to request use of buffer for allocation.
    "dma_buf->ops->" indirection from the users of this interface.
 
    In struct dma_buf_ops, unmap_dma_buf is defined as
-      void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *);
+      void (*unmap_dma_buf)(struct dma_buf_attachment *,
+                            struct sg_table *,
+                            enum dma_data_direction);
 
    unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like
    map_dma_buf, this API also must be implemented by the exporter.
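
For illustration, the updated three-argument unmap_dma_buf prototype above slots into an exporter's dma_buf_ops table as sketched below; the my_* names are placeholders and the table is deliberately partial (a real exporter must also provide release, mmap and the kmap callbacks), so treat it as a sketch rather than a working exporter.

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

/* Placeholder map callback: a real exporter would build and return an
 * sg_table describing its backing storage. */
static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	return NULL;
}

/* Matches the prototype documented above: the direction of the DMA that
 * is now ending is passed alongside the sg_table. */
static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	/* undo whatever my_map_dma_buf set up for 'sgt' */
}

static const struct dma_buf_ops my_dma_buf_ops = {
	.map_dma_buf	= my_map_dma_buf,
	.unmap_dma_buf	= my_unmap_dma_buf,
	/* .release, .mmap, .kmap, etc. are omitted in this sketch */
};
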
index 1a4ce7e3e05f4836d6219838fc263440611e621d..0ec995712176ed83d4dd04ecfed81aa442848c07 100644 (file)
@@ -2,26 +2,26 @@ this_cpu operations
 -------------------
 
 this_cpu operations are a way of optimizing access to per cpu
-variables associated with the *currently* executing processor through
-the use of segment registers (or a dedicated register where the cpu
-permanently stored the beginning of the per cpu area for a specific
-processor).
+variables associated with the *currently* executing processor. This is
+done through the use of segment registers (or a dedicated register where
+the cpu permanently stored the beginning of the per cpu area for a
+specific processor).
 
-The this_cpu operations add a per cpu variable offset to the processor
-specific percpu base and encode that operation in the instruction
+this_cpu operations add a per cpu variable offset to the processor
+specific per cpu base and encode that operation in the instruction
 operating on the per cpu variable.
 
-This means there are no atomicity issues between the calculation of
+This means that there are no atomicity issues between the calculation of
 the offset and the operation on the data. Therefore it is not
-necessary to disable preempt or interrupts to ensure that the
+necessary to disable preemption or interrupts to ensure that the
 processor is not changed between the calculation of the address and
 the operation on the data.
 
 Read-modify-write operations are of particular interest. Frequently
 processors have special lower latency instructions that can operate
-without the typical synchronization overhead but still provide some
-sort of relaxed atomicity guarantee. The x86 for example can execute
-RMV (Read Modify Write) instructions like inc/dec/cmpxchg without the
+without the typical synchronization overhead, but still provide some
+sort of relaxed atomicity guarantees. The x86, for example, can execute
+RMW (Read Modify Write) instructions like inc/dec/cmpxchg without the
 lock prefix and the associated latency penalty.
 
 Access to the variable without the lock prefix is not synchronized but
@@ -30,6 +30,38 @@ data specific to the currently executing processor. Only the current
 processor should be accessing that variable and therefore there are no
 concurrency issues with other processors in the system.
 
+Please note that accesses by remote processors to a per cpu area are
+exceptional situations and may impact performance and/or correctness
+(remote write operations) of local RMW operations via this_cpu_*.
+
+The main use of the this_cpu operations has been to optimize counter
+operations.
+
+The following this_cpu() operations with implied preemption protection
+are defined. These operations can be used without worrying about
+preemption and interrupts.
+
+       this_cpu_add()
+       this_cpu_read(pcp)
+       this_cpu_write(pcp, val)
+       this_cpu_add(pcp, val)
+       this_cpu_and(pcp, val)
+       this_cpu_or(pcp, val)
+       this_cpu_add_return(pcp, val)
+       this_cpu_xchg(pcp, nval)
+       this_cpu_cmpxchg(pcp, oval, nval)
+       this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+       this_cpu_sub(pcp, val)
+       this_cpu_inc(pcp)
+       this_cpu_dec(pcp)
+       this_cpu_sub_return(pcp, val)
+       this_cpu_inc_return(pcp)
+       this_cpu_dec_return(pcp)
+
+
+Inner working of this_cpu operations
+------------------------------------
+
 On x86 the fs: or the gs: segment registers contain the base of the
 per cpu area. It is then possible to simply use the segment override
 to relocate a per cpu relative address to the proper per cpu area for
@@ -48,22 +80,21 @@ results in a single instruction
        mov ax, gs:[x]
 
 instead of a sequence of calculation of the address and then a fetch
-from that address which occurs with the percpu operations. Before
+from that address which occurs with the per cpu operations. Before
 this_cpu_ops such sequence also required preempt disable/enable to
 prevent the kernel from moving the thread to a different processor
 while the calculation is performed.
 
-The main use of the this_cpu operations has been to optimize counter
-operations.
+Consider the following this_cpu operation:
 
        this_cpu_inc(x)
 
-results in the following single instruction (no lock prefix!)
+The above results in the following single instruction (no lock prefix!)
 
        inc gs:[x]
 
 instead of the following operations required if there is no segment
-register.
+register:
 
        int *y;
        int cpu;
@@ -73,10 +104,10 @@ register.
        (*y)++;
        put_cpu();
 
-Note that these operations can only be used on percpu data that is
+Note that these operations can only be used on per cpu data that is
 reserved for a specific processor. Without disabling preemption in the
 surrounding code this_cpu_inc() will only guarantee that one of the
-percpu counters is correctly incremented. However, there is no
+per cpu counters is correctly incremented. However, there is no
 guarantee that the OS will not move the process directly before or
 after the this_cpu instruction is executed. In general this means that
 the value of the individual counters for each processor are
@@ -86,9 +117,9 @@ that is of interest.
 Per cpu variables are used for performance reasons. Bouncing cache
 lines can be avoided if multiple processors concurrently go through
 the same code paths.  Since each processor has its own per cpu
-variables no concurrent cacheline updates take place. The price that
+variables no concurrent cache line updates take place. The price that
 has to be paid for this optimization is the need to add up the per cpu
-counters when the value of the counter is needed.
+counters when the value of a counter is needed.
 
 
 Special operations:
@@ -100,33 +131,39 @@ Takes the offset of a per cpu variable (&x !) and returns the address
 of the per cpu variable that belongs to the currently executing
 processor.  this_cpu_ptr avoids multiple steps that the common
 get_cpu/put_cpu sequence requires. No processor number is
-available. Instead the offset of the local per cpu area is simply
-added to the percpu offset.
+available. Instead, the offset of the local per cpu area is simply
+added to the per cpu offset.
 
+Note that this operation is usually used in a code segment when
+preemption has been disabled. The pointer is then used to
+access local per cpu data in a critical section. When preemption
+is re-enabled this pointer is usually no longer useful since it may
+no longer point to per cpu data of the current processor.
 
 
 Per cpu variables and offsets
 -----------------------------
 
-Per cpu variables have *offsets* to the beginning of the percpu
+Per cpu variables have *offsets* to the beginning of the per cpu
 area. They do not have addresses although they look like that in the
 code. Offsets cannot be directly dereferenced. The offset must be
-added to a base pointer of a percpu area of a processor in order to
+added to a base pointer of a per cpu area of a processor in order to
 form a valid address.
 
 Therefore the use of x or &x outside of the context of per cpu
 operations is invalid and will generally be treated like a NULL
 pointer dereference.
 
-In the context of per cpu operations
+       DEFINE_PER_CPU(int, x);
 
-       x is a per cpu variable. Most this_cpu operations take a cpu
-       variable.
+In the context of per cpu operations the above implies that x is a per
+cpu variable. Most this_cpu operations take a cpu variable.
 
-       &x is the *offset* a per cpu variable. this_cpu_ptr() takes
-       the offset of a per cpu variable which makes this look a bit
-       strange.
+       int __percpu *p = &x;
 
+&x and hence p is the *offset* of a per cpu variable. this_cpu_ptr()
+takes the offset of a per cpu variable which makes this look a bit
+strange.
 
 
 Operations on a field of a per cpu structure
@@ -152,7 +189,7 @@ If we have an offset to struct s:
 
        struct s __percpu *ps = &p;
 
-       z = this_cpu_dec(ps->m);
+       this_cpu_dec(ps->m);
 
        z = this_cpu_inc_return(ps->n);
 
@@ -172,29 +209,52 @@ if we do not make use of this_cpu ops later to manipulate fields:
 Variants of this_cpu ops
 -------------------------
 
-this_cpu ops are interrupt safe. Some architecture do not support
+this_cpu ops are interrupt safe. Some architectures do not support
 these per cpu local operations. In that case the operation must be
 replaced by code that disables interrupts, then does the operations
-that are guaranteed to be atomic and then reenable interrupts. Doing
+that are guaranteed to be atomic and then re-enable interrupts. Doing
 so is expensive. If there are other reasons why the scheduler cannot
 change the processor we are executing on then there is no reason to
-disable interrupts. For that purpose the __this_cpu operations are
-provided. For example.
-
-       __this_cpu_inc(x);
-
-Will increment x and will not fallback to code that disables
+disable interrupts. For that purpose the following __this_cpu operations
+are provided.
+
+These operations have no guarantee against concurrent interrupts or
+preemption. If a per cpu variable is not used in an interrupt context
+and the scheduler cannot preempt, then they are safe. If any interrupts
+still occur while an operation is in progress and if the interrupt too
+modifies the variable, then RMW actions can not be guaranteed to be
+safe.
+
+       __this_cpu_add()
+       __this_cpu_read(pcp)
+       __this_cpu_write(pcp, val)
+       __this_cpu_add(pcp, val)
+       __this_cpu_and(pcp, val)
+       __this_cpu_or(pcp, val)
+       __this_cpu_add_return(pcp, val)
+       __this_cpu_xchg(pcp, nval)
+       __this_cpu_cmpxchg(pcp, oval, nval)
+       __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+       __this_cpu_sub(pcp, val)
+       __this_cpu_inc(pcp)
+       __this_cpu_dec(pcp)
+       __this_cpu_sub_return(pcp, val)
+       __this_cpu_inc_return(pcp)
+       __this_cpu_dec_return(pcp)
+
+
+Will increment x and will not fall-back to code that disables
 interrupts on platforms that cannot accomplish atomicity through
 address relocation and a Read-Modify-Write operation in the same
 instruction.
 
 
-
 &this_cpu_ptr(pp)->n vs this_cpu_ptr(&pp->n)
 --------------------------------------------
 
 The first operation takes the offset and forms an address and then
-adds the offset of the n field.
+adds the offset of the n field. This may result in two add
+instructions emitted by the compiler.
 
 The second one first adds the two offsets and then does the
 relocation.  IMHO the second form looks cleaner and has an easier time
@@ -202,4 +262,73 @@ with (). The second form also is consistent with the way
 this_cpu_read() and friends are used.
 
 
-Christoph Lameter, April 3rd, 2013
+Remote access to per cpu data
+------------------------------
+
+Per cpu data structures are designed to be used by one cpu exclusively.
+If you use the variables as intended, this_cpu_ops() are guaranteed to
+be "atomic" as no other CPU has access to these data structures.
+
+There are special cases where you might need to access per cpu data
+structures remotely. It is usually safe to do a remote read access
+and that is frequently done to summarize counters. Remote write access
+is something which could be problematic because this_cpu ops do not
+have lock semantics. A remote write may interfere with a this_cpu
+RMW operation.
+
+Remote write accesses to percpu data structures are highly discouraged
+unless absolutely necessary. Please consider using an IPI to wake up
+the remote CPU and perform the update to its per cpu area.
+
+To access per-cpu data structure remotely, typically the per_cpu_ptr()
+function is used:
+
+
+       DEFINE_PER_CPU(struct data, datap);
+
+       struct data *p = per_cpu_ptr(&datap, cpu);
+
+This makes it explicit that we are getting ready to access a percpu
+area remotely.
+
+You can also do the following to convert the datap offset to an address
+
+       struct data *p = this_cpu_ptr(&datap);
+
+but, passing of pointers calculated via this_cpu_ptr to other cpus is
+unusual and should be avoided.
+
+Remote accesses are typically only for reading the status of another cpu's
+per cpu data. Write accesses can cause unique problems due to the
+relaxed synchronization requirements for this_cpu operations.
+
+One example that illustrates some concerns with write operations is
+the following scenario that occurs because two per cpu variables
+share a cache-line but the relaxed synchronization is applied to
+only one process updating the cache-line.
+
+Consider the following example
+
+
+       struct test {
+               atomic_t a;
+               int b;
+       };
+
+       DEFINE_PER_CPU(struct test, onecacheline);
+
+There is some concern about what would happen if the field 'a' is updated
+remotely from one processor and the local processor would use this_cpu ops
+to update field b. Care should be taken that such simultaneous accesses to
+data within the same cache line are avoided. Also costly synchronization
+may be necessary. IPIs are generally recommended in such scenarios instead
+of a remote write to the per cpu area of another processor.
+
+Even in cases where the remote writes are rare, please bear in
+mind that a remote write will evict the cache line from the processor
+that most likely will access it. If the processor wakes up and finds a
+missing local cache line of a per cpu area, its performance and hence
+the wake up times will be affected.
+
+Christoph Lameter, August 4th, 2014
+Pranith Kumar, Aug 2nd, 2014
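
The counter pattern the new text describes can be sketched in a few lines of kernel C; the pkt_count variable and both helpers below are hypothetical, assuming only the documented this_cpu_inc(), per_cpu_ptr() and for_each_online_cpu() interfaces.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per cpu counter; each CPU updates only its own copy. */
static DEFINE_PER_CPU(unsigned long, pkt_count);

/* Fast path on the local CPU: no lock prefix and no explicit
 * preempt_disable() needed, since this_cpu_inc() implies preemption
 * protection as listed above. */
static void count_packet(void)
{
	this_cpu_inc(pkt_count);
}

/* Slow path: remote *read* access to every CPU's copy, the one kind of
 * remote access the text above treats as generally safe. */
static unsigned long total_packets(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		sum += *per_cpu_ptr(&pkt_count, cpu);

	return sum;
}

The summing loop is the benign remote-read case; a remote write to pkt_count from another CPU could still race with the local this_cpu_inc(), which is exactly the hazard the "Remote access" section warns about.
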
index 2b3a82e69151419239e8b6b8adb29095beb8c423..39d1723267036a5ba7dff02dc82e39d69a1eeae8 100644 (file)
@@ -35,7 +35,7 @@ invlpg instruction (or instructions _near_ it) show up high in
 profiles.  If you believe that individual invalidations being
 called too often, you can lower the tunable:
 
-       /sys/debug/kernel/x86/tlb_single_page_flush_ceiling
+       /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 
 This will cause us to do the global flush for more cases.
 Lowering it to 0 will disable the use of the individual flushes.
index aefa94841ff3eee6cc124221ab4bcb56dd7d4ca3..1ff06dee651d5da4806dfcec1ab12e8281110e58 100644 (file)
@@ -1277,6 +1277,7 @@ F:        drivers/scsi/arm/
 ARM/Rockchip SoC support
 M:     Heiko Stuebner <heiko@sntech.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-rockchip@lists.infradead.org
 S:     Maintained
 F:     arch/arm/mach-rockchip/
 F:     drivers/*/*rockchip*
@@ -1843,6 +1844,12 @@ S:       Orphan
 F:     Documentation/filesystems/befs.txt
 F:     fs/befs/
 
+BECKHOFF CX5020 ETHERCAT MASTER DRIVER
+M: Dariusz Marcinkiewicz <reksio@newterm.pl>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/ec_bhf.c
+
 BFS FILE SYSTEM
 M:     "Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk>
 S:     Maintained
@@ -2059,7 +2066,7 @@ S:        Supported
 F:     drivers/scsi/bnx2i/
 
 BROADCOM KONA GPIO DRIVER
-M:     Markus Mayer <markus.mayer@linaro.org>
+M:     Ray Jui <rjui@broadcom.com>
 L:     bcm-kernel-feedback-list@broadcom.com
 S:     Supported
 F:     drivers/gpio/gpio-bcm-kona.c
@@ -3115,6 +3122,17 @@ F:       include/linux/host1x.h
 F:     include/uapi/drm/tegra_drm.h
 F:     Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
+DRM DRIVERS FOR RENESAS
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-sh@vger.kernel.org
+T:     git git://people.freedesktop.org/~airlied/linux
+S:     Supported
+F:     drivers/gpu/drm/rcar-du/
+F:     drivers/gpu/drm/shmobile/
+F:     include/linux/platform_data/rcar-du.h
+F:     include/linux/platform_data/shmob_drm.h
+
 DSBR100 USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -3843,10 +3861,13 @@ F:      drivers/tty/serial/ucc_uart.c
 
 FREESCALE SOC SOUND DRIVERS
 M:     Timur Tabi <timur@tabi.org>
+M:     Nicolin Chen <nicoleotsuka@gmail.com>
+M:     Xiubo Li <Li.Xiubo@freescale.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     sound/soc/fsl/fsl*
+F:     sound/soc/fsl/imx*
 F:     sound/soc/fsl/mpc8610_hpcd.c
 
 FREEVXFS FILESYSTEM
@@ -4446,6 +4467,13 @@ F:       include/linux/i2c-*.h
 F:     include/uapi/linux/i2c.h
 F:     include/uapi/linux/i2c-*.h
 
+I2C ACPI SUPPORT
+M:     Mika Westerberg <mika.westerberg@linux.intel.com>
+L:     linux-i2c@vger.kernel.org
+L:     linux-acpi@vger.kernel.org
+S:     Maintained
+F:     drivers/i2c/i2c-acpi.c
+
 I2C-TAOS-EVM DRIVER
 M:     Jean Delvare <jdelvare@suse.de>
 L:     linux-i2c@vger.kernel.org
@@ -5972,6 +6000,12 @@ T:       git git://linuxtv.org/media_tree.git
 S:     Maintained
 F:     drivers/media/radio/radio-mr800.c
 
+MRF24J40 IEEE 802.15.4 RADIO DRIVER
+M:     Alan Ott <alan@signal11.us>
+L:     linux-wpan@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ieee802154/mrf24j40.c
+
 MSI LAPTOP SUPPORT
 M:     "Lee, Chun-Yi" <jlee@suse.com>
 L:     platform-driver-x86@vger.kernel.org
@@ -6858,6 +6892,14 @@ S:       Supported
 F:     Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
 F:     drivers/pci/host/pci-tegra.c
 
+PCI DRIVER FOR TI DRA7XX
+M:     Kishon Vijay Abraham I <kishon@ti.com>
+L:     linux-omap@vger.kernel.org
+L:     linux-pci@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/pci/ti-pci.txt
+F:     drivers/pci/host/pci-dra7xx.c
+
 PCI DRIVER FOR RENESAS R-CAR
 M:     Simon Horman <horms@verge.net.au>
 L:     linux-pci@vger.kernel.org
index e43244263306e0b9321b003a2aede021dcb7cc88..f64fc7804a031a54ba82e9a4629680cfec6dd7ae 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index 6d6d23c83d30a70e63eae3f5d04d9b8a77da8013..adadaf97ac01b78cf02c05d2e117ae181f1a09a0 100644 (file)
        i2c@13860000 {
                pinctrl-0 = <&i2c0_bus>;
                pinctrl-names = "default";
+               samsung,i2c-sda-delay = <100>;
+               samsung,i2c-max-bus-freq = <400000>;
                status = "okay";
 
                usb3503: usb3503@08 {
 
                max77686: pmic@09 {
                        compatible = "maxim,max77686";
+                       interrupt-parent = <&gpx3>;
+                       interrupts = <2 0>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&max77686_irq>;
                        reg = <0x09>;
                        #clock-cells = <1>;
 
                samsung,pins = "gpx1-3";
                samsung,pin-pud = <0>;
        };
+
+       max77686_irq: max77686-irq {
+               samsung,pins = "gpx3-2";
+               samsung,pin-function = <0>;
+               samsung,pin-pud = <0>;
+               samsung,pin-drv = <0>;
+       };
 };
index 64fa27b36be0c957b7e8b39610eac563c730b99b..c6c58c1c00e3d866149bc17f12d4cff71fb2b96b 100644 (file)
                                compatible = "fsl,imx53-vpu";
                                reg = <0x63ff4000 0x1000>;
                                interrupts = <9>;
-                               clocks = <&clks IMX5_CLK_VPU_GATE>,
+                               clocks = <&clks IMX5_CLK_VPU_REFERENCE_GATE>,
                                         <&clks IMX5_CLK_VPU_GATE>;
                                clock-names = "per", "ahb";
                                resets = <&src 1>;
index 8c1cb53464a0f6bb7e96cd7a412c0a2e2eea8489..4fa25434779828806c49a0d0477a46cb15cd2544 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet>;
        phy-mode = "rgmii";
-       phy-reset-gpios = <&gpio3 23 0>;
+       phy-reset-gpios = <&gpio1 25 0>;
        phy-supply = <&vgen2_1v2_eth>;
        status = "okay";
 };
                                MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK    0x1b0b0
                                MX6QDL_PAD_ENET_MDIO__ENET_MDIO         0x1b0b0
                                MX6QDL_PAD_ENET_MDC__ENET_MDC           0x1b0b0
+                               MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25      0x1b0b0
                                MX6QDL_PAD_GPIO_16__ENET_REF_CLK        0x4001b0a8
                        >;
                };
index 3e0b816dac08ff368ef6259d2d67e72df7011bf9..bb9c6b78cb97c676ec03925c5d8ea78bd47579f5 100644 (file)
@@ -78,7 +78,7 @@
 #define MX6SX_PAD_GPIO1_IO07__USDHC2_WP                           0x0030 0x0378 0x0870 0x1 0x1
 #define MX6SX_PAD_GPIO1_IO07__ENET2_MDIO                          0x0030 0x0378 0x0770 0x2 0x0
 #define MX6SX_PAD_GPIO1_IO07__AUDMUX_MCLK                         0x0030 0x0378 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B                         0x0030 0x0378 0x082C 0x4 0x1
+#define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B                         0x0030 0x0378 0x0000 0x4 0x0
 #define MX6SX_PAD_GPIO1_IO07__GPIO1_IO_7                          0x0030 0x0378 0x0000 0x5 0x0
 #define MX6SX_PAD_GPIO1_IO07__SRC_EARLY_RESET                     0x0030 0x0378 0x0000 0x6 0x0
 #define MX6SX_PAD_GPIO1_IO07__DCIC2_OUT                           0x0030 0x0378 0x0000 0x7 0x0
@@ -96,7 +96,7 @@
 #define MX6SX_PAD_GPIO1_IO09__WDOG2_WDOG_B                        0x0038 0x0380 0x0000 0x1 0x0
 #define MX6SX_PAD_GPIO1_IO09__SDMA_EXT_EVENT_1                    0x0038 0x0380 0x0820 0x2 0x0
 #define MX6SX_PAD_GPIO1_IO09__CCM_OUT0                            0x0038 0x0380 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B                         0x0038 0x0380 0x0834 0x4 0x1
+#define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B                         0x0038 0x0380 0x0000 0x4 0x0
 #define MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9                          0x0038 0x0380 0x0000 0x5 0x0
 #define MX6SX_PAD_GPIO1_IO09__SRC_INT_BOOT                        0x0038 0x0380 0x0000 0x6 0x0
 #define MX6SX_PAD_GPIO1_IO09__OBSERVE_MUX_OUT_4                   0x0038 0x0380 0x0000 0x7 0x0
 #define MX6SX_PAD_CSI_DATA07__ESAI_TX3_RX2                        0x0068 0x03B0 0x079C 0x1 0x1
 #define MX6SX_PAD_CSI_DATA07__I2C4_SDA                            0x0068 0x03B0 0x07C4 0x2 0x2
 #define MX6SX_PAD_CSI_DATA07__KPP_ROW_7                           0x0068 0x03B0 0x07DC 0x3 0x0
-#define MX6SX_PAD_CSI_DATA07__UART6_CTS_B                         0x0068 0x03B0 0x0854 0x4 0x1
+#define MX6SX_PAD_CSI_DATA07__UART6_CTS_B                         0x0068 0x03B0 0x0000 0x4 0x0
 #define MX6SX_PAD_CSI_DATA07__GPIO1_IO_21                         0x0068 0x03B0 0x0000 0x5 0x0
 #define MX6SX_PAD_CSI_DATA07__WEIM_DATA_16                        0x0068 0x03B0 0x0000 0x6 0x0
 #define MX6SX_PAD_CSI_DATA07__DCIC1_OUT                           0x0068 0x03B0 0x0000 0x7 0x0
 #define MX6SX_PAD_CSI_VSYNC__CSI1_VSYNC                           0x0078 0x03C0 0x0708 0x0 0x0
 #define MX6SX_PAD_CSI_VSYNC__ESAI_TX5_RX0                         0x0078 0x03C0 0x07A4 0x1 0x1
 #define MX6SX_PAD_CSI_VSYNC__AUDMUX_AUD6_RXD                      0x0078 0x03C0 0x0674 0x2 0x1
-#define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B                          0x0078 0x03C0 0x0844 0x3 0x3
+#define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B                          0x0078 0x03C0 0x0000 0x3 0x0
 #define MX6SX_PAD_CSI_VSYNC__MQS_RIGHT                            0x0078 0x03C0 0x0000 0x4 0x0
 #define MX6SX_PAD_CSI_VSYNC__GPIO1_IO_25                          0x0078 0x03C0 0x0000 0x5 0x0
 #define MX6SX_PAD_CSI_VSYNC__WEIM_DATA_24                         0x0078 0x03C0 0x0000 0x6 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__ENET2_TX_CLK                      0x00A0 0x03E8 0x0000 0x0 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__ENET2_REF_CLK2                    0x00A0 0x03E8 0x076C 0x1 0x1
 #define MX6SX_PAD_ENET2_TX_CLK__I2C3_SDA                          0x00A0 0x03E8 0x07BC 0x2 0x1
-#define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B                       0x00A0 0x03E8 0x082C 0x3 0x3
+#define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B                       0x00A0 0x03E8 0x0000 0x3 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__MLB_CLK                           0x00A0 0x03E8 0x07E8 0x4 0x1
 #define MX6SX_PAD_ENET2_TX_CLK__GPIO2_IO_9                        0x00A0 0x03E8 0x0000 0x5 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__USB_OTG2_PWR                      0x00A0 0x03E8 0x0000 0x6 0x0
 #define MX6SX_PAD_KEY_COL4__SAI2_RX_BCLK                          0x00B4 0x03FC 0x0808 0x7 0x0
 #define MX6SX_PAD_KEY_ROW0__KPP_ROW_0                             0x00B8 0x0400 0x0000 0x0 0x0
 #define MX6SX_PAD_KEY_ROW0__USDHC3_WP                             0x00B8 0x0400 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_ROW0__UART6_CTS_B                           0x00B8 0x0400 0x0854 0x2 0x3
+#define MX6SX_PAD_KEY_ROW0__UART6_CTS_B                           0x00B8 0x0400 0x0000 0x2 0x0
 #define MX6SX_PAD_KEY_ROW0__ECSPI1_MOSI                           0x00B8 0x0400 0x0718 0x3 0x0
 #define MX6SX_PAD_KEY_ROW0__AUDMUX_AUD5_TXD                       0x00B8 0x0400 0x0660 0x4 0x0
 #define MX6SX_PAD_KEY_ROW0__GPIO2_IO_15                           0x00B8 0x0400 0x0000 0x5 0x0
 #define MX6SX_PAD_KEY_ROW1__M4_NMI                                0x00BC 0x0404 0x0000 0x8 0x0
 #define MX6SX_PAD_KEY_ROW2__KPP_ROW_2                             0x00C0 0x0408 0x0000 0x0 0x0
 #define MX6SX_PAD_KEY_ROW2__USDHC4_WP                             0x00C0 0x0408 0x0878 0x1 0x1
-#define MX6SX_PAD_KEY_ROW2__UART5_CTS_B                           0x00C0 0x0408 0x084C 0x2 0x3
+#define MX6SX_PAD_KEY_ROW2__UART5_CTS_B                           0x00C0 0x0408 0x0000 0x2 0x0
 #define MX6SX_PAD_KEY_ROW2__CAN1_RX                               0x00C0 0x0408 0x068C 0x3 0x1
 #define MX6SX_PAD_KEY_ROW2__CANFD_RX1                             0x00C0 0x0408 0x0694 0x4 0x1
 #define MX6SX_PAD_KEY_ROW2__GPIO2_IO_17                           0x00C0 0x0408 0x0000 0x5 0x0
 #define MX6SX_PAD_NAND_DATA05__RAWNAND_DATA05                     0x0164 0x04AC 0x0000 0x0 0x0
 #define MX6SX_PAD_NAND_DATA05__USDHC2_DATA5                       0x0164 0x04AC 0x0000 0x1 0x0
 #define MX6SX_PAD_NAND_DATA05__QSPI2_B_DQS                        0x0164 0x04AC 0x0000 0x2 0x0
-#define MX6SX_PAD_NAND_DATA05__UART3_CTS_B                        0x0164 0x04AC 0x083C 0x3 0x1
+#define MX6SX_PAD_NAND_DATA05__UART3_CTS_B                        0x0164 0x04AC 0x0000 0x3 0x0
 #define MX6SX_PAD_NAND_DATA05__AUDMUX_AUD4_RXC                    0x0164 0x04AC 0x064C 0x4 0x0
 #define MX6SX_PAD_NAND_DATA05__GPIO4_IO_9                         0x0164 0x04AC 0x0000 0x5 0x0
 #define MX6SX_PAD_NAND_DATA05__WEIM_AD_5                          0x0164 0x04AC 0x0000 0x6 0x0
 #define MX6SX_PAD_QSPI1A_SS1_B__SIM_M_HADDR_12                    0x019C 0x04E4 0x0000 0x7 0x0
 #define MX6SX_PAD_QSPI1A_SS1_B__SDMA_DEBUG_PC_3                   0x019C 0x04E4 0x0000 0x9 0x0
 #define MX6SX_PAD_QSPI1B_DATA0__QSPI1_B_DATA_0                    0x01A0 0x04E8 0x0000 0x0 0x0
-#define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B                       0x01A0 0x04E8 0x083C 0x1 0x4
+#define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B                       0x01A0 0x04E8 0x0000 0x1 0x0
 #define MX6SX_PAD_QSPI1B_DATA0__ECSPI3_MOSI                       0x01A0 0x04E8 0x0738 0x2 0x1
 #define MX6SX_PAD_QSPI1B_DATA0__ESAI_RX_FS                        0x01A0 0x04E8 0x0778 0x3 0x2
 #define MX6SX_PAD_QSPI1B_DATA0__CSI1_DATA_22                      0x01A0 0x04E8 0x06F4 0x4 0x1
 #define MX6SX_PAD_SD1_DATA2__AUDMUX_AUD5_TXFS                     0x0230 0x0578 0x0670 0x1 0x1
 #define MX6SX_PAD_SD1_DATA2__PWM3_OUT                             0x0230 0x0578 0x0000 0x2 0x0
 #define MX6SX_PAD_SD1_DATA2__GPT_COMPARE2                         0x0230 0x0578 0x0000 0x3 0x0
-#define MX6SX_PAD_SD1_DATA2__UART2_CTS_B                          0x0230 0x0578 0x0834 0x4 0x2
+#define MX6SX_PAD_SD1_DATA2__UART2_CTS_B                          0x0230 0x0578 0x0000 0x4 0x0
 #define MX6SX_PAD_SD1_DATA2__GPIO6_IO_4                           0x0230 0x0578 0x0000 0x5 0x0
 #define MX6SX_PAD_SD1_DATA2__ECSPI4_RDY                           0x0230 0x0578 0x0000 0x6 0x0
 #define MX6SX_PAD_SD1_DATA2__CCM_OUT0                             0x0230 0x0578 0x0000 0x7 0x0
 #define MX6SX_PAD_SD2_DATA3__VADC_CLAMP_CURRENT_3                 0x024C 0x0594 0x0000 0x8 0x0
 #define MX6SX_PAD_SD2_DATA3__MMDC_DEBUG_31                        0x024C 0x0594 0x0000 0x9 0x0
 #define MX6SX_PAD_SD3_CLK__USDHC3_CLK                             0x0250 0x0598 0x0000 0x0 0x0
-#define MX6SX_PAD_SD3_CLK__UART4_CTS_B                            0x0250 0x0598 0x0844 0x1 0x0
+#define MX6SX_PAD_SD3_CLK__UART4_CTS_B                            0x0250 0x0598 0x0000 0x1 0x0
 #define MX6SX_PAD_SD3_CLK__ECSPI4_SCLK                            0x0250 0x0598 0x0740 0x2 0x0
 #define MX6SX_PAD_SD3_CLK__AUDMUX_AUD6_RXFS                       0x0250 0x0598 0x0680 0x3 0x0
 #define MX6SX_PAD_SD3_CLK__LCDIF2_VSYNC                           0x0250 0x0598 0x0000 0x4 0x0
 #define MX6SX_PAD_SD3_DATA7__USDHC3_DATA7                         0x0274 0x05BC 0x0000 0x0 0x0
 #define MX6SX_PAD_SD3_DATA7__CAN1_RX                              0x0274 0x05BC 0x068C 0x1 0x0
 #define MX6SX_PAD_SD3_DATA7__CANFD_RX1                            0x0274 0x05BC 0x0694 0x2 0x0
-#define MX6SX_PAD_SD3_DATA7__UART3_CTS_B                          0x0274 0x05BC 0x083C 0x3 0x3
+#define MX6SX_PAD_SD3_DATA7__UART3_CTS_B                          0x0274 0x05BC 0x0000 0x3 0x0
 #define MX6SX_PAD_SD3_DATA7__LCDIF2_DATA_5                        0x0274 0x05BC 0x0000 0x4 0x0
 #define MX6SX_PAD_SD3_DATA7__GPIO7_IO_9                           0x0274 0x05BC 0x0000 0x5 0x0
 #define MX6SX_PAD_SD3_DATA7__ENET1_1588_EVENT0_IN                 0x0274 0x05BC 0x0000 0x6 0x0
 #define MX6SX_PAD_SD4_DATA6__SDMA_DEBUG_EVENT_CHANNEL_1           0x0298 0x05E0 0x0000 0x9 0x0
 #define MX6SX_PAD_SD4_DATA7__USDHC4_DATA7                         0x029C 0x05E4 0x0000 0x0 0x0
 #define MX6SX_PAD_SD4_DATA7__RAWNAND_DATA08                       0x029C 0x05E4 0x0000 0x1 0x0
-#define MX6SX_PAD_SD4_DATA7__UART5_CTS_B                          0x029C 0x05E4 0x084C 0x2 0x1
+#define MX6SX_PAD_SD4_DATA7__UART5_CTS_B                          0x029C 0x05E4 0x0000 0x2 0x0
 #define MX6SX_PAD_SD4_DATA7__ECSPI3_SS0                           0x029C 0x05E4 0x073C 0x3 0x0
 #define MX6SX_PAD_SD4_DATA7__LCDIF2_DATA_15                       0x029C 0x05E4 0x0000 0x4 0x0
 #define MX6SX_PAD_SD4_DATA7__GPIO6_IO_21                          0x029C 0x05E4 0x0000 0x5 0x0
index 23486c081a69891096cd08c157d8dfc7fd94384b..be59014474b20114b77ff0d7043b05635b4d9013 100644 (file)
                renesas,function = "msiof0";
        };
 
-       i2c6_pins: i2c6 {
-               renesas,groups = "i2c6";
-               renesas,function = "i2c6";
-       };
-
        usb0_pins: usb0 {
                renesas,groups = "usb0";
                renesas,function = "usb0";
 };
 
 &i2c6 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&i2c6_pins>;
        status = "okay";
        clock-frequency = <100000>;
 
index 042f821d9e4d8bdbe0ab48722293278a86a44496..c9d912da61415b104d62f045b88de810924ac009 100644 (file)
 &mmc0 { /* sdmmc */
        num-slots = <1>;
        status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
        vmmc-supply = <&vcc_sd0>;
 
        slot@0 {
index 171b610db709f9e4f94355c31d54d858d89a84c2..5e4e3c238b2d1d79faca9c3ac803e4bf97c322fb 100644 (file)
 &mmc0 {
        num-slots = <1>;
        status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
        vmmc-supply = <&vcc_sd0>;
 
        slot@0 {
index 44b07e512c2448cbc24821668dc43ceaf6915638..e06fbfc55bb7ee6dee85d601bd3b12fdcac269b6 100644 (file)
                        clock-frequency = <100000>;
                        resets = <&apb2_rst 0>;
                        status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
                };
 
                i2c1: i2c@01c2b000 {
                        clock-frequency = <100000>;
                        resets = <&apb2_rst 1>;
                        status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
                };
 
                i2c2: i2c@01c2b400 {
                        clock-frequency = <100000>;
                        resets = <&apb2_rst 2>;
                        status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
                };
 
                i2c3: i2c@01c2b800 {
                        clock-frequency = <100000>;
                        resets = <&apb2_rst 3>;
                        status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
                };
 
                gmac: ethernet@01c30000 {
index 8adaa7871dd361cf17ac212a79b7d3333c410010..a5446cba9804d3c6c24f7c2207642820869ccaa8 100644 (file)
                        vcc4-supply = <&sys_3v3_reg>;
                        vcc5-supply = <&sys_3v3_reg>;
                        vcc6-supply = <&vio_reg>;
-                       vcc7-supply = <&sys_5v0_reg>;
+                       vcc7-supply = <&charge_pump_5v0_reg>;
                        vccio-supply = <&sys_3v3_reg>;
 
                        regulators {
                        regulator-max-microvolt = <3300000>;
                        regulator-always-on;
                };
+
+               charge_pump_5v0_reg: regulator@101 {
+                       compatible = "regulator-fixed";
+                       reg = <101>;
+                       regulator-name = "5v0";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       regulator-always-on;
+               };
        };
 };
index bf16f8e6562767a3c5a5185758fa5e2b72e01a9d..c4ed1bec4d92afad7ae50eb5f16be4748aa77fc3 100644 (file)
                        vcc4-supply = <&sys_3v3_reg>;
                        vcc5-supply = <&sys_3v3_reg>;
                        vcc6-supply = <&vio_reg>;
-                       vcc7-supply = <&sys_5v0_reg>;
+                       vcc7-supply = <&charge_pump_5v0_reg>;
                        vccio-supply = <&sys_3v3_reg>;
 
                        regulators {
                        regulator-max-microvolt = <3300000>;
                        regulator-always-on;
                };
+
+               charge_pump_5v0_reg: regulator@101 {
+                       compatible = "regulator-fixed";
+                       reg = <101>;
+                       regulator-name = "5v0";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       regulator-always-on;
+               };
        };
 };
index 11d733406c7ed5f1078228b93a1837b3102eedb7..b8a5e8c68f06eedde75d010df044959bfe543d2a 100644 (file)
                };
 
                pinctrl_esdhc1: esdhc1grp {
-                       fsl,fsl,pins = <
+                       fsl,pins = <
                                VF610_PAD_PTA24__ESDHC1_CLK     0x31ef
                                VF610_PAD_PTA25__ESDHC1_CMD     0x31ef
                                VF610_PAD_PTA26__ESDHC1_DAT0    0x31ef
index 9de84a215abd98adc78e9c480d19603da6e82637..be9a51afe05a02ff374f6c4d73db362907fa49ea 100644 (file)
@@ -85,7 +85,6 @@ config SOC_IMX25
 
 config SOC_IMX27
        bool
-       select ARCH_HAS_OPP
        select CPU_ARM926T
        select IMX_HAVE_IOMUX_V1
        select MXC_AVIC
@@ -659,7 +658,6 @@ comment "Device tree only"
 
 config SOC_IMX5
        bool
-       select ARCH_HAS_OPP
        select HAVE_IMX_SRC
        select MXC_TZIC
 
index ac88599ca0805a5cc7da4c79f308fe83963183c4..23c02932bf843e1c4862f7c18b16b49f4a0d4551 100644 (file)
@@ -93,9 +93,11 @@ obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop.o
 obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
 obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
 obj-$(CONFIG_HAVE_IMX_SRC) += src.o
+ifdef CONFIG_SOC_IMX6
 AFLAGS_headsmp.o :=-Wa,-march=armv7-a
 obj-$(CONFIG_SMP) += headsmp.o platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+endif
 obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
 obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
 obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o mach-imx6sx.o
index 6cceb7765c14a19f9426309376e13268e3f2091a..29d412975affce9b4141fb7b3e99716b6b49b2f0 100644 (file)
@@ -194,6 +194,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        clk[IMX6QDL_CLK_PLL3_80M]  = imx_clk_fixed_factor("pll3_80m",  "pll3_usb_otg",   1, 6);
        clk[IMX6QDL_CLK_PLL3_60M]  = imx_clk_fixed_factor("pll3_60m",  "pll3_usb_otg",   1, 8);
        clk[IMX6QDL_CLK_TWD]       = imx_clk_fixed_factor("twd",       "arm",            1, 2);
+       if (cpu_is_imx6dl()) {
+               clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1);
+               clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1);
+       }
 
        clk[IMX6QDL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
        clk[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
@@ -217,8 +221,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        clk[IMX6QDL_CLK_ESAI_SEL]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,        ARRAY_SIZE(audio_sels));
        clk[IMX6QDL_CLK_ASRC_SEL]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,        ARRAY_SIZE(audio_sels));
        clk[IMX6QDL_CLK_SPDIF_SEL]        = imx_clk_mux("spdif_sel",        base + 0x30, 20, 2, audio_sels,        ARRAY_SIZE(audio_sels));
-       clk[IMX6QDL_CLK_GPU2D_AXI]        = imx_clk_mux("gpu2d_axi",        base + 0x18, 0,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
-       clk[IMX6QDL_CLK_GPU3D_AXI]        = imx_clk_mux("gpu3d_axi",        base + 0x18, 1,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+       if (cpu_is_imx6q()) {
+               clk[IMX6QDL_CLK_GPU2D_AXI]        = imx_clk_mux("gpu2d_axi",        base + 0x18, 0,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+               clk[IMX6QDL_CLK_GPU3D_AXI]        = imx_clk_mux("gpu3d_axi",        base + 0x18, 1,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+       }
        clk[IMX6QDL_CLK_GPU2D_CORE_SEL]   = imx_clk_mux("gpu2d_core_sel",   base + 0x18, 16, 2, gpu2d_core_sels,   ARRAY_SIZE(gpu2d_core_sels));
        clk[IMX6QDL_CLK_GPU3D_CORE_SEL]   = imx_clk_mux("gpu3d_core_sel",   base + 0x18, 4,  2, gpu3d_core_sels,   ARRAY_SIZE(gpu3d_core_sels));
        clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8,  2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
index 74b50f1982dbc3526e8cb1a84b25ffff188dd45b..ca4ea2daf25b3e7da3bae73151abff8146154bf3 100644 (file)
@@ -173,6 +173,8 @@ ENTRY(imx6_suspend)
        ldr     r6, [r11, #0x0]
        ldr     r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
        ldr     r6, [r11, #0x0]
+       ldr     r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
+       ldr     r6, [r11, #0x0]
 
        /* use r11 to store the IO address */
        ldr     r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET]
index e15dff790dbbbdc4aaa9937bbff18072828369b9..1e6c51c7c2d5694d0581603f355bb6e4fbc16108 100644 (file)
@@ -75,6 +75,7 @@ config ARCH_SH7372
        select ARM_CPU_SUSPEND if PM || CPU_IDLE
        select CPU_V7
        select SH_CLK_CPG
+       select SH_INTC
        select SYS_SUPPORTS_SH_CMT
        select SYS_SUPPORTS_SH_TMU
 
@@ -85,6 +86,7 @@ config ARCH_SH73A0
        select CPU_V7
        select I2C
        select SH_CLK_CPG
+       select SH_INTC
        select RENESAS_INTC_IRQPIN
        select SYS_SUPPORTS_SH_CMT
        select SYS_SUPPORTS_SH_TMU
index 57833546bf003b5f8ffe5e70fb65c10c8dd1ef55..2df5e5daeebeaca04dc9c9948556b75eb21ffe78 100644 (file)
@@ -39,7 +39,7 @@ head-y                := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%04x0\n", int(65535 * rand())}')
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
 else
 TEXT_OFFSET := 0x00080000
 endif
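
For reference, the new awk one-liner can only emit 4 KiB-aligned offsets in the range 0x000000..0x1ff000, i.e. below 2 MiB, which is exactly what the tightened TEXT_OFFSET asserts in the head.S hunk further down require. A standalone C sketch of the same arithmetic (illustration only, not part of the kernel build):

/*
 * Illustration only: mirrors the awk expression above.  int(512 * rand())
 * yields 0..511, which lands in bits [20:12] of the offset.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	unsigned long offset;

	srand((unsigned int)time(NULL));
	offset = (unsigned long)(rand() % 512) << 12;

	printf("TEXT_OFFSET := 0x%06lx\n", offset);
	printf("4 KiB aligned: %s, below 2 MiB: %s\n",
	       (offset & 0xfff) == 0 ? "yes" : "no",
	       offset < 0x200000 ? "yes" : "no");
	return 0;
}
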
index 1e52b741d80644f0f293d247d412781dc18895e9..d92ef3c541617ec5bbe5a45f8da91bd9e27d1461 100644 (file)
@@ -64,6 +64,8 @@ CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_ATA=y
+CONFIG_AHCI_XGENE=y
+CONFIG_PHY_XGENE=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
@@ -71,6 +73,7 @@ CONFIG_TUN=y
 CONFIG_VIRTIO_NET=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
+CONFIG_NET_XGENE=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_SERPORT is not set
index 1be62bcb9d474e42e6b457eaecc4272bfc37d2b1..74a9d301819fbfa1128bcce879b95ae9c24a2e76 100644 (file)
@@ -17,7 +17,7 @@
 #define __ASM_SPARSEMEM_H
 
 #ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS       40
+#define MAX_PHYSMEM_BITS       48
 #define SECTION_SIZE_BITS      30
 #endif
 
index 4bc95d27e0636f785383ce222fdcc7adc3073d6a..6d2bf419431d86aece4a3aa0650091f120d3efcd 100644 (file)
@@ -41,7 +41,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           383
+#define __NR_compat_syscalls           386
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index e242600c4046c87e1cd554bdb55d02a16fc5f0f4..da1f06b535e38a97bbd981ade5c414d1131ef4e8 100644 (file)
@@ -787,3 +787,8 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
 __SYSCALL(__NR_sched_getattr, sys_sched_getattr)
 #define __NR_renameat2 382
 __SYSCALL(__NR_renameat2, sys_renameat2)
+                       /* 383 for seccomp */
+#define __NR_getrandom 384
+__SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 385
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
index f798f66634afaa1853f6845c75ab6fa9ac09958e..1771696230269673a64606714011b923995353e0 100644 (file)
@@ -49,7 +49,7 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 
        if (l1ip != ICACHE_POLICY_PIPT)
                set_bit(ICACHEF_ALIASING, &__icache_flags);
-       if (l1ip == ICACHE_POLICY_AIVIVT);
+       if (l1ip == ICACHE_POLICY_AIVIVT)
                set_bit(ICACHEF_AIVIVT, &__icache_flags);
 
        pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
index e72f3100958f204ce5b72ba2ce788e2e6cdc3d95..03aaa99e1ea00a3d4caf79d27cea669d857a9090 100644 (file)
@@ -188,6 +188,8 @@ static __init void reserve_regions(void)
                if (uefi_debug)
                        pr_cont("\n");
        }
+
+       set_bit(EFI_MEMMAP, &efi.flags);
 }
 
 
@@ -463,6 +465,8 @@ static int __init arm64_enter_virtual_mode(void)
        efi_native_runtime_setup();
        set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
+       efi.runtime_version = efi.systab->hdr.revision;
+
        return 0;
 
 err_unmap:
index 144f10567f82eaa3a6c9a428e3461e08ed911253..bed028364a93235006b467c20daeb8725b9a4615 100644 (file)
 
 #define KERNEL_RAM_VADDR       (PAGE_OFFSET + TEXT_OFFSET)
 
-#if (TEXT_OFFSET & 0xf) != 0
-#error TEXT_OFFSET must be at least 16B aligned
-#elif (PAGE_OFFSET & 0xfffff) != 0
+#if (TEXT_OFFSET & 0xfff) != 0
+#error TEXT_OFFSET must be at least 4KB aligned
+#elif (PAGE_OFFSET & 0x1fffff) != 0
 #error PAGE_OFFSET must be at least 2MB aligned
-#elif TEXT_OFFSET > 0xfffff
+#elif TEXT_OFFSET > 0x1fffff
 #error TEXT_OFFSET must be less than 2MB
 #endif
 
index 0310811bd77d891fe248d0ddb6042af46d3f63ab..70526cfda056e7216ef7f773b059d31b6ca4f88a 100644 (file)
@@ -1115,19 +1115,15 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);
 
-#ifdef CONFIG_AUDITSYSCALL
        audit_syscall_entry(syscall_get_arch(), regs->syscallno,
                regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
-#endif
 
        return regs->syscallno;
 }
 
 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
-#ifdef CONFIG_AUDITSYSCALL
        audit_syscall_exit(regs);
-#endif
 
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_exit(regs, regs_return_value(regs));
index 5b4526ee3a01a6b9c65f15a4985b70cccaf3edea..5472c24018766ea5348d2d0cb56f9a932f46356a 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/of_fdt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
+#include <linux/efi.h>
 
 #include <asm/fixmap.h>
 #include <asm/sections.h>
@@ -148,7 +149,8 @@ void __init arm64_memblock_init(void)
                memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
-       early_init_fdt_scan_reserved_mem();
+       if (!efi_enabled(EFI_MEMMAP))
+               early_init_fdt_scan_reserved_mem();
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
index a34f309e580199b5f573db3f975a199cd74d5372..6554e78893f26bc88e96e103811e1f0881a3a5ca 100644 (file)
@@ -129,7 +129,8 @@ unsigned long get_wchan(struct task_struct *p);
 #define        KSTK_EIP(tsk)   ((tsk)->thread.frame0->pc)
 #define        KSTK_ESP(tsk)   ((tsk)->thread.frame0->sp)
 
-#define cpu_relax()    barrier()
+#define cpu_relax() barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* data cache prefetch */
 #define ARCH_HAS_PREFETCH
index 4254f5d3218c7ed4dc72c9ec2e966b331f3af3c0..10a14ead70b9d9fd627b416c8567008d1aebc606 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    316 /* length of syscall table */
+#define NR_syscalls                    317 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 99801c3be914afda7f30ccd7fe89306186ad1602..6a65bb7d06571050157bda55a30f9fe366387cbf 100644 (file)
 #define __NR_sched_getattr             1337
 #define __NR_renameat2                 1338
 #define __NR_getrandom                 1339
+#define __NR_memfd_create              1340
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index 4c13837a9269faecf350be25c9bfd10053d86936..01edf242eb296443293fc8b1ee178d72d0a89f55 100644 (file)
@@ -1777,6 +1777,7 @@ sys_call_table:
        data8 sys_sched_getattr
        data8 sys_renameat2
        data8 sys_getrandom
+       data8 sys_memfd_create                  // 1340
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index 4e1ddc930a685ef0d6c137d0dc9d165f111acbba..1c2380bf8fe60850274dce0bdbc9e9bb16cc531f 100644 (file)
 #define __NR_sched_setattr     381
 #define __NR_sched_getattr     382
 #define __NR_renameat2         383
+#define __NR_seccomp           384
+#define __NR_getrandom         385
+#define __NR_memfd_create      386
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 1a23d5d5480c7da12bae37f8a77e0015459c4704..de59ee1d7010a788cf916b4ade098375af6214d6 100644 (file)
@@ -384,3 +384,6 @@ ENTRY(sys_call_table)
        .long sys_sched_setattr
        .long sys_sched_getattr
        .long sys_renameat2
+       .long sys_seccomp
+       .long sys_getrandom             /* 385 */
+       .long sys_memfd_create
index 776188908dfc4916c34f73568b1f14467d0f13b8..8c13675a12e74618b64b640f852f30856970fa85 100644 (file)
@@ -847,6 +847,7 @@ int __init db1200_dev_setup(void)
                        pr_warn("DB1200: cant get I2C close to 50MHz\n");
                else
                        clk_set_rate(c, pfc);
+               clk_prepare_enable(c);
                clk_put(c);
        }
 
@@ -922,11 +923,6 @@ int __init db1200_dev_setup(void)
        }
 
        /* Audio PSC clock is supplied externally. (FIXME: platdata!!) */
-       c = clk_get(NULL, "psc1_intclk");
-       if (!IS_ERR(c)) {
-               clk_prepare_enable(c);
-               clk_put(c);
-       }
        __raw_writel(PSC_SEL_CLK_SERCLK,
            (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
        wmb();
index 2b63e7e7d3d35d4414b28cfcecfeacdfed03e458..ad439c27300350ce47ef49e579d0688bc2ca48f4 100644 (file)
@@ -59,12 +59,21 @@ static void bcm47xx_machine_restart(char *command)
        switch (bcm47xx_bus_type) {
 #ifdef CONFIG_BCM47XX_SSB
        case BCM47XX_BUS_TYPE_SSB:
-               ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 3);
+               if (bcm47xx_bus.ssb.chip_id == 0x4785)
+                       write_c0_diag4(1 << 22);
+               ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 1);
+               if (bcm47xx_bus.ssb.chip_id == 0x4785) {
+                       __asm__ __volatile__(
+                               ".set\tmips3\n\t"
+                               "sync\n\t"
+                               "wait\n\t"
+                               ".set\tmips0");
+               }
                break;
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
        case BCM47XX_BUS_TYPE_BCMA:
-               bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 3);
+               bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 1);
                break;
 #endif
        }
index 008e9c8b8eac0f2ff13edf8f0df7f21172dd6a90..38f4c32e28165543d99bbd391ad3df993789ea06 100644 (file)
@@ -263,7 +263,6 @@ static uint64_t crashk_size, crashk_base;
 static int octeon_uart;
 
 extern asmlinkage void handle_int(void);
-extern asmlinkage void plat_irq_dispatch(void);
 
 /**
  * Return non zero if we are currently running in the Octeon simulator
@@ -458,6 +457,18 @@ static void octeon_halt(void)
        octeon_kill_core(NULL);
 }
 
+static char __read_mostly octeon_system_type[80];
+
+static int __init init_octeon_system_type(void)
+{
+       snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+               cvmx_board_type_to_string(octeon_bootinfo->board_type),
+               octeon_model_get_string(read_c0_prid()));
+
+       return 0;
+}
+early_initcall(init_octeon_system_type);
+
 /**
  * Return a string representing the system type
  *
@@ -465,11 +476,7 @@ static void octeon_halt(void)
  */
 const char *octeon_board_type_string(void)
 {
-       static char name[80];
-       sprintf(name, "%s (%s)",
-               cvmx_board_type_to_string(octeon_bootinfo->board_type),
-               octeon_model_get_string(read_c0_prid()));
-       return name;
+       return octeon_system_type;
 }
 
 const char *get_system_type(void)
diff --git a/arch/mips/include/asm/eva.h b/arch/mips/include/asm/eva.h
new file mode 100644 (file)
index 0000000..a3d1807
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ *
+ * EVA functions for generic code
+ */
+
+#ifndef _ASM_EVA_H
+#define _ASM_EVA_H
+
+#include <kernel-entry-init.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EVA
+
+/*
+ * EVA early init code
+ *
+ * Platforms must define their own 'platform_eva_init' macro in
+ * their kernel-entry-init.h header. This macro usually does the
+ * platform specific configuration of the segmentation registers,
+ * and it is normally called from assembly code.
+ *
+ */
+
+.macro eva_init
+platform_eva_init
+.endm
+
+#else
+
+.macro eva_init
+.endm
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif
index 3f20b2111d56c3ad8b653f0bbab81d3d14d46ba9..d7699cf7e135e22f84b0f4c5e45028ce5567d404 100644 (file)
@@ -49,7 +49,7 @@
 #endif
 #define GICBIS(reg, mask, bits)                        \
        do { u32 data;                          \
-               GICREAD((reg), data);           \
+               GICREAD(reg, data);             \
                data &= ~(mask);                \
                data |= ((bits) & (mask));      \
                GICWRITE((reg), data);          \
index ae1f7b24dd1a511daa15423dd8186cea9812009c..39f07aec640cf27b865d270e45f34fed6b547274 100644 (file)
@@ -26,6 +26,8 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)    /* Sane hardware, sane code ... */
 #endif
 
+asmlinkage void plat_irq_dispatch(void);
+
 extern void do_IRQ(unsigned int irq);
 
 extern void arch_init_irq(void);
index 77eeda77e73c7332f9175ed4706cf18d92d8a5b1..0cf8622db27f4689b9e7bb076b422d67a0c28cf9 100644 (file)
 #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+
        /*
         * Prepare segments for EVA boot:
         *
         * This is in case the processor boots in legacy configuration
         * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
         *
-        * On entry, t1 is loaded with CP0_CONFIG
-        *
         * ========================= Mappings =============================
         * Virtual memory           Physical memory           Mapping
         * 0x00000000 - 0x7fffffff  0x80000000 - 0xfffffffff   MUSUK (kuseg)
         *
         *
         * Lowmem is expanded to 2GB
+        *
+        * The following code uses the t0, t1, t2 and ra registers without
+        * previously preserving them.
+        *
         */
-       .macro  eva_entry
+       .macro  platform_eva_init
+
+       .set    push
+       .set    reorder
        /*
         * Get Config.K0 value and use it to program
         * the segmentation registers
         */
+       mfc0    t1, CP0_CONFIG
        andi    t1, 0x7 /* CCA */
        move    t2, t1
        ins     t2, t1, 16, 3
@@ -77,6 +86,8 @@
        mtc0    t0, $16, 5
        sync
        jal     mips_ihb
+
+       .set    pop
        .endm
 
        .macro  kernel_entry_setup
        sll     t0, t0, 6   /* SC bit */
        bgez    t0, 9f
 
-       eva_entry
+       platform_eva_init
        b       0f
 9:
        /* Assume we came from YAMON... */
@@ -127,8 +138,7 @@ nonsc_processor:
 #ifdef CONFIG_EVA
        sync
        ehb
-       mfc0    t1, CP0_CONFIG
-       eva_entry
+       platform_eva_init
 #endif
        .endm
 
index ceeb1f5e7129b0692a088d673444359106bb264d..0eb43c832b2546d9254278be6ef53273f2a1ff4b 100644 (file)
 
 #include <asm/mach-netlogic/multi-node.h>
 
-#ifdef CONFIG_SMP
-#define topology_physical_package_id(cpu)      cpu_to_node(cpu)
-#define topology_core_id(cpu)  (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE)
-#define topology_thread_cpumask(cpu)           (&cpu_sibling_map[cpu])
-#define topology_core_cpumask(cpu)     cpumask_of_node(cpu_to_node(cpu))
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */
index 027c74db13f94399ea8a99088ee9095725b1fe75..df49a308085caa568c74e48a64a1eea0d7f2b845 100644 (file)
@@ -122,6 +122,9 @@ do {                                                                        \
        }                                                               \
 } while(0)
 
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+       pte_t pteval);
+
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 
 #define pte_none(pte)          (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
@@ -145,7 +148,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
                }
        }
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -183,7 +185,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
        }
 #endif
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -390,15 +391,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
        pte_t pte);
-extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
-       pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
        unsigned long address, pte_t *ptep)
 {
        pte_t pte = *ptep;
        __update_tlb(vma, address, pte);
-       __update_cache(vma, address, pte);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
index 17960fe7a8ce4ef21b7a94cca10c7b5af61cec17..cdf68b33bd65ac5826ac369a4d06aeedc3d1fd90 100644 (file)
@@ -131,10 +131,12 @@ static inline int syscall_get_arch(void)
 {
        int arch = EM_MIPS;
 #ifdef CONFIG_64BIT
-       if (!test_thread_flag(TIF_32BIT_REGS))
+       if (!test_thread_flag(TIF_32BIT_REGS)) {
                arch |= __AUDIT_ARCH_64BIT;
-       if (test_thread_flag(TIF_32BIT_ADDR))
-               arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
+               /* N32 sets only TIF_32BIT_ADDR */
+               if (test_thread_flag(TIF_32BIT_ADDR))
+                       arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
+       }
 #endif
 #if defined(__LITTLE_ENDIAN)
        arch |=  __AUDIT_ARCH_LE;
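
The restructured test fixes the ABI decoding: o32 tasks set both TIF_32BIT_REGS and TIF_32BIT_ADDR, n32 tasks set only TIF_32BIT_ADDR (as the new comment notes), and n64 tasks set neither, so the N32 convention flag must only be considered inside the 64-bit branch. A plain-C sketch of the resulting mapping (strings stand in for the real audit constants, endianness handling omitted; illustration only, not kernel code):

/*
 * Illustration only: how the restructured test maps the two thread flags
 * onto the audit arch for each MIPS ABI.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *mips_audit_arch(bool tif_32bit_regs, bool tif_32bit_addr)
{
	if (tif_32bit_regs)
		return "EM_MIPS";                      /* o32: 32-bit regs and addrs */
	if (tif_32bit_addr)
		return "EM_MIPS | 64BIT | MIPS64_N32"; /* n32: 64-bit regs, 32-bit addrs */
	return "EM_MIPS | 64BIT";                      /* n64 */
}

int main(void)
{
	printf("o32: %s\n", mips_audit_arch(true, true));
	printf("n32: %s\n", mips_audit_arch(false, true));
	printf("n64: %s\n", mips_audit_arch(false, false));
	return 0;
}
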
index 6f4f739dad9635521eb0c8697365a3efd69f28eb..e6e97d2a5c9e68cccde81ab0f181184d1e27fd13 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheops.h>
+#include <asm/eva.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/pm.h>
@@ -166,6 +167,9 @@ dcache_done:
 1:     jal     mips_cps_core_init
         nop
 
+       /* Do any EVA initialization if necessary */
+       eva_init
+
        /*
         * Boot any other VPEs within this core that should be online, and
         * deactivate this VPE if it should be offline.
index 14bf74b0f51c066f488caba872ebdaddd8caefe8..b63f2482f2881c24a3d62d2f95ca9a2eb5cf2799 100644 (file)
@@ -558,7 +558,7 @@ static int mipspmu_get_irq(void)
        if (mipspmu.irq >= 0) {
                /* Request my own irq handler. */
                err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
-                       IRQF_PERCPU | IRQF_NOBALANCING,
+                       IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
                        "mips_perf_pmu", NULL);
                if (err) {
                        pr_warning("Unable to request IRQ%d for MIPS "
index 13b964fddc4a9e15e4bdc8fbfd2f7312a04a59ca..25bb8400156da3905d1ba2d586da26a4672b4379 100644 (file)
@@ -113,15 +113,19 @@ trace_a_syscall:
        move    s0, t2                  # Save syscall pointer
        move    a0, sp
        /*
-        * syscall number is in v0 unless we called syscall(__NR_###)
+        * absolute syscall number is in v0 unless we called syscall(__NR_###)
         * where the real syscall number is in a0
         * note: NR_syscall is the first O32 syscall but the macro is
         * only defined when compiling with -mabi=32 (CONFIG_32BIT)
         * therefore __NR_O32_Linux is used (4000)
         */
-       addiu   a1, v0,  __NR_O32_Linux
-       bnez    v0, 1f /* __NR_syscall at offset 0 */
-       lw      a1, PT_R4(sp)
+       .set    push
+       .set    reorder
+       subu    t1, v0,  __NR_O32_Linux
+       move    a1, v0
+       bnez    t1, 1f /* __NR_syscall at offset 0 */
+       lw      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+       .set    pop
 
 1:     jal     syscall_trace_enter
 
index 9182e8d2967c774ff39456968d07d832773fe362..b03e37d2071ac867e65a702f7e052a00c13f496c 100644 (file)
 static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
        void *data)
 {
-       int fpu_enabled;
+       int fpu_owned;
        int fr = !test_thread_flag(TIF_32BIT_FPREGS);
 
        switch (action) {
        case CU2_EXCEPTION:
                preempt_disable();
-               fpu_enabled = read_c0_status() & ST0_CU1;
+               fpu_owned = __is_fpu_owner();
                if (!fr)
                        set_c0_status(ST0_CU1 | ST0_CU2);
                else
@@ -39,8 +39,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
                        KSTK_STATUS(current) |= ST0_FR;
                else
                        KSTK_STATUS(current) &= ~ST0_FR;
-               /* If FPU is enabled, we needn't init or restore fp */
-               if(!fpu_enabled) {
+               /* If FPU is owned, we needn't init or restore fp */
+               if (!fpu_owned) {
                        set_thread_flag(TIF_USEDFPU);
                        if (!used_math()) {
                                _init_fpu();
index ca025a6ba559bbe644e555290af2a617fd90d9ce..37ed184398c6bbf8d017f130c73317ed2e548518 100644 (file)
@@ -24,8 +24,6 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
-#include <linux/bootmem.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <asm/bootinfo.h>
 #include <asm/mc146818-time.h>
index f7b91d3a371dd07e7c69c38290f1f0c77b5b3a0a..7e3ea77668224ff3da82eed073721a203b1a0588 100644 (file)
@@ -119,25 +119,36 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 EXPORT_SYMBOL(__flush_anon_page);
 
-void __update_cache(struct vm_area_struct *vma, unsigned long address,
-       pte_t pte)
+static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
 {
        struct page *page;
-       unsigned long pfn, addr;
-       int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+       unsigned long pfn = pte_pfn(pteval);
 
-       pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
+
        page = pfn_to_page(pfn);
        if (page_mapping(page) && Page_dcache_dirty(page)) {
-               addr = (unsigned long) page_address(page);
-               if (exec || pages_do_alias(addr, address & PAGE_MASK))
-                       flush_data_cache_page(addr);
+               unsigned long page_addr = (unsigned long) page_address(page);
+
+               if (!cpu_has_ic_fills_f_dc ||
+                   pages_do_alias(page_addr, address & PAGE_MASK))
+                       flush_data_cache_page(page_addr);
                ClearPageDcacheDirty(page);
        }
 }
 
+void set_pte_at(struct mm_struct *mm, unsigned long addr,
+       pte_t *ptep, pte_t pteval)
+{
+       if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
+               if (pte_present(pteval))
+                       mips_flush_dcache_from_pte(pteval, addr);
+       }
+
+       set_pte(ptep, pteval);
+}
+
 unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
index 0c35dee0a2150b4ca284a03dd3637cad6eb77b86..8fddd2cdbff72920cb7a9c73154a7b95413fb306 100644 (file)
@@ -35,13 +35,19 @@ fw_memblock_t * __init fw_getmdesc(int eva)
        /* otherwise look in the environment */
 
        memsize_str = fw_getenv("memsize");
-       if (memsize_str)
-               tmp = kstrtol(memsize_str, 0, &memsize);
+       if (memsize_str) {
+               tmp = kstrtoul(memsize_str, 0, &memsize);
+               if (tmp)
+                       pr_warn("Failed to read the 'memsize' env variable.\n");
+       }
        if (eva) {
        /* Look for ememsize for EVA */
                ememsize_str = fw_getenv("ememsize");
-               if (ememsize_str)
-                       tmp = kstrtol(ememsize_str, 0, &ememsize);
+               if (ememsize_str) {
+                       tmp = kstrtoul(ememsize_str, 0, &ememsize);
+                       if (tmp)
+                               pr_warn("Failed to read the 'ememsize' env variable.\n");
+               }
        }
        if (!memsize && !ememsize) {
                pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
index 941744aabb513dd3ad69fc3a44c4dbd804e9f744..f914c753de21dcc9982a3f13ec007db9069d42a4 100644 (file)
@@ -51,7 +51,7 @@ static inline void sec_int_dispatch(void)  { do_IRQ(MSP_INT_SEC);  }
  * the range 40-71.
  */
 
-asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
+asmlinkage void plat_irq_dispatch(void)
 {
        u32 pending;
 
index 329d7fdd0a6ab7be8b9e203ac53b1870c1d37b0c..b9615ba5b083a6ddeea76b878925396561b5fb42 100644 (file)
@@ -101,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
        ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
        if (!ri)
                return NULL;
-       page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
+       page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
        if (!page)
                goto err_out;
        atomic_set(&ri->use_count, 1);
@@ -135,12 +135,12 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
        unsigned long align_pages = HPT_ALIGN_PAGES;
 
-       VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+       VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
        /* Old CPUs require HPT aligned on a multiple of its size */
        if (!cpu_has_feature(CPU_FTR_ARCH_206))
                align_pages = nr_pages;
-       return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
+       return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
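
The switch from get_order() to order_base_2() matters because get_order() converts a size in bytes into a page-allocation order, while cma_alloc() expects its alignment argument to already be an order of pages; order_base_2() of the page count gives that directly. A rough userspace analogue of the two conversions (assumes a 4 KiB page size and simplified helpers, not the kernel implementations):

/*
 * Illustration only: simplified userspace analogues of get_order() and
 * order_base_2(), assuming PAGE_SHIFT == 12.  Shows why passing a page
 * count to get_order() under-aligns the CMA request.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned int ilog2_ceil(unsigned long n)	/* ~ order_base_2(n) */
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

static unsigned int get_order_bytes(unsigned long size)	/* ~ get_order(size) */
{
	return ilog2_ceil((size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT);
}

int main(void)
{
	unsigned long nr_pages = 16;	/* hypothetical page count */

	printf("get_order(%lu)    = %u  (interprets the count as bytes)\n",
	       nr_pages, get_order_bytes(nr_pages));
	printf("order_base_2(%lu) = %u  (alignment order in pages)\n",
	       nr_pages, ilog2_ceil(nr_pages));
	return 0;
}
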
 
index 3802d2d3a18d7cf4abf5604d6f66e01b6e34424d..940ac49198db1dd406b99944f51183df869cd420 100644 (file)
 #define __NR_sched_setattr     345
 #define __NR_sched_getattr     346
 #define __NR_renameat2         347
-#define NR_syscalls 348
+#define __NR_seccomp           348
+#define __NR_getrandom         349
+#define __NR_memfd_create      350
+#define NR_syscalls 351
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index 45cdb37aa6f812813c69d08102e4e1f521e5df1f..faf6caa510dcedb862ad65be7108e1042a754939 100644 (file)
@@ -214,3 +214,6 @@ COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, fla
 COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
 COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags);
 COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags);
+COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs)
+COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
+COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
index 633ca7504536c10a517667b5f582653f763390e3..22aac5885ba23eca4590d9219aefc1fab34d9325 100644 (file)
@@ -2060,6 +2060,13 @@ void s390_reset_system(void (*func)(void *), void *data)
        S390_lowcore.program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
 
+       /*
+        * Clear subchannel ID and number to signal new kernel that no CCW or
+        * SCSI IPL has been done (for kexec and kdump)
+        */
+       S390_lowcore.subchannel_id = 0;
+       S390_lowcore.subchannel_nr = 0;
+
        /* Store status at absolute zero */
        store_status();
 
index ae1d5be7dd885d4d20f7b1f37d56ebbab9f92526..82bc113e8c1dd3718cf663a88d7bc9a2c9c97fb4 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/stddef.h>
 #include <linux/unistd.h>
 #include <linux/ptrace.h>
+#include <linux/random.h>
 #include <linux/user.h>
 #include <linux/tty.h>
 #include <linux/ioport.h>
@@ -61,6 +62,7 @@
 #include <asm/diag.h>
 #include <asm/os_info.h>
 #include <asm/sclp.h>
+#include <asm/sysinfo.h>
 #include "entry.h"
 
 /*
@@ -766,6 +768,7 @@ static void __init setup_hwcaps(void)
 #endif
 
        get_cpu_id(&cpu_id);
+       add_device_randomness(&cpu_id, sizeof(cpu_id));
        switch (cpu_id.machine) {
        case 0x9672:
 #if !defined(CONFIG_64BIT)
@@ -803,6 +806,19 @@ static void __init setup_hwcaps(void)
        }
 }
 
+/*
+ * Add system information as device randomness
+ */
+static void __init setup_randomness(void)
+{
+       struct sysinfo_3_2_2 *vmms;
+
+       vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
+       if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+               add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
+       free_page((unsigned long) vmms);
+}
+
 /*
  * Setup function called from init/main.c just after the banner
  * was printed.
@@ -901,6 +917,9 @@ void __init setup_arch(char **cmdline_p)
 
        /* Setup zfcpdump support */
        setup_zfcpdump();
+
+       /* Add system specific data to the random pool */
+       setup_randomness();
 }
 
 #ifdef CONFIG_32BIT
index fe5cdf29a001be0e52fd0436bbbabd3603d37f66..6fe886ac2db596a81f67a80e69e4f5f54609c45f 100644 (file)
@@ -356,3 +356,6 @@ SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
 SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
 SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
 SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
+SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
+SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
+SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
index 453fa5c09550c592a9dae8c6dc7c24ebbbded80f..b319846ad97f340465bf6231720acd8445f1e781 100644 (file)
@@ -172,6 +172,7 @@ menu "System type"
 #
 config CPU_SH2
        bool
+       select SH_INTC
 
 config CPU_SH2A
        bool
@@ -182,6 +183,7 @@ config CPU_SH3
        bool
        select CPU_HAS_INTEVT
        select CPU_HAS_SR_RB
+       select SH_INTC
        select SYS_SUPPORTS_SH_TMU
 
 config CPU_SH4
@@ -189,6 +191,7 @@ config CPU_SH4
        select CPU_HAS_INTEVT
        select CPU_HAS_SR_RB
        select CPU_HAS_FPU if !CPU_SH4AL_DSP
+       select SH_INTC
        select SYS_SUPPORTS_SH_TMU
        select SYS_SUPPORTS_HUGETLBFS if MMU
 
index 572460175ba509d9317e408e96975cf10780eef7..7c492ed9087b24510a150dad80ededaaa9873b85 100644 (file)
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 10
+#define KVM_NR_VAR_MTRR 8
 
 #define ASYNC_PF_PER_VCPU 64
 
index 47c410d99f5daaae8a7d4f3f00093a6496e099ec..4b0e1dfa222627cd5a36fb7b8f51234508822536 100644 (file)
@@ -683,7 +683,7 @@ END(syscall_badsys)
 sysenter_badsys:
        movl $-ENOSYS,%eax
        jmp sysenter_after_call
-END(syscall_badsys)
+END(sysenter_badsys)
        CFI_ENDPROC
 
 .macro FIXUP_ESPFIX_STACK
index 56657b0bb3bb14f14b76fdcd99a746598ce5e8b4..03954f7900f522a496d8249174ec63cc760aa140 100644 (file)
@@ -1491,9 +1491,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                        goto exception;
                break;
        case VCPU_SREG_CS:
-               if (in_task_switch && rpl != dpl)
-                       goto exception;
-
                if (!(seg_desc.type & 8))
                        goto exception;
 
@@ -4394,8 +4391,11 @@ done_prefixes:
 
        ctxt->execute = opcode.u.execute;
 
+       if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+               return EMULATION_FAILED;
+
        if (unlikely(ctxt->d &
-                    (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+                    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
                /*
                 * These are copied unconditionally here, and checked unconditionally
                 * in x86_emulate_insn.
@@ -4406,9 +4406,6 @@ done_prefixes:
                if (ctxt->d & NotImpl)
                        return EMULATION_FAILED;
 
-               if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
-                       return EMULATION_FAILED;
-
                if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
                        ctxt->op_bytes = 8;
 
index 1fe33987de027f73c2997960198978a557bb1027..ee61c36d64f84dd944873ec0acf80b4f8ad06da7 100644 (file)
@@ -49,7 +49,13 @@ void leave_mm(int cpu)
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
-               trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+               /*
+                * This gets called in the idle path where RCU
+                * functions differently.  Tracing normally
+                * uses RCU, so we have to call the tracepoint
+                * specially here.
+                */
+               trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -174,7 +180,7 @@ void flush_tlb_current_task(void)
  *
  * This is in units of pages.
  */
-unsigned long tlb_single_page_flush_ceiling = 33;
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
index fc3df47fca3547b46d71073e3e5b429c0b3d29e4..f1fef74e503cd94716dfe090ab397d83118065f5 100644 (file)
@@ -24,8 +24,8 @@
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <linux/tegra-powergate.h>
 #include <linux/regulator/consumer.h>
+#include <soc/tegra/pmc.h>
 #include "ahci.h"
 
 #define SATA_CONFIGURATION_0                           0x180
index bc281115490ba5064e5495e0a48883b1d14f744f..c6962300b93c7c6292ed91d62611db6678af1f49 100644 (file)
@@ -344,7 +344,7 @@ static struct ata_port_operations xgene_ahci_ops = {
 };
 
 static const struct ata_port_info xgene_ahci_port_info = {
-       .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+       .flags = AHCI_FLAG_COMMON,
        .pio_mask = ATA_PIO4,
        .udma_mask = ATA_UDMA6,
        .port_ops = &xgene_ahci_ops,
@@ -480,7 +480,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
        /* Configure the host controller */
        xgene_ahci_hw_init(hpriv);
 
-       hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+       hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
 
        rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
        if (rc)
index dbdc5d32343f53f7ed96a94cf2d6d69258a54237..f3e7b9f894cd131daea921fbd0e149fcaccadaeb 100644 (file)
@@ -4228,7 +4228,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
        { "Crucial_CT???M500SSD*",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
        { "Micron_M550*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
-       { "Crucial_CT???M550SSD*",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT*M550SSD*",        NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
 
        /*
         * Some WD SATA-I drives spin up and down erratically when the link
index 2578fc16960ad1d1671ab554cbbc40d6f5810e60..1a24a5dc39400705e709853908070ff9a4e3f45b 100644 (file)
@@ -360,7 +360,7 @@ static int pata_s3c_wait_after_reset(struct ata_link *link,
 /*
  * pata_s3c_bus_softreset - PATA device software reset
  */
-static unsigned int pata_s3c_bus_softreset(struct ata_port *ap,
+static int pata_s3c_bus_softreset(struct ata_port *ap,
                unsigned long deadline)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
index 4e006d74bef8c0599e11ddda98c8f37a90c839bb..7f4cb76ed9fac1c6ff51a5f91ad4df0468ac5096 100644 (file)
@@ -585,7 +585,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
  *     Note: Original code is ata_bus_softreset().
  */
 
-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
                                       unsigned long deadline)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -599,9 +599,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
        udelay(20);
        out_be32(ioaddr->ctl_addr, ap->ctl);
 
-       scc_wait_after_reset(&ap->link, devmask, deadline);
-
-       return 0;
+       return scc_wait_after_reset(&ap->link, devmask, deadline);
 }
 
 /**
@@ -618,7 +616,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
 {
        struct ata_port *ap = link->ap;
        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-       unsigned int devmask = 0, err_mask;
+       unsigned int devmask = 0;
+       int rc;
        u8 err;
 
        DPRINTK("ENTER\n");
@@ -634,9 +633,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
 
        /* issue bus reset */
        DPRINTK("about to softreset, devmask=%x\n", devmask);
-       err_mask = scc_bus_softreset(ap, devmask, deadline);
-       if (err_mask) {
-               ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
+       rc = scc_bus_softreset(ap, devmask, deadline);
+       if (rc) {
+               ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
                return -EIO;
        }
 
index 3266f8ff931133e88ab419bc08ddad3a96bf1314..6f550d9e7a2dbd1258a2ba833bcda6fb97a003a2 100644 (file)
@@ -662,7 +662,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
                }
                if (e->num_vcs && vc >= e->num_vcs) {
                        dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
-                                       port, node_xp);
+                                       vc, node_xp);
                        return -EINVAL;
                }
                valid = 1;
index 4222cb2aa96aa18e66a75f76d0d56b3feb346982..7bb9d65d9a2c9c99f12314c537fa8cf00c0929ef 100644 (file)
@@ -29,7 +29,7 @@
 EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
 EXPORT_TRACEPOINT_SYMBOL(fence_emit);
 
-/**
+/*
  * fence context counter: each execution context should have its own
  * fence context, this allows checking if fences belong to the same
  * context or not. One device can have multiple separate contexts,
index f0a43646a2f3f4b36ddd64af33d39920c3cb006a..5abe943e34042df45d8d1f643b0334e6ceb19748 100644 (file)
@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
  */
 static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
 {
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        list_del(&entry->list);
        spin_unlock_irq(&__efivars->lock);
@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
        const struct efivar_operations *ops = __efivars->ops;
        efi_status_t status;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        status = ops->set_variable(entry->var.VariableName,
                                   &entry->var.VendorGuid,
@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
        int strsize1, strsize2;
        bool found = false;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        list_for_each_entry_safe(entry, n, head, list) {
                strsize1 = ucs2_strsize(name, 1024);
@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
        const struct efivar_operations *ops = __efivars->ops;
        efi_status_t status;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        status = ops->get_variable(entry->var.VariableName,
                                   &entry->var.VendorGuid,
index 41b2f40578d5e78e88ab1afdb1d5f066a3913658..954b9f6b0ef82eb74437e0efce4781b53ba58e0f 100644 (file)
@@ -90,7 +90,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
        struct gpio_desc **dr;
        struct gpio_desc *desc;
 
-       dr = devres_alloc(devm_gpiod_release, sizeof(struct gpiod_desc *),
+       dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
                          GFP_KERNEL);
        if (!dr)
                return ERR_PTR(-ENOMEM);
index ff9eb911b5e4267295404d6e6a5443571edeb166..fa945ec9ccff52ab54f34d6d535e8df4d4dc2fd1 100644 (file)
@@ -407,9 +407,27 @@ static int lp_gpio_runtime_resume(struct device *dev)
        return 0;
 }
 
+static int lp_gpio_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lp_gpio *lg = platform_get_drvdata(pdev);
+       unsigned long reg;
+       int i;
+
+       /* on some hardware suspend clears input sensing, re-enable it here */
+       for (i = 0; i < lg->chip.ngpio; i++) {
+               if (gpiochip_is_requested(&lg->chip, i) != NULL) {
+                       reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
+                       outl(inl(reg) & ~GPINDIS_BIT, reg);
+               }
+       }
+       return 0;
+}
+
 static const struct dev_pm_ops lp_gpio_pm_ops = {
        .runtime_suspend = lp_gpio_runtime_suspend,
        .runtime_resume = lp_gpio_runtime_resume,
+       .resume = lp_gpio_resume,
 };
 
 static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
index c3145f91fda32f62b60e3dde98c26b6830f5095e..31ad5df5dbc95d58c395dc27f865b0eb0ce036bc 100644 (file)
@@ -95,6 +95,9 @@ struct zynq_gpio {
        struct clk *clk;
 };
 
+static struct irq_chip zynq_gpio_level_irqchip;
+static struct irq_chip zynq_gpio_edge_irqchip;
+
 /**
  * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
  * for a given pin in the GPIO device
@@ -410,6 +413,15 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
                       gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
        writel_relaxed(int_any,
                       gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+
+       if (type & IRQ_TYPE_LEVEL_MASK) {
+               __irq_set_chip_handler_name_locked(irq_data->irq,
+                       &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
+       } else {
+               __irq_set_chip_handler_name_locked(irq_data->irq,
+                       &zynq_gpio_edge_irqchip, handle_level_irq, NULL);
+       }
+
        return 0;
 }
 
@@ -424,9 +436,21 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
 }
 
 /* irq chip descriptor */
-static struct irq_chip zynq_gpio_irqchip = {
+static struct irq_chip zynq_gpio_level_irqchip = {
        .name           = DRIVER_NAME,
        .irq_enable     = zynq_gpio_irq_enable,
+       .irq_eoi        = zynq_gpio_irq_ack,
+       .irq_mask       = zynq_gpio_irq_mask,
+       .irq_unmask     = zynq_gpio_irq_unmask,
+       .irq_set_type   = zynq_gpio_set_irq_type,
+       .irq_set_wake   = zynq_gpio_set_wake,
+       .flags          = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+};
+
+static struct irq_chip zynq_gpio_edge_irqchip = {
+       .name           = DRIVER_NAME,
+       .irq_enable     = zynq_gpio_irq_enable,
+       .irq_ack        = zynq_gpio_irq_ack,
        .irq_mask       = zynq_gpio_irq_mask,
        .irq_unmask     = zynq_gpio_irq_unmask,
        .irq_set_type   = zynq_gpio_set_irq_type,
@@ -469,10 +493,6 @@ static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
                                                        offset);
                                generic_handle_irq(gpio_irq);
                        }
-
-                       /* clear IRQ in HW */
-                       writel_relaxed(int_sts, gpio->base_addr +
-                                       ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
                }
        }
 
@@ -610,14 +630,14 @@ static int zynq_gpio_probe(struct platform_device *pdev)
                writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
                               ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
 
-       ret = gpiochip_irqchip_add(chip, &zynq_gpio_irqchip, 0,
-                                  handle_simple_irq, IRQ_TYPE_NONE);
+       ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0,
+                                  handle_level_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(&pdev->dev, "Failed to add irq chip\n");
                goto err_rm_gpiochip;
        }
 
-       gpiochip_set_chained_irqchip(chip, &zynq_gpio_irqchip, irq,
+       gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, irq,
                                     zynq_gpio_irqhandler);
 
        pm_runtime_set_active(&pdev->dev);
index 7cfdc22789053eacdb9b6dfc52f22d57ee00b41f..604dbe60bdee1abdddb8d706947391987628ab9e 100644 (file)
@@ -307,7 +307,5 @@ void of_gpiochip_add(struct gpio_chip *chip)
 void of_gpiochip_remove(struct gpio_chip *chip)
 {
        gpiochip_remove_pin_ranges(chip);
-
-       if (chip->of_node)
-               of_node_put(chip->of_node);
+       of_node_put(chip->of_node);
 }
index 4c761dcea97217e9495ed16a6403250ac04a7ff3..05c01ea8529487c9d37f32cd31237291728c82c9 100644 (file)
@@ -99,6 +99,7 @@ static struct ast_vbios_dclk_info dclk_table[] = {
        {0x25, 0x65, 0x80},                                     /* 16: VCLK88.75    */
        {0x77, 0x58, 0x80},                                     /* 17: VCLK119      */
        {0x32, 0x67, 0x80},                                 /* 18: VCLK85_5     */
+       {0x6a, 0x6d, 0x80},                                     /* 19: VCLK97_75        */
 };
 
 static struct ast_vbios_stdtable vbios_stdtable[] = {
index fa2be249999c70711e1b19cbe0df92fd5e081631..90e773019eac78f0a247f840eb5ada047a0b5573 100644 (file)
@@ -4696,8 +4696,9 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
                return -EINVAL;
 
        /* overflow checks for 32bit size calculations */
+       /* NOTE: DIV_ROUND_UP() can overflow */
        cpp = DIV_ROUND_UP(args->bpp, 8);
-       if (cpp > 0xffffffffU / args->width)
+       if (!cpp || cpp > 0xffffffffU / args->width)
                return -EINVAL;
        stride = cpp * args->width;
        if (args->height > 0xffffffffU / stride)
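
DIV_ROUND_UP(args->bpp, 8) is evaluated in 32-bit arithmetic, so a bpp value within 7 of UINT_MAX makes the rounded-up sum wrap and yields cpp == 0; without the added !cpp test the later division-based guards end up deriving a zero divisor for the stride check. A self-contained sketch of the same guard pattern (hypothetical struct and function names, not the DRM ioctl itself):

/*
 * Sketch of the 32-bit overflow-guard pattern above, outside the kernel.
 * Names and the -1 error return are hypothetical, not DRM API.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

struct dumb_args {
	uint32_t width;
	uint32_t height;
	uint32_t bpp;
};

static int check_dumb_size(const struct dumb_args *args)
{
	uint32_t cpp, stride;

	if (!args->width || !args->height || !args->bpp)
		return -1;

	/* DIV_ROUND_UP() wraps to 0 when bpp is within 7 of UINT32_MAX */
	cpp = DIV_ROUND_UP(args->bpp, 8);
	if (!cpp || cpp > 0xffffffffU / args->width)
		return -1;		/* cpp * width would overflow */

	stride = cpp * args->width;
	if (args->height > 0xffffffffU / stride)
		return -1;		/* stride * height would overflow */

	return 0;
}

int main(void)
{
	struct dumb_args bad = { .width = 4, .height = 4, .bpp = 0xfffffff9u };

	/* cpp wraps to 0 here; the !cpp test rejects it before stride (0) is used as a divisor */
	printf("%d\n", check_dumb_size(&bad));
	return 0;
}
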
index ec96f9a9724c809363cd2cd88baf2cd7a31b9a3e..e27cdbe9d524a79dbd4168febc15de0660e82d9f 100644 (file)
@@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
        return true;
 }
 
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       dev_priv->long_hpd_port_mask = 0;
+       dev_priv->short_hpd_port_mask = 0;
+       dev_priv->hpd_event_bits = 0;
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       cancel_work_sync(&dev_priv->dig_port_work);
+       cancel_work_sync(&dev_priv->hotplug_work);
+       cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_encoder *encoder;
+
+       drm_modeset_lock_all(dev);
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+               if (intel_encoder->suspend)
+                       intel_encoder->suspend(intel_encoder);
+       }
+       drm_modeset_unlock_all(dev);
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)
                flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
                intel_runtime_pm_disable_interrupts(dev);
+               intel_hpd_cancel_work(dev_priv);
+
+               intel_suspend_encoders(dev_priv);
 
                intel_suspend_gt_powersave(dev);
 
index 4412f6a4383bd2af5df6d5119e3f0f044bed59e5..7a830eac5ba30a7859eb65313d9ee567c5d324eb 100644 (file)
@@ -1458,7 +1458,7 @@ struct drm_i915_private {
                } hpd_mark;
        } hpd_stats[HPD_NUM_PINS];
        u32 hpd_event_bits;
-       struct timer_list hotplug_reenable_timer;
+       struct delayed_work hotplug_reenable_work;
 
        struct i915_fbc fbc;
        struct i915_drrs drrs;
@@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
 extern void intel_console_resume(struct work_struct *work);
 
index 390ccc2a3096670d636af14a0164dd237a43d66c..0050ee9470f196748621b96ffd001cd3e4cd85d8 100644 (file)
@@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
          * some connectors */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
-               mod_timer(&dev_priv->hotplug_reenable_timer,
-                         jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+               mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+                                msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }
 
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
                drm_kms_helper_hotplug_event(dev);
 }
 
-static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
-{
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
-}
-
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
        if (!dev_priv)
                return;
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        gen8_irq_reset(dev);
 }
 
@@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 
        I915_WRITE(VLV_MASTER_IER, 0);
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
 
@@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
        if (!dev_priv)
                return;
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        ironlake_irq_reset(dev);
 }
 
@@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
        if (!dev_priv)
                return;
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
@@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
        I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable(unsigned long data)
+static void intel_hpd_irq_reenable(struct work_struct *work)
 {
-       struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv),
+                            hotplug_reenable_work.work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        unsigned long irqflags;
        int i;
 
+       intel_runtime_pm_get(dev_priv);
+
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
                struct drm_connector *connector;
@@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data)
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 void intel_irq_init(struct drm_device *dev)
@@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev)
        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
-       setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
-                   (unsigned long) dev_priv);
+       INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+                         intel_hpd_irq_reenable);
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
index 2efaf8e8d9c49b6ca47491e8d5693f2072c8a333..e8abfce40976c74a2f9cff93c4d93c897eda112e 100644 (file)
@@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)
                goto out;
        }
 
+       drm_modeset_acquire_init(&ctx, 0);
+
        /* for pre-945g platforms use load detect */
        if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
                if (intel_crt_detect_ddc(connector))
                        status = connector_status_connected;
                else
                        status = intel_crt_load_detect(crt);
-               intel_release_load_detect_pipe(connector, &tmp, &ctx);
+               intel_release_load_detect_pipe(connector, &tmp);
        } else
                status = connector_status_unknown;
 
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
 out:
        intel_display_power_put(dev_priv, power_domain);
        return status;
index 018fb7222f60ecee10fcb0f2db65e89d29c42e0f..d074d704f458f3b8f81774391d2a6459e416f360 100644 (file)
@@ -8462,8 +8462,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);
 
-       drm_modeset_acquire_init(ctx, 0);
-
 retry:
        ret = drm_modeset_lock(&config->connection_mutex, ctx);
        if (ret)
@@ -8502,10 +8500,14 @@ retry:
                i++;
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;
-               if (!possible_crtc->enabled) {
-                       crtc = possible_crtc;
-                       break;
-               }
+               if (possible_crtc->enabled)
+                       continue;
+               /* This can occur when applying the pipe A quirk on resume. */
+               if (to_intel_crtc(possible_crtc)->new_enabled)
+                       continue;
+
+               crtc = possible_crtc;
+               break;
        }
 
        /*
@@ -8574,15 +8576,11 @@ fail_unlock:
                goto retry;
        }
 
-       drm_modeset_drop_locks(ctx);
-       drm_modeset_acquire_fini(ctx);
-
        return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-                                   struct intel_load_detect_pipe *old,
-                                   struct drm_modeset_acquire_ctx *ctx)
+                                   struct intel_load_detect_pipe *old)
 {
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
@@ -8606,17 +8604,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                        drm_framebuffer_unreference(old->release_fb);
                }
 
-               goto unlock;
                return;
        }
 
        /* Switch crtc and encoder back off if necessary */
        if (old->dpms_mode != DRM_MODE_DPMS_ON)
                connector->funcs->dpms(connector, old->dpms_mode);
-
-unlock:
-       drm_modeset_drop_locks(ctx);
-       drm_modeset_acquire_fini(ctx);
 }
 
 static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11700,8 +11693,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        };
        const struct drm_rect clip = {
                /* integer pixels */
-               .x2 = intel_crtc->config.pipe_src_w,
-               .y2 = intel_crtc->config.pipe_src_h,
+               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
        };
        bool visible;
        int ret;
@@ -12659,7 +12652,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
        struct intel_connector *connector;
        struct drm_connector *crt = NULL;
        struct intel_load_detect_pipe load_detect_temp;
-       struct drm_modeset_acquire_ctx ctx;
+       struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
 
        /* We can't just switch on the pipe A, we need to set things up with a
         * proper mode and output configuration. As a gross hack, enable pipe A
@@ -12676,10 +12669,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
        if (!crt)
                return;
 
-       if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
-               intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
-
-
+       if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+               intel_release_load_detect_pipe(crt, &load_detect_temp);
 }
 
 static bool
@@ -13112,7 +13103,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
         * experience fancy races otherwise.
         */
        drm_irq_uninstall(dev);
-       cancel_work_sync(&dev_priv->hotplug_work);
+       intel_hpd_cancel_work(dev_priv);
        dev_priv->pm._irqs_disabled = true;
 
        /*
index ee3942f0b0683f5747c53842cb7f9ae0dc6745fa..67cfed6d911ae99eae257a67b8e05ea64ba145d1 100644 (file)
@@ -3553,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
        if (WARN_ON(!intel_encoder->base.crtc))
                return;
 
+       if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+               return;
+
        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
                return;
@@ -4003,6 +4006,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_dig_port);
 }
 
+static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+       if (!is_edp(intel_dp))
+               return;
+
+       edp_panel_vdd_off_sync(intel_dp);
+}
+
 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
        intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
@@ -4037,15 +4050,21 @@ bool
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
        struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
+       enum intel_display_power_domain power_domain;
+       bool ret = true;
+
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
        DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
                      long_hpd ? "long" : "short");
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        if (long_hpd) {
                if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
                        goto mst_fail;
@@ -4061,8 +4080,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
        } else {
                if (intel_dp->is_mst) {
-                       ret = intel_dp_check_mst_status(intel_dp);
-                       if (ret == -EINVAL)
+                       if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
                                goto mst_fail;
                }
 
@@ -4076,7 +4094,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                }
        }
-       return false;
+       ret = false;
+       goto put_power;
 mst_fail:
        /* if we were in MST mode, and device is not there get out of MST mode */
        if (intel_dp->is_mst) {
@@ -4084,7 +4103,10 @@ mst_fail:
                intel_dp->is_mst = false;
                drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
        }
-       return true;
+put_power:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -4722,6 +4744,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
        intel_encoder->disable = intel_disable_dp;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
+       intel_encoder->suspend = intel_dp_encoder_suspend;
        if (IS_CHERRYVIEW(dev)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
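intel_dp_hpd_pulse() above now holds a display power reference for the whole pulse handler, so the port registers stay accessible even when the port's power well would otherwise be off, and every exit path funnels through put_power. A condensed sketch of that bracketing, using only symbols visible in the hunk (illustrative, not a complete handler):

    power_domain = intel_display_port_power_domain(intel_encoder);
    intel_display_power_get(dev_priv, power_domain);

    /* ... classify the pulse, probe the sink, handle MST vs. SST ... */

    ret = false;                               /* or true on the mst_fail path */
    intel_display_power_put(dev_priv, power_domain);
    return ret;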
index 4b2664bd5b81d0afa2b59ae403c850549e2e5b10..b8c8bbd8e5f990b256ffe20f853395dbe6e784a2 100644 (file)
@@ -153,6 +153,12 @@ struct intel_encoder {
         * be set correctly before calling this function. */
        void (*get_config)(struct intel_encoder *,
                           struct intel_crtc_config *pipe_config);
+       /*
+        * Called during system suspend after all pending requests for the
+        * encoder are flushed (for example for DP AUX transactions) and
+        * device interrupts are disabled.
+        */
+       void (*suspend)(struct intel_encoder *);
        int crtc_mask;
        enum hpd_pin hpd_pin;
 };
@@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                                struct intel_load_detect_pipe *old,
                                struct drm_modeset_acquire_ctx *ctx);
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-                                   struct intel_load_detect_pipe *old,
-                                   struct drm_modeset_acquire_ctx *ctx);
+                                   struct intel_load_detect_pipe *old);
 int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                               struct drm_i915_gem_object *obj,
                               struct intel_engine_cs *pipelined);
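The ->suspend hook documented above implies a caller in the driver's suspend path that is not part of these hunks. A hypothetical sketch of such a caller, for illustration only; the function name, its placement and the list iteration are assumptions, and only the hook's signature comes from this diff:

    /* hypothetical caller, not taken from this patch set */
    static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
    {
            struct drm_device *dev = dev_priv->dev;
            struct intel_encoder *encoder;

            list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
                    if (encoder->suspend)
                            encoder->suspend(encoder);
    }

For eDP such a loop would reach intel_dp_encoder_suspend() in the earlier hunk and sync any pending panel-VDD off work before the device is powered down.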
index e211eef4b7e4cac5a1eae5c88119537621f788a8..32186a656816e1e4de4c533cbf699e9886dda2d8 100644 (file)
@@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force)
                struct intel_load_detect_pipe tmp;
                struct drm_modeset_acquire_ctx ctx;
 
+               drm_modeset_acquire_init(&ctx, 0);
+
                if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
                        type = intel_tv_detect_type(intel_tv, connector);
-                       intel_release_load_detect_pipe(connector, &tmp, &ctx);
+                       intel_release_load_detect_pipe(connector, &tmp);
                } else
                        return connector_status_unknown;
+
+               drm_modeset_drop_locks(&ctx);
+               drm_modeset_acquire_fini(&ctx);
        } else
                return connector->status;
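The TV detect hunk above also shows the new ownership rule for load detection: the caller creates and tears down the acquire context, and intel_release_load_detect_pipe() no longer takes one. A condensed sketch of the sequence, mirroring the code above (the probe mode is whatever the caller already prepared):

    struct intel_load_detect_pipe tmp;
    struct drm_modeset_acquire_ctx ctx;

    drm_modeset_acquire_init(&ctx, 0);
    if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx))
            intel_release_load_detect_pipe(connector, &tmp);
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);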
 
index 74cebb51e8c285e23475c6df40ae64a271857b9f..c6c80ea28c35809dbe63bb22aec0a50da9f4d0c6 100644 (file)
@@ -397,6 +397,7 @@ static void mdp4_crtc_prepare(struct drm_crtc *crtc)
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        DBG("%s", mdp4_crtc->name);
        /* make sure we hold a ref to mdp clks while setting up mode: */
+       drm_crtc_vblank_get(crtc);
        mdp4_enable(get_kms(crtc));
        mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
@@ -407,6 +408,7 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
        crtc_flush(crtc);
        /* drop the ref to mdp clk's that we got in prepare: */
        mdp4_disable(get_kms(crtc));
+       drm_crtc_vblank_put(crtc);
 }
 
 static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
index b447c01ad89c86c909b92679444843989df22c9e..26ee80db17af9fec7308f2e4defbac06f6dd6d3d 100644 (file)
@@ -974,12 +974,11 @@ static int msm_pdev_probe(struct platform_device *pdev)
 
        for (i = 0; i < ARRAY_SIZE(devnames); i++) {
                struct device *dev;
-               int ret;
 
                dev = bus_find_device_by_name(&platform_bus_type,
                                NULL, devnames[i]);
                if (!dev) {
-                       dev_info(master, "still waiting for %s\n", devnames[i]);
+                       dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
                        return -EPROBE_DEFER;
                }
 
index 9c5221ce391ab063cac34ebce428e46939828649..ab5bfd2d0ebf2e29897fa56e1df2c77c0f47aeb8 100644 (file)
@@ -143,7 +143,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
        if (ret) {
                dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
-               goto fail;
+               goto fail_unlock;
        }
 
        fbi = framebuffer_alloc(0, dev->dev);
index 099af483fdf0328925b23be1fc9c635a5c4c63ba..7acdaa5688b77e89f3afa786da19903d0d0c7b6d 100644 (file)
@@ -27,8 +27,8 @@ struct msm_iommu {
 static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
                unsigned long iova, int flags, void *arg)
 {
-       DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
-       return -ENOSYS;
+       pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+       return 0;
 }
 
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
index 0013ad0db9efceea452959db8a3bfe12fc4440ee..f77b7135ee4cde8e07b96c619f0bca8f94cdc37b 100644 (file)
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
        evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
        evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
        atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
-       si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
+       si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
        r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
        rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
        trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
index 022561e2870722bd56c22fa65ebaf5b9bf1987a8..d416bb2ff48dae0376b15e4789652799ff0638c3 100644 (file)
@@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
        WREG32_SMC(CG_THERMAL_CTRL, tmp);
 #endif
 
+       rdev->pm.dpm.thermal.min_temp = low_temp;
+       rdev->pm.dpm.thermal.max_temp = high_temp;
+
        return 0;
 }
 
index b625646bf3e242c11d21fd268c698a502604fae0..fa9565957f9d4516ab801e645b5ac2153db535ad 100644 (file)
@@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
        u32 mc_shared_chmap, mc_arb_ramcfg;
        u32 hdp_host_path_cntl;
        u32 tmp;
-       int i, j, k;
+       int i, j;
 
        switch (rdev->family) {
        case CHIP_BONAIRE:
@@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
                           (rdev->pdev->device == 0x130B) ||
                           (rdev->pdev->device == 0x130E) ||
                           (rdev->pdev->device == 0x1315) ||
+                          (rdev->pdev->device == 0x1318) ||
                           (rdev->pdev->device == 0x131B)) {
                        rdev->config.cik.max_cu_per_sh = 4;
                        rdev->config.cik.max_backends_per_se = 1;
@@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev)
                     rdev->config.cik.max_sh_per_se,
                     rdev->config.cik.max_backends_per_se);
 
+       rdev->config.cik.active_cus = 0;
        for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
                for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
-                       for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
-                               rdev->config.cik.active_cus +=
-                                       hweight32(cik_get_cu_active_bitmap(rdev, i, j));
-                       }
+                       rdev->config.cik.active_cus +=
+                               hweight32(cik_get_cu_active_bitmap(rdev, i, j));
                }
        }
 
@@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
@@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, 0);
 }
 
+/**
+ * cik_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
                             struct radeon_ring *ring,
                             struct radeon_semaphore *semaphore,
@@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
+       if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+               /* Prevent the PFP from running ahead of the semaphore wait */
+               radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               radeon_ring_write(ring, 0x0);
+       }
+
        return true;
 }
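The kernel-doc above describes the semaphore helper; what actually changes is that a wait on the GFX ring is now followed by PFP_SYNC_ME, so the prefetch parser (PFP) cannot run past a wait the micro engine (ME) has not yet satisfied. A minimal usage sketch, assuming a ring and semaphore already set up (signatures taken from this hunk):

    struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

    /* emit_wait = true: on the CIK GFX ring the helper now also emits
     * PACKET3_PFP_SYNC_ME so the PFP stalls until the wait completes */
    cik_semaphore_ring_emit(rdev, ring, semaphore, true);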
 
@@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
@@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_scratch_free(rdev, scratch);
                radeon_ib_free(rdev, &ib);
@@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        return 0;
 }
@@ -5732,20 +5749,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(0x15D8, 0);
        WREG32(0x15DC, 0);
 
-       /* empty context1-15 */
-       /* FIXME start with 4G, once using 2 level pt switch to full
-        * vm size space
-        */
+       /* restore context1-15 */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
        WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-                              rdev->gart.table_addr >> 12);
+                              rdev->vm_manager.saved_table_addr[i]);
                else
                        WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
-                              rdev->gart.table_addr >> 12);
+                              rdev->vm_manager.saved_table_addr[i]);
        }
 
        /* enable context1-15 */
@@ -5810,6 +5824,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
  */
 static void cik_pcie_gart_disable(struct radeon_device *rdev)
 {
+       unsigned i;
+
+       for (i = 1; i < 16; ++i) {
+               uint32_t reg;
+               if (i < 8)
+                       reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+               else
+                       reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+               rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+       }
+
        /* Disable all tables */
        WREG32(VM_CONTEXT0_CNTL, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
@@ -5958,14 +5983,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* update SH_MEM_* regs */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, VMID(vm->id));
 
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, SH_MEM_BASES >> 2);
        radeon_ring_write(ring, 0);
@@ -5976,7 +6001,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
 
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, 0);
@@ -5987,7 +6012,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* bits 0-15 are the VM contexts0-15 */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
@@ -9538,6 +9563,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
        int ret, i;
        u16 tmp16;
 
+       if (pci_is_root_bus(rdev->pdev->bus))
+               return;
+
        if (radeon_pcie_gen2 == 0)
                return;
 
@@ -9764,7 +9792,8 @@ static void cik_program_aspm(struct radeon_device *rdev)
                        if (orig != data)
                                WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
 
-                       if (!disable_clkreq) {
+                       if (!disable_clkreq &&
+                           !pci_is_root_bus(rdev->pdev->bus)) {
                                struct pci_dev *root = rdev->pdev->bus->self;
                                u32 lnkcap;
 
index bcf480510ac228af4bdf6e177eb0ccfb5951b24c..192278bc993ce581e42b9eb2e47a207533a45cd5 100644 (file)
@@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
@@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
        radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
        radeon_ring_write(ring, 1); /* number of DWs to follow */
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
@@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
index 4fedd14e670aeb33e07fadcf5ff2564c2cc2681c..dbca60c7d097664e381d9bd5c6539d140aa72c16 100644 (file)
@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        cp_me = 0xff;
        WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /*  */
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        return 0;
 }
index 478caefe0fef918011fadd78e1419ac92183ae54..afaba388c36dec2fcd6cd5b986a9ab4d70406385 100644 (file)
@@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
index 9ef8c38f2d6622c25a5b3e1e43a37064c8cf1776..8b58e11b64fa5a13611941b3aca6b04d2c1600a0 100644 (file)
@@ -1438,14 +1438,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
        return kv_enable_uvd_dpm(rdev, !gate);
 }
 
-static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
 {
        u8 i;
        struct radeon_vce_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
 
        for (i = 0; i < table->count; i++) {
-               if (table->entries[i].evclk >= 0) /* XXX */
+               if (table->entries[i].evclk >= evclk)
                        break;
        }
 
@@ -1468,7 +1468,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
                if (pi->caps_stable_p_state)
                        pi->vce_boot_level = table->count - 1;
                else
-                       pi->vce_boot_level = kv_get_vce_boot_level(rdev);
+                       pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
 
                ret = kv_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
@@ -2726,7 +2726,10 @@ int kv_dpm_init(struct radeon_device *rdev)
        pi->caps_sclk_ds = true;
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
-       pi->bapm_enable = true;
+       if (radeon_bapm == 0)
+               pi->bapm_enable = false;
+       else
+               pi->bapm_enable = true;
        pi->voltage_drop_t = 0;
        pi->caps_sclk_throttle_low_notification = false;
        pi->caps_fps = false; /* true? */
index 327b85f7fd0d45eb434a8f8232a206ac736ab0be..3faee58946dd0027b67d5d835af50f8de899ec93 100644 (file)
@@ -1271,7 +1271,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
                WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
                WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
                WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-                       rdev->gart.table_addr >> 12);
+                      rdev->vm_manager.saved_table_addr[i]);
        }
 
        /* enable context1-7 */
@@ -1303,6 +1303,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 
 static void cayman_pcie_gart_disable(struct radeon_device *rdev)
 {
+       unsigned i;
+
+       for (i = 1; i < 8; ++i) {
+               rdev->vm_manager.saved_table_addr[i] = RREG32(
+                       VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
+       }
+
        /* Disable all tables */
        WREG32(VM_CONTEXT0_CNTL, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
@@ -1505,7 +1512,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        cayman_cp_enable(rdev, true);
 
@@ -1547,7 +1554,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /*  */
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        /* XXX init other rings */
 
index 04b5940b89234893f3f3042e5a00f30f97e899cf..4c5ec44ff328b3c63203e4c7c92f5283126e65e9 100644 (file)
@@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev,
        if (fence) {
                r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
        }
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        return r;
 }
 
@@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 
@@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
        radeon_ring_write(ring, PACKET0(scratch, 0));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF) {
@@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[6] = PACKET2(0);
        ib.ptr[7] = PACKET2(0);
        ib.length_dw = 8;
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                goto free_ib;
index 58f0473aa73fba3cd1e62a786d6776c7a20ccd3b..67780374a65203fc57736510685f917b14d4ce89 100644 (file)
@@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,
        if (fence) {
                r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
        }
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        return r;
 }
 
index 75b30338c226f01ef263edcac893e16dec496643..1bc4704034ce9d90f9563f598bb6135b44878f06 100644 (file)
@@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 static void r300_errata(struct radeon_device *rdev)
index 802b19220a21358712dc304a894db892ec3537fb..2828605aef3fcdeb76b529e7b478bdc866d1a27f 100644 (file)
@@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
        radeon_ring_write(ring, rdev->config.r300.resync_scratch);
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
        radeon_ring_lock(rdev, ring, 8);
        radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_RB3D_DC_FINISH);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
 }
 
index c70a504d96af5a8ac6f8a9f8e40dd2039917e467..e616eb5f6e7a4076979650147acad69b9b40d567 100644 (file)
@@ -1812,7 +1812,6 @@ static void r600_gpu_init(struct radeon_device *rdev)
 {
        u32 tiling_config;
        u32 ramcfg;
-       u32 cc_rb_backend_disable;
        u32 cc_gc_shader_pipe_config;
        u32 tmp;
        int i, j;
@@ -1939,29 +1938,20 @@ static void r600_gpu_init(struct radeon_device *rdev)
        }
        tiling_config |= BANK_SWAPS(1);
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       tmp = R6XX_MAX_BACKENDS -
-               r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
-       if (tmp < rdev->config.r600.max_backends) {
-               rdev->config.r600.max_backends = tmp;
-       }
-
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
-       tmp = R6XX_MAX_PIPES -
-               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
-       if (tmp < rdev->config.r600.max_pipes) {
-               rdev->config.r600.max_pipes = tmp;
-       }
-       tmp = R6XX_MAX_SIMDS -
-               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
-       if (tmp < rdev->config.r600.max_simds) {
-               rdev->config.r600.max_simds = tmp;
-       }
        tmp = rdev->config.r600.max_simds -
                r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
        rdev->config.r600.active_simds = tmp;
 
        disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+       tmp = 0;
+       for (i = 0; i < rdev->config.r600.max_backends; i++)
+               tmp |= (1 << i);
+       /* if all the backends are disabled, fix it up here */
+       if ((disabled_rb_mask & tmp) == tmp) {
+               for (i = 0; i < rdev->config.r600.max_backends; i++)
+                       disabled_rb_mask &= ~(1 << i);
+       }
        tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
                                        R6XX_MAX_BACKENDS, disabled_rb_mask);
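A quick worked example of the fixup above, with made-up numbers: if max_backends is 4 the loop builds tmp = 0x0f, and if the CC_RB_BACKEND_DISABLE readback claims all four backends are disabled, those bits are cleared so r6xx_remap_render_backend() is never handed an empty set:

    u32 tmp = 0, disabled_rb_mask = 0x0f;   /* pretend the register disabled everything */
    unsigned i, max_backends = 4;

    for (i = 0; i < max_backends; i++)
            tmp |= (1 << i);                /* tmp == 0x0f */
    if ((disabled_rb_mask & tmp) == tmp)    /* all configured backends gone? */
            for (i = 0; i < max_backends; i++)
                    disabled_rb_mask &= ~(1 << i);   /* ends up 0x00 */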
@@ -2547,7 +2537,7 @@ int r600_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        cp_me = 0xff;
        WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2683,7 +2673,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
@@ -2753,6 +2743,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
        }
 }
 
+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool r600_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_ring *ring,
                              struct radeon_semaphore *semaphore,
@@ -2768,6 +2769,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
+       /* PFP_SYNC_ME packet only exists on 7xx+ */
+       if (emit_wait && (rdev->family >= CHIP_RV770)) {
+               /* Prevent the PFP from running ahead of the semaphore wait */
+               radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               radeon_ring_write(ring, 0x0);
+       }
+
        return true;
 }
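The same PFP-versus-ME race exists on the older CP, but PFP_SYNC_ME only exists from RV770 onward (the opcode is added to r600d.h later in this diff as 0x42, marked r7xx+ only), so the r600 path guards the extra packet by family. The guarded emission, restated as a sketch:

    if (emit_wait && rdev->family >= CHIP_RV770) {
            /* keep the PFP from racing past a wait the ME has not finished */
            radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
            radeon_ring_write(ring, 0x0);
    }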
 
@@ -2845,7 +2853,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
@@ -3165,7 +3173,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                goto free_ib;
index 4969cef44a1911b706e933fb8252397ddd893785..51fd98553eaf6c4a34e73c5baeecf95b94220e29 100644 (file)
@@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
        radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
@@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
index f94e7a9afe754c602e5e40ddf600f7bef73add6e..0c4a7d8d93e0465e9f2d094fe127f283602b3239 100644 (file)
                 */
 #              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
 #              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#define        PACKET3_PFP_SYNC_ME                             0x42 /* r7xx+ only */
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_FULL_CACHE_ENA       (1 << 20) /* r7xx+ only */
index 9e1732eb402c5ec831c9bbd514084e7ce30462c2..5f05b4c8433807bf26a1b0ef80fd31a2c25d9a36 100644 (file)
@@ -105,6 +105,7 @@ extern int radeon_vm_size;
 extern int radeon_vm_block_size;
 extern int radeon_deep_color;
 extern int radeon_use_pflipirq;
+extern int radeon_bapm;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -914,6 +915,8 @@ struct radeon_vm_manager {
        u64                             vram_base_offset;
        /* is vm enabled? */
        bool                            enabled;
+       /* for hw to save the PD addr on suspend/resume */
+       uint32_t                        saved_table_addr[RADEON_NUM_VM];
 };
 
 /*
@@ -967,7 +970,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
                  unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-                      struct radeon_ib *const_ib);
+                      struct radeon_ib *const_ib, bool hdp_flush);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -977,8 +980,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+                       bool hdp_flush);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+                              bool hdp_flush);
 void radeon_ring_undo(struct radeon_ring *ring);
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
index ee712c199b2573f5978e1a254094e35f11090af0..83f382e8e40e35cb380d4fc6fd835a7ddac9c01a 100644 (file)
@@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                 * the buffers used for read only, which doubles the range
                 * to 0 to 31. 32 is reserved for the kernel driver.
                 */
-               priority = (r->flags & 0xf) * 2 + !!r->write_domain;
+               priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+                          + !!r->write_domain;
 
                /* the first reloc of an UVD job is the msg and that must be in
                   VRAM, also put everything into VRAM on AGP cards to avoid
@@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                radeon_vce_note_usage(rdev);
 
        radeon_cs_sync_rings(parser);
-       r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+       r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
@@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib_idx != -1)) {
-               r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+               r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
-               r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+               r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        }
 
 out:
index c8ea050c8fa463b8ae5fa6768a5cf67cc924f119..6a219bcee66d2d8a5d7b8bf49f2344584a0a36fd 100644 (file)
@@ -1680,8 +1680,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
        radeon_save_bios_scratch_regs(rdev);
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
-       radeon_pm_suspend(rdev);
        radeon_suspend(rdev);
+       radeon_hpd_fini(rdev);
 
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1726,9 +1726,39 @@ retry:
                }
        }
 
-       radeon_pm_resume(rdev);
+       if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+               /* do dpm late init */
+               r = radeon_pm_late_init(rdev);
+               if (r) {
+                       rdev->pm.dpm_enabled = false;
+                       DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+               }
+       } else {
+               /* resume old pm late */
+               radeon_pm_resume(rdev);
+       }
+
+       /* init dig PHYs, disp eng pll */
+       if (rdev->is_atom_bios) {
+               radeon_atom_encoder_init(rdev);
+               radeon_atom_disp_eng_pll_init(rdev);
+               /* turn on the BL */
+               if (rdev->mode_info.bl_encoder) {
+                       u8 bl_level = radeon_get_backlight_level(rdev,
+                                                                rdev->mode_info.bl_encoder);
+                       radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+                                                  bl_level);
+               }
+       }
+       /* reset hpd state */
+       radeon_hpd_init(rdev);
+
        drm_helper_resume_force_mode(rdev->ddev);
 
+       /* set the power state here in case we are a PX system or headless */
+       if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+               radeon_pm_compute_clocks(rdev);
+
        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
        if (r) {
                /* bad news, how to tell it to userspace ? */
index 092d067f93e16534aca712779ce2aa5705069395..8df888908833f5d1a3e8d9f745edb5fe9b6143ea 100644 (file)
@@ -180,6 +180,7 @@ int radeon_vm_size = 8;
 int radeon_vm_block_size = -1;
 int radeon_deep_color = 0;
 int radeon_use_pflipirq = 2;
+int radeon_bapm = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -259,6 +260,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444);
 MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
 module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
 
+MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(bapm, radeon_bapm, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
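The new bapm option is plumbed like the other radeon module parameters (module_param_named with mode 0444), so with the usual module-parameter conventions it could be set as, for example (illustrative usage, not part of the patch):

    # kernel command line
    radeon.bapm=0        # force BAPM off
    # or when loading the module
    modprobe radeon bapm=1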
index 65b0c213488dcf084e08c2ddf95c92c28f367c83..5bf2c0a05827f57b459ca1be85e268af0bbf32ce 100644 (file)
@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * @rdev: radeon_device pointer
  * @ib: IB object to schedule
  * @const_ib: Const IB to schedule (SI only)
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Schedule an IB on the associated ring (all asics).
  * Returns 0 on success, error on failure.
@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * to SI there was just a DE IB.
  */
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-                      struct radeon_ib *const_ib)
+                      struct radeon_ib *const_ib, bool hdp_flush)
 {
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        int r = 0;
@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
        if (ib->vm)
                radeon_vm_fence(rdev, ib->vm, ib->fence);
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, hdp_flush);
        return 0;
 }
 
index 23314be49480684c583aefffedec4587af48e524..164898b0010c68b350c175b705ef3534af5be69a 100644 (file)
@@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
-       if  ((rdev->flags & RADEON_IS_PX) &&
-            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-               return snprintf(buf, PAGE_SIZE, "off\n");
-
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
-       /* Can't set dpm state when the card is off */
-       if  ((rdev->flags & RADEON_IS_PX) &&
-            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-               return -EINVAL;
-
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("battery", buf, strlen("battery")) == 0)
                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
                goto fail;
        }
        mutex_unlock(&rdev->pm.mutex);
-       radeon_pm_compute_clocks(rdev);
+
+       /* Can't set dpm state when the card is off */
+       if (!(rdev->flags & RADEON_IS_PX) ||
+           (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
+               radeon_pm_compute_clocks(rdev);
+
 fail:
        return count;
 }
index 5b4e0cf231a04d0a104868ed590f1324afe70be1..d65607902537f41a7ede21261eb111ddbf257e63 100644 (file)
@@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
  *
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Update the wptr (write pointer) to tell the GPU to
  * execute new commands on the ring buffer (all asics).
  */
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+                       bool hdp_flush)
 {
        /* If we are emitting the HDP flush via the ring buffer, we need to
         * do it before padding.
         */
-       if (rdev->asic->ring[ring->idx]->hdp_flush)
+       if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
                rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
        /* We pad to match fetch size */
        while (ring->wptr & ring->align_mask) {
@@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
        /* If we are emitting the HDP flush via MMIO, we need to do it after
         * all CPU writes to VRAM finished.
         */
-       if (rdev->asic->mmio_hdp_flush)
+       if (hdp_flush && rdev->asic->mmio_hdp_flush)
                rdev->asic->mmio_hdp_flush(rdev);
        radeon_ring_set_wptr(rdev, ring);
 }
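The comments above pin down the ordering: a ring-emitted HDP flush has to land before the padding NOPs, while an MMIO flush runs after all ring writes, and both are now skipped unless the caller asked for them. A short sketch of the commit sequence, modelled on the ring-test hunks earlier in this diff:

    r = radeon_ring_lock(rdev, ring, 3);
    if (r)
            return r;
    radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
    radeon_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
    radeon_ring_write(ring, 0xDEADBEEF);
    radeon_ring_unlock_commit(rdev, ring, false);   /* internal write, no HDP flush needed */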
@@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
  *
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Call radeon_ring_commit() then unlock the ring (all asics).
  */
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+                              bool hdp_flush)
 {
-       radeon_ring_commit(rdev, ring);
+       radeon_ring_commit(rdev, ring, hdp_flush);
        mutex_unlock(&rdev->ring_lock);
 }
 
@@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
                radeon_ring_write(ring, data[i]);
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        kfree(data);
        return 0;
 }
@@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
-                                    RADEON_GEM_DOMAIN_GTT,
-                                    (rdev->flags & RADEON_IS_PCIE) ?
-                                    RADEON_GEM_GTT_WC : 0,
+                                    RADEON_GEM_DOMAIN_GTT, 0,
                                     NULL, &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
index dbd6bcde92de412a87379d7da23a4cea74b40649..56d9fd66d8aed379c417d1ba4a47de7aa65ba472 100644 (file)
@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
                        continue;
                }
 
-               radeon_ring_commit(rdev, &rdev->ring[i]);
+               radeon_ring_commit(rdev, &rdev->ring[i], false);
                radeon_fence_note_sync(fence, ring);
 
                semaphore->gpu_addr += 8;
index 5adf4207453d7eb041f6679722283a36ae5a0931..17bc3dced9f1db921a11f80b1167f520261ed0b1 100644 (file)
@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
                        return r;
                }
                radeon_fence_emit(rdev, fence, ring->idx);
-               radeon_ring_unlock_commit(rdev, ring);
+               radeon_ring_unlock_commit(rdev, ring, false);
        }
        return 0;
 }
@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringA);
+       radeon_ring_unlock_commit(rdev, ringA, false);
 
        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
        if (r)
@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringA);
+       radeon_ring_unlock_commit(rdev, ringA, false);
 
        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
        if (r)
@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringB);
+       radeon_ring_unlock_commit(rdev, ringB, false);
 
        r = radeon_fence_wait(fence1, false);
        if (r) {
@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringB);
+       radeon_ring_unlock_commit(rdev, ringB, false);
 
        r = radeon_fence_wait(fence2, false);
        if (r) {
@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringA);
+       radeon_ring_unlock_commit(rdev, ringA, false);
 
        r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
        if (r)
@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringB);
+       radeon_ring_unlock_commit(rdev, ringB, false);
        r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
        if (r)
                goto out_cleanup;
@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringC);
+       radeon_ring_unlock_commit(rdev, ringC, false);
 
        for (i = 0; i < 30; ++i) {
                mdelay(100);
@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringC);
+       radeon_ring_unlock_commit(rdev, ringC, false);
 
        mdelay(1000);
 
index 6bf55ec85b62f7b3ee817356c6167181938a3dc1..341848a143761dd8236edc7862f79482d5d2d19d 100644 (file)
@@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
                ib.ptr[i] = PACKET2(0);
        ib.length_dw = 16;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r)
                goto err;
        ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
index f9b70a43aa524f4ccbcff640e28e0d21573463b6..c7190aadbd8981bee97c0c4ed4d923cdf7880b52 100644 (file)
@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
        for (i = ib.length_dw; i < ib_size_dw; ++i)
                ib.ptr[i] = 0x0;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
        }
@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
        for (i = ib.length_dw; i < ib_size_dw; ++i)
                ib.ptr[i] = 0x0;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
        }
@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
                return r;
        }
        radeon_ring_write(ring, VCE_CMD_END);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                if (vce_v1_0_get_rptr(rdev, ring) != rptr)
index ccae4d9dc3deb6aefc3f131dbd9648c5ea374f1a..088ffdc2f577c06519e2a31bc90bbd402c940125 100644 (file)
@@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
        radeon_asic_vm_pad_ib(rdev, &ib);
        WARN_ON(ib.length_dw > 64);
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r)
                 goto error;
 
@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                        /* add a clone of the bo_va to clear the old address */
                        struct radeon_bo_va *tmp;
                        tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+                       if (!tmp) {
+                               mutex_unlock(&vm->mutex);
+                               return -ENOMEM;
+                       }
                        tmp->it.start = bo_va->it.start;
                        tmp->it.last = bo_va->it.last;
                        tmp->vm = vm;
@@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
                radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
                radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
                WARN_ON(ib.length_dw > ndw);
-               r = radeon_ib_schedule(rdev, &ib, NULL);
+               r = radeon_ib_schedule(rdev, &ib, NULL, false);
                if (r) {
                        radeon_ib_free(rdev, &ib);
                        return r;
@@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        WARN_ON(ib.length_dw > ndw);
 
        radeon_semaphore_sync_to(ib.semaphore, vm->fence);
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                return r;
index 3e21e869015fece1124e7093bfd028e99be2e909..8a477bf1fdb31529173234f8f30a0b4e3fb9c608 100644 (file)
@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
        radeon_ring_write(ring, PACKET0(0x20C8, 0));
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
index 2983f17ea1b38399ab649dbb965cae291e6d9b82..d9f5ce715c9bfe5ff18be2a428809c1b7b217c58 100644 (file)
@@ -1177,7 +1177,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        u32 hdp_host_path_cntl;
        u32 sq_dyn_gpr_size_simd_ab_0;
        u32 gb_tiling_config = 0;
-       u32 cc_rb_backend_disable = 0;
        u32 cc_gc_shader_pipe_config = 0;
        u32 mc_arb_ramcfg;
        u32 db_debug4, tmp;
@@ -1311,21 +1310,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
                WREG32(SPI_CONFIG_CNTL, 0);
        }
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
-       if (tmp < rdev->config.rv770.max_backends) {
-               rdev->config.rv770.max_backends = tmp;
-       }
-
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-       tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
-       if (tmp < rdev->config.rv770.max_pipes) {
-               rdev->config.rv770.max_pipes = tmp;
-       }
-       tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
-       if (tmp < rdev->config.rv770.max_simds) {
-               rdev->config.rv770.max_simds = tmp;
-       }
        tmp = rdev->config.rv770.max_simds -
                r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
        rdev->config.rv770.active_simds = tmp;
@@ -1348,6 +1333,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
        disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+       tmp = 0;
+       for (i = 0; i < rdev->config.rv770.max_backends; i++)
+               tmp |= (1 << i);
+       /* if all the backends are disabled, fix it up here */
+       if ((disabled_rb_mask & tmp) == tmp) {
+               for (i = 0; i < rdev->config.rv770.max_backends; i++)
+                       disabled_rb_mask &= ~(1 << i);
+       }
        tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
                                        R7XX_MAX_BACKENDS, disabled_rb_mask);
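The disabled_rb_mask fixup above builds the mask of render backends the chip is expected to have and, if the register claims every one of them is disabled, clears those bits so the expected backends are treated as usable. The same arithmetic as a condensed, stand-alone sketch (assuming fewer than 32 units):

/* Sketch: if every expected unit is flagged disabled, the mask is bogus; clear it. */
static u32 sanitize_disable_mask(u32 disabled, unsigned int max_units)
{
	u32 expected = (1u << max_units) - 1;	/* one bit per expected unit */

	if ((disabled & expected) == expected)
		disabled &= ~expected;
	return disabled;
}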
index bbf2e076ee457816924a736c57d2192cabba88ac..74426ac2bb5c2f39a26fb07eb76538afcc7061a9 100644 (file)
@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
index 011779bd2b3da677129b38709742f673bbaa3398..6bce40847753b822f50be6037654c863833af068 100644 (file)
@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
        u32 sx_debug_1;
        u32 hdp_host_path_cntl;
        u32 tmp;
-       int i, j, k;
+       int i, j;
 
        switch (rdev->family) {
        case CHIP_TAHITI:
@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
                     rdev->config.si.max_sh_per_se,
                     rdev->config.si.max_cu_per_sh);
 
+       rdev->config.si.active_cus = 0;
        for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
                for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
-                       for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
-                               rdev->config.si.active_cus +=
-                                       hweight32(si_get_cu_active_bitmap(rdev, i, j));
-                       }
+                       rdev->config.si.active_cus +=
+                               hweight32(si_get_cu_active_bitmap(rdev, i, j));
                }
        }
 
@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
        radeon_ring_write(ring, 0xc000);
        radeon_ring_write(ring, 0xe000);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        si_cp_enable(rdev, true);
 
@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
                ring = &rdev->ring[i];
@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
                radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
                radeon_ring_write(ring, 0);
 
-               radeon_ring_unlock_commit(rdev, ring);
+               radeon_ring_unlock_commit(rdev, ring, false);
        }
 
        return 0;
@@ -4291,10 +4290,10 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-                              rdev->gart.table_addr >> 12);
+                              rdev->vm_manager.saved_table_addr[i]);
                else
                        WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
-                              rdev->gart.table_addr >> 12);
+                              rdev->vm_manager.saved_table_addr[i]);
        }
 
        /* enable context1-15 */
@@ -4326,6 +4325,17 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
 
 static void si_pcie_gart_disable(struct radeon_device *rdev)
 {
+       unsigned i;
+
+       for (i = 1; i < 16; ++i) {
+               uint32_t reg;
+               if (i < 8)
+                       reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+               else
+                       reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+               rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+       }
+
        /* Disable all tables */
        WREG32(VM_CONTEXT0_CNTL, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
@@ -5028,7 +5038,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* flush hdp cache */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
        radeon_ring_write(ring, 0);
@@ -5036,7 +5046,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* bits 0-15 are the VM contexts0-15 */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
@@ -7178,6 +7188,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
        int ret, i;
        u16 tmp16;
 
+       if (pci_is_root_bus(rdev->pdev->bus))
+               return;
+
        if (radeon_pcie_gen2 == 0)
                return;
 
@@ -7455,7 +7468,8 @@ static void si_program_aspm(struct radeon_device *rdev)
                        if (orig != data)
                                WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
 
-                       if (!disable_clkreq) {
+                       if (!disable_clkreq &&
+                           !pci_is_root_bus(rdev->pdev->bus)) {
                                struct pci_dev *root = rdev->pdev->bus->self;
                                u32 lnkcap;
 
index 7165051294504470c24e054b384802f05dc24de7..7c22baaf94dbf9ac598f745bfb35b2f5ba5138f2 100644 (file)
@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
index 32e50be9c4ac1c41969fd9de2d1a6a3439a6ef27..57f780053b3e4d2717a69b9e60e5aa5967fffc3d 100644 (file)
@@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       /* There are stability issues reported on with
-        * bapm enabled when switching between AC and battery
-        * power.  At the same time, some MSI boards hang
-        * if it's not enabled and dpm is enabled.  Just enable
-        * it for MSI boards right now.
-        */
-       if (rdev->pdev->subsystem_vendor == 0x1462)
-               pi->enable_bapm = true;
-       else
+       if (radeon_bapm == -1) {
+               /* There are stability issues reported on with
+                * bapm enabled when switching between AC and battery
+                * power.  At the same time, some MSI boards hang
+                * if it's not enabled and dpm is enabled.  Just enable
+                * it for MSI boards right now.
+                */
+               if (rdev->pdev->subsystem_vendor == 0x1462)
+                       pi->enable_bapm = true;
+               else
+                       pi->enable_bapm = false;
+       } else if (radeon_bapm == 0) {
                pi->enable_bapm = false;
+       } else {
+               pi->enable_bapm = true;
+       }
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
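The rework above keeps the MSI heuristic only as the default; radeon_bapm is presumably a module parameter following the usual tristate convention (-1 = auto, 0 = force off, 1 = force on). A generic sketch of that convention with invented names:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Illustrative tristate override: -1 = auto-detect, 0 = force off, 1 = force on. */
static int example_feature = -1;
module_param(example_feature, int, 0444);
MODULE_PARM_DESC(example_feature, "feature enable (-1 = auto, 0 = disabled, 1 = enabled)");

static bool example_feature_enabled(bool heuristic_default)
{
	if (example_feature == -1)		/* no override: fall back to the heuristic */
		return heuristic_default;
	return example_feature != 0;		/* explicit user choice wins */
}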
index be42c8125203b22bd62d9fc4be8510672ba577b3..cda391347286c3545bdd952234b3b5fb7186a5d9 100644 (file)
@@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
        radeon_ring_write(ring, 3);
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
 done:
        /* lower clocks again */
@@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
        radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(UVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
index 2d9d4252d59867a3aef9ffba5552d7fb3d79e242..ae8850f3e63bc4669e796650a1b76a9e9ff21a9f 100644 (file)
@@ -1,6 +1,7 @@
 config DRM_STI
        tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
        depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+       select RESET_CONTROLLER
        select DRM_KMS_HELPER
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
index a7cc24917a96c39d9179e55744703171142c3bf5..223d93c3a05d8d7241ad05b669562d0a160af4fa 100644 (file)
@@ -201,8 +201,8 @@ static int sti_drm_platform_probe(struct platform_device *pdev)
        master = platform_device_register_resndata(dev,
                        DRIVER_NAME "__master", -1,
                        NULL, 0, NULL, 0);
-       if (!master)
-               return -EINVAL;
+       if (IS_ERR(master))
+               return PTR_ERR(master);
 
        platform_set_drvdata(pdev, master);
        return 0;
index 72d957f81c057790ad504d2cc626ee6cf08fd721..2ae9a9b7366660632f1745a1dce3e60cf7f03532 100644 (file)
@@ -730,16 +730,16 @@ static int sti_hda_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-       if (IS_ERR(hda->regs))
-               return PTR_ERR(hda->regs);
+       if (!hda->regs)
+               return -ENOMEM;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                        "video-dacs-ctrl");
        if (res) {
                hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
                                resource_size(res));
-               if (IS_ERR(hda->video_dacs_ctrl))
-                       return PTR_ERR(hda->video_dacs_ctrl);
+               if (!hda->video_dacs_ctrl)
+                       return -ENOMEM;
        } else {
                /* If no existing video-dacs-ctrl resource continue the probe */
                DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n");
@@ -770,7 +770,7 @@ static int sti_hda_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id hda_of_match[] = {
+static const struct of_device_id hda_of_match[] = {
        { .compatible = "st,stih416-hda", },
        { .compatible = "st,stih407-hda", },
        { /* end node */ }
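This file and the sti_hdmi and sti_tvout hunks below all make the same correction: devm_ioremap_nocache() and devm_ioremap() return NULL on failure rather than an ERR_PTR, so the old IS_ERR()/PTR_ERR() tests could never fire. The sti_drm_platform_probe hunk earlier shows the opposite convention, where platform_device_register_resndata() does return an ERR_PTR and IS_ERR() is correct. A minimal sketch of the NULL-returning case:

#include <linux/device.h>
#include <linux/io.h>

/* Sketch: devm_ioremap() signals failure with NULL, never with ERR_PTR. */
static void __iomem *map_regs(struct device *dev, resource_size_t start,
			      resource_size_t size)
{
	void __iomem *regs = devm_ioremap(dev, start, size);

	if (!regs)				/* not IS_ERR(regs) */
		dev_err(dev, "failed to remap registers\n");
	return regs;
}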
index 284e541d970d8a1ec2213d2479f3d0f7c29176f7..ef93156a69c6fdad9dd2af368ef68e8ecd9a9b9a 100644 (file)
@@ -677,7 +677,7 @@ static const struct component_ops sti_hdmi_ops = {
        .unbind = sti_hdmi_unbind,
 };
 
-static struct of_device_id hdmi_of_match[] = {
+static const struct of_device_id hdmi_of_match[] = {
        {
                .compatible = "st,stih416-hdmi",
                .data = &tx3g0c55phy_ops,
@@ -713,8 +713,8 @@ static int sti_hdmi_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-       if (IS_ERR(hdmi->regs))
-               return PTR_ERR(hdmi->regs);
+       if (!hdmi->regs)
+               return -ENOMEM;
 
        if (of_device_is_compatible(np, "st,stih416-hdmi")) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -725,8 +725,8 @@ static int sti_hdmi_probe(struct platform_device *pdev)
                }
                hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
                                                    resource_size(res));
-               if (IS_ERR(hdmi->syscfg))
-                       return PTR_ERR(hdmi->syscfg);
+               if (!hdmi->syscfg)
+                       return -ENOMEM;
 
        }
 
index b69e26fee76e0736af3062053e25b41cd47979f9..b8afe490356aedf09bb361d295764020dfff5eac 100644 (file)
@@ -591,8 +591,8 @@ static int sti_tvout_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-       if (IS_ERR(tvout->regs))
-               return PTR_ERR(tvout->regs);
+       if (!tvout->regs)
+               return -ENOMEM;
 
        /* get reset resources */
        tvout->reset = devm_reset_control_get(dev, "tvout");
@@ -624,7 +624,7 @@ static int sti_tvout_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id tvout_of_match[] = {
+static const struct of_device_id tvout_of_match[] = {
        { .compatible = "st,stih416-tvout", },
        { .compatible = "st,stih407-tvout", },
        { /* end node */ }
index 1bdcccc54a1dda0e04d16fc9fbfe2d3d8e1e22b2..f745d2c1325ec8872a376e668cdd71b3c9c523f3 100644 (file)
@@ -28,7 +28,7 @@
 static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+       if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
                hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
                rdesc[11] = rdesc[16] = 0xff;
                rdesc[12] = rdesc[17] = 0x03;
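This fixup, like the kye, lg, ms, pl and sp ones further down, tightens the size guard to cover the highest descriptor byte actually touched: writing rdesc[17] needs at least 18 bytes, so *rsize >= 17 was off by one. A minimal sketch of the corrected check with made-up indices:

#include <linux/types.h>

/* Sketch: touching rdesc[idx] requires the descriptor to hold idx + 1 bytes. */
static void fixup_byte(__u8 *rdesc, unsigned int rsize, unsigned int idx, __u8 val)
{
	if (rsize >= idx + 1)	/* ">= idx" alone would patch one byte past the guard */
		rdesc[idx] = val;
}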
index 60f44cd1b0ed30ec19e986800676311bb72dadac..61b68ca27790d65c270465af7152276d7d7be7c9 100644 (file)
@@ -84,6 +84,15 @@ static const __u8 huion_tablet_rdesc_template[] = {
        0xC0                    /*  End Collection                          */
 };
 
+/* Parameter indices */
+enum huion_prm {
+       HUION_PRM_X_LM          = 1,
+       HUION_PRM_Y_LM          = 2,
+       HUION_PRM_PRESSURE_LM   = 4,
+       HUION_PRM_RESOLUTION    = 5,
+       HUION_PRM_NUM
+};
+
 /* Driver data */
 struct huion_drvdata {
        __u8 *rdesc;
@@ -115,7 +124,12 @@ static int huion_tablet_enable(struct hid_device *hdev)
        int rc;
        struct usb_device *usb_dev = hid_to_usb_dev(hdev);
        struct huion_drvdata *drvdata = hid_get_drvdata(hdev);
-       __le16 buf[6];
+       __le16 *buf = NULL;
+       size_t len;
+       s32 params[HUION_PH_ID_NUM];
+       s32 resolution;
+       __u8 *p;
+       s32 v;
 
        /*
         * Read string descriptor containing tablet parameters. The specific
@@ -123,65 +137,79 @@ static int huion_tablet_enable(struct hid_device *hdev)
         * driver traffic.
         * NOTE: This enables fully-functional tablet mode.
         */
+       len = HUION_PRM_NUM * sizeof(*buf);
+       buf = kmalloc(len, GFP_KERNEL);
+       if (buf == NULL) {
+               hid_err(hdev, "failed to allocate parameter buffer\n");
+               rc = -ENOMEM;
+               goto cleanup;
+       }
        rc = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
                                USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
                                (USB_DT_STRING << 8) + 0x64,
-                               0x0409, buf, sizeof(buf),
+                               0x0409, buf, len,
                                USB_CTRL_GET_TIMEOUT);
-       if (rc == -EPIPE)
-               hid_warn(hdev, "device parameters not found\n");
-       else if (rc < 0)
-               hid_warn(hdev, "failed to get device parameters: %d\n", rc);
-       else if (rc != sizeof(buf))
-               hid_warn(hdev, "invalid device parameters\n");
-       else {
-               s32 params[HUION_PH_ID_NUM];
-               s32 resolution;
-               __u8 *p;
-               s32 v;
+       if (rc == -EPIPE) {
+               hid_err(hdev, "device parameters not found\n");
+               rc = -ENODEV;
+               goto cleanup;
+       } else if (rc < 0) {
+               hid_err(hdev, "failed to get device parameters: %d\n", rc);
+               rc = -ENODEV;
+               goto cleanup;
+       } else if (rc != len) {
+               hid_err(hdev, "invalid device parameters\n");
+               rc = -ENODEV;
+               goto cleanup;
+       }
 
-               /* Extract device parameters */
-               params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[1]);
-               params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[2]);
-               params[HUION_PH_ID_PRESSURE_LM] = le16_to_cpu(buf[4]);
-               resolution = le16_to_cpu(buf[5]);
-               if (resolution == 0) {
-                       params[HUION_PH_ID_X_PM] = 0;
-                       params[HUION_PH_ID_Y_PM] = 0;
-               } else {
-                       params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] *
-                                                       1000 / resolution;
-                       params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] *
-                                                       1000 / resolution;
-               }
+       /* Extract device parameters */
+       params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[HUION_PRM_X_LM]);
+       params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[HUION_PRM_Y_LM]);
+       params[HUION_PH_ID_PRESSURE_LM] =
+               le16_to_cpu(buf[HUION_PRM_PRESSURE_LM]);
+       resolution = le16_to_cpu(buf[HUION_PRM_RESOLUTION]);
+       if (resolution == 0) {
+               params[HUION_PH_ID_X_PM] = 0;
+               params[HUION_PH_ID_Y_PM] = 0;
+       } else {
+               params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] *
+                                               1000 / resolution;
+               params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] *
+                                               1000 / resolution;
+       }
 
-               /* Allocate fixed report descriptor */
-               drvdata->rdesc = devm_kmalloc(&hdev->dev,
-                                       sizeof(huion_tablet_rdesc_template),
-                                       GFP_KERNEL);
-               if (drvdata->rdesc == NULL) {
-                       hid_err(hdev, "failed to allocate fixed rdesc\n");
-                       return -ENOMEM;
-               }
-               drvdata->rsize = sizeof(huion_tablet_rdesc_template);
+       /* Allocate fixed report descriptor */
+       drvdata->rdesc = devm_kmalloc(&hdev->dev,
+                               sizeof(huion_tablet_rdesc_template),
+                               GFP_KERNEL);
+       if (drvdata->rdesc == NULL) {
+               hid_err(hdev, "failed to allocate fixed rdesc\n");
+               rc = -ENOMEM;
+               goto cleanup;
+       }
+       drvdata->rsize = sizeof(huion_tablet_rdesc_template);
 
-               /* Format fixed report descriptor */
-               memcpy(drvdata->rdesc, huion_tablet_rdesc_template,
-                       drvdata->rsize);
-               for (p = drvdata->rdesc;
-                    p <= drvdata->rdesc + drvdata->rsize - 4;) {
-                       if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
-                           p[3] < sizeof(params)) {
-                               v = params[p[3]];
-                               put_unaligned(cpu_to_le32(v), (s32 *)p);
-                               p += 4;
-                       } else {
-                               p++;
-                       }
+       /* Format fixed report descriptor */
+       memcpy(drvdata->rdesc, huion_tablet_rdesc_template,
+               drvdata->rsize);
+       for (p = drvdata->rdesc;
+            p <= drvdata->rdesc + drvdata->rsize - 4;) {
+               if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
+                   p[3] < sizeof(params)) {
+                       v = params[p[3]];
+                       put_unaligned(cpu_to_le32(v), (s32 *)p);
+                       p += 4;
+               } else {
+                       p++;
                }
        }
 
-       return 0;
+       rc = 0;
+
+cleanup:
+       kfree(buf);
+       return rc;
 }
 
 static int huion_probe(struct hid_device *hdev, const struct hid_device_id *id)
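The rewrite above also moves the parameter buffer off the stack: usb_control_msg() transfer buffers must be heap-allocated (DMA-able), and the single cleanup label keeps every error path from leaking it. A stripped-down sketch of that shape, reusing the request values from the hunk but with a hypothetical caller interface:

#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: heap-allocate the control-transfer buffer and free it on every path. */
static int read_params(struct usb_device *udev, __le16 **out, size_t nwords)
{
	size_t len = nwords * sizeof(__le16);
	__le16 *buf = kmalloc(len, GFP_KERNEL);	/* never a stack buffer */
	int rc;

	if (!buf)
		return -ENOMEM;
	rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			     USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
			     (USB_DT_STRING << 8) + 0x64, 0x0409,
			     buf, len, USB_CTRL_GET_TIMEOUT);
	if (rc < 0 || rc != len) {
		kfree(buf);
		return rc < 0 ? rc : -ENODEV;
	}
	*out = buf;	/* caller owns the buffer and must kfree() it */
	return 0;
}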
index e7769636759129f2540c5c670f1cc332180191fe..b92bf01a1ae8122f486ea333288558f082162f5d 100644 (file)
@@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                 *   - change the button usage range to 4-7 for the extra
                 *     buttons
                 */
-               if (*rsize >= 74 &&
+               if (*rsize >= 75 &&
                        rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
                        rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
                        rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
index a976f48263f661248f818685d10b708496298cf0..f91ff145db9a0761ce487aa2b43aee1f2503af2f 100644 (file)
@@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        struct usb_device_descriptor *udesc;
        __u16 bcdDevice, rev_maj, rev_min;
 
-       if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
+       if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
                        rdesc[84] == 0x8c && rdesc[85] == 0x02) {
                hid_info(hdev,
                         "fixing up Logitech keyboard report descriptor\n");
                rdesc[84] = rdesc[89] = 0x4d;
                rdesc[85] = rdesc[90] = 0x10;
        }
-       if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
+       if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
                        rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
                        rdesc[49] == 0x81 && rdesc[50] == 0x06) {
                hid_info(hdev,
index cc2bd20221989aa0284269cce533c28ab2e7295c..7835717bc02011d4ed2638caca9472021394245b 100644 (file)
@@ -451,13 +451,13 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at
        drv_data = hid_get_drvdata(hid);
        if (!drv_data) {
                hid_err(hid, "Private driver data not found!\n");
-               return 0;
+               return -EINVAL;
        }
 
        entry = drv_data->device_props;
        if (!entry) {
                hid_err(hid, "Device properties not found!\n");
-               return 0;
+               return -EINVAL;
        }
 
        if (range == 0)
index 486dbde2ba2d90d3802e59163870b57c4274fb28..9bf8637747a57f1b362680eeb36faf649b35f2fd 100644 (file)
@@ -238,13 +238,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
                return;
        }
 
-       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
-           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
-               dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
-                       __func__, dj_report->device_index);
-               return;
-       }
-
        if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
                /* The device is already known. No need to reallocate it. */
                dbg_hid("%s: device is already known\n", __func__);
@@ -557,7 +550,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
        if (!out_buf)
                return -ENOMEM;
 
-       if (count < DJREPORT_SHORT_LENGTH - 2)
+       if (count > DJREPORT_SHORT_LENGTH - 2)
                count = DJREPORT_SHORT_LENGTH - 2;
 
        out_buf[0] = REPORT_ID_DJ_SHORT;
@@ -663,7 +656,6 @@ static int logi_dj_raw_event(struct hid_device *hdev,
        struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
        struct dj_report *dj_report = (struct dj_report *) data;
        unsigned long flags;
-       bool report_processed = false;
 
        dbg_hid("%s, size:%d\n", __func__, size);
 
@@ -691,27 +683,41 @@ static int logi_dj_raw_event(struct hid_device *hdev,
         * anything else with it.
         */
 
+       /* case 1) */
+       if (data[0] != REPORT_ID_DJ_SHORT)
+               return false;
+
+       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+               /*
+                * Device index is wrong, bail out.
+                * This driver can safely ignore the receiver notifications,
+                * so ignore those reports too.
+                */
+               if (dj_report->device_index != DJ_RECEIVER_INDEX)
+                       dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+                               __func__, dj_report->device_index);
+               return false;
+       }
+
        spin_lock_irqsave(&djrcv_dev->lock, flags);
-       if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
-               switch (dj_report->report_type) {
-               case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
-               case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
-                       logi_dj_recv_queue_notification(djrcv_dev, dj_report);
-                       break;
-               case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
-                       if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
-                           STATUS_LINKLOSS) {
-                               logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
-                       }
-                       break;
-               default:
-                       logi_dj_recv_forward_report(djrcv_dev, dj_report);
+       switch (dj_report->report_type) {
+       case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+       case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+               logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+               break;
+       case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
+               if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
+                   STATUS_LINKLOSS) {
+                       logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
                }
-               report_processed = true;
+               break;
+       default:
+               logi_dj_recv_forward_report(djrcv_dev, dj_report);
        }
        spin_unlock_irqrestore(&djrcv_dev->lock, flags);
 
-       return report_processed;
+       return true;
 }
 
 static int logi_dj_probe(struct hid_device *hdev,
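The validation now sits at the top of the raw-event path, before dj_report->device_index is ever used to index paired_dj_devices[], and the receiver's own index (the new DJ_RECEIVER_INDEX, 0) is skipped silently instead of being logged as an error. A stripped-down sketch of range-checking an index received from hardware (bounds are illustrative):

#define EXAMPLE_IDX_MIN 1
#define EXAMPLE_IDX_MAX 6

/* Sketch: never index an array with device-supplied data until it is range-checked. */
static bool slot_is_used(void *slots[EXAMPLE_IDX_MAX + 1], unsigned int idx)
{
	if (idx < EXAMPLE_IDX_MIN || idx > EXAMPLE_IDX_MAX)
		return false;
	return slots[idx] != NULL;
}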
index 4a4000340ce1ed8cf6be1f81ee8e0f21d8ba5f5a..daeb0aa4bee99a60f3391c0ab4a56803b37a2568 100644 (file)
@@ -27,6 +27,7 @@
 
 #define DJ_MAX_PAIRED_DEVICES                  6
 #define DJ_MAX_NUMBER_NOTIFICATIONS            8
+#define DJ_RECEIVER_INDEX                      0
 #define DJ_DEVICE_INDEX_MIN                    1
 #define DJ_DEVICE_INDEX_MAX                    6
 
index ecc2cbf300cc39bc31e3ff5ad5979dff49822402..29a74c1efcb85fa727536c4b72be844f9963cd70 100644 (file)
@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                if (size < 4 || ((size - 4) % 9) != 0)
                        return 0;
                npoints = (size - 4) / 9;
+               if (npoints > 15) {
+                       hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
+                                       size);
+                       return 0;
+               }
                msc->ntouches = 0;
                for (ii = 0; ii < npoints; ii++)
                        magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                if (size < 6 || ((size - 6) % 8) != 0)
                        return 0;
                npoints = (size - 6) / 8;
+               if (npoints > 15) {
+                       hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
+                                       size);
+                       return 0;
+               }
                msc->ntouches = 0;
                for (ii = 0; ii < npoints; ii++)
                        magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
index 9e14c00eb1b6bb105ffd1326dc802183cfeb9a1a..25daf28b26bdf6b4d921e501bb49d4646e9aed55 100644 (file)
@@ -24,7 +24,7 @@
 static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
+       if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
                hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
                rdesc[30] = 0x0c;
        }
index 736b2502df4f8b00473889f6abf0ac78dcf72fe3..6aca4f2554bf4d748df6fc629276704e740ea40e 100644 (file)
@@ -25,7 +25,7 @@
 static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
+       if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
                        rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
                        rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
                hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
index acbb021065ece8287c9d3ea433c860afc0711855..020df3c2e8b42717c62bbe0470aa47845535e4a5 100644 (file)
@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
        if (!data)
                return 1;
 
+       if (size > 64) {
+               hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
+                               size);
+               return 0;
+       }
+
        if (report->id == REPORT_KEY_STATE) {
                if (data->input_keys)
                        ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
index 0dc25142f451ff8d5fb040249d83d811a029db1d..8389e8109218c7013567b727cdb3ae300c3a51b9 100644 (file)
@@ -909,10 +909,15 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
        }
 
-       if (!test_bit(RMI_STARTED, &data->flags)) {
-               hid_hw_stop(hdev);
-               return -EIO;
-       }
+       if (!test_bit(RMI_STARTED, &data->flags))
+               /*
+                * The device may be in the bootloader if rmi_input_configured
+                * failed to find F11 in the PDT. Print an error, but don't
+                * return an error from rmi_probe so that hidraw will be
+                * accessible from userspace. That way a userspace tool
+                * can be used to reload working firmware on the touchpad.
+                */
+               hid_err(hdev, "Device failed to be properly configured\n");
 
        return 0;
 }
index e244e449cbbadc05ffc40c62e27fa1065c5154ed..2ac25760a9a9da02004299ab702c481d7e712cc5 100644 (file)
@@ -604,9 +604,9 @@ static int sensor_hub_probe(struct hid_device *hdev,
                ret = -EINVAL;
                goto err_stop_hw;
        }
-       sd->hid_sensor_hub_client_devs = kzalloc(dev_cnt *
-                                               sizeof(struct mfd_cell),
-                                               GFP_KERNEL);
+       sd->hid_sensor_hub_client_devs = devm_kzalloc(&hdev->dev, dev_cnt *
+                                                     sizeof(struct mfd_cell),
+                                                     GFP_KERNEL);
        if (sd->hid_sensor_hub_client_devs == NULL) {
                hid_err(hdev, "Failed to allocate memory for mfd cells\n");
                        ret = -ENOMEM;
@@ -618,11 +618,12 @@ static int sensor_hub_probe(struct hid_device *hdev,
 
                if (collection->type == HID_COLLECTION_PHYSICAL) {
 
-                       hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+                       hsdev = devm_kzalloc(&hdev->dev, sizeof(*hsdev),
+                                            GFP_KERNEL);
                        if (!hsdev) {
                                hid_err(hdev, "cannot allocate hid_sensor_hub_device\n");
                                ret = -ENOMEM;
-                               goto err_no_mem;
+                               goto err_stop_hw;
                        }
                        hsdev->hdev = hdev;
                        hsdev->vendor_id = hdev->vendor;
@@ -631,13 +632,13 @@ static int sensor_hub_probe(struct hid_device *hdev,
                        if (last_hsdev)
                                last_hsdev->end_collection_index = i;
                        last_hsdev = hsdev;
-                       name = kasprintf(GFP_KERNEL, "HID-SENSOR-%x",
-                                       collection->usage);
+                       name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+                                             "HID-SENSOR-%x",
+                                             collection->usage);
                        if (name == NULL) {
                                hid_err(hdev, "Failed MFD device name\n");
                                        ret = -ENOMEM;
-                                       kfree(hsdev);
-                                       goto err_no_mem;
+                                       goto err_stop_hw;
                        }
                        sd->hid_sensor_hub_client_devs[
                                sd->hid_sensor_client_cnt].id =
@@ -661,16 +662,10 @@ static int sensor_hub_probe(struct hid_device *hdev,
        ret = mfd_add_devices(&hdev->dev, 0, sd->hid_sensor_hub_client_devs,
                sd->hid_sensor_client_cnt, NULL, 0, NULL);
        if (ret < 0)
-               goto err_no_mem;
+               goto err_stop_hw;
 
        return ret;
 
-err_no_mem:
-       for (i = 0; i < sd->hid_sensor_client_cnt; ++i) {
-               kfree(sd->hid_sensor_hub_client_devs[i].name);
-               kfree(sd->hid_sensor_hub_client_devs[i].platform_data);
-       }
-       kfree(sd->hid_sensor_hub_client_devs);
 err_stop_hw:
        hid_hw_stop(hdev);
 
@@ -681,7 +676,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
 {
        struct sensor_hub_data *data = hid_get_drvdata(hdev);
        unsigned long flags;
-       int i;
 
        hid_dbg(hdev, " hardware removed\n");
        hid_hw_close(hdev);
@@ -691,11 +685,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
                complete(&data->pending.ready);
        spin_unlock_irqrestore(&data->lock, flags);
        mfd_remove_devices(&hdev->dev);
-       for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
-               kfree(data->hid_sensor_hub_client_devs[i].name);
-               kfree(data->hid_sensor_hub_client_devs[i].platform_data);
-       }
-       kfree(data->hid_sensor_hub_client_devs);
        hid_set_drvdata(hdev, NULL);
        mutex_destroy(&data->mutex);
 }
index 87fc91e1c8de4980d2f8e8721f476b6d21959adf..91072fa54663e747908dd09bb11b431eaa4208c5 100644 (file)
@@ -24,7 +24,7 @@
 static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
+       if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
                        rdesc[106] == 0x03) {
                hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
                rdesc[105] = rdesc[110] = 0x03;
index 3e3b680dc007319cf14abe35e5a26fc7f7c8a2b4..b51a402752c4c616a0d2a57ba6cd73c983296fc1 100644 (file)
@@ -23,17 +23,14 @@ config I2C
          This I2C support can also be built as a module.  If so, the module
          will be called i2c-core.
 
-config I2C_ACPI
-       bool "I2C ACPI support"
-       select I2C
-       depends on ACPI
+config ACPI_I2C_OPREGION
+       bool "ACPI I2C Operation region support"
+       depends on I2C=y && ACPI
        default y
        help
-         Say Y here if you want to enable ACPI I2C support. This includes support
-         for automatic enumeration of I2C slave devices and support for ACPI I2C
-         Operation Regions. Operation Regions allow firmware (BIOS) code to
-         access I2C slave devices, such as smart batteries through an I2C host
-         controller driver.
+         Say Y here if you want to enable ACPI I2C operation region support.
+         Operation Regions allow firmware (BIOS) code to access I2C slave devices,
+         such as smart batteries through an I2C host controller driver.
 
 if I2C
 
index a1f590cbb435d4c83a507beb4c11a051a77d828e..e0228b228256a8771df7b0be2536c3c42f90448d 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 i2ccore-y := i2c-core.o
-i2ccore-$(CONFIG_I2C_ACPI)     += i2c-acpi.o
+i2ccore-$(CONFIG_ACPI)         += i2c-acpi.o
 
 obj-$(CONFIG_I2C_BOARDINFO)    += i2c-boardinfo.o
 obj-$(CONFIG_I2C)              += i2ccore.o
index 2994690b26e9b18ffc6223df4db9d8d489924079..10467a3277492a6fa98ee597e7b1b33df687ec32 100644 (file)
 
 /* Older devices have their ID defined in <linux/pci_ids.h> */
 #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS             0x0f12
+#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS             0x2292
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS          0x1c22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS             0x1d22
 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */
@@ -828,6 +829,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
        { 0, }
 };
 
index e8b61967334bb199cbd551042820647d11aea62f..0dbc18c15c43a8529b4c84a34650887cc7a56019 100644 (file)
@@ -126,6 +126,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adap)
                dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
 }
 
+#ifdef CONFIG_ACPI_I2C_OPREGION
 static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
                u8 cmd, u8 *data, u8 data_len)
 {
@@ -360,3 +361,4 @@ void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
 
        acpi_bus_detach_private_data(handle);
 }
+#endif
index 18405314168be35fb00640bbf1e7e05ba23a9466..ecb0109a536045019dc3c5b9e2c084c3628f16ce 100644 (file)
@@ -3149,14 +3149,16 @@ free_domains:
 
 static void cleanup_domain(struct protection_domain *domain)
 {
-       struct iommu_dev_data *dev_data, *next;
+       struct iommu_dev_data *entry;
        unsigned long flags;
 
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 
-       list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
-               __detach_device(dev_data);
-               atomic_set(&dev_data->bind, 0);
+       while (!list_empty(&domain->dev_list)) {
+               entry = list_first_entry(&domain->dev_list,
+                                        struct iommu_dev_data, list);
+               __detach_device(entry);
+               atomic_set(&entry->bind, 0);
        }
 
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
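Replacing list_for_each_entry_safe() with a while (!list_empty()) loop suggests that __detach_device() can remove more entries than the one being visited, which would invalidate even the _safe iterator's saved next pointer; restarting from the head each time is immune to that. A generic sketch of the robust form:

#include <linux/list.h>

struct example_item { struct list_head list; };

/* Sketch: when per-item teardown may unlink arbitrary entries, always restart
 * from the head; teardown() must unlink at least the entry it is given. */
static void drain_list(struct list_head *head,
		       void (*teardown)(struct example_item *item))
{
	while (!list_empty(head)) {
		struct example_item *item =
			list_first_entry(head, struct example_item, list);

		teardown(item);
	}
}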
index d1f5caad04f99a8ed2e3781d5bcedce7c324564e..5619f264862d9073a587fd1a2b919f2c61a27523 100644 (file)
@@ -3869,6 +3869,14 @@ static int device_notifier(struct notifier_block *nb,
            action != BUS_NOTIFY_DEL_DEVICE)
                return 0;
 
+       /*
+        * If the device is still attached to a device driver we can't
+        * tear down the domain yet as DMA mappings may still be in use.
+        * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
+        */
+       if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
+               return 0;
+
        domain = find_domain(dev);
        if (!domain)
                return 0;
index 169836020208d1a8a5755f16b60f68a66fec6d43..ac4adb337038bc990077a2c0557d1f07d8b1de8e 100644 (file)
@@ -995,7 +995,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
        size_t orig_size = size;
        int ret = 0;
 
-       if (unlikely(domain->ops->unmap == NULL ||
+       if (unlikely(domain->ops->map == NULL ||
                     domain->ops->pgsize_bitmap == 0UL))
                return -ENODEV;
 
index 58368f7b5cba150e736c24d7c5f151347c7f43d8..2498c349a32e49e9379559224b2652d6e9d33e82 100644 (file)
@@ -1,6 +1,6 @@
 /* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */
 
-#ifndef __DIVA_XDI_UM_CFG_MESSSGE_H__
+#ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__
 #define __DIVA_XDI_UM_CFG_MESSAGE_H__
 
 /*
index b08c18871323c904f3964e9c8b650366ad2979ef..6703751d87d7fd7314149cfd8ba5bef314927235 100644 (file)
@@ -2953,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                 */
                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                        end_reshape(conf);
+                       close_sync(conf);
                        return 0;
                }
 
@@ -3081,6 +3082,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        }
 
                        r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                       r10_bio->state = 0;
                        raise_barrier(conf, rb2 != NULL);
                        atomic_set(&r10_bio->remaining, 0);
 
@@ -3269,6 +3271,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                if (sync_blocks < max_sync)
                        max_sync = sync_blocks;
                r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+               r10_bio->state = 0;
 
                r10_bio->mddev = mddev;
                atomic_set(&r10_bio->remaining, 0);
@@ -4384,6 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
        r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+       r10_bio->state = 0;
        raise_barrier(conf, sectors_done != 0);
        atomic_set(&r10_bio->remaining, 0);
        r10_bio->mddev = mddev;
@@ -4398,6 +4402,7 @@ read_more:
                 * on all the target devices.
                 */
                // FIXME
+               mempool_free(r10_bio, conf->r10buf_pool);
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                return sectors_done;
        }
@@ -4410,7 +4415,7 @@ read_more:
        read_bio->bi_private = r10_bio;
        read_bio->bi_end_io = end_sync_read;
        read_bio->bi_rw = READ;
-       read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+       read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
        read_bio->bi_flags |= 1 << BIO_UPTODATE;
        read_bio->bi_vcnt = 0;
        read_bio->bi_iter.bi_size = 0;
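The repeated r10_bio->state = 0 additions above reflect that mempool_alloc() hands back recycled objects without re-zeroing them, so per-use fields must be reset explicitly (and the reshape error path now returns its bio to the pool instead of leaking it). A tiny sketch of that rule with a stand-in type:

#include <linux/gfp.h>
#include <linux/mempool.h>

struct example_bio { unsigned long state; };

/* Sketch: mempool objects are recycled, not re-zeroed; clear per-use state after alloc. */
static struct example_bio *example_bio_get(mempool_t *pool)
{
	struct example_bio *b = mempool_alloc(pool, GFP_NOIO);

	if (b)
		b->state = 0;	/* drop flags left over from the previous user */
	return b;
}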
index 6234b2e84587cd75d37ac50bf5264094e474632a..183588b11fc1d261e173aff9c9e1e63b175ed120 100644 (file)
@@ -2922,7 +2922,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
              (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
              !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
             (sh->raid_conf->level == 6 && s->failed && s->to_write &&
-             s->to_write < sh->raid_conf->raid_disks - 2 &&
+             s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 &&
              (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
                /* we would like to get this block, possibly by computing it,
                 * otherwise read it if the backing disk is insync
@@ -3817,6 +3817,8 @@ static void handle_stripe(struct stripe_head *sh)
                                set_bit(R5_Wantwrite, &dev->flags);
                                if (prexor)
                                        continue;
+                               if (s.failed > 1)
+                                       continue;
                                if (!test_bit(R5_Insync, &dev->flags) ||
                                    ((i == sh->pd_idx || i == sh->qd_idx)  &&
                                     s.failed == 0))
index 5dede6e6437619f455f19ef6f8277e74e7639997..109cb44291f51f3af4c45979aa3b3ef53099d061 100644 (file)
@@ -280,7 +280,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
 
                priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
                                                     resource_size(res));
-               if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
+               if (!priv->raminit_ctrlreg || priv->instance < 0)
                        dev_info(&pdev->dev, "control memory is not used for raminit\n");
                else
                        priv->raminit = c_can_hw_raminit_ti;
index f425ec2c7839de4abe1a481cf94d0fd03435c845..944aa5d3af6ef4602329b75a57e8f87be85d79f8 100644 (file)
@@ -549,6 +549,13 @@ static void do_state(struct net_device *dev,
 
        /* process state changes depending on the new state */
        switch (new_state) {
+       case CAN_STATE_ERROR_WARNING:
+               netdev_dbg(dev, "Error Warning\n");
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[1] = (bec.txerr > bec.rxerr) ?
+                       CAN_ERR_CRTL_TX_WARNING :
+                       CAN_ERR_CRTL_RX_WARNING;
+               break;
        case CAN_STATE_ERROR_ACTIVE:
                netdev_dbg(dev, "Error Active\n");
                cf->can_id |= CAN_ERR_PROT;
@@ -852,6 +859,8 @@ static int flexcan_chip_start(struct net_device *dev)
        if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
            priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
                reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
+       else
+               reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK;
 
        /* save for later use */
        priv->reg_ctrl_default = reg_ctrl;
index d1692154ed1b094ab100bc40ddcc2783d1dfdac7..b27ac6074afb1d21ab63282298002f8a4992599b 100644 (file)
@@ -172,6 +172,35 @@ static void set_normal_mode(struct net_device *dev)
        netdev_err(dev, "setting SJA1000 into normal mode failed!\n");
 }
 
+/*
+ * initialize SJA1000 chip:
+ *   - reset chip
+ *   - set output mode
+ *   - set baudrate
+ *   - enable interrupts
+ *   - start operating mode
+ */
+static void chipset_init(struct net_device *dev)
+{
+       struct sja1000_priv *priv = netdev_priv(dev);
+
+       /* set clock divider and output control register */
+       priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
+
+       /* set acceptance filter (accept all) */
+       priv->write_reg(priv, SJA1000_ACCC0, 0x00);
+       priv->write_reg(priv, SJA1000_ACCC1, 0x00);
+       priv->write_reg(priv, SJA1000_ACCC2, 0x00);
+       priv->write_reg(priv, SJA1000_ACCC3, 0x00);
+
+       priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
+       priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
+       priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
+       priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
+
+       priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
+}
+
 static void sja1000_start(struct net_device *dev)
 {
        struct sja1000_priv *priv = netdev_priv(dev);
@@ -180,6 +209,10 @@ static void sja1000_start(struct net_device *dev)
        if (priv->can.state != CAN_STATE_STOPPED)
                set_reset_mode(dev);
 
+       /* Initialize chip if uninitialized at this stage */
+       if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
+               chipset_init(dev);
+
        /* Clear error counters and error code capture */
        priv->write_reg(priv, SJA1000_TXERR, 0x0);
        priv->write_reg(priv, SJA1000_RXERR, 0x0);
@@ -236,35 +269,6 @@ static int sja1000_get_berr_counter(const struct net_device *dev,
        return 0;
 }
 
-/*
- * initialize SJA1000 chip:
- *   - reset chip
- *   - set output mode
- *   - set baudrate
- *   - enable interrupts
- *   - start operating mode
- */
-static void chipset_init(struct net_device *dev)
-{
-       struct sja1000_priv *priv = netdev_priv(dev);
-
-       /* set clock divider and output control register */
-       priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
-
-       /* set acceptance filter (accept all) */
-       priv->write_reg(priv, SJA1000_ACCC0, 0x00);
-       priv->write_reg(priv, SJA1000_ACCC1, 0x00);
-       priv->write_reg(priv, SJA1000_ACCC2, 0x00);
-       priv->write_reg(priv, SJA1000_ACCC3, 0x00);
-
-       priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
-       priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
-       priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
-       priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
-
-       priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
-}
-
 /*
  * transmit a CAN message
  * message layout in the sk_buff should be like this:
index e1a8f4e19983ffe4f87139af1849906348648333..e4222af2baa66838d27d136c8e4c3d9be369679b 100644 (file)
@@ -563,15 +563,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
        struct xgene_enet_desc_ring *ring;
 
        ring = pdata->tx_ring;
-       if (ring && ring->cp_ring && ring->cp_ring->cp_skb)
-               devm_kfree(dev, ring->cp_ring->cp_skb);
-       xgene_enet_free_desc_ring(ring);
+       if (ring) {
+               if (ring->cp_ring && ring->cp_ring->cp_skb)
+                       devm_kfree(dev, ring->cp_ring->cp_skb);
+               xgene_enet_free_desc_ring(ring);
+       }
 
        ring = pdata->rx_ring;
-       if (ring && ring->buf_pool && ring->buf_pool->rx_skb)
-               devm_kfree(dev, ring->buf_pool->rx_skb);
-       xgene_enet_free_desc_ring(ring->buf_pool);
-       xgene_enet_free_desc_ring(ring);
+       if (ring) {
+               if (ring->buf_pool) {
+                       if (ring->buf_pool->rx_skb)
+                               devm_kfree(dev, ring->buf_pool->rx_skb);
+                       xgene_enet_free_desc_ring(ring->buf_pool);
+               }
+               xgene_enet_free_desc_ring(ring);
+       }
 }
 
 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
index 4e6c82e2022492ef02b142ac13950fbf4e3a7fef..4ccc806b11501bb08b96614a0e79b020867648fe 100644 (file)
@@ -483,11 +483,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 #ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
-#ifdef _ASM_GENERIC_INT_L64_H
-       DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
-#endif
           fp->tpa_queue_used);
 #endif
 }
index c13364b6cc19177a0ed4688df3768cd8ac0dc53a..900cab42081068e699e4bc8e2e0bec71659c604d 100644 (file)
@@ -10052,6 +10052,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 }
 
 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
+                                       0x1848 + ((f) << 4))
 #define BNX2X_PREV_UNDI_RCQ(val)       ((val) & 0xffff)
 #define BNX2X_PREV_UNDI_BD(val)                ((val) >> 16 & 0xffff)
 #define BNX2X_PREV_UNDI_PROD(rcq, bd)  ((bd) << 16 | (rcq))
@@ -10059,8 +10061,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR      (0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR      (0x08)
 #define BCM_5710_UNDI_FW_MF_VERS       (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 
 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
 {
@@ -10079,72 +10079,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
        return false;
 }
 
-static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
-{
-       u8 major, minor, version;
-       u32 fw;
-
-       /* Must check that FW is loaded */
-       if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
-            MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
-               BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
-               return false;
-       }
-
-       /* Read Currently loaded FW version */
-       fw = REG_RD(bp, XSEM_REG_PRAM);
-       major = fw & 0xff;
-       minor = (fw >> 0x8) & 0xff;
-       version = (fw >> 0x10) & 0xff;
-       BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
-                      fw, major, minor, version);
-
-       if (major > BCM_5710_UNDI_FW_MF_MAJOR)
-               return true;
-
-       if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
-           (minor > BCM_5710_UNDI_FW_MF_MINOR))
-               return true;
-
-       if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
-           (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
-           (version >= BCM_5710_UNDI_FW_MF_VERS))
-               return true;
-
-       return false;
-}
-
-static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
-{
-       int i;
-
-       /* Due to legacy (FW) code, the first function on each engine has a
-        * different offset macro from the rest of the functions.
-        * Setting this for all 8 functions is harmless regardless of whether
-        * this is actually a multi-function device.
-        */
-       for (i = 0; i < 2; i++)
-               REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
-
-       for (i = 2; i < 8; i++)
-               REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
-
-       BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
-}
-
-static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
+static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
 {
        u16 rcq, bd;
-       u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
+       u32 addr, tmp_reg;
 
+       if (BP_FUNC(bp) < 2)
+               addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
+       else
+               addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
+
+       tmp_reg = REG_RD(bp, addr);
        rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
        bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
 
        tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
-       REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
+       REG_WR(bp, addr, tmp_reg);
 
-       BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
-                      port, bd, rcq);
+       BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+                      BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
 }
 
 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
@@ -10383,7 +10336,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
-               bool need_write = true;
 
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10420,20 +10372,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                        else
                                timer_count--;
 
-                       /* New UNDI FW supports MF and contains better
-                        * cleaning methods - might be redundant but harmless.
-                        */
-                       if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-                               if (need_write) {
-                                       bnx2x_prev_unload_undi_mf(bp);
-                                       need_write = false;
-                               }
-                       } else if (prev_undi) {
-                               /* If UNDI resides in memory,
-                                * manually increment it
-                                */
-                               bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-                       }
+                       /* If UNDI resides in memory, manually increment it */
+                       if (prev_undi)
+                               bnx2x_prev_unload_undi_inc(bp, 1);
+
                        udelay(10);
                }
 
index d57282172ea5497610dbb27185b41869372b9f13..c067b7888ac4fac6c174d3063406d087dcd530bb 100644 (file)
@@ -652,6 +652,7 @@ struct adapter {
        struct tid_info tids;
        void **tid_release_head;
        spinlock_t tid_release_lock;
+       struct workqueue_struct *workq;
        struct work_struct tid_release_task;
        struct work_struct db_full_task;
        struct work_struct db_drop_task;
index 1afee70ce856c884f4c4a8ee3595e4302c731d32..18fb9c61d7bacfd19319a664d2287786938ecec7 100644 (file)
@@ -643,8 +643,6 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
        return ret;
 }
 
-static struct workqueue_struct *workq;
-
 /**
  *     link_start - enable a port
  *     @dev: the port to enable
@@ -3340,7 +3338,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
        adap->tid_release_head = (void **)((uintptr_t)p | chan);
        if (!adap->tid_release_task_busy) {
                adap->tid_release_task_busy = true;
-               queue_work(workq, &adap->tid_release_task);
+               queue_work(adap->workq, &adap->tid_release_task);
        }
        spin_unlock_bh(&adap->tid_release_lock);
 }
@@ -4140,7 +4138,7 @@ void t4_db_full(struct adapter *adap)
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
                t4_set_reg_field(adap, SGE_INT_ENABLE3,
                                 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
-               queue_work(workq, &adap->db_full_task);
+               queue_work(adap->workq, &adap->db_full_task);
        }
 }
 
@@ -4150,7 +4148,7 @@ void t4_db_dropped(struct adapter *adap)
                disable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
        }
-       queue_work(workq, &adap->db_drop_task);
+       queue_work(adap->workq, &adap->db_drop_task);
 }
 
 static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -6517,6 +6515,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_disable_device;
        }
 
+       adapter->workq = create_singlethread_workqueue("cxgb4");
+       if (!adapter->workq) {
+               err = -ENOMEM;
+               goto out_free_adapter;
+       }
+
        /* PCI device has been enabled */
        adapter->flags |= DEV_ENABLED;
 
@@ -6715,6 +6719,9 @@ sriov:
  out_unmap_bar0:
        iounmap(adapter->regs);
  out_free_adapter:
+       if (adapter->workq)
+               destroy_workqueue(adapter->workq);
+
        kfree(adapter);
  out_disable_device:
        pci_disable_pcie_error_reporting(pdev);
@@ -6736,6 +6743,11 @@ static void remove_one(struct pci_dev *pdev)
        if (adapter) {
                int i;
 
+               /* Tear down per-adapter Work Queue first since it can contain
+                * references to our adapter data structure.
+                */
+               destroy_workqueue(adapter->workq);
+
                if (is_offload(adapter))
                        detach_ulds(adapter);
 
@@ -6788,20 +6800,14 @@ static int __init cxgb4_init_module(void)
 {
        int ret;
 
-       workq = create_singlethread_workqueue("cxgb4");
-       if (!workq)
-               return -ENOMEM;
-
        /* Debugfs support is optional, just warn if this fails */
        cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
        if (!cxgb4_debugfs_root)
                pr_warn("could not create debugfs entry, continuing\n");
 
        ret = pci_register_driver(&cxgb4_driver);
-       if (ret < 0) {
+       if (ret < 0)
                debugfs_remove(cxgb4_debugfs_root);
-               destroy_workqueue(workq);
-       }
 
        register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
 
@@ -6813,8 +6819,6 @@ static void __exit cxgb4_cleanup_module(void)
        unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
        pci_unregister_driver(&cxgb4_driver);
        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
-       flush_workqueue(workq);
-       destroy_workqueue(workq);
 }
 
 module_init(cxgb4_init_module);
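
The cxgb4 hunks above retire the module-global workqueue and give each adapter its own, created in init_one() and destroyed first thing in remove_one() because queued work dereferences the adapter. The fragment below is a condensed, kernel-style sketch of that lifecycle; the structure and function names are stand-ins and it is not meant to build outside a kernel tree.

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_adapter {
            struct workqueue_struct *workq;
            struct work_struct tid_release_task;
    };

    static void my_tid_release(struct work_struct *work)
    {
            /* would walk per-adapter state here */
    }

    static int my_probe(struct my_adapter *adap)
    {
            adap->workq = create_singlethread_workqueue("my_adap");
            if (!adap->workq)
                    return -ENOMEM;
            INIT_WORK(&adap->tid_release_task, my_tid_release);
            queue_work(adap->workq, &adap->tid_release_task);
            return 0;
    }

    static void my_remove(struct my_adapter *adap)
    {
            /* Destroy the per-adapter queue before freeing the adapter:
             * pending work items reference adapter fields, the same ordering
             * remove_one() enforces above. */
            destroy_workqueue(adap->workq);
    }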
index b0bba32d69d5dfd51eb56cfa6a6e9d5d63f5fe62..d22d728d4e5cb748321d9c86c4ad473a814d0831 100644 (file)
@@ -2303,7 +2303,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                            FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
                                 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
-       c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
+       c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE |
+                          FW_EQ_ETH_CMD_VIID(pi->viid));
        c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
                                   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
                                   FW_EQ_ETH_CMD_FETCHRO(1) |
index 0549170d7e2ed2b60f68322b4ef44887471bbc52..5f2729ebadbe14c4d6c1e56fcc29d078380db437 100644 (file)
@@ -1227,6 +1227,7 @@ struct fw_eq_eth_cmd {
 #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
 #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
 
+#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30)
 #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
 
 struct fw_eq_ctrl_cmd {
index bdfa80ca5e317cee72c925a4112bdeaaf2551799..a5fb9493dee826563561072185d85ccc46513337 100644 (file)
@@ -2250,7 +2250,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
        cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
                                         FW_EQ_ETH_CMD_EQSTART |
                                         FW_LEN16(cmd));
-       cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
+       cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE |
+                                  FW_EQ_ETH_CMD_VIID(pi->viid));
        cmd.fetchszm_to_iqid =
                cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
                            FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
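
Both the PF and VF egress-queue allocations now OR FW_EQ_ETH_CMD_AUTOEQUEQE into viid_pkd next to the VIID. A small userspace sketch of how the two values share that word, using the bit positions defined in the header hunk above (bit 30 for the flag, VIID shifted to bit 16); htonl() models the byte order the firmware expects.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EQ_ETH_CMD_AUTOEQUEQE  (1U << 30)           /* bit from the header hunk */
    #define EQ_ETH_CMD_VIID(x)     ((uint32_t)(x) << 16)

    int main(void)
    {
            uint16_t viid = 0x0123;                     /* hypothetical VI id */
            uint32_t host = EQ_ETH_CMD_AUTOEQUEQE | EQ_ETH_CMD_VIID(viid);
            uint32_t wire = htonl(host);                /* firmware words are big-endian */

            printf("host 0x%08x -> wire 0x%08x\n", (unsigned)host, (unsigned)wire);
            return 0;
    }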
index 9f7fa644a397a57c21bfa317b9154f1bc5b8e2ee..ee41d98b44b6d685ccb7a2c4f3d62bea419c25ff 100644 (file)
@@ -275,6 +275,9 @@ struct fec_enet_private {
        struct clk *clk_enet_out;
        struct clk *clk_ptp;
 
+       bool ptp_clk_on;
+       struct mutex ptp_clk_mutex;
+
        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
        unsigned char *tx_bounce[TX_RING_SIZE];
        struct  sk_buff *tx_skbuff[TX_RING_SIZE];
@@ -335,7 +338,7 @@ struct fec_enet_private {
        u32 cycle_speed;
        int hwts_rx_en;
        int hwts_tx_en;
-       struct timer_list time_keep;
+       struct delayed_work time_keep;
        struct regulator *reg_phy;
 };
 
index 4f87dffcb9b26688ba7ef51145138c88b8d6ad8a..89355a719625567789c5a2ed5a43df783fcad487 100644 (file)
@@ -1611,17 +1611,27 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
                                goto failed_clk_enet_out;
                }
                if (fep->clk_ptp) {
+                       mutex_lock(&fep->ptp_clk_mutex);
                        ret = clk_prepare_enable(fep->clk_ptp);
-                       if (ret)
+                       if (ret) {
+                               mutex_unlock(&fep->ptp_clk_mutex);
                                goto failed_clk_ptp;
+                       } else {
+                               fep->ptp_clk_on = true;
+                       }
+                       mutex_unlock(&fep->ptp_clk_mutex);
                }
        } else {
                clk_disable_unprepare(fep->clk_ahb);
                clk_disable_unprepare(fep->clk_ipg);
                if (fep->clk_enet_out)
                        clk_disable_unprepare(fep->clk_enet_out);
-               if (fep->clk_ptp)
+               if (fep->clk_ptp) {
+                       mutex_lock(&fep->ptp_clk_mutex);
                        clk_disable_unprepare(fep->clk_ptp);
+                       fep->ptp_clk_on = false;
+                       mutex_unlock(&fep->ptp_clk_mutex);
+               }
        }
 
        return 0;
@@ -2625,6 +2635,8 @@ fec_probe(struct platform_device *pdev)
        if (IS_ERR(fep->clk_enet_out))
                fep->clk_enet_out = NULL;
 
+       fep->ptp_clk_on = false;
+       mutex_init(&fep->ptp_clk_mutex);
        fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
        fep->bufdesc_ex =
                pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
@@ -2715,10 +2727,10 @@ fec_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       cancel_delayed_work_sync(&fep->time_keep);
        cancel_work_sync(&fep->tx_timeout_work);
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
-       del_timer_sync(&fep->time_keep);
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
        if (fep->ptp_clock)
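
The fec changes above add a ptp_clk_on flag protected by ptp_clk_mutex so that clock gating and the PTP callbacks cannot race: callers first check the flag under the mutex and bail out if the clock is off. A standalone pthread sketch of that guard pattern, with illustrative names rather than the driver's:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ptp_clk_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool ptp_clk_on;

    static void clk_enable(bool enable)
    {
            pthread_mutex_lock(&ptp_clk_mutex);
            ptp_clk_on = enable;          /* driver flips this around clk_(un)prepare */
            pthread_mutex_unlock(&ptp_clk_mutex);
    }

    static int ptp_settime(long sec)
    {
            int ret = 0;

            pthread_mutex_lock(&ptp_clk_mutex);
            if (!ptp_clk_on)
                    ret = -EINVAL;        /* same bail-out as fec_ptp_settime() */
            else
                    printf("writing %ld to the timecounter\n", sec);
            pthread_mutex_unlock(&ptp_clk_mutex);
            return ret;
    }

    int main(void)
    {
            printf("disabled: %d\n", ptp_settime(1));   /* -EINVAL */
            clk_enable(true);
            printf("enabled:  %d\n", ptp_settime(1));   /* 0 */
            return 0;
    }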
index 82386b29914a8640bd2e17f956bdd0946fc5c3ce..cca3617a2321fa22198658e83b1ee5d65176f298 100644 (file)
@@ -245,12 +245,20 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
        u64 ns;
        unsigned long flags;
 
+       mutex_lock(&fep->ptp_clk_mutex);
+       /* Check the ptp clock */
+       if (!fep->ptp_clk_on) {
+               mutex_unlock(&fep->ptp_clk_mutex);
+               return -EINVAL;
+       }
+
        ns = ts->tv_sec * 1000000000ULL;
        ns += ts->tv_nsec;
 
        spin_lock_irqsave(&fep->tmreg_lock, flags);
        timecounter_init(&fep->tc, &fep->cc, ns);
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       mutex_unlock(&fep->ptp_clk_mutex);
        return 0;
 }
 
@@ -338,17 +346,22 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
  * fec_time_keep - call timecounter_read every second to avoid timer overrun
  *                 because ENET just support 32bit counter, will timeout in 4s
  */
-static void fec_time_keep(unsigned long _data)
+static void fec_time_keep(struct work_struct *work)
 {
-       struct fec_enet_private *fep = (struct fec_enet_private *)_data;
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
        u64 ns;
        unsigned long flags;
 
-       spin_lock_irqsave(&fep->tmreg_lock, flags);
-       ns = timecounter_read(&fep->tc);
-       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       mutex_lock(&fep->ptp_clk_mutex);
+       if (fep->ptp_clk_on) {
+               spin_lock_irqsave(&fep->tmreg_lock, flags);
+               ns = timecounter_read(&fep->tc);
+               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       }
+       mutex_unlock(&fep->ptp_clk_mutex);
 
-       mod_timer(&fep->time_keep, jiffies + HZ);
+       schedule_delayed_work(&fep->time_keep, HZ);
 }
 
 /**
@@ -386,15 +399,13 @@ void fec_ptp_init(struct platform_device *pdev)
 
        fec_ptp_start_cyclecounter(ndev);
 
-       init_timer(&fep->time_keep);
-       fep->time_keep.data = (unsigned long)fep;
-       fep->time_keep.function = fec_time_keep;
-       fep->time_keep.expires = jiffies + HZ;
-       add_timer(&fep->time_keep);
+       INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
 
        fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
        if (IS_ERR(fep->ptp_clock)) {
                fep->ptp_clock = NULL;
                pr_err("ptp_clock_register failed\n");
        }
+
+       schedule_delayed_work(&fep->time_keep, HZ);
 }
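
fec_time_keep() now takes a struct work_struct pointer and recovers its private data with to_delayed_work()/container_of() instead of casting an unsigned long, and it reschedules itself with schedule_delayed_work() in place of mod_timer(). The userspace demonstration below shows only the container_of arithmetic that makes the recovery work; the embedded structure is a stand-in, not the driver's.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct delayed_work_stub { int pending; };

    struct fake_priv {
            int id;
            struct delayed_work_stub time_keep;   /* embedded, like fep->time_keep */
    };

    static void time_keep_handler(struct delayed_work_stub *dwork)
    {
            /* Recover the enclosing private structure from the embedded member. */
            struct fake_priv *priv = container_of(dwork, struct fake_priv, time_keep);

            printf("handler sees priv->id = %d\n", priv->id);
    }

    int main(void)
    {
            struct fake_priv priv = { .id = 42 };

            time_keep_handler(&priv.time_keep);
            return 0;
    }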
index c9127562bd22cb51114249d35264bcbb679dcc3d..21978cc019e7c86dab83968ba994c0e9051c8e33 100644 (file)
@@ -292,6 +292,18 @@ failure:
        atomic_add(buffers_added, &(pool->available));
 }
 
+/*
+ * The final 8 bytes of the buffer list is a counter of frames dropped
+ * because there was not a buffer in the buffer list capable of holding
+ * the frame.
+ */
+static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
+{
+       __be64 *p = adapter->buffer_list_addr + 4096 - 8;
+
+       adapter->rx_no_buffer = be64_to_cpup(p);
+}
+
 /* replenish routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
@@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
                        ibmveth_replenish_buffer_pool(adapter, pool);
        }
 
-       adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
-                                               4096 - 8);
+       ibmveth_update_rx_no_buffer(adapter);
 }
 
 /* empty and free ana buffer pool - also used to do cleanup in error paths */
@@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev)
 
        free_irq(netdev->irq, netdev);
 
-       adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
-                                               4096 - 8);
+       ibmveth_update_rx_no_buffer(adapter);
 
        ibmveth_cleanup(adapter);
 
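ibmveth_update_rx_no_buffer() folds two open-coded pointer expressions into one helper that reads the big-endian drop counter kept in the last 8 bytes of the 4 KiB buffer list. A userspace sketch of that read, with a portable stand-in for be64_to_cpup():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BUF_LIST_SIZE 4096

    /* Portable stand-in for the kernel's be64_to_cpup(). */
    static uint64_t be64_read(const unsigned char *p)
    {
            uint64_t v = 0;
            int i;

            for (i = 0; i < 8; i++)
                    v = (v << 8) | p[i];
            return v;
    }

    static uint64_t rx_no_buffer(const unsigned char *buffer_list)
    {
            /* Final 8 bytes of the page hold the firmware's drop counter. */
            return be64_read(buffer_list + BUF_LIST_SIZE - 8);
    }

    int main(void)
    {
            unsigned char page[BUF_LIST_SIZE] = { 0 };
            const unsigned char counter[8] = { 0, 0, 0, 0, 0, 0, 1, 44 };  /* 300 drops */

            memcpy(page + BUF_LIST_SIZE - 8, counter, 8);
            printf("rx_no_buffer = %llu\n", (unsigned long long)rx_no_buffer(page));
            return 0;
    }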
index bb7fe98b3a6cd7bd3c99d7454186f19451e84f1d..537b6216971d3b77c9ca169f01e2f6aaeb1f2ad1 100644 (file)
@@ -247,7 +247,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
        u32 prttsyn_stat;
        int n;
 
-       if (pf->flags & I40E_FLAG_PTP)
+       if (!(pf->flags & I40E_FLAG_PTP))
                return;
 
        prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
index 89672551dce93628e06d0d7777d25b55d51c8b27..3ac6a0d2f1433d8bff3323a28f77af063a2ddb89 100644 (file)
@@ -1003,11 +1003,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
                                  u32 v_retval, u8 *msg, u16 msglen)
 {
-       struct i40e_pf *pf = vf->pf;
-       struct i40e_hw *hw = &pf->hw;
-       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+       struct i40e_pf *pf;
+       struct i40e_hw *hw;
+       int abs_vf_id;
        i40e_status aq_ret;
 
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return -EINVAL;
+
+       pf = vf->pf;
+       hw = &pf->hw;
+       abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
        /* single place to detect unsuccessful return values */
        if (v_retval) {
                vf->num_invalid_msgs++;
@@ -1928,17 +1936,20 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 {
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf = pf->vf;
-       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        int i;
 
-       for (i = 0; i < pf->num_alloc_vfs; i++) {
+       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+               /* Not all vfs are enabled so skip the ones that are not */
+               if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+                   !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+                       continue;
+
                /* Ignore return value on purpose - a given VF may fail, but
                 * we need to keep going and send to all of them
                 */
                i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
                                       msg, msglen, NULL);
-               vf++;
-               abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        }
 }
 
@@ -1954,12 +1965,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf = pf->vf;
        struct i40e_link_status *ls = &pf->hw.phy.link_info;
-       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        int i;
 
        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
-       for (i = 0; i < pf->num_alloc_vfs; i++) {
+       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
                if (vf->link_forced) {
                        pfe.event_data.link_event.link_status = vf->link_up;
                        pfe.event_data.link_event.link_speed =
@@ -1972,8 +1983,6 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
                i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
                                       0, (u8 *)&pfe, sizeof(pfe),
                                       NULL);
-               vf++;
-               abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        }
 }
 
@@ -2002,7 +2011,18 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 {
        struct i40e_virtchnl_pf_event pfe;
-       int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+       int abs_vf_id;
+
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return;
+
+       /* verify if the VF is in either init or active before proceeding */
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+           !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+               return;
+
+       abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
 
        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
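
The i40e loops above stop pre-computing abs_vf_id before the loop and stop bumping the VF pointer after the send, so the absolute id always belongs to the VF being visited and the last iteration no longer reads past the array. A tiny sketch of the corrected iteration shape, with plain integers standing in for the i40e structures:

    #include <stdio.h>

    #define NUM_VFS 3

    struct fake_vf { int vf_id; };

    int main(void)
    {
            struct fake_vf vfs[NUM_VFS] = { { 0 }, { 1 }, { 2 } };
            int base = 16;          /* stand-in for func_caps.vf_base_id */
            int i;

            /* Fixed pattern: derive the absolute id from the element currently
             * being visited, never from vf[i + 1]. */
            for (i = 0; i < NUM_VFS; i++) {
                    int abs_vf_id = vfs[i].vf_id + base;

                    printf("notify VF %d (abs id %d)\n", vfs[i].vf_id, abs_vf_id);
            }
            return 0;
    }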
index 16039d1497b84a68efb84aa8f66aa0fd250418f4..b84f5ea3d659ee599e02db455296c54bf10e5fbe 100644 (file)
@@ -268,7 +268,7 @@ struct qlcnic_fdt {
        u16     cksum;
        u16     unused;
        u8      model[16];
-       u16     mfg_id;
+       u8      mfg_id;
        u16     id;
        u8      flag;
        u8      erase_cmd;
@@ -2362,6 +2362,19 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
                return QLC_DEFAULT_VNIC_COUNT;
 }
 
+static inline void qlcnic_swap32_buffer(u32 *buffer, int count)
+{
+#if defined(__BIG_ENDIAN)
+       u32 *tmp = buffer;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               *tmp = swab32(*tmp);
+               tmp++;
+       }
+#endif
+}
+
 #ifdef CONFIG_QLCNIC_HWMON
 void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
 void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
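
qlcnic_swap32_buffer() is a no-op on little-endian builds and byte-swaps every 32-bit word otherwise, keeping buffers exchanged with the little-endian adapter in their on-device layout. A userspace sketch of the same helper, with glibc's <endian.h> macros replacing the kernel's __BIG_ENDIAN test:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
                   ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    static void swap32_buffer(uint32_t *buf, int count)
    {
    #if __BYTE_ORDER == __BIG_ENDIAN
            int i;

            for (i = 0; i < count; i++)
                    buf[i] = swab32(buf[i]);
    #else
            (void)buf;
            (void)count;            /* little-endian hosts match the device layout */
    #endif
    }

    int main(void)
    {
            uint32_t words[2] = { 0x11223344u, 0xaabbccddu };

            swap32_buffer(words, 2);
            printf("0x%08x 0x%08x\n", (unsigned)words[0], (unsigned)words[1]);
            return 0;
    }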
index a4a4ec0b68f8d5e9d7b0c6f3ed5050b5787a37c4..476e4998ef991693008818a4c559df1c0fc03e2d 100644 (file)
@@ -2603,7 +2603,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
        }
 
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
-                                    (addr));
+                                    (addr & 0xFFFF0000));
 
        range = flash_offset + (count * sizeof(u32));
        /* Check if data is spread across multiple sectors */
@@ -2753,7 +2753,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
        ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
                                                (u8 *)&adapter->ahw->fdt,
                                                count);
-
+       qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count);
        qlcnic_83xx_unlock_flash(adapter);
        return ret;
 }
@@ -2788,7 +2788,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
 
        addr1 = (sector_start_addr & 0xFF) << 16;
        addr2 = (sector_start_addr & 0xFF0000) >> 16;
-       reversed_addr = addr1 | addr2;
+       reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00);
 
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
                                     reversed_addr);
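
The erase path now folds the middle byte of the sector address into reversed_addr, so all three bytes of the 24-bit flash address are sent, just reordered. A userspace check of that transformation with an arbitrary example value:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reverse_sector_addr(uint32_t sector_start_addr)
    {
            uint32_t addr1 = (sector_start_addr & 0xFF) << 16;      /* low byte up */
            uint32_t addr2 = (sector_start_addr & 0xFF0000) >> 16;  /* high byte down */

            /* The fix: keep the middle byte (0xFF00) instead of dropping it. */
            return addr1 | addr2 | (sector_start_addr & 0xFF00);
    }

    int main(void)
    {
            printf("0x123456 -> 0x%06x\n", (unsigned)reverse_sector_addr(0x123456));
            return 0;
    }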
index f33559b725283cf69b08e80a8179fb488b89acb2..86783e1afcf76d77176a079e4420a50f7b452b62 100644 (file)
@@ -1378,31 +1378,45 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 {
        struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
        const struct firmware *fw = fw_info->fw;
-       u32 dest, *p_cache;
+       u32 dest, *p_cache, *temp;
        int i, ret = -EIO;
+       __le32 *temp_le;
        u8 data[16];
        size_t size;
        u64 addr;
 
+       temp = kzalloc(fw->size, GFP_KERNEL);
+       if (!temp) {
+               release_firmware(fw);
+               fw_info->fw = NULL;
+               return -ENOMEM;
+       }
+
+       temp_le = (__le32 *)fw->data;
+
+       /* FW image in file is in little endian, swap the data to nullify
+        * the effect of writel() operation on big endian platform.
+        */
+       for (i = 0; i < fw->size / sizeof(u32); i++)
+               temp[i] = __le32_to_cpu(temp_le[i]);
+
        dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
        size = (fw->size & ~0xF);
-       p_cache = (u32 *)fw->data;
+       p_cache = temp;
        addr = (u64)dest;
 
        ret = qlcnic_ms_mem_write128(adapter, addr,
                                     p_cache, size / 16);
        if (ret) {
                dev_err(&adapter->pdev->dev, "MS memory write failed\n");
-               release_firmware(fw);
-               fw_info->fw = NULL;
-               return -EIO;
+               goto exit;
        }
 
        /* alignment check */
        if (fw->size & 0xF) {
                addr = dest + size;
                for (i = 0; i < (fw->size & 0xF); i++)
-                       data[i] = fw->data[size + i];
+                       data[i] = temp[size + i];
                for (; i < 16; i++)
                        data[i] = 0;
                ret = qlcnic_ms_mem_write128(adapter, addr,
@@ -1410,15 +1424,16 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
                if (ret) {
                        dev_err(&adapter->pdev->dev,
                                "MS memory write failed\n");
-                       release_firmware(fw);
-                       fw_info->fw = NULL;
-                       return -EIO;
+                       goto exit;
                }
        }
+
+exit:
        release_firmware(fw);
        fw_info->fw = NULL;
+       kfree(temp);
 
-       return 0;
+       return ret;
 }
 
 static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
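
qlcnic_83xx_copy_fw_file() above copies the firmware blob into a scratch buffer and converts each little-endian word to host order before the MS memory writes, cancelling out the swap writel() performs on big-endian machines, and it routes every failure through a single exit label that frees the buffer. The sketch below covers only the conversion step, with a plain byte-assembly function standing in for __le32_to_cpu():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for __le32_to_cpu(): assemble the value from explicit bytes so
     * the result is correct on either host endianness. */
    static uint32_t le32_to_host(const unsigned char *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
            /* Four bytes of a pretend firmware image, little-endian 0x12345678. */
            const unsigned char fw_data[4] = { 0x78, 0x56, 0x34, 0x12 };
            size_t words = sizeof(fw_data) / sizeof(uint32_t);
            uint32_t *temp = calloc(words, sizeof(uint32_t));
            size_t i;

            if (!temp)
                    return 1;
            for (i = 0; i < words; i++)
                    temp[i] = le32_to_host(fw_data + 4 * i);
            printf("word 0 = 0x%08x\n", (unsigned)temp[0]);
            free(temp);
            return 0;
    }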
index e46fc39d425d45ee1d0d081b45954801c46f89ed..c9f57fb84b9eb47215f0cc21a680dce46d253e56 100644 (file)
@@ -47,15 +47,26 @@ struct qlcnic_common_entry_hdr {
        u32     type;
        u32     offset;
        u32     cap_size;
+#if defined(__LITTLE_ENDIAN)
        u8      mask;
        u8      rsvd[2];
        u8      flags;
+#else
+       u8      flags;
+       u8      rsvd[2];
+       u8      mask;
+#endif
 } __packed;
 
 struct __crb {
        u32     addr;
+#if defined(__LITTLE_ENDIAN)
        u8      stride;
        u8      rsvd1[3];
+#else
+       u8      rsvd1[3];
+       u8      stride;
+#endif
        u32     data_size;
        u32     no_ops;
        u32     rsvd2[4];
@@ -63,15 +74,28 @@ struct __crb {
 
 struct __ctrl {
        u32     addr;
+#if defined(__LITTLE_ENDIAN)
        u8      stride;
        u8      index_a;
        u16     timeout;
+#else
+       u16     timeout;
+       u8      index_a;
+       u8      stride;
+#endif
        u32     data_size;
        u32     no_ops;
+#if defined(__LITTLE_ENDIAN)
        u8      opcode;
        u8      index_v;
        u8      shl_val;
        u8      shr_val;
+#else
+       u8      shr_val;
+       u8      shl_val;
+       u8      index_v;
+       u8      opcode;
+#endif
        u32     val1;
        u32     val2;
        u32     val3;
@@ -79,16 +103,27 @@ struct __ctrl {
 
 struct __cache {
        u32     addr;
+#if defined(__LITTLE_ENDIAN)
        u16     stride;
        u16     init_tag_val;
+#else
+       u16     init_tag_val;
+       u16     stride;
+#endif
        u32     size;
        u32     no_ops;
        u32     ctrl_addr;
        u32     ctrl_val;
        u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
        u8      read_addr_stride;
        u8      read_addr_num;
        u8      rsvd1[2];
+#else
+       u8      rsvd1[2];
+       u8      read_addr_num;
+       u8      read_addr_stride;
+#endif
 } __packed;
 
 struct __ocm {
@@ -122,23 +157,39 @@ struct __mux {
 
 struct __queue {
        u32     sel_addr;
+#if defined(__LITTLE_ENDIAN)
        u16     stride;
        u8      rsvd[2];
+#else
+       u8      rsvd[2];
+       u16     stride;
+#endif
        u32     size;
        u32     no_ops;
        u8      rsvd2[8];
        u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
        u8      read_addr_stride;
        u8      read_addr_cnt;
        u8      rsvd3[2];
+#else
+       u8      rsvd3[2];
+       u8      read_addr_cnt;
+       u8      read_addr_stride;
+#endif
 } __packed;
 
 struct __pollrd {
        u32     sel_addr;
        u32     read_addr;
        u32     sel_val;
+#if defined(__LITTLE_ENDIAN)
        u16     sel_val_stride;
        u16     no_ops;
+#else
+       u16     no_ops;
+       u16     sel_val_stride;
+#endif
        u32     poll_wait;
        u32     poll_mask;
        u32     data_size;
@@ -153,9 +204,15 @@ struct __mux2 {
        u32     no_ops;
        u32     sel_val_mask;
        u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
        u8      sel_val_stride;
        u8      data_size;
        u8      rsvd[2];
+#else
+       u8      rsvd[2];
+       u8      data_size;
+       u8      sel_val_stride;
+#endif
 } __packed;
 
 struct __pollrdmwr {
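
The minidump headers above mirror their u8/u16 sub-fields under #else because each header is handled as 32-bit words, and the bytes of a word land at opposite ends of a packed structure depending on host byte order. The userspace illustration below shows that effect with a generic header layout, not qlcnic's exact one:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct entry_hdr_le {                 /* little-endian layout of one word */
            uint8_t mask;
            uint8_t rsvd[2];
            uint8_t flags;
    };

    int main(void)
    {
            uint32_t word = 0xAA0000BBu;  /* bits 0-7 = 0xBB, bits 24-31 = 0xAA */
            struct entry_hdr_le h;

            memcpy(&h, &word, sizeof(h));
            printf("mask=0x%02x flags=0x%02x\n", h.mask, h.flags);
            /* Little-endian host: mask=0xBB flags=0xAA (matches the LE layout).
             * Big-endian host:    mask=0xAA flags=0xBB, which is why the #else
             * branches above list the members in reverse order. */
            return 0;
    }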
index f5786d5792df06fe16db6f7ffd2276f9bdabe96f..59a721fba018249679bf15d0984b90e4835c155e 100644 (file)
@@ -280,6 +280,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
        if (ret != 0)
                return ret;
        qlcnic_read_crb(adapter, buf, offset, size);
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 
        return size;
 }
@@ -296,6 +297,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
        if (ret != 0)
                return ret;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        qlcnic_write_crb(adapter, buf, offset, size);
        return size;
 }
@@ -329,6 +331,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
                return -EIO;
 
        memcpy(buf, &data, size);
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 
        return size;
 }
@@ -346,6 +349,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
        if (ret != 0)
                return ret;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        memcpy(&data, buf, size);
 
        if (qlcnic_pci_mem_write_2M(adapter, offset, data))
@@ -412,6 +416,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
        if (rem)
                return QL_STATUS_INVALID_PARAM;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
        ret = validate_pm_config(adapter, pm_cfg, count);
 
@@ -474,6 +479,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
                pm_cfg[pci_func].dest_npar = 0;
                pm_cfg[pci_func].pci_func = i;
        }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        return size;
 }
 
@@ -555,6 +561,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
        if (rem)
                return QL_STATUS_INVALID_PARAM;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
        ret = validate_esw_config(adapter, esw_cfg, count);
        if (ret)
@@ -649,6 +656,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
                if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
                        return QL_STATUS_INVALID_PARAM;
        }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        return size;
 }
 
@@ -688,6 +696,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
        if (rem)
                return QL_STATUS_INVALID_PARAM;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        np_cfg = (struct qlcnic_npar_func_cfg *)buf;
        ret = validate_npar_config(adapter, np_cfg, count);
        if (ret)
@@ -759,6 +768,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
                np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
                np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
        }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        return size;
 }
 
@@ -916,6 +926,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
 
        pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
        count = size / sizeof(struct qlcnic_pci_func_cfg);
+       qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32));
        for (i = 0; i < count; i++) {
                pci_cfg[i].pci_func = pci_info[i].id;
                pci_cfg[i].func_type = pci_info[i].type;
@@ -969,6 +980,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
        }
 
        qlcnic_83xx_unlock_flash(adapter);
+       qlcnic_swap32_buffer((u32 *)p_read_buf, count);
        memcpy(buf, p_read_buf, size);
        kfree(p_read_buf);
 
@@ -986,9 +998,10 @@ static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
        if (!p_cache)
                return -ENOMEM;
 
+       count = size / sizeof(u32);
+       qlcnic_swap32_buffer((u32 *)buf, count);
        memcpy(p_cache, buf, size);
        p_src = p_cache;
-       count = size / sizeof(u32);
 
        if (qlcnic_83xx_lock_flash(adapter) != 0) {
                kfree(p_cache);
@@ -1053,6 +1066,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
        if (!p_cache)
                return -ENOMEM;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        memcpy(p_cache, buf, size);
        p_src = p_cache;
        count = size / sizeof(u32);
index 60e4ca01ccbb7d5c2fc26bc8fdcc26a49f943c17..a96955597755326475b5478b974c59ba02c223c7 100644 (file)
@@ -739,7 +739,10 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;
 
-       if (!vlan->port->passthru)
+       /* Support unicast filter only on passthru devices.
+        * Multicast filter should be allowed on all devices.
+        */
+       if (!vlan->port->passthru && is_unicast_ether_addr(addr))
                return -EOPNOTSUPP;
 
        if (flags & NLM_F_REPLACE)
@@ -760,7 +763,10 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;
 
-       if (!vlan->port->passthru)
+       /* Support unicast filter only on passthru devices.
+        * Multicast filter should be allowed on all devices.
+        */
+       if (!vlan->port->passthru && is_unicast_ether_addr(addr))
                return -EOPNOTSUPP;
 
        if (is_unicast_ether_addr(addr))
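
The macvlan hunks reject only unicast addresses on non-passthru ports, so multicast FDB entries are now accepted everywhere; the distinction comes down to the group bit in the first octet, which is what is_unicast_ether_addr() tests. A small userspace sketch of that classification and of the patched guard:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the kernel helper: an address is unicast when the group bit
     * (bit 0 of the first octet) is clear. */
    static bool is_unicast(const unsigned char *addr)
    {
            return (addr[0] & 0x01) == 0;
    }

    int main(void)
    {
            const unsigned char uni[6]   = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
            const unsigned char multi[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            bool passthru = false;

            /* Same shape as the patched macvlan_fdb_add() guard. */
            printf("unicast on non-passthru:   %s\n",
                   (!passthru && is_unicast(uni)) ? "-EOPNOTSUPP" : "allowed");
            printf("multicast on non-passthru: %s\n",
                   (!passthru && is_unicast(multi)) ? "-EOPNOTSUPP" : "allowed");
            return 0;
    }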
index 526b94cea56980633c1d78caabac13004211766f..fdce1ea28790c4e9ae0d56e9995d1a299782f4ec 100644 (file)
@@ -157,6 +157,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
        return bcm7xxx_28nm_afe_config_init(phydev);
 }
 
+static int bcm7xxx_28nm_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       /* Re-apply workarounds coming out suspend/resume */
+       ret = bcm7xxx_28nm_config_init(phydev);
+       if (ret)
+               return ret;
+
+       /* 28nm Gigabit PHYs come out of reset without any half-duplex
+        * or "hub" compliant advertised mode, fix that. This does not
+        * cause any problems with the PHY library since genphy_config_aneg()
+        * gracefully handles auto-negotiated and forced modes.
+        */
+       return genphy_config_aneg(phydev);
+}
+
 static int phy_set_clr_bits(struct phy_device *dev, int location,
                                        int set_mask, int clr_mask)
 {
@@ -212,7 +229,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
 }
 
 /* Workaround for putting the PHY in IDDQ mode, required
- * for all BCM7XXX PHYs
+ * for all BCM7XXX 40nm and 65nm PHYs
  */
 static int bcm7xxx_suspend(struct phy_device *phydev)
 {
@@ -257,8 +274,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .config_init    = bcm7xxx_28nm_afe_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .suspend        = bcm7xxx_suspend,
-       .resume         = bcm7xxx_28nm_afe_config_init,
+       .resume         = bcm7xxx_28nm_resume,
        .driver         = { .owner = THIS_MODULE },
 }, {
        .phy_id         = PHY_ID_BCM7439,
@@ -270,8 +286,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .config_init    = bcm7xxx_28nm_afe_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .suspend        = bcm7xxx_suspend,
-       .resume         = bcm7xxx_28nm_afe_config_init,
+       .resume         = bcm7xxx_28nm_resume,
        .driver         = { .owner = THIS_MODULE },
 }, {
        .phy_id         = PHY_ID_BCM7445,
@@ -283,21 +298,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .config_init    = bcm7xxx_28nm_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .suspend        = bcm7xxx_suspend,
-       .resume         = bcm7xxx_28nm_config_init,
-       .driver         = { .owner = THIS_MODULE },
-}, {
-       .name           = "Broadcom BCM7XXX 28nm",
-       .phy_id         = PHY_ID_BCM7XXX_28,
-       .phy_id_mask    = PHY_BCM_OUI_MASK,
-       .features       = PHY_GBIT_FEATURES |
-                         SUPPORTED_Pause | SUPPORTED_Asym_Pause,
-       .flags          = PHY_IS_INTERNAL,
-       .config_init    = bcm7xxx_28nm_config_init,
-       .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
-       .suspend        = bcm7xxx_suspend,
-       .resume         = bcm7xxx_28nm_config_init,
+       .resume         = bcm7xxx_28nm_afe_config_init,
        .driver         = { .owner = THIS_MODULE },
 }, {
        .phy_id         = PHY_BCM_OUI_4,
@@ -331,7 +332,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
        { PHY_ID_BCM7366, 0xfffffff0, },
        { PHY_ID_BCM7439, 0xfffffff0, },
        { PHY_ID_BCM7445, 0xfffffff0, },
-       { PHY_ID_BCM7XXX_28, 0xfffffc00 },
        { PHY_BCM_OUI_4, 0xffff0000 },
        { PHY_BCM_OUI_5, 0xffffff00 },
        { }
index 180c49479c42f9b4a19f070056b782923de5084c..a4b08198fb9f28363ef1a49a5b84ec64edeabcf9 100644 (file)
@@ -42,6 +42,22 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
 }
 
 static int smsc_phy_config_init(struct phy_device *phydev)
+{
+       int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+
+       if (rc < 0)
+               return rc;
+
+       /* Enable energy detect mode for this SMSC Transceivers */
+       rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+                      rc | MII_LAN83C185_EDPWRDOWN);
+       if (rc < 0)
+               return rc;
+
+       return smsc_phy_ack_interrupt(phydev);
+}
+
+static int smsc_phy_reset(struct phy_device *phydev)
 {
        int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES);
        if (rc < 0)
@@ -66,18 +82,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
                        rc = phy_read(phydev, MII_BMCR);
                } while (rc & BMCR_RESET);
        }
-
-       rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
-       if (rc < 0)
-               return rc;
-
-       /* Enable energy detect mode for this SMSC Transceivers */
-       rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
-                      rc | MII_LAN83C185_EDPWRDOWN);
-       if (rc < 0)
-               return rc;
-
-       return smsc_phy_ack_interrupt (phydev);
+       return 0;
 }
 
 static int lan911x_config_init(struct phy_device *phydev)
@@ -142,6 +147,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .config_init    = smsc_phy_config_init,
+       .soft_reset     = smsc_phy_reset,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
@@ -164,6 +170,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .config_init    = smsc_phy_config_init,
+       .soft_reset     = smsc_phy_reset,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
@@ -186,6 +193,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .config_init    = smsc_phy_config_init,
+       .soft_reset     = smsc_phy_reset,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
@@ -230,6 +238,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = lan87xx_read_status,
        .config_init    = smsc_phy_config_init,
+       .soft_reset     = smsc_phy_reset,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
index f46a24ffa3fe7be040d488bc49bfba912848fde1..79cb8313c7d8b0b86681bc75867e8b6e5ee746ef 100644 (file)
@@ -453,7 +453,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
                base = dt_mem_next_cell(dt_root_addr_cells, &prop);
                size = dt_mem_next_cell(dt_root_size_cells, &prop);
 
-               if (base && size &&
+               if (size &&
                    early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
                        pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
                                uname, &base, (unsigned long)size / SZ_1M);
index 3e06a699352d0c83a8c660b35d64c97babcc7469..1471e0a223a59286497501e84ed72075aff18b01 100644 (file)
@@ -301,16 +301,17 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
        /* Get the reg property (if any) */
        addr = of_get_property(device, "reg", NULL);
 
+       /* Try the new-style interrupts-extended first */
+       res = of_parse_phandle_with_args(device, "interrupts-extended",
+                                       "#interrupt-cells", index, out_irq);
+       if (!res)
+               return of_irq_parse_raw(addr, out_irq);
+
        /* Get the interrupts property */
        intspec = of_get_property(device, "interrupts", &intlen);
-       if (intspec == NULL) {
-               /* Try the new-style interrupts-extended */
-               res = of_parse_phandle_with_args(device, "interrupts-extended",
-                                               "#interrupt-cells", index, out_irq);
-               if (res)
-                       return -EINVAL;
-               return of_irq_parse_raw(addr, out_irq);
-       }
+       if (intspec == NULL)
+               return -EINVAL;
+
        intlen /= sizeof(*intspec);
 
        pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
index d410026678334885bae9e51893fde5f9873a7831..a737cb5974deae12aed1987d5250326ffa350445 100644 (file)
@@ -27,6 +27,7 @@ static struct selftest_results {
 #define NO_OF_NODES 2
 static struct device_node *nodes[NO_OF_NODES];
 static int last_node_index;
+static bool selftest_live_tree;
 
 #define selftest(result, fmt, ...) { \
        if (!(result)) { \
@@ -630,13 +631,6 @@ static int attach_node_and_children(struct device_node *np)
 {
        struct device_node *next, *root = np, *dup;
 
-       if (!np) {
-               pr_warn("%s: No tree to attach; not running tests\n",
-                       __func__);
-               return -ENODATA;
-       }
-
-
        /* skip root node */
        np = np->child;
        /* storing a copy in temporary node */
@@ -672,12 +666,12 @@ static int attach_node_and_children(struct device_node *np)
 static int __init selftest_data_add(void)
 {
        void *selftest_data;
-       struct device_node *selftest_data_node;
+       struct device_node *selftest_data_node, *np;
        extern uint8_t __dtb_testcases_begin[];
        extern uint8_t __dtb_testcases_end[];
        const int size = __dtb_testcases_end - __dtb_testcases_begin;
 
-       if (!size || !of_allnodes) {
+       if (!size) {
                pr_warn("%s: No testcase data to attach; not running tests\n",
                        __func__);
                return -ENODATA;
@@ -692,6 +686,22 @@ static int __init selftest_data_add(void)
                return -ENOMEM;
        }
        of_fdt_unflatten_tree(selftest_data, &selftest_data_node);
+       if (!selftest_data_node) {
+               pr_warn("%s: No tree to attach; not running tests\n", __func__);
+               return -ENODATA;
+       }
+
+       if (!of_allnodes) {
+               /* enabling flag for removing nodes */
+               selftest_live_tree = true;
+               of_allnodes = selftest_data_node;
+
+               for_each_of_allnodes(np)
+                       __of_attach_node_sysfs(np);
+               of_aliases = of_find_node_by_path("/aliases");
+               of_chosen = of_find_node_by_path("/chosen");
+               return 0;
+       }
 
        /* attach the sub-tree to live tree */
        return attach_node_and_children(selftest_data_node);
@@ -723,6 +733,18 @@ static void selftest_data_remove(void)
        struct device_node *np;
        struct property *prop;
 
+       if (selftest_live_tree) {
+               of_node_put(of_aliases);
+               of_node_put(of_chosen);
+               of_aliases = NULL;
+               of_chosen = NULL;
+               for_each_child_of_node(of_allnodes, np)
+                       detach_node_and_children(np);
+               __of_detach_node_sysfs(of_allnodes);
+               of_allnodes = NULL;
+               return;
+       }
+
        while (last_node_index >= 0) {
                if (nodes[last_node_index]) {
                        np = of_find_node_by_path(nodes[last_node_index]->full_name);
index 2d8a4d05d78fc02513fe5a2587e545bc36914f08..8922c376456aee763090296a7045211aaae90839 100644 (file)
@@ -1,9 +1,18 @@
 menu "PCI host controller drivers"
        depends on PCI
 
+config PCI_DRA7XX
+       bool "TI DRA7xx PCIe controller"
+       select PCIE_DW
+       depends on OF && HAS_IOMEM && TI_PIPE3
+       help
+        Enables support for the PCIe controller in the DRA7xx SoC.  There
+        are two instances of PCIe controller in DRA7xx.  This controller can
+        act both as EP and RC.  This reuses the Designware core.
+
 config PCI_MVEBU
        bool "Marvell EBU PCIe controller"
-       depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD
+       depends on ARCH_MVEBU || ARCH_DOVE
        depends on OF
 
 config PCIE_DW
index 0daec7941aba44f30fb17e37857872f2e523fce1..d0e88f114ff93b5e016f7993f012b17ce4ff2631 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
new file mode 100644 (file)
index 0000000..52b34fe
--- /dev/null
@@ -0,0 +1,458 @@
+/*
+ * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define        PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN             0x0024
+#define        PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN         0x0028
+#define        ERR_SYS                                         BIT(0)
+#define        ERR_FATAL                                       BIT(1)
+#define        ERR_NONFATAL                                    BIT(2)
+#define        ERR_COR                                         BIT(3)
+#define        ERR_AXI                                         BIT(4)
+#define        ERR_ECRC                                        BIT(5)
+#define        PME_TURN_OFF                                    BIT(8)
+#define        PME_TO_ACK                                      BIT(9)
+#define        PM_PME                                          BIT(10)
+#define        LINK_REQ_RST                                    BIT(11)
+#define        LINK_UP_EVT                                     BIT(12)
+#define        CFG_BME_EVT                                     BIT(13)
+#define        CFG_MSE_EVT                                     BIT(14)
+#define        INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+                       ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
+                       LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
+
+#define        PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI              0x0034
+#define        PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI          0x0038
+#define        INTA                                            BIT(0)
+#define        INTB                                            BIT(1)
+#define        INTC                                            BIT(2)
+#define        INTD                                            BIT(3)
+#define        MSI                                             BIT(4)
+#define        LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+
+#define        PCIECTRL_DRA7XX_CONF_DEVICE_CMD                 0x0104
+#define        LTSSM_EN                                        0x1
+
+#define        PCIECTRL_DRA7XX_CONF_PHY_CS                     0x010C
+#define        LINK_UP                                         BIT(16)
+
+struct dra7xx_pcie {
+       void __iomem            *base;
+       struct phy              **phy;
+       int                     phy_count;
+       struct device           *dev;
+       struct pcie_port        pp;
+};
+
+#define to_dra7xx_pcie(x)      container_of((x), struct dra7xx_pcie, pp)
+
+static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
+{
+       return readl(pcie->base + offset);
+}
+
+static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
+                                     u32 value)
+{
+       writel(value, pcie->base + offset);
+}
+
+static int dra7xx_pcie_link_up(struct pcie_port *pp)
+{
+       struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+       u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+
+       return !!(reg & LINK_UP);
+}
+
+static int dra7xx_pcie_establish_link(struct pcie_port *pp)
+{
+       u32 reg;
+       unsigned int retries = 1000;
+       struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
+       if (dw_pcie_link_up(pp)) {
+               dev_err(pp->dev, "link is already up\n");
+               return 0;
+       }
+
+       reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+       reg |= LTSSM_EN;
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+       while (retries--) {
+               reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+               if (reg & LINK_UP)
+                       break;
+               usleep_range(10, 20);
+       }
+
+       if (retries == 0) {
+               dev_err(pp->dev, "link is not up\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
+{
+       struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+                          ~INTERRUPTS);
+       dra7xx_pcie_writel(dra7xx,
+                          PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
+                          ~LEG_EP_INTERRUPTS & ~MSI);
+
+       if (IS_ENABLED(CONFIG_PCI_MSI))
+               dra7xx_pcie_writel(dra7xx,
+                                  PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, MSI);
+       else
+               dra7xx_pcie_writel(dra7xx,
+                                  PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+                                  LEG_EP_INTERRUPTS);
+}
+
+static void dra7xx_pcie_host_init(struct pcie_port *pp)
+{
+       dw_pcie_setup_rc(pp);
+       dra7xx_pcie_establish_link(pp);
+       if (IS_ENABLED(CONFIG_PCI_MSI))
+               dw_pcie_msi_init(pp);
+       dra7xx_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops dra7xx_pcie_host_ops = {
+       .link_up = dra7xx_pcie_link_up,
+       .host_init = dra7xx_pcie_host_init,
+};
+
+static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+                               irq_hw_number_t hwirq)
+{
+       irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+       irq_set_chip_data(irq, domain->host_data);
+       set_irq_flags(irq, IRQF_VALID);
+
+       return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+       .map = dra7xx_pcie_intx_map,
+};
+
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+       struct device *dev = pp->dev;
+       struct device_node *node = dev->of_node;
+       struct device_node *pcie_intc_node =  of_get_next_child(node, NULL);
+
+       if (!pcie_intc_node) {
+               dev_err(dev, "No PCIe Intc node found\n");
+               return PTR_ERR(pcie_intc_node);
+       }
+
+       pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+                                              &intx_domain_ops, pp);
+       if (!pp->irq_domain) {
+               dev_err(dev, "Failed to get a INTx IRQ domain\n");
+               return PTR_ERR(pp->irq_domain);
+       }
+
+       return 0;
+}
+
+static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+{
+       struct pcie_port *pp = arg;
+       struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+       u32 reg;
+
+       reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+
+       switch (reg) {
+       case MSI:
+               dw_handle_msi_irq(pp);
+               break;
+       case INTA:
+       case INTB:
+       case INTC:
+       case INTD:
+               generic_handle_irq(irq_find_mapping(pp->irq_domain, ffs(reg)));
+               break;
+       }
+
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
+
+       return IRQ_HANDLED;
+}
+
+
+static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
+{
+       struct dra7xx_pcie *dra7xx = arg;
+       u32 reg;
+
+       reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
+
+       if (reg & ERR_SYS)
+               dev_dbg(dra7xx->dev, "System Error\n");
+
+       if (reg & ERR_FATAL)
+               dev_dbg(dra7xx->dev, "Fatal Error\n");
+
+       if (reg & ERR_NONFATAL)
+               dev_dbg(dra7xx->dev, "Non Fatal Error\n");
+
+       if (reg & ERR_COR)
+               dev_dbg(dra7xx->dev, "Correctable Error\n");
+
+       if (reg & ERR_AXI)
+               dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
+
+       if (reg & ERR_ECRC)
+               dev_dbg(dra7xx->dev, "ECRC Error\n");
+
+       if (reg & PME_TURN_OFF)
+               dev_dbg(dra7xx->dev,
+                       "Power Management Event Turn-Off message received\n");
+
+       if (reg & PME_TO_ACK)
+               dev_dbg(dra7xx->dev,
+                       "Power Management Turn-Off Ack message received\n");
+
+       if (reg & PM_PME)
+               dev_dbg(dra7xx->dev,
+                       "PM Power Management Event message received\n");
+
+       if (reg & LINK_REQ_RST)
+               dev_dbg(dra7xx->dev, "Link Request Reset\n");
+
+       if (reg & LINK_UP_EVT)
+               dev_dbg(dra7xx->dev, "Link-up state change\n");
+
+       if (reg & CFG_BME_EVT)
+               dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
+
+       if (reg & CFG_MSE_EVT)
+               dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
+
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
+
+       return IRQ_HANDLED;
+}
+
+static int add_pcie_port(struct dra7xx_pcie *dra7xx,
+                         struct platform_device *pdev)
+{
+       int ret;
+       struct pcie_port *pp;
+       struct resource *res;
+       struct device *dev = &pdev->dev;
+
+       pp = &dra7xx->pp;
+       pp->dev = dev;
+       pp->ops = &dra7xx_pcie_host_ops;
+
+       pp->irq = platform_get_irq(pdev, 1);
+       if (pp->irq < 0) {
+               dev_err(dev, "missing IRQ resource\n");
+               return -EINVAL;
+       }
+
+       ret = devm_request_irq(&pdev->dev, pp->irq,
+                              dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
+                              "dra7-pcie-msi", pp);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to request irq\n");
+               return ret;
+       }
+
+       if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+               ret = dra7xx_pcie_init_irq_domain(pp);
+               if (ret < 0)
+                       return ret;
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
+       pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+       if (!pp->dbi_base)
+               return -ENOMEM;
+
+       ret = dw_pcie_host_init(pp);
+       if (ret) {
+               dev_err(dra7xx->dev, "failed to initialize host\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int __init dra7xx_pcie_probe(struct platform_device *pdev)
+{
+       u32 reg;
+       int ret;
+       int irq;
+       int i;
+       int phy_count;
+       struct phy **phy;
+       void __iomem *base;
+       struct resource *res;
+       struct dra7xx_pcie *dra7xx;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       char name[10];
+
+       dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
+       if (!dra7xx)
+               return -ENOMEM;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "missing IRQ resource\n");
+               return -EINVAL;
+       }
+
+       ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+                              IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+       if (ret) {
+               dev_err(dev, "failed to request irq\n");
+               return ret;
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
+       base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       if (!base)
+               return -ENOMEM;
+
+       phy_count = of_property_count_strings(np, "phy-names");
+       if (phy_count < 0) {
+               dev_err(dev, "unable to find the strings\n");
+               return phy_count;
+       }
+
+       phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+       if (!phy)
+               return -ENOMEM;
+
+       for (i = 0; i < phy_count; i++) {
+               snprintf(name, sizeof(name), "pcie-phy%d", i);
+               phy[i] = devm_phy_get(dev, name);
+               if (IS_ERR(phy[i]))
+                       return PTR_ERR(phy[i]);
+
+               ret = phy_init(phy[i]);
+               if (ret < 0)
+                       goto err_phy;
+
+               ret = phy_power_on(phy[i]);
+               if (ret < 0) {
+                       phy_exit(phy[i]);
+                       goto err_phy;
+               }
+       }
+
+       dra7xx->base = base;
+       dra7xx->phy = phy;
+       dra7xx->dev = dev;
+       dra7xx->phy_count = phy_count;
+
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (IS_ERR_VALUE(ret)) {
+               dev_err(dev, "pm_runtime_get_sync failed\n");
+               goto err_phy;
+       }
+
+       reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+       reg &= ~LTSSM_EN;
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+       platform_set_drvdata(pdev, dra7xx);
+
+       ret = add_pcie_port(dra7xx, pdev);
+       if (ret < 0)
+               goto err_add_port;
+
+       return 0;
+
+err_add_port:
+       pm_runtime_put(dev);
+       pm_runtime_disable(dev);
+
+err_phy:
+       while (--i >= 0) {
+               phy_power_off(phy[i]);
+               phy_exit(phy[i]);
+       }
+
+       return ret;
+}
+
+static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
+{
+       struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev);
+       struct pcie_port *pp = &dra7xx->pp;
+       struct device *dev = &pdev->dev;
+       int count = dra7xx->phy_count;
+
+       if (pp->irq_domain)
+               irq_domain_remove(pp->irq_domain);
+       pm_runtime_put(dev);
+       pm_runtime_disable(dev);
+       while (count--) {
+               phy_power_off(dra7xx->phy[count]);
+               phy_exit(dra7xx->phy[count]);
+       }
+
+       return 0;
+}
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+       { .compatible = "ti,dra7-pcie", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
+
+static struct platform_driver dra7xx_pcie_driver = {
+       .remove         = __exit_p(dra7xx_pcie_remove),
+       .driver = {
+               .name   = "dra7-pcie",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_dra7xx_pcie_match,
+       },
+};
+
+module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_DESCRIPTION("TI PCIe controller driver");
+MODULE_LICENSE("GPL v2");
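
The probe above brings each PHY up in two steps (phy_init(), then phy_power_on()) and, on failure, cleans up the failing entry in place before jumping to err_phy, where while (--i >= 0) unwinds only the PHYs that were fully brought up. A standalone sketch of that unwind pattern, not part of the patch, with fake_* stub functions standing in for the PHY API:

    #include <stdio.h>

    #define NPHY 3

    /* Stubs standing in for phy_init()/phy_power_on()/...; the second
     * entry fails to power on so the unwind path is exercised. */
    static int fake_init(int i)       { printf("init %d\n", i); return 0; }
    static int fake_power_on(int i)   { printf("power on %d\n", i); return i == 1 ? -1 : 0; }
    static void fake_power_off(int i) { printf("power off %d\n", i); }
    static void fake_exit(int i)      { printf("exit %d\n", i); }

    int main(void)
    {
        int i, ret;

        for (i = 0; i < NPHY; i++) {
            ret = fake_init(i);
            if (ret < 0)
                goto err;

            ret = fake_power_on(i);
            if (ret < 0) {
                fake_exit(i);   /* undo init of the failing entry in place */
                goto err;
            }
        }
        return 0;

    err:
        /* --i skips the failing entry; only fully set-up ones remain */
        while (--i >= 0) {
            fake_power_off(i);
            fake_exit(i);
        }
        return 1;
    }
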
index abd65784618dca13f56960acc051b716ec1bbfd7..0fb0fdb223d5174d6a2e10e750a74194d27a4db6 100644 (file)
@@ -25,6 +25,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
@@ -276,6 +277,7 @@ struct tegra_pcie {
        unsigned int num_supplies;
 
        const struct tegra_pcie_soc_data *soc_data;
+       struct dentry *debugfs;
 };
 
 struct tegra_pcie_port {
@@ -1739,6 +1741,115 @@ static const struct of_device_id tegra_pcie_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
 
+static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
+{
+       struct tegra_pcie *pcie = s->private;
+
+       if (list_empty(&pcie->ports))
+               return NULL;
+
+       seq_printf(s, "Index  Status\n");
+
+       return seq_list_start(&pcie->ports, *pos);
+}
+
+static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       struct tegra_pcie *pcie = s->private;
+
+       return seq_list_next(v, &pcie->ports, pos);
+}
+
+static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
+{
+       bool up = false, active = false;
+       struct tegra_pcie_port *port;
+       unsigned int value;
+
+       port = list_entry(v, struct tegra_pcie_port, list);
+
+       value = readl(port->base + RP_VEND_XP);
+
+       if (value & RP_VEND_XP_DL_UP)
+               up = true;
+
+       value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+       if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+               active = true;
+
+       seq_printf(s, "%2u     ", port->index);
+
+       if (up)
+               seq_printf(s, "up");
+
+       if (active) {
+               if (up)
+                       seq_printf(s, ", ");
+
+               seq_printf(s, "active");
+       }
+
+       seq_printf(s, "\n");
+       return 0;
+}
+
+static const struct seq_operations tegra_pcie_ports_seq_ops = {
+       .start = tegra_pcie_ports_seq_start,
+       .next = tegra_pcie_ports_seq_next,
+       .stop = tegra_pcie_ports_seq_stop,
+       .show = tegra_pcie_ports_seq_show,
+};
+
+static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
+{
+       struct tegra_pcie *pcie = inode->i_private;
+       struct seq_file *s;
+       int err;
+
+       err = seq_open(file, &tegra_pcie_ports_seq_ops);
+       if (err)
+               return err;
+
+       s = file->private_data;
+       s->private = pcie;
+
+       return 0;
+}
+
+static const struct file_operations tegra_pcie_ports_ops = {
+       .owner = THIS_MODULE,
+       .open = tegra_pcie_ports_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
+{
+       struct dentry *file;
+
+       pcie->debugfs = debugfs_create_dir("pcie", NULL);
+       if (!pcie->debugfs)
+               return -ENOMEM;
+
+       file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
+                                  pcie, &tegra_pcie_ports_ops);
+       if (!file)
+               goto remove;
+
+       return 0;
+
+remove:
+       debugfs_remove_recursive(pcie->debugfs);
+       pcie->debugfs = NULL;
+       return -ENOMEM;
+}
+
 static int tegra_pcie_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
@@ -1793,6 +1904,13 @@ static int tegra_pcie_probe(struct platform_device *pdev)
                goto disable_msi;
        }
 
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_pcie_debugfs_init(pcie);
+               if (err < 0)
+                       dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
+                               err);
+       }
+
        platform_set_drvdata(pdev, pcie);
        return 0;
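
For reference, tegra_pcie_ports_seq_show() above emits one line per root port in the form "<index>  up, active". The small userspace program below is not part of the patch; it only reproduces that formatting for made-up register snapshots, and the FAKE_DL_UP / FAKE_DL_LINK_ACTIVE bit positions are placeholders rather than the driver's real definitions:

    #include <stdio.h>
    #include <stdbool.h>

    /* Placeholder bit positions; the real masks live in pci-tegra.c. */
    #define FAKE_DL_UP              (1u << 30)
    #define FAKE_DL_LINK_ACTIVE     (1u << 29)

    struct sample { unsigned int index; unsigned int vend_xp, link_status; };

    int main(void)
    {
        /* Made-up register snapshots for two root ports */
        struct sample ports[] = {
            { 0, FAKE_DL_UP, FAKE_DL_LINK_ACTIVE },
            { 1, 0, 0 },
        };
        size_t i;

        printf("Index  Status\n");
        for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
            bool up = ports[i].vend_xp & FAKE_DL_UP;
            bool active = ports[i].link_status & FAKE_DL_LINK_ACTIVE;

            printf("%2u     ", ports[i].index);
            if (up)
                printf("up");
            if (active)
                printf("%sactive", up ? ", " : "");
            printf("\n");
        }
        return 0;
    }
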
 
index 1eaf4df3618a18a57e0eee175272d5a19646f2bf..52bd3a14356310195af1219e74b7e65091da6d88 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of_pci.h>
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
+#include <linux/platform_device.h>
 #include <linux/types.h>
 
 #include "pcie-designware.h"
@@ -217,27 +218,47 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
        return 0;
 }
 
+static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+       unsigned int res, bit, val;
+
+       res = (irq / 32) * 12;
+       bit = irq % 32;
+       dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+       val &= ~(1 << bit);
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
 static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
                            unsigned int nvec, unsigned int pos)
 {
-       unsigned int i, res, bit, val;
+       unsigned int i;
 
        for (i = 0; i < nvec; i++) {
                irq_set_msi_desc_off(irq_base, i, NULL);
                clear_bit(pos + i, pp->msi_irq_in_use);
                /* Disable corresponding interrupt on MSI controller */
-               res = ((pos + i) / 32) * 12;
-               bit = (pos + i) % 32;
-               dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
-               val &= ~(1 << bit);
-               dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+               if (pp->ops->msi_clear_irq)
+                       pp->ops->msi_clear_irq(pp, pos + i);
+               else
+                       dw_pcie_msi_clear_irq(pp, pos + i);
        }
 }
 
+static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+       unsigned int res, bit, val;
+
+       res = (irq / 32) * 12;
+       bit = irq % 32;
+       dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+       val |= 1 << bit;
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
 static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 {
-       int res, bit, irq, pos0, pos1, i;
-       u32 val;
+       int irq, pos0, pos1, i;
        struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
 
        if (!pp) {
@@ -281,11 +302,10 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
                }
                set_bit(pos0 + i, pp->msi_irq_in_use);
                /*Enable corresponding interrupt in MSI interrupt controller */
-               res = ((pos0 + i) / 32) * 12;
-               bit = (pos0 + i) % 32;
-               dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
-               val |= 1 << bit;
-               dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+               if (pp->ops->msi_set_irq)
+                       pp->ops->msi_set_irq(pp, pos0 + i);
+               else
+                       dw_pcie_msi_set_irq(pp, pos0 + i);
        }
 
        *pos = pos0;
@@ -353,7 +373,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
         */
        desc->msi_attrib.multiple = msgvec;
 
-       msg.address_lo = virt_to_phys((void *)pp->msi_data);
+       if (pp->ops->get_msi_data)
+               msg.address_lo = pp->ops->get_msi_data(pp);
+       else
+               msg.address_lo = virt_to_phys((void *)pp->msi_data);
        msg.address_hi = 0x0;
        msg.data = pos;
        write_msi_msg(irq, &msg);
@@ -396,10 +419,35 @@ static const struct irq_domain_ops msi_domain_ops = {
 int __init dw_pcie_host_init(struct pcie_port *pp)
 {
        struct device_node *np = pp->dev->of_node;
+       struct platform_device *pdev = to_platform_device(pp->dev);
        struct of_pci_range range;
        struct of_pci_range_parser parser;
-       u32 val;
-       int i;
+       struct resource *cfg_res;
+       u32 val, na, ns;
+       const __be32 *addrp;
+       int i, index;
+
+       /* Find the address cell size and the number of cells in order to get
+        * the untranslated address.
+        */
+       of_property_read_u32(np, "#address-cells", &na);
+       ns = of_n_size_cells(np);
+
+       cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+       if (cfg_res) {
+               pp->config.cfg0_size = resource_size(cfg_res)/2;
+               pp->config.cfg1_size = resource_size(cfg_res)/2;
+               pp->cfg0_base = cfg_res->start;
+               pp->cfg1_base = cfg_res->start + pp->config.cfg0_size;
+
+               /* Find the untranslated configuration space address */
+               index = of_property_match_string(np, "reg-names", "config");
+               addrp = of_get_address(np, index, false, false);
+               pp->cfg0_mod_base = of_read_number(addrp, ns);
+               pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size;
+       } else {
+               dev_err(pp->dev, "missing *config* reg space\n");
+       }
 
        if (of_pci_range_parser_init(&parser, np)) {
                dev_err(pp->dev, "missing ranges property\n");
@@ -422,17 +470,33 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
                        pp->config.io_size = resource_size(&pp->io);
                        pp->config.io_bus_addr = range.pci_addr;
                        pp->io_base = range.cpu_addr;
+
+                       /* Find the untranslated IO space address */
+                       pp->io_mod_base = of_read_number(parser.range -
+                                                        parser.np + na, ns);
                }
                if (restype == IORESOURCE_MEM) {
                        of_pci_range_to_resource(&range, np, &pp->mem);
                        pp->mem.name = "MEM";
                        pp->config.mem_size = resource_size(&pp->mem);
                        pp->config.mem_bus_addr = range.pci_addr;
+
+                       /* Find the untranslated MEM space address */
+                       pp->mem_mod_base = of_read_number(parser.range -
+                                                         parser.np + na, ns);
                }
                if (restype == 0) {
                        of_pci_range_to_resource(&range, np, &pp->cfg);
                        pp->config.cfg0_size = resource_size(&pp->cfg)/2;
                        pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+                       pp->cfg0_base = pp->cfg.start;
+                       pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+
+                       /* Find the untranslated configuration space address */
+                       pp->cfg0_mod_base = of_read_number(parser.range -
+                                                          parser.np + na, ns);
+                       pp->cfg1_mod_base = pp->cfg0_mod_base +
+                                           pp->config.cfg0_size;
                }
        }
 
@@ -445,8 +509,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
                }
        }
 
-       pp->cfg0_base = pp->cfg.start;
-       pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
        pp->mem_base = pp->mem.start;
 
        pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
@@ -509,9 +571,9 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
        /* Program viewport 0 : OUTBOUND : CFG0 */
        dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
                          PCIE_ATU_VIEWPORT);
-       dw_pcie_writel_rc(pp, pp->cfg0_base, PCIE_ATU_LOWER_BASE);
-       dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), PCIE_ATU_UPPER_BASE);
-       dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
+       dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1,
                          PCIE_ATU_LIMIT);
        dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
        dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -525,9 +587,9 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
        dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
                          PCIE_ATU_VIEWPORT);
        dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
-       dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
-       dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
-       dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
+       dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1,
                          PCIE_ATU_LIMIT);
        dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
        dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -540,9 +602,9 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
        dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
                          PCIE_ATU_VIEWPORT);
        dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
-       dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
-       dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
-       dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+       dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1,
                          PCIE_ATU_LIMIT);
        dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
        dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
@@ -556,9 +618,9 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
        dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
                          PCIE_ATU_VIEWPORT);
        dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
-       dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
-       dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
-       dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+       dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1,
                          PCIE_ATU_LIMIT);
        dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
        dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
@@ -656,7 +718,11 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
        }
 
        if (bus->number != pp->root_bus_nr)
-               ret = dw_pcie_rd_other_conf(pp, bus, devfn,
+               if (pp->ops->rd_other_conf)
+                       ret = pp->ops->rd_other_conf(pp, bus, devfn,
+                                               where, size, val);
+               else
+                       ret = dw_pcie_rd_other_conf(pp, bus, devfn,
                                                where, size, val);
        else
                ret = dw_pcie_rd_own_conf(pp, where, size, val);
@@ -679,7 +745,11 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        if (bus->number != pp->root_bus_nr)
-               ret = dw_pcie_wr_other_conf(pp, bus, devfn,
+               if (pp->ops->wr_other_conf)
+                       ret = pp->ops->wr_other_conf(pp, bus, devfn,
+                                               where, size, val);
+               else
+                       ret = dw_pcie_wr_other_conf(pp, bus, devfn,
                                                where, size, val);
        else
                ret = dw_pcie_wr_own_conf(pp, where, size, val);
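
The dw_pcie_msi_set_irq()/dw_pcie_msi_clear_irq() helpers factored out above locate an MSI vector's enable bit with res = (irq / 32) * 12 and bit = irq % 32: each bank of 32 vectors has its own enable register, and consecutive banks sit 12 bytes apart. A quick standalone check of that arithmetic; the 0x828 base offset below is an assumption for illustration only, the real value being PCIE_MSI_INTR0_ENABLE in pcie-designware.h:

    #include <stdio.h>

    /* Assumed base offset for the first MSI enable register */
    #define MSI_INTR0_ENABLE 0x828

    int main(void)
    {
        int irqs[] = { 0, 5, 31, 32, 40, 100 };
        size_t i;

        for (i = 0; i < sizeof(irqs) / sizeof(irqs[0]); i++) {
            int irq = irqs[i];
            int res = (irq / 32) * 12;  /* 12-byte stride per 32-vector bank */
            int bit = irq % 32;

            printf("irq %3d -> enable reg 0x%03x, bit %2d\n",
                   irq, MSI_INTR0_ENABLE + res, bit);
        }
        return 0;
    }
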
index 77f592faa7bf28ee6d86f84cad6ee7cdfd345158..daf81f922cda34e472f4be5c16d5a967f70528da 100644 (file)
@@ -36,11 +36,15 @@ struct pcie_port {
        u8                      root_bus_nr;
        void __iomem            *dbi_base;
        u64                     cfg0_base;
+       u64                     cfg0_mod_base;
        void __iomem            *va_cfg0_base;
        u64                     cfg1_base;
+       u64                     cfg1_mod_base;
        void __iomem            *va_cfg1_base;
        u64                     io_base;
+       u64                     io_mod_base;
        u64                     mem_base;
+       u64                     mem_mod_base;
        struct resource         cfg;
        struct resource         io;
        struct resource         mem;
@@ -61,8 +65,15 @@ struct pcie_host_ops {
                        u32 val, void __iomem *dbi_base);
        int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
        int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+       int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+                       unsigned int devfn, int where, int size, u32 *val);
+       int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+                       unsigned int devfn, int where, int size, u32 val);
        int (*link_up)(struct pcie_port *pp);
        void (*host_init)(struct pcie_port *pp);
+       void (*msi_set_irq)(struct pcie_port *pp, int irq);
+       void (*msi_clear_irq)(struct pcie_port *pp, int irq);
+       u32 (*get_msi_data)(struct pcie_port *pp);
 };
 
 int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
index a53a689a2bfaa371024369dfc7a9e8dc72fc2ca3..8c6fd8d4dd3cedc26401492346fd8ab79893dac9 100644 (file)
@@ -620,8 +620,7 @@ static void abx500_gpio_dbg_show_one(struct seq_file *s,
        } else
                seq_printf(s, " %-9s", chip->get(chip, offset) ? "hi" : "lo");
 
-       if (pctldev)
-               mode = abx500_get_mode(pctldev, chip, offset);
+       mode = abx500_get_mode(pctldev, chip, offset);
 
        seq_printf(s, " %s", (mode < 0) ? "unknown" : modes[mode]);
 
index af1ba4fc150dd4e10509b8e53ccab89458d7d09a..60464a2648aa0dfb7c31fe13c75dc419472c3b31 100644 (file)
@@ -497,10 +497,10 @@ static struct at91_pinctrl_mux_ops at91sam9x5_ops = {
 static void at91_pin_dbg(const struct device *dev, const struct at91_pmx_pin *pin)
 {
        if (pin->mux) {
-               dev_dbg(dev, "pio%c%d configured as periph%c with conf = 0x%lu\n",
+               dev_dbg(dev, "pio%c%d configured as periph%c with conf = 0x%lx\n",
                        pin->bank + 'A', pin->pin, pin->mux - 1 + 'A', pin->conf);
        } else {
-               dev_dbg(dev, "pio%c%d configured as gpio with conf = 0x%lu\n",
+               dev_dbg(dev, "pio%c%d configured as gpio with conf = 0x%lx\n",
                        pin->bank + 'A', pin->pin, pin->conf);
        }
 }
index 5e8b2e04cd7a322e6aefa85adbd3996ad04cc19e..0c372a300cb88058e9957e017db99f3d127fb08d 100644 (file)
@@ -438,7 +438,7 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
        int reg, ret, mask;
        unsigned long flags;
        u8 bit;
-       u32 data;
+       u32 data, rmask;
 
        if (iomux_num > 3)
                return -EINVAL;
@@ -478,8 +478,9 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
        spin_lock_irqsave(&bank->slock, flags);
 
        data = (mask << (bit + 16));
+       rmask = data | (data >> 16);
        data |= (mux & mask) << bit;
-       ret = regmap_write(regmap, reg, data);
+       ret = regmap_update_bits(regmap, reg, rmask, data);
 
        spin_unlock_irqrestore(&bank->slock, flags);
 
@@ -634,7 +635,7 @@ static int rk3288_set_drive(struct rockchip_pin_bank *bank, int pin_num,
        struct regmap *regmap;
        unsigned long flags;
        int reg, ret, i;
-       u32 data;
+       u32 data, rmask;
        u8 bit;
 
        rk3288_calc_drv_reg_and_bit(bank, pin_num, &regmap, &reg, &bit);
@@ -657,9 +658,10 @@ static int rk3288_set_drive(struct rockchip_pin_bank *bank, int pin_num,
 
        /* enable the write to the equivalent lower bits */
        data = ((1 << RK3288_DRV_BITS_PER_PIN) - 1) << (bit + 16);
+       rmask = data | (data >> 16);
        data |= (ret << bit);
 
-       ret = regmap_write(regmap, reg, data);
+       ret = regmap_update_bits(regmap, reg, rmask, data);
        spin_unlock_irqrestore(&bank->slock, flags);
 
        return ret;
@@ -722,7 +724,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
        int reg, ret;
        unsigned long flags;
        u8 bit;
-       u32 data;
+       u32 data, rmask;
 
        dev_dbg(info->dev, "setting pull of GPIO%d-%d to %d\n",
                 bank->bank_num, pin_num, pull);
@@ -750,6 +752,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
 
                /* enable the write to the equivalent lower bits */
                data = ((1 << RK3188_PULL_BITS_PER_PIN) - 1) << (bit + 16);
+               rmask = data | (data >> 16);
 
                switch (pull) {
                case PIN_CONFIG_BIAS_DISABLE:
@@ -770,7 +773,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
                        return -EINVAL;
                }
 
-               ret = regmap_write(regmap, reg, data);
+               ret = regmap_update_bits(regmap, reg, rmask, data);
 
                spin_unlock_irqrestore(&bank->slock, flags);
                break;
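
All three rockchip_set_* paths above write registers whose upper 16 bits are per-bit write enables: a low bit only changes if its companion high bit is set in the same write. The fix derives rmask from the enable half so that regmap_update_bits() updates both the enable bits and the value bits. A standalone calculation, not part of the patch, for a hypothetical 2-bit mux field at bit 4 being set to function 2:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical field: 2-bit mux at bit 4, new value 2 */
        unsigned int mask = 0x3, bit = 4, mux = 2;

        unsigned int data = mask << (bit + 16);     /* write-enable bits   */
        unsigned int rmask = data | (data >> 16);   /* enable + value bits */

        data |= (mux & mask) << bit;                /* value into low half */

        printf("data  = 0x%08x\n", data);   /* 0x00300020 */
        printf("rmask = 0x%08x\n", rmask);  /* 0x00300030 */
        return 0;
    }
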
index a06620474845964667bea51b387395360f206bf0..e641b4226c422fe9afac1d9be8eee73e65509b02 100644 (file)
@@ -680,7 +680,7 @@ static struct phy *tegra_xusb_padctl_xlate(struct device *dev,
        if (args->args_count <= 0)
                return ERR_PTR(-EINVAL);
 
-       if (index > ARRAY_SIZE(padctl->phys))
+       if (index >= ARRAY_SIZE(padctl->phys))
                return ERR_PTR(-EINVAL);
 
        return padctl->phys[index];
@@ -930,7 +930,8 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
 
        padctl->provider = devm_of_phy_provider_register(&pdev->dev,
                                                         tegra_xusb_padctl_xlate);
-       if (err < 0) {
+       if (IS_ERR(padctl->provider)) {
+               err = PTR_ERR(padctl->provider);
                dev_err(&pdev->dev, "failed to register PHYs: %d\n", err);
                goto unregister;
        }
index 003bfd874a6155ca8dc70a50d527c3151ee6a797..d7154ed0b0eb19347fa1bd26ca82dee99a4c9e87 100644 (file)
@@ -127,14 +127,10 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
        struct irq_chip *chip = irq_data_get_irq_chip(irqd);
        struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
        struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
-       struct samsung_pin_bank_type *bank_type = bank->type;
        struct samsung_pinctrl_drv_data *d = bank->drvdata;
-       unsigned int pin = irqd->hwirq;
-       unsigned int shift = EXYNOS_EINT_CON_LEN * pin;
+       unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
        unsigned int con, trig_type;
        unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
-       unsigned long flags;
-       unsigned int mask;
 
        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
@@ -167,8 +163,32 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
        con |= trig_type << shift;
        writel(con, d->virt_base + reg_con);
 
+       return 0;
+}
+
+static int exynos_irq_request_resources(struct irq_data *irqd)
+{
+       struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+       struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
+       struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+       struct samsung_pin_bank_type *bank_type = bank->type;
+       struct samsung_pinctrl_drv_data *d = bank->drvdata;
+       unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
+       unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
+       unsigned long flags;
+       unsigned int mask;
+       unsigned int con;
+       int ret;
+
+       ret = gpio_lock_as_irq(&bank->gpio_chip, irqd->hwirq);
+       if (ret) {
+               dev_err(bank->gpio_chip.dev, "unable to lock pin %s-%lu IRQ\n",
+                       bank->name, irqd->hwirq);
+               return ret;
+       }
+
        reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC];
-       shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC];
+       shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
        mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
 
        spin_lock_irqsave(&bank->slock, flags);
@@ -180,9 +200,42 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
 
        spin_unlock_irqrestore(&bank->slock, flags);
 
+       exynos_irq_unmask(irqd);
+
        return 0;
 }
 
+static void exynos_irq_release_resources(struct irq_data *irqd)
+{
+       struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+       struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
+       struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+       struct samsung_pin_bank_type *bank_type = bank->type;
+       struct samsung_pinctrl_drv_data *d = bank->drvdata;
+       unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
+       unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
+       unsigned long flags;
+       unsigned int mask;
+       unsigned int con;
+
+       reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC];
+       shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
+       mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
+
+       exynos_irq_mask(irqd);
+
+       spin_lock_irqsave(&bank->slock, flags);
+
+       con = readl(d->virt_base + reg_con);
+       con &= ~(mask << shift);
+       con |= FUNC_INPUT << shift;
+       writel(con, d->virt_base + reg_con);
+
+       spin_unlock_irqrestore(&bank->slock, flags);
+
+       gpio_unlock_as_irq(&bank->gpio_chip, irqd->hwirq);
+}
+
 /*
  * irq_chip for gpio interrupts.
  */
@@ -193,6 +246,8 @@ static struct exynos_irq_chip exynos_gpio_irq_chip = {
                .irq_mask = exynos_irq_mask,
                .irq_ack = exynos_irq_ack,
                .irq_set_type = exynos_irq_set_type,
+               .irq_request_resources = exynos_irq_request_resources,
+               .irq_release_resources = exynos_irq_release_resources,
        },
        .eint_con = EXYNOS_GPIO_ECON_OFFSET,
        .eint_mask = EXYNOS_GPIO_EMASK_OFFSET,
@@ -336,6 +391,8 @@ static struct exynos_irq_chip exynos_wkup_irq_chip = {
                .irq_ack = exynos_irq_ack,
                .irq_set_type = exynos_irq_set_type,
                .irq_set_wake = exynos_wkup_irq_set_wake,
+               .irq_request_resources = exynos_irq_request_resources,
+               .irq_release_resources = exynos_irq_release_resources,
        },
        .eint_con = EXYNOS_WKUP_ECON_OFFSET,
        .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
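
The new request/release hooks above reprogram the pin's function field in the bank's CON register with the usual mask/shift read-modify-write; the release path parks the pin back at FUNC_INPUT. A standalone sketch of that field update, with a made-up set_pin_func() helper and a 4-bit field width assumed purely for illustration:

    #include <stdio.h>

    #define FUNC_INPUT  0x0
    #define FUNC_OUTPUT 0x1

    /* Update one pin's function field in a packed CON register.
     * The 4-bit field width is an assumption for this example. */
    static unsigned int set_pin_func(unsigned int con, unsigned int pin,
                                     unsigned int fld_width, unsigned int func)
    {
        unsigned int shift = pin * fld_width;
        unsigned int mask = (1u << fld_width) - 1;

        con &= ~(mask << shift);
        con |= (func & mask) << shift;
        return con;
    }

    int main(void)
    {
        unsigned int con = 0x00002230;  /* made-up initial register value */

        con = set_pin_func(con, 3, 4, FUNC_INPUT);   /* release: back to input */
        printf("after release: con = 0x%08x\n", con);

        con = set_pin_func(con, 1, 4, FUNC_OUTPUT);  /* for contrast */
        printf("after output:  con = 0x%08x\n", con);
        return 0;
    }
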
index 2b882320e8e97077fca276200a970e10296be939..5cedc9d26390fae0d5aa77c785116eceaac3146b 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/gpio.h>
 
 /* pinmux function number for pin as gpio output line */
+#define FUNC_INPUT     0x0
 #define FUNC_OUTPUT    0x1
 
 /**
index 576d41b459e97fd4f3675e2a8f1cf981a3a2b410..c6e5deba238ec5dc9311ba51e21a52cca70327d2 100644 (file)
@@ -4509,24 +4509,24 @@ static const char * const audio_clk_groups[] = {
 };
 
 static const char * const can0_groups[] = {
-       "can0_data_a",
+       "can0_data",
        "can0_data_b",
        "can0_data_c",
        "can0_data_d",
        "can0_data_e",
        "can0_data_f",
-       "can_clk_a",
+       "can_clk",
        "can_clk_b",
        "can_clk_c",
        "can_clk_d",
 };
 
 static const char * const can1_groups[] = {
-       "can1_data_a",
+       "can1_data",
        "can1_data_b",
        "can1_data_c",
        "can1_data_d",
-       "can_clk_a",
+       "can_clk",
        "can_clk_b",
        "can_clk_c",
        "can_clk_d",
index e4da61bcbf8bb138a692325706cc05dc94e2beca..b062d3d7b37304e9e724fd869419febfae522c19 100644 (file)
@@ -1258,7 +1258,7 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
        int mode = -1;
        int time = -1;
 
-       if (sscanf(buf, "%i", &mode) != 1  || (mode != 2 || mode != 1))
+       if (sscanf(buf, "%i", &mode) != 1 && (mode != 2 || mode != 1))
                return -EINVAL;
 
        /* Set the Keyboard Backlight Mode where:
index 4b66bf09ee550fc391b8b22155dc4610ae649022..d2c35920ff08e7e84e4f1a6be3ce1c15454b2a4b 100644 (file)
@@ -606,6 +606,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
        unsigned int best = 0;
        struct pwm_lookup *p;
        unsigned int match;
+       unsigned int period;
+       enum pwm_polarity polarity;
 
        /* look up via DT first */
        if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
@@ -653,6 +655,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
                if (match > best) {
                        chip = pwmchip_find_by_name(p->provider);
                        index = p->index;
+                       period = p->period;
+                       polarity = p->polarity;
 
                        if (match != 3)
                                best = match;
@@ -668,8 +672,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
        if (IS_ERR(pwm))
                return pwm;
 
-       pwm_set_period(pwm, p->period);
-       pwm_set_polarity(pwm, p->polarity);
+       pwm_set_period(pwm, period);
+       pwm_set_polarity(pwm, polarity);
 
 
        return pwm;
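
The pwm_get() fix above snapshots p->period and p->polarity at the moment the best lookup entry is found, because when the search loop ends without a perfect match the cursor has already walked past the matching entry. A standalone illustration of the capture-at-match-time pattern, not part of the patch, with a simplified score field in place of the real provider/device matching:

    #include <stdio.h>

    struct lookup {
        const char *provider;
        unsigned int period;
        unsigned int score;     /* stands in for the match ranking */
    };

    int main(void)
    {
        struct lookup table[] = {
            { "pwm-a", 1000000, 1 },
            { "pwm-b", 5000000, 3 },
            { "pwm-c", 2000000, 2 },
        };
        unsigned int best = 0, period = 0;
        const char *provider = NULL;
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (table[i].score > best) {
                /* snapshot what we need while the entry is in hand */
                provider = table[i].provider;
                period = table[i].period;
                best = table[i].score;
            }
        }

        /* after the loop only the snapshots are used */
        if (provider)
            printf("best match: %s, period %u ns\n", provider, period);
        return 0;
    }
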
index a6d47e5eee9e0add1485533915832107659d2df6..c43aca69fb30dffed727c210b725b46a365440dd 100644 (file)
@@ -1035,12 +1035,26 @@ static int tty3215_write(struct tty_struct * tty,
                         const unsigned char *buf, int count)
 {
        struct raw3215_info *raw;
+       int i, written;
 
        if (!tty)
                return 0;
        raw = (struct raw3215_info *) tty->driver_data;
-       raw3215_write(raw, buf, count);
-       return count;
+       written = count;
+       while (count > 0) {
+               for (i = 0; i < count; i++)
+                       if (buf[i] == '\t' || buf[i] == '\n')
+                               break;
+               raw3215_write(raw, buf, i);
+               count -= i;
+               buf += i;
+               if (count > 0) {
+                       raw3215_putchar(raw, *buf);
+                       count--;
+                       buf++;
+               }
+       }
+       return written;
 }
 
 /*
@@ -1188,7 +1202,7 @@ static int __init tty3215_init(void)
        driver->subtype = SYSTEM_TYPE_TTY;
        driver->init_termios = tty_std_termios;
        driver->init_termios.c_iflag = IGNBRK | IGNPAR;
-       driver->init_termios.c_oflag = ONLCR | XTABS;
+       driver->init_termios.c_oflag = ONLCR;
        driver->init_termios.c_lflag = ISIG;
        driver->flags = TTY_DRIVER_REAL_RAW;
        tty_set_operations(driver, &tty3215_ops);
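
tty3215_write() above now walks the buffer in chunks, handing runs of ordinary characters to raw3215_write() and routing each tab or newline through raw3215_putchar(), matching the removal of XTABS from the termios defaults in the same patch. A standalone trace of that chunking loop, with chunk_write()/chunk_putc() printf stubs standing in for the 3215 output routines:

    #include <stdio.h>
    #include <string.h>

    /* printf stubs standing in for raw3215_write()/raw3215_putchar() */
    static void chunk_write(const char *buf, int len)
    {
        printf("write \"%.*s\"\n", len, buf);
    }

    static void chunk_putc(char c)
    {
        printf("putc  %s\n", c == '\n' ? "\\n" : "\\t");
    }

    int main(void)
    {
        const char *buf = "col1\tcol2\nend";
        int count = (int)strlen(buf);
        int i, written = count;

        while (count > 0) {
            for (i = 0; i < count; i++)
                if (buf[i] == '\t' || buf[i] == '\n')
                    break;
            chunk_write(buf, i);
            count -= i;
            buf += i;
            if (count > 0) {
                chunk_putc(*buf);
                count--;
                buf++;
            }
        }
        printf("returned %d\n", written);
        return 0;
    }
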
index 7ed7a59878165bf8ef265e3fae90ee4b10017827..003663288e29b7f60405fa9bf721dcc6dbe19c0c 100644 (file)
@@ -559,7 +559,7 @@ sclp_tty_init(void)
        driver->subtype = SYSTEM_TYPE_TTY;
        driver->init_termios = tty_std_termios;
        driver->init_termios.c_iflag = IGNBRK | IGNPAR;
-       driver->init_termios.c_oflag = ONLCR | XTABS;
+       driver->init_termios.c_oflag = ONLCR;
        driver->init_termios.c_lflag = ISIG | ECHO;
        driver->flags = TTY_DRIVER_REAL_RAW;
        tty_set_operations(driver, &sclp_ops);
index df3306019a7eeaf2009411ba0328dc7450fc6034..d81f3cc43ff1d3e834fcb6d2dc6460810d167ee8 100644 (file)
@@ -377,6 +377,10 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
                pool->slab_flags |= SLAB_CACHE_DMA;
                pool->gfp_mask = __GFP_DMA;
        }
+
+       if (hostt->cmd_size)
+               hostt->cmd_pool = pool;
+
        return pool;
 }
 
@@ -421,8 +425,10 @@ out:
 out_free_slab:
        kmem_cache_destroy(pool->cmd_slab);
 out_free_pool:
-       if (hostt->cmd_size)
+       if (hostt->cmd_size) {
                scsi_free_host_cmd_pool(pool);
+               hostt->cmd_pool = NULL;
+       }
        goto out;
 }
 
@@ -444,8 +450,10 @@ static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
        if (!--pool->users) {
                kmem_cache_destroy(pool->cmd_slab);
                kmem_cache_destroy(pool->sense_slab);
-               if (hostt->cmd_size)
+               if (hostt->cmd_size) {
                        scsi_free_host_cmd_pool(pool);
+                       hostt->cmd_pool = NULL;
+               }
        }
        mutex_unlock(&host_cmd_pool_mutex);
 }
index 9c44392b748ff88044eeec12eb4dcdb6615e398c..ce62e8798cc80da072b452bced0a26fb44b5eb11 100644 (file)
@@ -1774,7 +1774,7 @@ static void scsi_request_fn(struct request_queue *q)
        blk_requeue_request(q, req);
        atomic_dec(&sdev->device_busy);
 out_delay:
-       if (atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
+       if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
                blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
index 788ed9b59b4e3f04c3a485fefe6d31dfdb6b681f..114203f32843216fcddae75fd225ea9b11284417 100644 (file)
@@ -1,8 +1,7 @@
 #
 # Makefile for the SuperH specific drivers.
 #
-obj-$(CONFIG_SUPERH)                   += intc/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)     += intc/
+obj-$(CONFIG_SH_INTC)                  += intc/
 ifneq ($(CONFIG_COMMON_CLK),y)
 obj-$(CONFIG_HAVE_CLK)                 += clk/
 endif
index 60228fae943fb0ac27fcd6ed70c1e0ac2d89f182..6a1b05ddc8c98b087e79580abdf2ff272cbce750 100644 (file)
@@ -1,7 +1,9 @@
 config SH_INTC
-       def_bool y
+       bool
        select IRQ_DOMAIN
 
+if SH_INTC
+
 comment "Interrupt controller options"
 
 config INTC_USERIMASK
@@ -37,3 +39,5 @@ config INTC_MAPPING_DEBUG
          between system IRQs and the per-controller id tables.
 
          If in doubt, say N.
+
+endif
index ae635872affb4b9a3fa993652cfad3009894532a..97bc62cbe2dab816cd33b20b86e6baa73d2ae19f 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,6 +141,7 @@ struct kioctx {
 
        struct {
                unsigned        tail;
+               unsigned        completed_events;
                spinlock_t      completion_lock;
        } ____cacheline_aligned_in_smp;
 
@@ -857,6 +858,68 @@ out:
        return ret;
 }
 
+/* refill_reqs_available
+ *     Updates the reqs_available reference counts used for tracking the
+ *     number of free slots in the completion ring.  This can be called
+ *     from aio_complete() (to optimistically update reqs_available) or
+ *     from aio_get_req() (the "we're out of events" case).  It must be
+ *     called holding ctx->completion_lock.
+ */
+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
+                                  unsigned tail)
+{
+       unsigned events_in_ring, completed;
+
+       /* Clamp head since userland can write to it. */
+       head %= ctx->nr_events;
+       if (head <= tail)
+               events_in_ring = tail - head;
+       else
+               events_in_ring = ctx->nr_events - (head - tail);
+
+       completed = ctx->completed_events;
+       if (events_in_ring < completed)
+               completed -= events_in_ring;
+       else
+               completed = 0;
+
+       if (!completed)
+               return;
+
+       ctx->completed_events -= completed;
+       put_reqs_available(ctx, completed);
+}
+
+/* user_refill_reqs_available
+ *     Called to refill reqs_available when aio_get_req() encounters an
+ *     out-of-space condition in the completion ring.
+ */
+static void user_refill_reqs_available(struct kioctx *ctx)
+{
+       spin_lock_irq(&ctx->completion_lock);
+       if (ctx->completed_events) {
+               struct aio_ring *ring;
+               unsigned head;
+
+               /* Access of ring->head may race with aio_read_events_ring()
+                * here, but that's okay: whether we read the old version or
+                * the new version, either will be valid.  The important
+                * part is that head cannot pass tail since we prevent
+                * aio_complete() from updating tail by holding
+                * ctx->completion_lock.  Even if head is invalid, the check
+                * against ctx->completed_events below will make sure we do the
+                * safe/right thing.
+                */
+               ring = kmap_atomic(ctx->ring_pages[0]);
+               head = ring->head;
+               kunmap_atomic(ring);
+
+               refill_reqs_available(ctx, head, ctx->tail);
+       }
+
+       spin_unlock_irq(&ctx->completion_lock);
+}
+
 /* aio_get_req
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
@@ -865,8 +928,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
        struct kiocb *req;
 
-       if (!get_reqs_available(ctx))
-               return NULL;
+       if (!get_reqs_available(ctx)) {
+               user_refill_reqs_available(ctx);
+               if (!get_reqs_available(ctx))
+                       return NULL;
+       }
 
        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
        if (unlikely(!req))
@@ -925,8 +991,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring *ring;
        struct io_event *ev_page, *event;
+       unsigned tail, pos, head;
        unsigned long   flags;
-       unsigned tail, pos;
 
        /*
         * Special case handling for sync iocbs:
@@ -987,10 +1053,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
        ctx->tail = tail;
 
        ring = kmap_atomic(ctx->ring_pages[0]);
+       head = ring->head;
        ring->tail = tail;
        kunmap_atomic(ring);
        flush_dcache_page(ctx->ring_pages[0]);
 
+       ctx->completed_events++;
+       if (ctx->completed_events > 1)
+               refill_reqs_available(ctx, head, tail);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        pr_debug("added to ring %p at [%u]\n", iocb, tail);
@@ -1005,7 +1075,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
        /* everything turned out well, dispose of the aiocb. */
        kiocb_free(iocb);
-       put_reqs_available(ctx, 1);
 
        /*
         * We have to order our ring_info tail store above and test
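
refill_reqs_available() above hands back only the completions that have already left the ring: it measures the ring occupancy from head and tail (with head clamped, since userland can scribble on it) and subtracts that from the completions recorded since the last refill. A small standalone check of that arithmetic, not part of the patch; the refillable() helper here is illustrative only:

    #include <stdio.h>

    /* Mirror of the occupancy arithmetic in refill_reqs_available() */
    static unsigned int refillable(unsigned int head, unsigned int tail,
                                   unsigned int nr_events, unsigned int completed)
    {
        unsigned int events_in_ring;

        head %= nr_events;              /* clamp untrusted head */
        if (head <= tail)
            events_in_ring = tail - head;
        else
            events_in_ring = nr_events - (head - tail);

        return completed > events_in_ring ? completed - events_in_ring : 0;
    }

    int main(void)
    {
        /* 128-slot ring, 10 completions recorded since the last refill */
        printf("%u\n", refillable(20, 25, 128, 10));  /* 5 still in ring -> 5  */
        printf("%u\n", refillable(25, 25, 128, 10));  /* ring drained    -> 10 */
        printf("%u\n", refillable(120, 2, 128, 10));  /* wrapped, 10 in  -> 0  */
        return 0;
    }
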
index 5a201d81049c09fcb280f26539d0ad5c6c5c5b51..fbd76ded9a34b3260a5e794115fff0312b3c6e08 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/freezer.h>
-#include <linux/workqueue.h>
 #include "async-thread.h"
 #include "ctree.h"
 
@@ -55,8 +54,39 @@ struct btrfs_workqueue {
        struct __btrfs_workqueue *high;
 };
 
-static inline struct __btrfs_workqueue
-*__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
+static void normal_work_helper(struct btrfs_work *work);
+
+#define BTRFS_WORK_HELPER(name)                                        \
+void btrfs_##name(struct work_struct *arg)                             \
+{                                                                      \
+       struct btrfs_work *work = container_of(arg, struct btrfs_work,  \
+                                              normal_work);            \
+       normal_work_helper(work);                                       \
+}
+
+BTRFS_WORK_HELPER(worker_helper);
+BTRFS_WORK_HELPER(delalloc_helper);
+BTRFS_WORK_HELPER(flush_delalloc_helper);
+BTRFS_WORK_HELPER(cache_helper);
+BTRFS_WORK_HELPER(submit_helper);
+BTRFS_WORK_HELPER(fixup_helper);
+BTRFS_WORK_HELPER(endio_helper);
+BTRFS_WORK_HELPER(endio_meta_helper);
+BTRFS_WORK_HELPER(endio_meta_write_helper);
+BTRFS_WORK_HELPER(endio_raid56_helper);
+BTRFS_WORK_HELPER(rmw_helper);
+BTRFS_WORK_HELPER(endio_write_helper);
+BTRFS_WORK_HELPER(freespace_write_helper);
+BTRFS_WORK_HELPER(delayed_meta_helper);
+BTRFS_WORK_HELPER(readahead_helper);
+BTRFS_WORK_HELPER(qgroup_rescan_helper);
+BTRFS_WORK_HELPER(extent_refs_helper);
+BTRFS_WORK_HELPER(scrub_helper);
+BTRFS_WORK_HELPER(scrubwrc_helper);
+BTRFS_WORK_HELPER(scrubnc_helper);
+
+static struct __btrfs_workqueue *
+__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
                         int thresh)
 {
        struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -232,13 +262,11 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
        spin_unlock_irqrestore(lock, flags);
 }
 
-static void normal_work_helper(struct work_struct *arg)
+static void normal_work_helper(struct btrfs_work *work)
 {
-       struct btrfs_work *work;
        struct __btrfs_workqueue *wq;
        int need_order = 0;
 
-       work = container_of(arg, struct btrfs_work, normal_work);
        /*
         * We should not touch things inside work in the following cases:
         * 1) after work->func() if it has no ordered_free
@@ -262,7 +290,7 @@ static void normal_work_helper(struct work_struct *arg)
                trace_btrfs_all_work_done(work);
 }
 
-void btrfs_init_work(struct btrfs_work *work,
+void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
                     btrfs_func_t func,
                     btrfs_func_t ordered_func,
                     btrfs_func_t ordered_free)
@@ -270,7 +298,7 @@ void btrfs_init_work(struct btrfs_work *work,
        work->func = func;
        work->ordered_func = ordered_func;
        work->ordered_free = ordered_free;
-       INIT_WORK(&work->normal_work, normal_work_helper);
+       INIT_WORK(&work->normal_work, uniq_func);
        INIT_LIST_HEAD(&work->ordered_list);
        work->flags = 0;
 }
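
BTRFS_WORK_HELPER() above stamps out one thin wrapper per work type, so every INIT_WORK() in btrfs gets a distinct work function address while the body stays shared in normal_work_helper(). A standalone sketch of the macro pattern, simplified for illustration (the real wrappers recover the btrfs_work with container_of()):

    #include <stdio.h>

    struct work { const char *name; };

    static void common_handler(struct work *w)
    {
        printf("handling %s\n", w->name);
    }

    /* One thin wrapper per work type, each with its own address */
    #define WORK_HELPER(name)                       \
    static void name##_helper(struct work *w)       \
    {                                               \
        common_handler(w);                          \
    }

    WORK_HELPER(endio)
    WORK_HELPER(delalloc)

    int main(void)
    {
        struct work a = { "endio work" };
        struct work b = { "delalloc work" };

        endio_helper(&a);
        delalloc_helper(&b);

        printf("distinct addresses: %s\n",
               endio_helper != delalloc_helper ? "yes" : "folded by the compiler");
        return 0;
    }
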
index 9c6b66d15fb0a07cd0a6a4656d6865848aa5c8ea..e9e31c94758fd6ddea5cbd458d5aca04a27e9a73 100644 (file)
 
 #ifndef __BTRFS_ASYNC_THREAD_
 #define __BTRFS_ASYNC_THREAD_
+#include <linux/workqueue.h>
 
 struct btrfs_workqueue;
 /* Internal use only */
 struct __btrfs_workqueue;
 struct btrfs_work;
 typedef void (*btrfs_func_t)(struct btrfs_work *arg);
+typedef void (*btrfs_work_func_t)(struct work_struct *arg);
 
 struct btrfs_work {
        btrfs_func_t func;
@@ -38,11 +40,35 @@ struct btrfs_work {
        unsigned long flags;
 };
 
+#define BTRFS_WORK_HELPER_PROTO(name)                                  \
+void btrfs_##name(struct work_struct *arg)
+
+BTRFS_WORK_HELPER_PROTO(worker_helper);
+BTRFS_WORK_HELPER_PROTO(delalloc_helper);
+BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
+BTRFS_WORK_HELPER_PROTO(cache_helper);
+BTRFS_WORK_HELPER_PROTO(submit_helper);
+BTRFS_WORK_HELPER_PROTO(fixup_helper);
+BTRFS_WORK_HELPER_PROTO(endio_helper);
+BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
+BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
+BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
+BTRFS_WORK_HELPER_PROTO(rmw_helper);
+BTRFS_WORK_HELPER_PROTO(endio_write_helper);
+BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
+BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
+BTRFS_WORK_HELPER_PROTO(readahead_helper);
+BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
+BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
+BTRFS_WORK_HELPER_PROTO(scrub_helper);
+BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
+BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
+
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
                                              int flags,
                                              int max_active,
                                              int thresh);
-void btrfs_init_work(struct btrfs_work *work,
+void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
                     btrfs_func_t func,
                     btrfs_func_t ordered_func,
                     btrfs_func_t ordered_free);
index da775bfdebc989d905c931cde0f12bd00027abc9..a2e90f855d7d1e3cad650fc1d9691fd2de01cf65 100644 (file)
@@ -1395,8 +1395,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                return -ENOMEM;
 
        async_work->delayed_root = delayed_root;
-       btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root,
-                       NULL, NULL);
+       btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
+                       btrfs_async_run_delayed_root, NULL, NULL);
        async_work->nr = nr;
 
        btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
index d0ed9e664f7d4d26c69a12498f80b3ad59809a43..a1d36e62179c528041f292675e7452102863de45 100644 (file)
@@ -39,7 +39,6 @@
 #include "btrfs_inode.h"
 #include "volumes.h"
 #include "print-tree.h"
-#include "async-thread.h"
 #include "locking.h"
 #include "tree-log.h"
 #include "free-space-cache.h"
@@ -693,35 +692,41 @@ static void end_workqueue_bio(struct bio *bio, int err)
 {
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;
+       struct btrfs_workqueue *wq;
+       btrfs_work_func_t func;
 
        fs_info = end_io_wq->info;
        end_io_wq->error = err;
-       btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
 
        if (bio->bi_rw & REQ_WRITE) {
-               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
-                       btrfs_queue_work(fs_info->endio_meta_write_workers,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
-                       btrfs_queue_work(fs_info->endio_freespace_worker,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-                       btrfs_queue_work(fs_info->endio_raid56_workers,
-                                        &end_io_wq->work);
-               else
-                       btrfs_queue_work(fs_info->endio_write_workers,
-                                        &end_io_wq->work);
+               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
+                       wq = fs_info->endio_meta_write_workers;
+                       func = btrfs_endio_meta_write_helper;
+               } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
+                       wq = fs_info->endio_freespace_worker;
+                       func = btrfs_freespace_write_helper;
+               } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+                       wq = fs_info->endio_raid56_workers;
+                       func = btrfs_endio_raid56_helper;
+               } else {
+                       wq = fs_info->endio_write_workers;
+                       func = btrfs_endio_write_helper;
+               }
        } else {
-               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-                       btrfs_queue_work(fs_info->endio_raid56_workers,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata)
-                       btrfs_queue_work(fs_info->endio_meta_workers,
-                                        &end_io_wq->work);
-               else
-                       btrfs_queue_work(fs_info->endio_workers,
-                                        &end_io_wq->work);
+               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+                       wq = fs_info->endio_raid56_workers;
+                       func = btrfs_endio_raid56_helper;
+               } else if (end_io_wq->metadata) {
+                       wq = fs_info->endio_meta_workers;
+                       func = btrfs_endio_meta_helper;
+               } else {
+                       wq = fs_info->endio_workers;
+                       func = btrfs_endio_helper;
+               }
        }
+
+       btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
+       btrfs_queue_work(wq, &end_io_wq->work);
 }
 
 /*
@@ -828,7 +833,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;
 
-       btrfs_init_work(&async->work, run_one_async_start,
+       btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
                        run_one_async_done, run_one_async_free);
 
        async->bio_flags = bio_flags;
@@ -3450,7 +3455,8 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
                btrfs_set_stack_device_generation(dev_item, 0);
                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
-               btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
+               btrfs_set_stack_device_total_bytes(dev_item,
+                                                  dev->disk_total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
index 102ed3143976fa9cfe98e21f237f45ed7fbed3c2..3efe1c3877bf34c4643ce99fb90d52fedda74d06 100644 (file)
@@ -552,7 +552,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
-       btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
+       btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
+                       caching_thread, NULL, NULL);
 
        spin_lock(&cache->lock);
        /*
@@ -2749,8 +2750,8 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
                async->sync = 0;
        init_completion(&async->wait);
 
-       btrfs_init_work(&async->work, delayed_ref_async_start,
-                       NULL, NULL);
+       btrfs_init_work(&async->work, btrfs_extent_refs_helper,
+                       delayed_ref_async_start, NULL, NULL);
 
        btrfs_queue_work(root->fs_info->extent_workers, &async->work);
 
@@ -3586,13 +3587,7 @@ static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  */
 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
-       /*
-        * we add in the count of missing devices because we want
-        * to make sure that any RAID levels on a degraded FS
-        * continue to be honored.
-        */
-       u64 num_devices = root->fs_info->fs_devices->rw_devices +
-               root->fs_info->fs_devices->missing_devices;
+       u64 num_devices = root->fs_info->fs_devices->rw_devices;
        u64 target;
        u64 tmp;
 
@@ -8440,13 +8435,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
        if (stripped)
                return extended_to_chunk(stripped);
 
-       /*
-        * we add in the count of missing devices because we want
-        * to make sure that any RAID levels on a degraded FS
-        * continue to be honored.
-        */
-       num_devices = root->fs_info->fs_devices->rw_devices +
-               root->fs_info->fs_devices->missing_devices;
+       num_devices = root->fs_info->fs_devices->rw_devices;
 
        stripped = BTRFS_BLOCK_GROUP_RAID0 |
                BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
index 3e11aab9f391d9bce24c329694d62d3a48ced811..af0359dcf337dbfec421b49fedbb8b5d0ecbf8b8 100644 (file)
@@ -2532,6 +2532,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
                                        uptodate = 0;
+                               offset += len;
                                continue;
                        }
                }
@@ -4207,8 +4208,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                return -ENOMEM;
        path->leave_spinning = 1;
 
-       start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
-       len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
+       start = round_down(start, BTRFS_I(inode)->root->sectorsize);
+       len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
 
        /*
         * lookup the last file extent.  We're not using i_size here
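
The extent_fiemap() hunk above rounds the start of the requested range down and its end up to the sector size, so the aligned range still covers everything the caller asked for (aligning both start and len upward, as before, could skip the head of the range). A quick standalone check with a 4 KiB sector size; ROUND_DOWN/ROUND_UP are simple stand-ins for the kernel helpers, and max is taken here to be the end offset of the requested range, which is computed earlier in extent_fiemap() and not visible in this hunk:

    #include <stdio.h>

    #define ROUND_DOWN(x, a)  ((x) & ~((a) - 1))
    #define ROUND_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long long sectorsize = 4096;
        unsigned long long start = 5000, len = 3000;
        unsigned long long max = start + len;   /* assumed: end of requested range */

        unsigned long long astart = ROUND_DOWN(start, sectorsize);
        unsigned long long alen = ROUND_UP(max, sectorsize) - astart;

        printf("requested: [%llu, %llu)\n", start, start + len);
        printf("aligned:   [%llu, %llu), len %llu\n", astart, astart + alen, alen);
        return 0;
    }
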
index d3afac292d677e0ed492255ca7cb3890cde717b1..36861b7a6757673189737024b62fcce9842bc3d0 100644 (file)
@@ -1840,7 +1840,15 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
 {
        if (filp->private_data)
                btrfs_ioctl_trans_end(filp);
-       filemap_flush(inode->i_mapping);
+       /*
+        * ordered_data_close is set by setattr when we are about to truncate
+        * a file from a non-zero size to a zero size.  This tries to
+        * flush down new bytes that may have been written if the
+        * application were using truncate to replace a file in place.
+        */
+       if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+                              &BTRFS_I(inode)->runtime_flags))
+                       filemap_flush(inode->i_mapping);
        return 0;
 }
 
@@ -2088,10 +2096,9 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
                goto out;
        }
 
-       if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
+       if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
                u64 num_bytes;
 
-               path->slots[0]++;
                key.offset = offset;
                btrfs_set_item_key_safe(root, path, &key);
                fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -2216,7 +2223,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                goto out_only_mutex;
        }
 
-       lockstart = round_up(offset , BTRFS_I(inode)->root->sectorsize);
+       lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
        lockend = round_down(offset + len,
                             BTRFS_I(inode)->root->sectorsize) - 1;
        same_page = ((offset >> PAGE_CACHE_SHIFT) ==
@@ -2277,7 +2284,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                                                tail_start + tail_len, 0, 1);
                                if (ret)
                                        goto out_only_mutex;
-                               }
+                       }
                }
        }
 
index 03708ef3deefb11edd7998ee831bf370e6ad8a66..9c194bd74d6e513c075b077b62ee1754cd02e966 100644 (file)
@@ -1096,8 +1096,10 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);
 
-               btrfs_init_work(&async_cow->work, async_cow_start,
-                               async_cow_submit, async_cow_free);
+               btrfs_init_work(&async_cow->work,
+                               btrfs_delalloc_helper,
+                               async_cow_start, async_cow_submit,
+                               async_cow_free);
 
                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
@@ -1881,7 +1883,8 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
 
        SetPageChecked(page);
        page_cache_get(page);
-       btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
+       btrfs_init_work(&fixup->work, btrfs_fixup_helper,
+                       btrfs_writepage_fixup_worker, NULL, NULL);
        fixup->page = page;
        btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
        return -EBUSY;
@@ -2822,7 +2825,8 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
        struct inode *inode = page->mapping->host;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ordered_extent *ordered_extent = NULL;
-       struct btrfs_workqueue *workers;
+       struct btrfs_workqueue *wq;
+       btrfs_work_func_t func;
 
        trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
 
@@ -2831,13 +2835,17 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
                                            end - start + 1, uptodate))
                return 0;
 
-       btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
+       if (btrfs_is_free_space_inode(inode)) {
+               wq = root->fs_info->endio_freespace_worker;
+               func = btrfs_freespace_write_helper;
+       } else {
+               wq = root->fs_info->endio_write_workers;
+               func = btrfs_endio_write_helper;
+       }
 
-       if (btrfs_is_free_space_inode(inode))
-               workers = root->fs_info->endio_freespace_worker;
-       else
-               workers = root->fs_info->endio_write_workers;
-       btrfs_queue_work(workers, &ordered_extent->work);
+       btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
+                       NULL);
+       btrfs_queue_work(wq, &ordered_extent->work);
 
        return 0;
 }
@@ -4674,6 +4682,11 @@ static void evict_inode_truncate_pages(struct inode *inode)
                clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
                remove_extent_mapping(map_tree, em);
                free_extent_map(em);
+               if (need_resched()) {
+                       write_unlock(&map_tree->lock);
+                       cond_resched();
+                       write_lock(&map_tree->lock);
+               }
        }
        write_unlock(&map_tree->lock);
 
@@ -4696,6 +4709,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
                                 &cached_state, GFP_NOFS);
                free_extent_state(state);
 
+               cond_resched();
                spin_lock(&io_tree->lock);
        }
        spin_unlock(&io_tree->lock);
@@ -5181,6 +5195,42 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
                        iput(inode);
                        inode = ERR_PTR(ret);
                }
+               /*
+                * If orphan cleanup did remove any orphans, it means the tree
+                * was modified and therefore the commit root is not the same as
+                * the current root anymore. This is a problem, because send
+                * uses the commit root and therefore can see inode items that
+                * don't exist in the current root anymore, and for example make
+                * calls to btrfs_iget, which will do tree lookups based on the
+                * current root and not on the commit root. Those lookups will
+                * fail, returning a -ESTALE error, and making send fail with
+                * that error. So make sure a send does not see any orphans we
+                * have just removed, and that it will see the same inodes
+                * regardless of whether a transaction commit happened before
+                * it started (meaning that the commit root will be the same as
+                * the current root) or not.
+                */
+               if (sub_root->node != sub_root->commit_root) {
+                       u64 sub_flags = btrfs_root_flags(&sub_root->root_item);
+
+                       if (sub_flags & BTRFS_ROOT_SUBVOL_RDONLY) {
+                               struct extent_buffer *eb;
+
+                               /*
+                                * Assert we can't have races between dentry
+                                * lookup called through the snapshot creation
+                                * ioctl and the VFS.
+                                */
+                               ASSERT(mutex_is_locked(&dir->i_mutex));
+
+                               down_write(&root->fs_info->commit_root_sem);
+                               eb = sub_root->commit_root;
+                               sub_root->commit_root =
+                                       btrfs_root_node(sub_root);
+                               up_write(&root->fs_info->commit_root_sem);
+                               free_extent_buffer(eb);
+                       }
+               }
        }
 
        return inode;
@@ -5605,6 +5655,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                return ERR_PTR(-ENOMEM);
        }
 
+       /*
+        * For O_TMPFILE, set the link count to 0, so that from this point on
+        * we fill in an inode item with the correct link count.
+        */
+       if (!name)
+               set_nlink(inode, 0);
+
        /*
         * we have to initialize this early, so we can reclaim the inode
         * number if we fail afterwards in this function.
@@ -6097,14 +6154,14 @@ out_fail:
 static int merge_extent_mapping(struct extent_map_tree *em_tree,
                                struct extent_map *existing,
                                struct extent_map *em,
-                               u64 map_start, u64 map_len)
+                               u64 map_start)
 {
        u64 start_diff;
 
        BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
        start_diff = map_start - em->start;
        em->start = map_start;
-       em->len = map_len;
+       em->len = existing->start - em->start;
        if (em->block_start < EXTENT_MAP_LAST_BYTE &&
            !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                em->block_start += start_diff;
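Worked example of the new length calculation: if the candidate em has been adjusted to start at map_start = 8K and an existing mapping already starts at 12K, then em->len becomes existing->start - em->start = 12K - 8K = 4K, so em now ends exactly where the existing mapping begins instead of being clamped to the fixed sectorsize the caller used to pass in.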
@@ -6275,6 +6332,8 @@ next:
                        goto not_found;
                if (start + len <= found_key.offset)
                        goto not_found;
+               if (start > found_key.offset)
+                       goto next;
                em->start = start;
                em->orig_start = start;
                em->len = found_key.offset - start;
@@ -6390,8 +6449,7 @@ insert:
                                                         em->len);
                        if (existing) {
                                err = merge_extent_mapping(em_tree, existing,
-                                                          em, start,
-                                                          root->sectorsize);
+                                                          em, start);
                                free_extent_map(existing);
                                if (err) {
                                        free_extent_map(em);
@@ -7158,7 +7216,8 @@ again:
        if (!ret)
                goto out_test;
 
-       btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
+       btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
+                       finish_ordered_fn, NULL, NULL);
        btrfs_queue_work(root->fs_info->endio_write_workers,
                         &ordered->work);
 out_test:
@@ -7306,10 +7365,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        map_length = orig_bio->bi_iter.bi_size;
        ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
                              &map_length, NULL, 0);
-       if (ret) {
-               bio_put(orig_bio);
+       if (ret)
                return -EIO;
-       }
 
        if (map_length >= orig_bio->bi_iter.bi_size) {
                bio = orig_bio;
@@ -7326,6 +7383,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
        if (!bio)
                return -ENOMEM;
+
        bio->bi_private = dip;
        bio->bi_end_io = btrfs_end_dio_bio;
        atomic_inc(&dip->pending_bios);
@@ -7534,7 +7592,8 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
        count = iov_iter_count(iter);
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                     &BTRFS_I(inode)->runtime_flags))
-               filemap_fdatawrite_range(inode->i_mapping, offset, count);
+               filemap_fdatawrite_range(inode->i_mapping, offset,
+                                        offset + count - 1);
 
        if (rw & WRITE) {
                /*
@@ -8495,7 +8554,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
        work->inode = inode;
        work->wait = wait;
        work->delay_iput = delay_iput;
-       btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
+       WARN_ON_ONCE(!inode);
+       btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
+                       btrfs_run_delalloc_work, NULL, NULL);
 
        return work;
 }
@@ -8979,6 +9040,14 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (ret)
                goto out;
 
+       /*
+        * We set the number of links to 0 in btrfs_new_inode(), and here we
+        * set it to 1 because d_tmpfile() will issue a warning if the count
+        * is 0, via this call chain:
+        *
+        *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
+        */
+       set_nlink(inode, 1);
        d_tmpfile(dentry, inode);
        mark_inode_dirty(inode);
 
index 47aceb494d1d456da8940c5e7a1d3eed3fa4adc5..fce6fd0e3f50c729adcf278b1b2a13bd2ede1494 100644 (file)
@@ -711,39 +711,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        if (ret)
                goto fail;
 
-       ret = btrfs_orphan_cleanup(pending_snapshot->snap);
-       if (ret)
-               goto fail;
-
-       /*
-        * If orphan cleanup did remove any orphans, it means the tree was
-        * modified and therefore the commit root is not the same as the
-        * current root anymore. This is a problem, because send uses the
-        * commit root and therefore can see inode items that don't exist
-        * in the current root anymore, and for example make calls to
-        * btrfs_iget, which will do tree lookups based on the current root
-        * and not on the commit root. Those lookups will fail, returning a
-        * -ESTALE error, and making send fail with that error. So make sure
-        * a send does not see any orphans we have just removed, and that it
-        * will see the same inodes regardless of whether a transaction
-        * commit happened before it started (meaning that the commit root
-        * will be the same as the current root) or not.
-        */
-       if (readonly && pending_snapshot->snap->node !=
-           pending_snapshot->snap->commit_root) {
-               trans = btrfs_join_transaction(pending_snapshot->snap);
-               if (IS_ERR(trans) && PTR_ERR(trans) != -ENOENT) {
-                       ret = PTR_ERR(trans);
-                       goto fail;
-               }
-               if (!IS_ERR(trans)) {
-                       ret = btrfs_commit_transaction(trans,
-                                                      pending_snapshot->snap);
-                       if (ret)
-                               goto fail;
-               }
-       }
-
        inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
@@ -3527,7 +3494,8 @@ process_slot:
                        btrfs_mark_buffer_dirty(leaf);
                        btrfs_release_path(path);
 
-                       last_dest_end = new_key.offset + datal;
+                       last_dest_end = ALIGN(new_key.offset + datal,
+                                             root->sectorsize);
                        ret = clone_finish_inode_update(trans, inode,
                                                        last_dest_end,
                                                        destoff, olen);
index 963895c1f801dcbbde3ce9c5e621c91308793f7a..ac734ec4cc20ecbc47bc1b495b75021b4c699edf 100644 (file)
@@ -615,6 +615,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
                spin_unlock(&root->ordered_extent_lock);
 
                btrfs_init_work(&ordered->flush_work,
+                               btrfs_flush_delalloc_helper,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(root->fs_info->flush_workers,
index b497498484bebaf5774aa3c4d016a5e4f0b05ff1..ded5c601d9162a7699a3fa4802a8c9df9bd37722 100644 (file)
@@ -1973,7 +1973,7 @@ static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
                                   elem.seq, &roots);
        btrfs_put_tree_mod_seq(fs_info, &elem);
        if (ret < 0)
-               return ret;
+               goto out;
 
        if (roots->nnodes != 1)
                goto out;
@@ -2720,6 +2720,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
        memset(&fs_info->qgroup_rescan_work, 0,
               sizeof(fs_info->qgroup_rescan_work));
        btrfs_init_work(&fs_info->qgroup_rescan_work,
+                       btrfs_qgroup_rescan_helper,
                        btrfs_qgroup_rescan_worker, NULL, NULL);
 
        if (ret) {
index 4a88f073fdd79bf5440f2d54fb4771fc4361212c..0a6b6e4bcbb97a8af56ad6a58aef9f514c3b1132 100644 (file)
@@ -1416,7 +1416,8 @@ cleanup:
 
 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
 {
-       btrfs_init_work(&rbio->work, rmw_work, NULL, NULL);
+       btrfs_init_work(&rbio->work, btrfs_rmw_helper,
+                       rmw_work, NULL, NULL);
 
        btrfs_queue_work(rbio->fs_info->rmw_workers,
                         &rbio->work);
@@ -1424,7 +1425,8 @@ static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
 
 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
 {
-       btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL);
+       btrfs_init_work(&rbio->work, btrfs_rmw_helper,
+                       read_rebuild_work, NULL, NULL);
 
        btrfs_queue_work(rbio->fs_info->rmw_workers,
                         &rbio->work);
@@ -1665,7 +1667,8 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
        plug = container_of(cb, struct btrfs_plug_cb, cb);
 
        if (from_schedule) {
-               btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
+               btrfs_init_work(&plug->work, btrfs_rmw_helper,
+                               unplug_work, NULL, NULL);
                btrfs_queue_work(plug->info->rmw_workers,
                                 &plug->work);
                return;
index 09230cf3a2447b3541826f21b2f3fe1dcce202ce..20408c6b665ae94e03a152c94bb822bb87fdc63e 100644 (file)
@@ -798,7 +798,8 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
                /* FIXME we cannot handle this properly right now */
                BUG();
        }
-       btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
+       btrfs_init_work(&rmw->work, btrfs_readahead_helper,
+                       reada_start_machine_worker, NULL, NULL);
        rmw->fs_info = fs_info;
 
        btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
index b6d198f5181ed6d07f8f9c29782bd5a99e46f9ce..f4a41f37be229b555fb2e26993aff7055ee7fff1 100644 (file)
@@ -428,8 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
                sbio->index = i;
                sbio->sctx = sctx;
                sbio->page_count = 0;
-               btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
-                               NULL, NULL);
+               btrfs_init_work(&sbio->work, btrfs_scrub_helper,
+                               scrub_bio_end_io_worker, NULL, NULL);
 
                if (i != SCRUB_BIOS_PER_SCTX - 1)
                        sctx->bios[i]->next_free = i + 1;
@@ -999,8 +999,8 @@ nodatasum_case:
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                scrub_pending_trans_workers_inc(sctx);
-               btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
-                               NULL, NULL);
+               btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
+                               scrub_fixup_nodatasum, NULL, NULL);
                btrfs_queue_work(fs_info->scrub_workers,
                                 &fixup_nodatasum->work);
                goto out;
@@ -1616,7 +1616,8 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err)
        sbio->err = err;
        sbio->bio = bio;
 
-       btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
+       btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
+                        scrub_wr_bio_end_io_worker, NULL, NULL);
        btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
 }
 
@@ -2904,6 +2905,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        struct scrub_ctx *sctx;
        int ret;
        struct btrfs_device *dev;
+       struct rcu_string *name;
 
        if (btrfs_fs_closing(fs_info))
                return -EINVAL;
@@ -2965,6 +2967,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                return -ENODEV;
        }
 
+       if (!is_dev_replace && !readonly && !dev->writeable) {
+               mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+               rcu_read_lock();
+               name = rcu_dereference(dev->name);
+               btrfs_err(fs_info, "scrub: device %s is not writable",
+                         name->str);
+               rcu_read_unlock();
+               return -EROFS;
+       }
+
        mutex_lock(&fs_info->scrub_lock);
        if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
                mutex_unlock(&fs_info->scrub_lock);
@@ -3203,7 +3215,8 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        nocow_ctx->len = len;
        nocow_ctx->mirror_num = mirror_num;
        nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
-       btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
+       btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
+                       copy_nocow_pages_worker, NULL, NULL);
        INIT_LIST_HEAD(&nocow_ctx->inodes);
        btrfs_queue_work(fs_info->scrub_nocow_workers,
                         &nocow_ctx->work);
index 78699364f537c423b9fa25cb0e3c124ded8c6ce7..12e53556e214c2c26f0a67aadf63b53637178822 100644 (file)
@@ -614,7 +614,7 @@ int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
        if (!fs_info->device_dir_kobj)
                return -EINVAL;
 
-       if (one_device) {
+       if (one_device && one_device->bdev) {
                disk = one_device->bdev->bd_part;
                disk_kobj = &part_to_dev(disk)->kobj;
 
index 9e1f2cd5e67ab0fff18ec12df76c965e109e662c..7e0e6e3029dd4e36e9fd72e68b151ac991b66c12 100644 (file)
@@ -3298,7 +3298,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
        struct list_head ordered_sums;
        int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
        bool has_extents = false;
-       bool need_find_last_extent = (*last_extent == 0);
+       bool need_find_last_extent = true;
        bool done = false;
 
        INIT_LIST_HEAD(&ordered_sums);
@@ -3352,8 +3352,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                 */
                if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
                        has_extents = true;
-                       if (need_find_last_extent &&
-                           first_key.objectid == (u64)-1)
+                       if (first_key.objectid == (u64)-1)
                                first_key = ins_keys[i];
                } else {
                        need_find_last_extent = false;
@@ -3427,6 +3426,16 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
        if (!has_extents)
                return ret;
 
+       if (need_find_last_extent && *last_extent == first_key.offset) {
+               /*
+                * We don't have any leaves between our current one and the one
+                * we processed before that can have file extent items for our
+                * inode (and have a generation number smaller than our current
+                * transaction id).
+                */
+               need_find_last_extent = false;
+       }
+
        /*
         * Because we use btrfs_search_forward we could skip leaves that were
         * not modified and then assume *last_extent is valid when it really
@@ -3537,7 +3546,7 @@ fill_holes:
                                               0, 0);
                if (ret)
                        break;
-               *last_extent = offset + len;
+               *last_extent = extent_end;
        }
        /*
         * Need to let the callers know we dropped the path so they should
index 6cb82f62cb7c22c4b3038e248e52b9694171a5bd..340a92d08e84d9e5268a18fe5f3112465fa47888 100644 (file)
@@ -508,6 +508,44 @@ static noinline int device_list_add(const char *path,
                ret = 1;
                device->fs_devices = fs_devices;
        } else if (!device->name || strcmp(device->name->str, path)) {
+               /*
+                * When the FS is already mounted:
+                * 1. If you are here and device->name is NULL, this device
+                *    was missing at the time the FS was mounted.
+                * 2. If you are here and device->name differs from 'path',
+                *    then either
+                *      a. the same device disappeared and reappeared under a
+                *         different name, or
+                *      b. the missing disk that was replaced has reappeared.
+                *
+                * We must allow 1 and 2a above, but 2b would be spurious and
+                * unintentional.
+                *
+                * Further, in cases 1 and 2a the disk at 'path' will have
+                * missed some transactions while it was away, and in case 2a
+                * the stale bdev has to be updated as well.
+                * 2b must never be allowed.
+                */
+
+               /*
+                * For now, don't allow updating the btrfs_fs_device via the
+                * btrfs dev scan cli after the FS has been mounted.
+                */
+               if (fs_devices->opened) {
+                       return -EBUSY;
+               } else {
+                       /*
+                        * That is, if the FS is _not_ mounted and you get
+                        * here, there is more than one disk with the same
+                        * uuid and devid. We keep the one with the larger
+                        * generation number, or the last one scanned if the
+                        * generations are equal.
+                        */
+                       if (found_transid < device->generation)
+                               return -EEXIST;
+               }
+
                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
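Condensed, the outcomes of the branch above are (summary for readability, not code from the patch):

	/*
	 * FS mounted (fs_devices->opened)                -> return -EBUSY
	 * unmounted, found_transid <  device->generation -> return -EEXIST
	 *            (the copy we already know about is newer; ignore this one)
	 * unmounted, found_transid >= device->generation -> fall through and
	 *            adopt the new 'path' (and, in the next hunk, refresh
	 *            device->generation from found_transid)
	 */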
@@ -519,6 +557,15 @@ static noinline int device_list_add(const char *path,
                }
        }
 
+       /*
+        * Unmount does not free the btrfs_device struct, but it zeroes the
+        * generation along with most of the other members. So just set it
+        * back here; we need it to pick the disk with the largest generation
+        * (as above).
+        */
+       if (!fs_devices->opened)
+               device->generation = found_transid;
+
        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
@@ -1436,7 +1483,7 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans,
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
-       btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
+       btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
@@ -1671,7 +1718,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        device->fs_devices->total_devices--;
 
        if (device->missing)
-               root->fs_info->fs_devices->missing_devices--;
+               device->fs_devices->missing_devices--;
 
        next_device = list_entry(root->fs_info->fs_devices->devices.next,
                                 struct btrfs_device, dev_list);
@@ -1801,8 +1848,12 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
        if (srcdev->bdev) {
                fs_info->fs_devices->open_devices--;
 
-               /* zero out the old super */
-               btrfs_scratch_superblock(srcdev);
+               /*
+                * zero out the old super only if the device is writable
+                * (i.e. don't scratch a read-only seed device)
+                */
+               if (srcdev->writeable)
+                       btrfs_scratch_superblock(srcdev);
        }
 
        call_rcu(&srcdev->rcu, free_device);
@@ -1941,6 +1992,9 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
        fs_devices->seeding = 0;
        fs_devices->num_devices = 0;
        fs_devices->open_devices = 0;
+       fs_devices->missing_devices = 0;
+       fs_devices->num_can_discard = 0;
+       fs_devices->rotating = 0;
        fs_devices->seed = seed_devices;
 
        generate_random_uuid(fs_devices->fsid);
@@ -5800,7 +5854,8 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
        else
                generate_random_uuid(dev->uuid);
 
-       btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
+       btrfs_init_work(&dev->work, btrfs_submit_helper,
+                       pending_bios_fn, NULL, NULL);
 
        return dev;
 }
index ac4f260155c875b80d9d6611af1139438e62fc91..889b9845575079517e92ee387fe5c863751c3848 100644 (file)
@@ -207,6 +207,19 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
+static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
+{
+       struct super_block *sb = file->f_path.dentry->d_sb;
+       struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct TCP_Server_Info *server = tcon->ses->server;
+
+       if (server->ops->fallocate)
+               return server->ops->fallocate(file, tcon, mode, off, len);
+
+       return -EOPNOTSUPP;
+}
+
 static int cifs_permission(struct inode *inode, int mask)
 {
        struct cifs_sb_info *cifs_sb;
@@ -812,8 +825,9 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
        if (!(S_ISREG(inode->i_mode)))
                return -EINVAL;
 
-       /* check if file is oplocked */
-       if (((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
+       /* Check if file is oplocked if this is request for new lease */
+       if (arg == F_UNLCK ||
+           ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
            ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
                return generic_setlease(file, arg, lease);
        else if (tlink_tcon(cfile->tlink)->local_lease &&
@@ -908,6 +922,7 @@ const struct file_operations cifs_file_ops = {
        .unlocked_ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_strict_ops = {
@@ -927,6 +942,7 @@ const struct file_operations cifs_file_strict_ops = {
        .unlocked_ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_direct_ops = {
@@ -947,6 +963,7 @@ const struct file_operations cifs_file_direct_ops = {
 #endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_nobrl_ops = {
@@ -965,6 +982,7 @@ const struct file_operations cifs_file_nobrl_ops = {
        .unlocked_ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_strict_nobrl_ops = {
@@ -983,6 +1001,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
        .unlocked_ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_direct_nobrl_ops = {
@@ -1002,6 +1021,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
 #endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_dir_ops = {
index 0012e1e291d427cbb4a6696792cb85743231ec1e..dfc731b02aa9b3daca3a15d95346bcd82cc2b3d5 100644 (file)
@@ -409,6 +409,10 @@ struct smb_version_operations {
        /* get mtu credits */
        int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int,
                                unsigned int *, unsigned int *);
+       /* check if we need to issue closedir */
+       bool (*dir_needs_close)(struct cifsFileInfo *);
+       long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
+                         loff_t);
 };
 
 struct smb_version_values {
@@ -883,6 +887,7 @@ struct cifs_tcon {
                                for this mount even if server would support */
        bool local_lease:1; /* check leases (only) on local system not remote */
        bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
+       bool broken_sparse_sup; /* if server or share does not support sparse */
        bool need_reconnect:1; /* connection reset, tid now invalid */
 #ifdef CONFIG_CIFS_SMB2
        bool print:1;           /* set if connection to printer share */
index 33df36ef9d52037e69857f1dc7b1c9591acb8251..5f9822ac0245dcd637838b3fda843b3cc4d09901 100644 (file)
@@ -2253,6 +2253,29 @@ typedef struct {
 /* minimum includes first three fields, and empty FS Name */
 #define MIN_FS_ATTR_INFO_SIZE 12
 
+
+/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
+#define FILE_SUPPORT_INTEGRITY_STREAMS 0x04000000
+#define FILE_SUPPORTS_USN_JOURNAL      0x02000000
+#define FILE_SUPPORTS_OPEN_BY_FILE_ID  0x01000000
+#define FILE_SUPPORTS_EXTENDED_ATTRIBUTES 0x00800000
+#define FILE_SUPPORTS_HARD_LINKS       0x00400000
+#define FILE_SUPPORTS_TRANSACTIONS     0x00200000
+#define FILE_SEQUENTIAL_WRITE_ONCE     0x00100000
+#define FILE_READ_ONLY_VOLUME          0x00080000
+#define FILE_NAMED_STREAMS             0x00040000
+#define FILE_SUPPORTS_ENCRYPTION       0x00020000
+#define FILE_SUPPORTS_OBJECT_IDS       0x00010000
+#define FILE_VOLUME_IS_COMPRESSED      0x00008000
+#define FILE_SUPPORTS_REMOTE_STORAGE   0x00000100
+#define FILE_SUPPORTS_REPARSE_POINTS   0x00000080
+#define FILE_SUPPORTS_SPARSE_FILES     0x00000040
+#define FILE_VOLUME_QUOTAS             0x00000020
+#define FILE_FILE_COMPRESSION          0x00000010
+#define FILE_PERSISTENT_ACLS           0x00000008
+#define FILE_UNICODE_ON_DISK           0x00000004
+#define FILE_CASE_PRESERVED_NAMES      0x00000002
+#define FILE_CASE_SENSITIVE_SEARCH     0x00000001
 typedef struct {
        __le32 Attributes;
        __le32 MaxPathNameComponentLength;
index 4ab2f79ffa7a4eb7f2e6ac3a2a8d67131d8f8080..d5fec92e036068b98bf2e3426e3f8d159b70e048 100644 (file)
@@ -762,7 +762,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
 
        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cifs_file_list_lock);
-       if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
+       if (server->ops->dir_needs_close(cfile)) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
index 426d6c6ad8bfacfa188f2eb2dfb5385620559f44..949ec909ec9ab94747ed332cc96cdcdbc29fd47b 100644 (file)
@@ -1727,6 +1727,12 @@ unlink_target:
                                    target_dentry, to_name);
        }
 
+       /* force revalidate to go get info when needed */
+       CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
+
+       source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
+               target_dir->i_mtime = current_fs_time(source_dir->i_sb);
+
 cifs_rename_exit:
        kfree(info_buf_source);
        kfree(from_name);
index 81340c6253eb36bcaf922c8e1b955e5d493099ca..b7415d596dbd478926d75cdaca022ac8960a65c3 100644 (file)
@@ -574,13 +574,6 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
                cinode->oplock = 0;
 }
 
-static int
-cifs_oplock_break_wait(void *unused)
-{
-       schedule();
-       return signal_pending(current) ? -ERESTARTSYS : 0;
-}
-
 /*
  * We wait for oplock breaks to be processed before we attempt to perform
  * writes.
index b15862e0f68c3c749a7cf6278dc6d3666ba71ccc..798c80a41c886b3fba7e66f8264066902d35b8cb 100644 (file)
@@ -593,7 +593,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
                /* close and restart search */
                cifs_dbg(FYI, "search backing up - close and restart search\n");
                spin_lock(&cifs_file_list_lock);
-               if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
+               if (server->ops->dir_needs_close(cfile)) {
                        cfile->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
                        if (server->ops->close)
index 5e8c22d6c7b9dc96f4079a9579fd8b5c68143af8..1a6df4b03f67cae97688d7a9578d7317a88419bf 100644 (file)
@@ -1015,6 +1015,12 @@ cifs_wp_retry_size(struct inode *inode)
        return CIFS_SB(inode->i_sb)->wsize;
 }
 
+static bool
+cifs_dir_needs_close(struct cifsFileInfo *cfile)
+{
+       return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -1086,6 +1092,7 @@ struct smb_version_operations smb1_operations = {
        .create_mf_symlink = cifs_create_mf_symlink,
        .is_read_op = cifs_is_read_op,
        .wp_retry_size = cifs_wp_retry_size,
+       .dir_needs_close = cifs_dir_needs_close,
 #ifdef CONFIG_CIFS_XATTR
        .query_all_EAs = CIFSSMBQAllEAs,
        .set_EA = CIFSSMBSetEA,
index e31a9dfdcd39a8cf7a68d18a45922f9001b918d6..af59d03db49280488a33abe5ec372599aa8ad4ed 100644 (file)
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
        {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
        {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
-       {STATUS_NO_MORE_FILES, -EIO, "STATUS_NO_MORE_FILES"},
+       {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
        {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
        {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
        {STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
@@ -298,7 +298,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_INVALID_PARAMETER, -EINVAL, "STATUS_INVALID_PARAMETER"},
        {STATUS_NO_SUCH_DEVICE, -ENODEV, "STATUS_NO_SUCH_DEVICE"},
        {STATUS_NO_SUCH_FILE, -ENOENT, "STATUS_NO_SUCH_FILE"},
-       {STATUS_INVALID_DEVICE_REQUEST, -EIO, "STATUS_INVALID_DEVICE_REQUEST"},
+       {STATUS_INVALID_DEVICE_REQUEST, -EOPNOTSUPP, "STATUS_INVALID_DEVICE_REQUEST"},
        {STATUS_END_OF_FILE, -ENODATA, "STATUS_END_OF_FILE"},
        {STATUS_WRONG_VOLUME, -EIO, "STATUS_WRONG_VOLUME"},
        {STATUS_NO_MEDIA_IN_DEVICE, -EIO, "STATUS_NO_MEDIA_IN_DEVICE"},
index f2e6ac29a8d661b82f4ba1712dee36a842d6f7cd..4aa7a0f07d6eace61da47500bb6e83a5bd39e294 100644 (file)
@@ -178,9 +178,24 @@ smb2_check_message(char *buf, unsigned int length)
                /* Windows 7 server returns 24 bytes more */
                if (clc_len + 20 == len && command == SMB2_OPLOCK_BREAK_HE)
                        return 0;
-               /* server can return one byte more */
+               /* server can return one byte more due to implied bcc[0] */
                if (clc_len == 4 + len + 1)
                        return 0;
+
+               /*
+                * MacOS server pads after SMB2.1 write response with 3 bytes
+                * of junk. Other servers match RFC1001 len to actual
+                * SMB2/SMB3 frame length (header + smb2 response specific data).
+                * Log the server error (once), but allow it and continue
+                * since the frame is parseable.
+                */
+               if (clc_len < 4 /* RFC1001 header size */ + len) {
+                       printk_once(KERN_WARNING
+                               "SMB2 server sent bad RFC1001 len %d not %d\n",
+                               len, clc_len - 4);
+                       return 0;
+               }
+
                return 1;
        }
        return 0;
index 77f8aeb9c2fc7ecb9a2c45785a65d6e9a45700fa..5a48aa290dfe83fdab6f9af322aadcdd0ebe3b3b 100644 (file)
@@ -731,11 +731,72 @@ smb2_sync_write(const unsigned int xid, struct cifsFileInfo *cfile,
        return SMB2_write(xid, parms, written, iov, nr_segs);
 }
 
+/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
+static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
+               struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
+{
+       struct cifsInodeInfo *cifsi;
+       int rc;
+
+       cifsi = CIFS_I(inode);
+
+       /* if file already sparse don't bother setting sparse again */
+       if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
+               return true; /* already sparse */
+
+       if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
+               return true; /* already not sparse */
+
+       /*
+        * We can't check for sparse support on the share the usual way, via
+        * the FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share,
+        * since the Samba server doesn't set that flag on the share yet still
+        * supports the set-sparse FSCTL and reports sparse correctly in the
+        * file attributes. If setting sparse fails, though, we mark the
+        * server as not supporting sparse files for this share, to avoid
+        * repeatedly sending the unsupported fsctl to the server every time
+        * the file is extended.
+        */
+       if (tcon->broken_sparse_sup)
+               return false;
+
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
+                       true /* is_fctl */, &setsparse, 1, NULL, NULL);
+       if (rc) {
+               tcon->broken_sparse_sup = true;
+               cifs_dbg(FYI, "set sparse rc = %d\n", rc);
+               return false;
+       }
+
+       if (setsparse)
+               cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
+       else
+               cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
+
+       return true;
+}
+
 static int
 smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
                   struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
 {
        __le64 eof = cpu_to_le64(size);
+       struct inode *inode;
+
+       /*
+        * If extending the file by more than a page or two (8k here), make it
+        * sparse. Many Linux filesystems make files sparse by default when
+        * extending them via ftruncate.
+        */
+       inode = cfile->dentry->d_inode;
+
+       if (!set_alloc && (size > inode->i_size + 8192)) {
+               __u8 set_sparse = 1;
+
+               /* whether set sparse succeeds or not, extend the file */
+               smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
+       }
+
        return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
                            cfile->fid.volatile_fid, cfile->pid, &eof, false);
 }
@@ -954,6 +1015,105 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        return rc;
 }
 
+static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+                           loff_t offset, loff_t len, bool keep_size)
+{
+       struct inode *inode;
+       struct cifsInodeInfo *cifsi;
+       struct cifsFileInfo *cfile = file->private_data;
+       struct file_zero_data_information fsctl_buf;
+       long rc;
+       unsigned int xid;
+
+       xid = get_xid();
+
+       inode = cfile->dentry->d_inode;
+       cifsi = CIFS_I(inode);
+
+       /* without an oplock we can't be sure whether this asks to extend the size */
+       if (!CIFS_CACHE_READ(cifsi))
+               if (keep_size == false)
+                       return -EOPNOTSUPP;
+
+       /*
+        * Must check whether the file is sparse, since fallocate -z (zero
+        * range) assumes a non-sparse allocation.
+        */
+       if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE))
+               return -EOPNOTSUPP;
+
+       /*
+        * need to make sure we are not asked to extend the file since the SMB3
+        * fsctl does not change the file size. In the future we could change
+        * this to zero the first part of the range then set the file size
+        * which for a non sparse file would zero the newly extended range
+        */
+       if (keep_size == false)
+               if (i_size_read(inode) < offset + len)
+                       return -EOPNOTSUPP;
+
+       cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+       fsctl_buf.FileOffset = cpu_to_le64(offset);
+       fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+                       true /* is_fctl */, (char *)&fsctl_buf,
+                       sizeof(struct file_zero_data_information), NULL, NULL);
+       free_xid(xid);
+       return rc;
+}
+
+static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+                           loff_t offset, loff_t len)
+{
+       struct inode *inode;
+       struct cifsInodeInfo *cifsi;
+       struct cifsFileInfo *cfile = file->private_data;
+       struct file_zero_data_information fsctl_buf;
+       long rc;
+       unsigned int xid;
+       __u8 set_sparse = 1;
+
+       xid = get_xid();
+
+       inode = cfile->dentry->d_inode;
+       cifsi = CIFS_I(inode);
+
+       /* Need to make file sparse, if not already, before freeing range. */
+       /* Consider adding equivalent for compressed since it could also work */
+       if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse))
+               return -EOPNOTSUPP;
+
+       cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+       fsctl_buf.FileOffset = cpu_to_le64(offset);
+       fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+                       true /* is_fctl */, (char *)&fsctl_buf,
+                       sizeof(struct file_zero_data_information), NULL, NULL);
+       free_xid(xid);
+       return rc;
+}
+
+static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
+                          loff_t off, loff_t len)
+{
+       /* KEEP_SIZE already checked for by do_fallocate */
+       if (mode & FALLOC_FL_PUNCH_HOLE)
+               return smb3_punch_hole(file, tcon, off, len);
+       else if (mode & FALLOC_FL_ZERO_RANGE) {
+               if (mode & FALLOC_FL_KEEP_SIZE)
+                       return smb3_zero_range(file, tcon, off, len, true);
+               return smb3_zero_range(file, tcon, off, len, false);
+       }
+
+       return -EOPNOTSUPP;
+}
+
 static void
 smb2_downgrade_oplock(struct TCP_Server_Info *server,
                        struct cifsInodeInfo *cinode, bool set_level2)
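To tie this back to the VFS side, the cases smb3_fallocate() accepts map onto the following userspace calls (illustrative sketch only; fd, off and len are placeholders, and any other mode combination falls through to -EOPNOTSUPP):

	#define _GNU_SOURCE
	#include <fcntl.h>	/* fallocate(), FALLOC_FL_* */

	/* free the byte range, keep the file size -> smb3_punch_hole() */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);

	/* zero the range without changing i_size -> smb3_zero_range(..., true) */
	fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, off, len);

	/* zero the range, size may change -> smb3_zero_range(..., false),
	 * which above refuses the call if it would actually extend i_size */
	fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len);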
@@ -1161,6 +1321,12 @@ smb2_wp_retry_size(struct inode *inode)
                     SMB2_MAX_BUFFER_SIZE);
 }
 
+static bool
+smb2_dir_needs_close(struct cifsFileInfo *cfile)
+{
+       return !cfile->invalidHandle;
+}
+
 struct smb_version_operations smb20_operations = {
        .compare_fids = smb2_compare_fids,
        .setup_request = smb2_setup_request,
@@ -1236,6 +1402,7 @@ struct smb_version_operations smb20_operations = {
        .parse_lease_buf = smb2_parse_lease_buf,
        .clone_range = smb2_clone_range,
        .wp_retry_size = smb2_wp_retry_size,
+       .dir_needs_close = smb2_dir_needs_close,
 };
 
 struct smb_version_operations smb21_operations = {
@@ -1313,6 +1480,7 @@ struct smb_version_operations smb21_operations = {
        .parse_lease_buf = smb2_parse_lease_buf,
        .clone_range = smb2_clone_range,
        .wp_retry_size = smb2_wp_retry_size,
+       .dir_needs_close = smb2_dir_needs_close,
 };
 
 struct smb_version_operations smb30_operations = {
@@ -1393,6 +1561,8 @@ struct smb_version_operations smb30_operations = {
        .clone_range = smb2_clone_range,
        .validate_negotiate = smb3_validate_negotiate,
        .wp_retry_size = smb2_wp_retry_size,
+       .dir_needs_close = smb2_dir_needs_close,
+       .fallocate = smb3_fallocate,
 };
 
 struct smb_version_values smb20_values = {
index 42ebc1a8be6cb241c5ee77e1944f063ae15a472d..fa0dd044213b9dc7dab5ec26d37c469627e9ca49 100644 (file)
@@ -907,7 +907,8 @@ tcon_exit:
 tcon_error_exit:
        if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
                cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
-               tcon->bad_network_name = true;
+               if (tcon)
+                       tcon->bad_network_name = true;
        }
        goto tcon_exit;
 }
@@ -1224,7 +1225,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 
        cifs_dbg(FYI, "SMB2 IOCTL\n");
 
-       *out_data = NULL;
+       if (out_data != NULL)
+               *out_data = NULL;
+
        /* zero out returned data len, in case of error */
        if (plen)
                *plen = 0;
@@ -2177,6 +2180,10 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
 
        if (rc) {
+               if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
+                       srch_inf->endOfSearch = true;
+                       rc = 0;
+               }
                cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
                goto qdir_exit;
        }
@@ -2214,11 +2221,6 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        else
                cifs_dbg(VFS, "illegal search buffer type\n");
 
-       if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
-               srch_inf->endOfSearch = 1;
-       else
-               srch_inf->endOfSearch = 0;
-
        return rc;
 
 qdir_exit:
index 69f3595d3952b74a595650103a791aec839f69a5..fbe486c285a90aff757a43569330fb32c9bbb50d 100644 (file)
@@ -573,6 +573,12 @@ struct copychunk_ioctl {
        __u32 Reserved2;
 } __packed;
 
+/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
+struct file_zero_data_information {
+       __le64  FileOffset;
+       __le64  BeyondFinalZero;
+} __packed;
+
 struct copychunk_ioctl_rsp {
        __le32 ChunksWritten;
        __le32 ChunkBytesWritten;
index 0e538b5c96221f61f55f9a8bff58d6d88cfc836b..83efa59535bedf988292548c0fcba1526fcdde1c 100644 (file)
@@ -63,7 +63,7 @@
 #define FSCTL_SET_OBJECT_ID_EXTENDED 0x000900BC /* BB add struct */
 #define FSCTL_CREATE_OR_GET_OBJECT_ID 0x000900C0 /* BB add struct */
 #define FSCTL_SET_SPARSE             0x000900C4 /* BB add struct */
-#define FSCTL_SET_ZERO_DATA          0x000900C8 /* BB add struct */
+#define FSCTL_SET_ZERO_DATA          0x000980C8
 #define FSCTL_SET_ENCRYPTION         0x000900D7 /* BB add struct */
 #define FSCTL_ENCRYPTION_FSCTL_IO    0x000900DB /* BB add struct */
 #define FSCTL_WRITE_RAW_ENCRYPTED    0x000900DF /* BB add struct */
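For reference, the corrected constant is the standard CTL_CODE() packing with FILE_WRITE_ACCESS instead of FILE_ANY_ACCESS (derivation shown for illustration):

/*
 * CTL_CODE(DeviceType, Function, Method, Access)
 *	= (DeviceType << 16) | (Access << 14) | (Function << 2) | Method
 *
 * FSCTL_SET_ZERO_DATA:
 *	  (FILE_DEVICE_FILE_SYSTEM 0x0009 << 16) = 0x00090000
 *	| (FILE_WRITE_ACCESS       0x0002 << 14) = 0x00008000
 *	| (function                    50 <<  2) = 0x000000C8
 *	| (METHOD_BUFFERED                    0) = 0x00000000
 *	                                           ----------
 *	                                           0x000980C8
 *
 * The old 0x000900C8 had FILE_ANY_ACCESS (0) in the access bits.
 */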
index 08cdfe5461e3f726650c1216f801d6620694ea5f..622e8824902432e5aae886ce68dca464f504fda3 100644 (file)
@@ -2828,8 +2828,9 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
                 */
                overhead += ngroups * (2 + sbi->s_itb_per_group);
 
-               /* Add the journal blocks as well */
-                overhead += sbi->s_journal->j_maxlen;
+               /* Add the internal journal blocks as well */
+               if (sbi->s_journal && !sbi->journal_bdev)
+                       overhead += sbi->s_journal->j_maxlen;
 
                sbi->s_overhead_last = overhead;
                smp_wmb();
index 4556ce1af5b04f5a4f0cd0b4e1ae0515435a1857..5ddaf8625d3b7a1ab3d0128a91dbd3b84b8b9b0b 100644 (file)
@@ -61,7 +61,7 @@ static void isofs_put_super(struct super_block *sb)
        return;
 }
 
-static int isofs_read_inode(struct inode *);
+static int isofs_read_inode(struct inode *, int relocated);
 static int isofs_statfs (struct dentry *, struct kstatfs *);
 
 static struct kmem_cache *isofs_inode_cachep;
@@ -1259,7 +1259,7 @@ out_toomany:
        goto out;
 }
 
-static int isofs_read_inode(struct inode *inode)
+static int isofs_read_inode(struct inode *inode, int relocated)
 {
        struct super_block *sb = inode->i_sb;
        struct isofs_sb_info *sbi = ISOFS_SB(sb);
@@ -1404,7 +1404,7 @@ static int isofs_read_inode(struct inode *inode)
         */
 
        if (!high_sierra) {
-               parse_rock_ridge_inode(de, inode);
+               parse_rock_ridge_inode(de, inode, relocated);
                /* if we want uid/gid set, override the rock ridge setting */
                if (sbi->s_uid_set)
                        inode->i_uid = sbi->s_uid;
@@ -1483,9 +1483,10 @@ static int isofs_iget5_set(struct inode *ino, void *data)
  * offset that point to the underlying meta-data for the inode.  The
  * code below is otherwise similar to the iget() code in
  * include/linux/fs.h */
-struct inode *isofs_iget(struct super_block *sb,
-                        unsigned long block,
-                        unsigned long offset)
+struct inode *__isofs_iget(struct super_block *sb,
+                          unsigned long block,
+                          unsigned long offset,
+                          int relocated)
 {
        unsigned long hashval;
        struct inode *inode;
@@ -1507,7 +1508,7 @@ struct inode *isofs_iget(struct super_block *sb,
                return ERR_PTR(-ENOMEM);
 
        if (inode->i_state & I_NEW) {
-               ret = isofs_read_inode(inode);
+               ret = isofs_read_inode(inode, relocated);
                if (ret < 0) {
                        iget_failed(inode);
                        inode = ERR_PTR(ret);
index 99167238518d61a30c4a5e6bfc838291d6206a5d..0ac4c1f73fbd6c2616e04ad6310995426c03da68 100644 (file)
@@ -107,7 +107,7 @@ extern int iso_date(char *, int);
 
 struct inode;          /* To make gcc happy */
 
-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
+extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
 extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
 extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
 
@@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int
 extern struct buffer_head *isofs_bread(struct inode *, sector_t);
 extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
 
-extern struct inode *isofs_iget(struct super_block *sb,
-                                unsigned long block,
-                                unsigned long offset);
+struct inode *__isofs_iget(struct super_block *sb,
+                          unsigned long block,
+                          unsigned long offset,
+                          int relocated);
+
+static inline struct inode *isofs_iget(struct super_block *sb,
+                                      unsigned long block,
+                                      unsigned long offset)
+{
+       return __isofs_iget(sb, block, offset, 0);
+}
+
+static inline struct inode *isofs_iget_reloc(struct super_block *sb,
+                                            unsigned long block,
+                                            unsigned long offset)
+{
+       return __isofs_iget(sb, block, offset, 1);
+}
 
 /* Because the inode number is no longer relevant to finding the
  * underlying meta-data for an inode, we are free to choose a more
index c0bf42472e408fd16911cee33f3d9079943aa46a..f488bbae541ac8d5db4eb7e963c33452ebb3e937 100644 (file)
@@ -288,12 +288,16 @@ eio:
        goto out;
 }
 
+#define RR_REGARD_XA 1
+#define RR_RELOC_DE 2
+
 static int
 parse_rock_ridge_inode_internal(struct iso_directory_record *de,
-                               struct inode *inode, int regard_xa)
+                               struct inode *inode, int flags)
 {
        int symlink_len = 0;
        int cnt, sig;
+       unsigned int reloc_block;
        struct inode *reloc;
        struct rock_ridge *rr;
        int rootflag;
@@ -305,7 +309,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
 
        init_rock_state(&rs, inode);
        setup_rock_ridge(de, inode, &rs);
-       if (regard_xa) {
+       if (flags & RR_REGARD_XA) {
                rs.chr += 14;
                rs.len -= 14;
                if (rs.len < 0)
@@ -485,12 +489,22 @@ repeat:
                                        "relocated directory\n");
                        goto out;
                case SIG('C', 'L'):
-                       ISOFS_I(inode)->i_first_extent =
-                           isonum_733(rr->u.CL.location);
-                       reloc =
-                           isofs_iget(inode->i_sb,
-                                      ISOFS_I(inode)->i_first_extent,
-                                      0);
+                       if (flags & RR_RELOC_DE) {
+                               printk(KERN_ERR
+                                      "ISOFS: Recursive directory relocation "
+                                      "is not supported\n");
+                               goto eio;
+                       }
+                       reloc_block = isonum_733(rr->u.CL.location);
+                       if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
+                           ISOFS_I(inode)->i_iget5_offset == 0) {
+                               printk(KERN_ERR
+                                      "ISOFS: Directory relocation points to "
+                                      "itself\n");
+                               goto eio;
+                       }
+                       ISOFS_I(inode)->i_first_extent = reloc_block;
+                       reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
                        if (IS_ERR(reloc)) {
                                ret = PTR_ERR(reloc);
                                goto out;
@@ -637,9 +651,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
        return rpnt;
 }
 
-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
+int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
+                          int relocated)
 {
-       int result = parse_rock_ridge_inode_internal(de, inode, 0);
+       int flags = relocated ? RR_RELOC_DE : 0;
+       int result = parse_rock_ridge_inode_internal(de, inode, flags);
 
        /*
         * if rockridge flag was reset and we didn't look for attributes
@@ -647,7 +663,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
         */
        if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
            && (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
-               result = parse_rock_ridge_inode_internal(de, inode, 14);
+               result = parse_rock_ridge_inode_internal(de, inode,
+                                                        flags | RR_REGARD_XA);
        }
        return result;
 }
index ba491926df5f7df2db1e224c96e7e070bd36dcec..be7cbce6e4c7a71c0fbd22a03193e20c9fec55ce 100644 (file)
@@ -116,7 +116,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
                if (atomic_read(&c->io_count) == 0)
                        break;
                ret = nfs_wait_bit_killable(&q.key);
-       } while (atomic_read(&c->io_count) != 0);
+       } while (atomic_read(&c->io_count) != 0 && !ret);
        finish_wait(wq, &q.wait);
        return ret;
 }
@@ -139,26 +139,49 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
+ * @nonblock - if true don't block waiting for lock
  *
  * this lock must be held if modifying the page group list
  *
- * returns result from wait_on_bit_lock: 0 on success, < 0 on error
+ * return 0 on success, < 0 on error: -EAGAIN if nonblocking, or the
+ * result from wait_on_bit_lock
+ *
+ * NOTE: a call with nonblock=false will always return with the lock bit
+ *       set (see fs/buffer.c and other uses of wait_on_bit_lock with
+ *       TASK_UNINTERRUPTIBLE), so there is no need to check the result.
  */
 int
-nfs_page_group_lock(struct nfs_page *req, bool wait)
+nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 {
        struct nfs_page *head = req->wb_head;
-       int ret;
 
        WARN_ON_ONCE(head != head->wb_head);
 
-       do {
-               ret = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-                       TASK_UNINTERRUPTIBLE);
-       } while (wait && ret != 0);
+       if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
+               return 0;
 
-       WARN_ON_ONCE(ret > 0);
-       return ret;
+       if (!nonblock)
+               return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+                               TASK_UNINTERRUPTIBLE);
+
+       return -EAGAIN;
+}
+
+/*
+ * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
+ * @req - a request in the group
+ *
+ * This is a blocking call to wait for the group lock to be cleared.
+ */
+void
+nfs_page_group_lock_wait(struct nfs_page *req)
+{
+       struct nfs_page *head = req->wb_head;
+
+       WARN_ON_ONCE(head != head->wb_head);
+
+       wait_on_bit(&head->wb_flags, PG_HEADLOCK,
+               TASK_UNINTERRUPTIBLE);
 }
 
 /*
@@ -219,7 +242,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 {
        bool ret;
 
-       nfs_page_group_lock(req, true);
+       nfs_page_group_lock(req, false);
        ret = nfs_page_group_sync_on_bit_locked(req, bit);
        nfs_page_group_unlock(req);
 
@@ -701,10 +724,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
                     struct nfs_pgio_header *hdr)
 {
        struct nfs_page         *req;
-       struct page             **pages;
+       struct page             **pages,
+                               *last_page;
        struct list_head *head = &desc->pg_list;
        struct nfs_commit_info cinfo;
-       unsigned int pagecount;
+       unsigned int pagecount, pageused;
 
        pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
        if (!nfs_pgarray_set(&hdr->page_array, pagecount))
@@ -712,12 +736,23 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 
        nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
        pages = hdr->page_array.pagevec;
+       last_page = NULL;
+       pageused = 0;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &hdr->pages);
-               *pages++ = req->wb_page;
+
+               if (WARN_ON_ONCE(pageused >= pagecount))
+                       return nfs_pgio_error(desc, hdr);
+
+               if (!last_page || last_page != req->wb_page) {
+                       *pages++ = last_page = req->wb_page;
+                       pageused++;
+               }
        }
+       if (WARN_ON_ONCE(pageused != pagecount))
+               return nfs_pgio_error(desc, hdr);
 
        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
            (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
@@ -788,6 +823,14 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                        return false;
                if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
                        return false;
+               if (req->wb_page == prev->wb_page) {
+                       if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
+                               return false;
+               } else {
+                       if (req->wb_pgbase != 0 ||
+                           prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+                               return false;
+               }
        }
        size = pgio->pg_ops->pg_test(pgio, prev, req);
        WARN_ON_ONCE(size > req->wb_bytes);
@@ -858,13 +901,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
        struct nfs_page *subreq;
        unsigned int bytes_left = 0;
        unsigned int offset, pgbase;
-       int ret;
 
-       ret = nfs_page_group_lock(req, false);
-       if (ret < 0) {
-               desc->pg_error = ret;
-               return 0;
-       }
+       nfs_page_group_lock(req, false);
 
        subreq = req;
        bytes_left = subreq->wb_bytes;
@@ -886,11 +924,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                        if (desc->pg_recoalesce)
                                return 0;
                        /* retry add_request for this subreq */
-                       ret = nfs_page_group_lock(req, false);
-                       if (ret < 0) {
-                               desc->pg_error = ret;
-                               return 0;
-                       }
+                       nfs_page_group_lock(req, false);
                        continue;
                }
 
index e3b5cf28bdc5c2dbfba5d3c16b06b5724afe6c60..175d5d073ccf350db485daf81ab3030555d80a60 100644 (file)
@@ -241,7 +241,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
        unsigned int pos = 0;
        unsigned int len = nfs_page_length(req->wb_page);
 
-       nfs_page_group_lock(req, true);
+       nfs_page_group_lock(req, false);
 
        do {
                tmp = nfs_page_group_search_locked(req->wb_head, pos);
@@ -478,10 +478,23 @@ try_again:
                return NULL;
        }
 
-       /* lock each request in the page group */
-       ret = nfs_page_group_lock(head, false);
-       if (ret < 0)
+       /* holding inode lock, so always make a non-blocking call to try the
+        * page group lock */
+       ret = nfs_page_group_lock(head, true);
+       if (ret < 0) {
+               spin_unlock(&inode->i_lock);
+
+               if (!nonblock && ret == -EAGAIN) {
+                       nfs_page_group_lock_wait(head);
+                       nfs_release_request(head);
+                       goto try_again;
+               }
+
+               nfs_release_request(head);
                return ERR_PTR(ret);
+       }
+
+       /* lock each request in the page group */
        subreq = head;
        do {
                /*
index 9737cba1357d754f4b1ea8927af83326865b8e54..83a06001742bfaab6781bc26509517196fc3af16 100644 (file)
@@ -1014,7 +1014,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
        if (!fi)
-               goto out_no_entry;
+               goto out_fail;
        cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
        cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
        if (UDF_SB(inode->i_sb)->s_lvid_bh) {
@@ -1036,6 +1036,7 @@ out:
 
 out_no_entry:
        up_write(&iinfo->i_data_sem);
+out_fail:
        inode_dec_link_count(inode);
        iput(inode);
        goto out;
index 6dfd64b3a6042d34176e85f25e73ed477a7c600e..e973540cd15baac6ab16836cdea2ce1df5ab10c7 100644 (file)
@@ -17,6 +17,7 @@
        {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index 6f76277baf391733a3f9303f4f740b7151c5e96b..61219b9b34458862c9be600652006d8ac84b8705 100644 (file)
@@ -16,7 +16,6 @@
 #define PHY_ID_BCM7366                 0x600d8490
 #define PHY_ID_BCM7439                 0x600d8480
 #define PHY_ID_BCM7445                 0x600d8510
-#define PHY_ID_BCM7XXX_28              0x600d8400
 
 #define PHY_BCM_OUI_MASK               0xfffffc00
 #define PHY_BCM_OUI_1                  0x00206000
index 6bb5e3f2a3b40741363838f6916db7886af16fe0..f0b0edbf55a94e4e4594ca65270e80bacd2e8f61 100644 (file)
@@ -102,6 +102,15 @@ enum {
        FTRACE_OPS_FL_DELETED                   = 1 << 8,
 };
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* The hash used to know what functions the callbacks trace */
+struct ftrace_ops_hash {
+       struct ftrace_hash              *notrace_hash;
+       struct ftrace_hash              *filter_hash;
+       struct mutex                    regex_lock;
+};
+#endif
+
 /*
  * Note, ftrace_ops can be referenced outside of RCU protection.
  * (Although, for perf, the control ops prevent that). If ftrace_ops is
@@ -121,10 +130,9 @@ struct ftrace_ops {
        int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        int                             nr_trampolines;
-       struct ftrace_hash              *notrace_hash;
-       struct ftrace_hash              *filter_hash;
+       struct ftrace_ops_hash          local_hash;
+       struct ftrace_ops_hash          *func_hash;
        struct ftrace_hash              *tramp_hash;
-       struct mutex                    regex_lock;
        unsigned long                   trampoline;
 #endif
 };
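
The ftrace.h hunk above is a data-structure move: the filter/notrace hashes and their regex_lock now live in a struct of their own, and each ftrace_ops carries both a private copy (local_hash) and a pointer (func_hash) that may aim at another ops' hashes. A toy sketch of that sharing scheme (field names trimmed, counts made up):

#include <stdio.h>

struct hash { int nr_entries; };
struct ops { struct hash local_hash; struct hash *func_hash; };

int main(void)
{
        struct ops global = { .local_hash = { 3 } };
        struct ops graph;

        global.func_hash = &global.local_hash;
        graph.func_hash = &global.local_hash;   /* share, don't copy */

        printf("%d %d\n", global.func_hash->nr_entries,
               graph.func_hash->nr_entries);    /* prints: 3 3 */
        return 0;
}

Later in this pull, graph_ops relies on exactly this indirection via ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash), so the graph tracer filters on the same function set as global_ops without copying the hashes.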
index b7ce0c64c6f3186e4acf37857db3ed3a8390b1e7..c7e17de732f3eb6b4f221bb16b6a9753a9d3a85d 100644 (file)
@@ -16,8 +16,6 @@ struct device;
  */
 struct gpio_desc;
 
-#ifdef CONFIG_GPIOLIB
-
 #define GPIOD_FLAGS_BIT_DIR_SET                BIT(0)
 #define GPIOD_FLAGS_BIT_DIR_OUT                BIT(1)
 #define GPIOD_FLAGS_BIT_DIR_VAL                BIT(2)
@@ -34,6 +32,8 @@ enum gpiod_flags {
                          GPIOD_FLAGS_BIT_DIR_VAL,
 };
 
+#ifdef CONFIG_GPIOLIB
+
 /* Acquire and dispose GPIOs */
 struct gpio_desc *__must_check __gpiod_get(struct device *dev,
                                         const char *con_id,
index ea507665896cfa774cf3fe7226b9da77d7bc9622..a95efeb53a8b76da08450f47f614f9713dc89a9f 100644 (file)
@@ -577,16 +577,20 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
 }
 #endif /* CONFIG_OF */
 
-#ifdef CONFIG_I2C_ACPI
-int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
-void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
+#ifdef CONFIG_ACPI
 void acpi_i2c_register_devices(struct i2c_adapter *adap);
 #else
 static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_I2C_OPREGION
+int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
+void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
+#else
 static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
 { }
 static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
 { return 0; }
-#endif
+#endif /* CONFIG_ACPI_I2C_OPREGION */
 
 #endif /* _LINUX_I2C_H */
index 6ad2bbcad4050c12105778c3011b5196fcbf4b9e..6c3e06ee2fb7af63cc5a87314baec589631619af 100644 (file)
@@ -123,6 +123,7 @@ extern  int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
 extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern void nfs_page_group_lock_wait(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
 
index 3d6003de4b0d4e4d367231cf3ee457cf612bf5aa..a1ba6a5ccdd62c2275eb63106d2fc9d78e4cee3c 100644 (file)
@@ -62,6 +62,7 @@ to_seqno_fence(struct fence *fence)
  * @context: the execution context this fence is a part of
  * @seqno_ofs: the offset within @sync_buf
  * @seqno: the sequence # to signal on
+ * @cond: fence wait condition
  * @ops: the fence_ops for operations on this seqno fence
  *
  * This function initializes a struct seqno_fence with passed parameters,
index f1afd607f043cc6aa0a9da0649b1dc92eeaad275..11d11bc5c78f2d480ecac331cbf5d73f58001bb7 100644 (file)
@@ -703,9 +703,11 @@ __SYSCALL(__NR_renameat2, sys_renameat2)
 __SYSCALL(__NR_seccomp, sys_seccomp)
 #define __NR_getrandom 278
 __SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 279
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
 
 #undef __NR_syscalls
-#define __NR_syscalls 279
+#define __NR_syscalls 280
 
 /*
  * All syscalls below here should go away really,
index 509b2d7a41b7ea88e676409a39d483e5a450f029..fea6099608ef2244d2020fbf5dcfac3c7d886d91 100644 (file)
@@ -944,6 +944,7 @@ struct drm_radeon_cs_chunk {
 };
 
 /* drm_radeon_cs_reloc.flags */
+#define RADEON_RELOC_PRIO_MASK         (0xf << 0)
 
 struct drm_radeon_cs_reloc {
        uint32_t                handle;
index 1cf24b3e42ece4f2de8854bd882f127165866af6..f9c1ed002dbc81997e6e062f358c57b6e459ce84 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/mman.h>
+#include <linux/compat.h>
 
 #include "internal.h"
 
@@ -3717,6 +3718,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static long perf_compat_ioctl(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       switch (_IOC_NR(cmd)) {
+       case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
+       case _IOC_NR(PERF_EVENT_IOC_ID):
+                       /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
+               if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+                       cmd &= ~IOCSIZE_MASK;
+                       cmd |= sizeof(void *) << IOCSIZE_SHIFT;
+               }
+               break;
+       }
+       return perf_ioctl(file, cmd, arg);
+}
+#else
+# define perf_compat_ioctl NULL
+#endif
+
 int perf_event_task_enable(void)
 {
        struct perf_event *event;
@@ -4222,7 +4243,7 @@ static const struct file_operations perf_fops = {
        .read                   = perf_read,
        .poll                   = perf_poll,
        .unlocked_ioctl         = perf_ioctl,
-       .compat_ioctl           = perf_ioctl,
+       .compat_ioctl           = perf_compat_ioctl,
        .mmap                   = perf_mmap,
        .fasync                 = perf_fasync,
 };
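
The compat handler above only needs to rewrite the size field of the command word: PERF_EVENT_IOC_SET_FILTER and PERF_EVENT_IOC_ID carry a pointer argument, so a 32-bit task encodes a 4-byte size where the 64-bit kernel expects 8. A small demonstration of that fixup using the uapi ioctl macros (the '$'/0x42 command below is made up, not a real perf ioctl):

#include <stdio.h>
#include <linux/ioctl.h>

#define DEMO_CMD_32  _IOC(_IOC_WRITE, '$', 0x42, 4)     /* 32-bit void * */
#define DEMO_CMD_64  _IOC(_IOC_WRITE, '$', 0x42, 8)     /* 64-bit void * */

int main(void)
{
        unsigned int cmd = DEMO_CMD_32;

        printf("before: size=%u\n", _IOC_SIZE(cmd));
        if (_IOC_SIZE(cmd) == 4) {              /* sizeof(compat_uptr_t) */
                cmd &= ~IOCSIZE_MASK;
                cmd |= 8u << IOCSIZE_SHIFT;     /* sizeof(void *) */
        }
        printf("after: size=%u, matches 64-bit command: %d\n",
               _IOC_SIZE(cmd), cmd == DEMO_CMD_64);
        return 0;
}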
index 734e9a7d280bd22a046566cc40b1edf159f250bf..3995f546d0f3f9c7bc845e7053f7a099088108a2 100644 (file)
@@ -1778,7 +1778,18 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
        unsigned long hash, flags = 0;
        struct kretprobe_instance *ri;
 
-       /*TODO: consider to only swap the RA after the last pre_handler fired */
+       /*
+        * To avoid deadlocks, prohibit return probing in NMI contexts;
+        * just skip the probe and increase the (inexact) 'nmissed'
+        * statistical counter, so that the user is informed that
+        * something happened:
+        */
+       if (unlikely(in_nmi())) {
+               rp->nmissed++;
+               return 0;
+       }
+
+       /* TODO: consider to only swap the RA after the last pre_handler fired */
        hash = hash_ptr(current, KPROBE_HASH_BITS);
        raw_spin_lock_irqsave(&rp->lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
index 1654b12c891a9367b2a8b20be93375990db6f22f..5916a8e59e878d4c6d9fc87bc370949f5501d962 100644 (file)
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-#define INIT_REGEX_LOCK(opsname)       \
-       .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
+#define INIT_OPS_HASH(opsname) \
+       .func_hash              = &opsname.local_hash,                  \
+       .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#define ASSIGN_OPS_HASH(opsname, val) \
+       .func_hash              = val, \
+       .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
-#define INIT_REGEX_LOCK(opsname)
+#define INIT_OPS_HASH(opsname)
+#define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+       INIT_OPS_HASH(ftrace_list_end)
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
@@ -140,7 +146,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
-               mutex_init(&ops->regex_lock);
+               mutex_init(&ops->local_hash.regex_lock);
+               ops->func_hash = &ops->local_hash;
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
 #endif
@@ -899,7 +906,7 @@ static void unregister_ftrace_profiler(void)
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(ftrace_profile_ops)
+       INIT_OPS_HASH(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1081,11 +1088,12 @@ static const struct ftrace_hash empty_hash = {
 #define EMPTY_HASH     ((struct ftrace_hash *)&empty_hash)
 
 static struct ftrace_ops global_ops = {
-       .func                   = ftrace_stub,
-       .notrace_hash           = EMPTY_HASH,
-       .filter_hash            = EMPTY_HASH,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(global_ops)
+       .func                           = ftrace_stub,
+       .local_hash.notrace_hash        = EMPTY_HASH,
+       .local_hash.filter_hash         = EMPTY_HASH,
+       INIT_OPS_HASH(global_ops)
+       .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
+                                         FTRACE_OPS_FL_INITIALIZED,
 };
 
 struct ftrace_page {
@@ -1226,8 +1234,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
        ftrace_ops_init(ops);
-       free_ftrace_hash(ops->filter_hash);
-       free_ftrace_hash(ops->notrace_hash);
+       free_ftrace_hash(ops->func_hash->filter_hash);
+       free_ftrace_hash(ops->func_hash->notrace_hash);
 }
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
@@ -1288,9 +1296,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 }
 
 static void
-ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
-ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
@@ -1342,13 +1350,13 @@ update:
         * Remove the current set, update the hash and add
         * them back.
         */
-       ftrace_hash_rec_disable(ops, enable);
+       ftrace_hash_rec_disable_modify(ops, enable);
 
        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);
 
-       ftrace_hash_rec_enable(ops, enable);
+       ftrace_hash_rec_enable_modify(ops, enable);
 
        return 0;
 }
@@ -1382,8 +1390,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
                return 0;
 #endif
 
-       filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
-       notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+       filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+       notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
 
        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
@@ -1503,25 +1511,38 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
 static void ftrace_remove_tramp(struct ftrace_ops *ops,
                                struct dyn_ftrace *rec)
 {
-       struct ftrace_func_entry *entry;
-
-       entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
-       if (!entry)
+       /* If TRAMP is not set, no ops should have a trampoline for this */
+       if (!(rec->flags & FTRACE_FL_TRAMP))
                return;
 
+       rec->flags &= ~FTRACE_FL_TRAMP;
+
+       if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+            !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
+           ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
+               return;
        /*
         * The tramp_hash entry will be removed at time
         * of update.
         */
        ops->nr_trampolines--;
-       rec->flags &= ~FTRACE_FL_TRAMP;
 }
 
-static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
 {
        struct ftrace_ops *op;
 
+       /* If TRAMP is not set, no ops should have a trampoline for this */
+       if (!(rec->flags & FTRACE_FL_TRAMP))
+               return;
+
        do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /*
+                * This function is called to clear other tramps
+                * not the one that is being updated.
+                */
+               if (op == ops)
+                       continue;
                if (op->nr_trampolines)
                        ftrace_remove_tramp(op, rec);
        } while_for_each_ftrace_op(op);
@@ -1554,14 +1575,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
         *   gets inversed.
         */
        if (filter_hash) {
-               hash = ops->filter_hash;
-               other_hash = ops->notrace_hash;
+               hash = ops->func_hash->filter_hash;
+               other_hash = ops->func_hash->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
-               hash = ops->notrace_hash;
-               other_hash = ops->filter_hash;
+               hash = ops->func_hash->notrace_hash;
+               other_hash = ops->func_hash->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
@@ -1622,13 +1643,10 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                /*
                                 * If we are adding another function callback
                                 * to this function, and the previous had a
-                                * trampoline used, then we need to go back to
-                                * the default trampoline.
+                                * custom trampoline in use, then we need to go
+                                * back to the default trampoline.
                                 */
-                               rec->flags &= ~FTRACE_FL_TRAMP;
-
-                               /* remove trampolines from any ops for this rec */
-                               ftrace_clear_tramps(rec);
+                               ftrace_clear_tramps(rec, ops);
                        }
 
                        /*
@@ -1682,6 +1700,41 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
        __ftrace_hash_rec_update(ops, filter_hash, 1);
 }
 
+static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
+                                         int filter_hash, int inc)
+{
+       struct ftrace_ops *op;
+
+       __ftrace_hash_rec_update(ops, filter_hash, inc);
+
+       if (ops->func_hash != &global_ops.local_hash)
+               return;
+
+       /*
+        * If the ops shares the global_ops hash, then we need to update
+        * all ops that are enabled and use this hash.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /* Already done */
+               if (op == ops)
+                       continue;
+               if (op->func_hash == &global_ops.local_hash)
+                       __ftrace_hash_rec_update(op, filter_hash, inc);
+       } while_for_each_ftrace_op(op);
+}
+
+static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
+                                          int filter_hash)
+{
+       ftrace_hash_rec_update_modify(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
+                                         int filter_hash)
+{
+       ftrace_hash_rec_update_modify(ops, filter_hash, 1);
+}
+
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
        int i;
@@ -1896,8 +1949,8 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
        if (rec->flags & FTRACE_FL_TRAMP) {
                ops = ftrace_find_tramp_ops_new(rec);
                if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
-                       pr_warning("Bad trampoline accounting at: %p (%pS)\n",
-                                   (void *)rec->ip, (void *)rec->ip);
+                       pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
+                               (void *)rec->ip, (void *)rec->ip, rec->flags);
                        /* Ftrace is shutting down, return anything */
                        return (unsigned long)FTRACE_ADDR;
                }
@@ -1964,7 +2017,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
                return ftrace_make_call(rec, ftrace_addr);
 
        case FTRACE_UPDATE_MAKE_NOP:
-               return ftrace_make_nop(NULL, rec, ftrace_addr);
+               return ftrace_make_nop(NULL, rec, ftrace_old_addr);
 
        case FTRACE_UPDATE_MODIFY_CALL:
                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
@@ -2227,7 +2280,10 @@ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
        } while_for_each_ftrace_rec();
 
        /* The number of recs in the hash must match nr_trampolines */
-       FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+       if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
+               pr_warn("count=%ld trampolines=%d\n",
+                       ops->tramp_hash->count,
+                       ops->nr_trampolines);
 
        return 0;
 }
@@ -2436,8 +2492,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
         * Filter_hash being empty will default to trace module.
         * But notrace hash requires a test of individual module functions.
         */
-       return ftrace_hash_empty(ops->filter_hash) &&
-               ftrace_hash_empty(ops->notrace_hash);
+       return ftrace_hash_empty(ops->func_hash->filter_hash) &&
+               ftrace_hash_empty(ops->func_hash->notrace_hash);
 }
 
 /*
@@ -2459,12 +2515,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
                return 0;
 
        /* The function must be in the filter */
-       if (!ftrace_hash_empty(ops->filter_hash) &&
-           !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+       if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+           !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
                return 0;
 
        /* If in notrace hash, we ignore it too */
-       if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+       if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
                return 0;
 
        return 1;
@@ -2785,10 +2841,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
        } else {
                rec = &iter->pg->records[iter->idx++];
                if (((iter->flags & FTRACE_ITER_FILTER) &&
-                    !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
+                    !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
 
                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-                    !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+                    !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
 
                    ((iter->flags & FTRACE_ITER_ENABLED) &&
                     !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -2837,9 +2893,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
         * functions are enabled.
         */
        if ((iter->flags & FTRACE_ITER_FILTER &&
-            ftrace_hash_empty(ops->filter_hash)) ||
+            ftrace_hash_empty(ops->func_hash->filter_hash)) ||
            (iter->flags & FTRACE_ITER_NOTRACE &&
-            ftrace_hash_empty(ops->notrace_hash))) {
+            ftrace_hash_empty(ops->func_hash->notrace_hash))) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
@@ -3001,12 +3057,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        iter->ops = ops;
        iter->flags = flag;
 
-       mutex_lock(&ops->regex_lock);
+       mutex_lock(&ops->func_hash->regex_lock);
 
        if (flag & FTRACE_ITER_NOTRACE)
-               hash = ops->notrace_hash;
+               hash = ops->func_hash->notrace_hash;
        else
-               hash = ops->filter_hash;
+               hash = ops->func_hash->filter_hash;
 
        if (file->f_mode & FMODE_WRITE) {
                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -3041,7 +3097,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                file->private_data = iter;
 
  out_unlock:
-       mutex_unlock(&ops->regex_lock);
+       mutex_unlock(&ops->func_hash->regex_lock);
 
        return ret;
 }
@@ -3279,7 +3335,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 {
        .func           = function_trace_probe_call,
        .flags          = FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(trace_probe_ops)
+       INIT_OPS_HASH(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -3342,7 +3398,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
 {
        struct ftrace_func_probe *entry;
-       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct ftrace_hash *hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
@@ -3359,7 +3415,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        if (WARN_ON(not))
                return -EINVAL;
 
-       mutex_lock(&trace_probe_ops.regex_lock);
+       mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash) {
@@ -3428,7 +3484,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  out_unlock:
        mutex_unlock(&ftrace_lock);
  out:
-       mutex_unlock(&trace_probe_ops.regex_lock);
+       mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
        free_ftrace_hash(hash);
 
        return count;
@@ -3446,7 +3502,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        struct ftrace_func_entry *rec_entry;
        struct ftrace_func_probe *entry;
        struct ftrace_func_probe *p;
-       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct list_head free_list;
        struct ftrace_hash *hash;
        struct hlist_node *tmp;
@@ -3468,7 +3524,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        return;
        }
 
-       mutex_lock(&trace_probe_ops.regex_lock);
+       mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
@@ -3521,7 +3577,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        mutex_unlock(&ftrace_lock);
                
  out_unlock:
-       mutex_unlock(&trace_probe_ops.regex_lock);
+       mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
        free_ftrace_hash(hash);
 }
 
@@ -3717,12 +3773,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
-       mutex_lock(&ops->regex_lock);
+       mutex_lock(&ops->func_hash->regex_lock);
 
        if (enable)
-               orig_hash = &ops->filter_hash;
+               orig_hash = &ops->func_hash->filter_hash;
        else
-               orig_hash = &ops->notrace_hash;
+               orig_hash = &ops->func_hash->notrace_hash;
 
        if (reset)
                hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
@@ -3752,7 +3808,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-       mutex_unlock(&ops->regex_lock);
+       mutex_unlock(&ops->func_hash->regex_lock);
 
        free_ftrace_hash(hash);
        return ret;
@@ -3975,15 +4031,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        trace_parser_put(parser);
 
-       mutex_lock(&iter->ops->regex_lock);
+       mutex_lock(&iter->ops->func_hash->regex_lock);
 
        if (file->f_mode & FMODE_WRITE) {
                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
                if (filter_hash)
-                       orig_hash = &iter->ops->filter_hash;
+                       orig_hash = &iter->ops->func_hash->filter_hash;
                else
-                       orig_hash = &iter->ops->notrace_hash;
+                       orig_hash = &iter->ops->func_hash->notrace_hash;
 
                mutex_lock(&ftrace_lock);
                ret = ftrace_hash_move(iter->ops, filter_hash,
@@ -3994,7 +4050,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
                mutex_unlock(&ftrace_lock);
        }
 
-       mutex_unlock(&iter->ops->regex_lock);
+       mutex_unlock(&iter->ops->func_hash->regex_lock);
        free_ftrace_hash(iter->hash);
        kfree(iter);
 
@@ -4611,7 +4667,6 @@ void __init ftrace_init(void)
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4713,7 +4768,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops control_ops = {
        .func   = ftrace_ops_control_func,
        .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(control_ops)
+       INIT_OPS_HASH(control_ops)
 };
 
 static inline void
@@ -5145,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+static struct ftrace_ops graph_ops = {
+       .func                   = ftrace_stub,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                  FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_STUB,
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+       .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
+#endif
+       ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+};
+
 static int ftrace_graph_active;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -5307,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
  */
 static void update_function_graph_func(void)
 {
-       if (ftrace_ops_list == &ftrace_list_end ||
-           (ftrace_ops_list == &global_ops &&
-            global_ops.next == &ftrace_list_end))
-               ftrace_graph_entry = __ftrace_graph_entry;
-       else
+       struct ftrace_ops *op;
+       bool do_test = false;
+
+       /*
+        * The graph and global ops share the same set of functions
+        * to test. If any other ops is on the list, then
+        * the graph tracing needs to test if it's the function
+        * it should call.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op != &global_ops && op != &graph_ops &&
+                   op != &ftrace_list_end) {
+                       do_test = true;
+                       /* in double loop, break out with goto */
+                       goto out;
+               }
+       } while_for_each_ftrace_op(op);
+ out:
+       if (do_test)
                ftrace_graph_entry = ftrace_graph_entry_test;
+       else
+               ftrace_graph_entry = __ftrace_graph_entry;
 }
 
 static struct notifier_block ftrace_suspend_notifier = {
@@ -5353,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        ftrace_graph_entry = ftrace_graph_entry_test;
        update_function_graph_func();
 
-       /* Function graph doesn't use the .func field of global_ops */
-       global_ops.flags |= FTRACE_OPS_FL_STUB;
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-       /* Optimize function graph calling (if implemented by arch) */
-       if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-               global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
-#endif
-
-       ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+       ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
 
 out:
        mutex_unlock(&ftrace_lock);
@@ -5380,12 +5453,7 @@ void unregister_ftrace_graph(void)
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        __ftrace_graph_entry = ftrace_graph_entry_stub;
-       ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
-       global_ops.flags &= ~FTRACE_OPS_FL_STUB;
-#ifdef CONFIG_DYNAMIC_FTRACE
-       if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-               global_ops.trampoline = 0;
-#endif
+       ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
index afb04b9b818a473f8b38e0aa94ee12ad84c44d1a..b38fb2b9e23772d3f3360ef556cf0714d4a44803 100644 (file)
@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                work = &cpu_buffer->irq_work;
        }
 
-       work->waiters_pending = true;
        poll_wait(filp, &work->waiters, poll_table);
+       work->waiters_pending = true;
+       /*
+        * There's a tight race between setting the waiters_pending and
+        * checking if the ring buffer is empty.  Once the waiters_pending bit
+        * is set, the next event will wake the task up, but we can get stuck
+        * if there's only a single event in.
+        *
+        * FIXME: Ideally, we need a memory barrier on the writer side as well,
+        * but adding a memory barrier to all events will cause too much of a
+        * performance hit in the fast path.  We only need a memory barrier when
+        * the buffer goes from empty to having content.  But as this race is
+        * extremely small, and it's not a problem if another event comes in, we
+        * will fix it later.
+        */
+       smp_mb();
 
        if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
            (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
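
The ring-buffer hunk is a store/load ordering fix: the poller must publish waiters_pending before checking whether the buffer is empty, so that either the poller sees the new event or the writer sees the flag and wakes it. A compact C11 model of the poller's side of that handshake (illustrative only; smp_mb() is approximated with a seq_cst fence):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool waiters_pending;
static atomic_int nr_events;

/* Returns true only if it is safe to sleep: the flag is already visible,
 * so any event added after this point is guaranteed to wake us. */
static bool poller_may_sleep(void)
{
        atomic_store_explicit(&waiters_pending, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* ~ smp_mb() */
        return atomic_load_explicit(&nr_events, memory_order_relaxed) == 0;
}

int main(void)
{
        atomic_store(&nr_events, 1);
        return poller_may_sleep() ? 1 : 0;      /* 0: event already queued */
}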
index 07c28323f88fe99d08900d82e3eacec6ca88fb91..1b233fc6746616beb56ff6d1be6ade998db4af24 100644 (file)
@@ -892,6 +892,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
         the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
         will test all possible w/w mutex interface abuse with the
         exception of simply not acquiring all the required locks.
+        Note that this feature can introduce significant overhead, so
+        it really should not be enabled in a production or distro kernel,
+        even a debug kernel.  If you are a driver writer, enable it.  If
+        you are a distro, do not.
 
 config DEBUG_LOCK_ALLOC
        bool "Lock debugging: detect incorrect freeing of live locks"
index e4853b50cf402d9606c49daf34bc849467dab391..4b98f897044aa6a364392bc1ec5b68a2a672a2d2 100644 (file)
@@ -410,9 +410,11 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
                priv->lane2_ops = NULL;
                if (priv->lane_version > 1)
                        priv->lane2_ops = &lane2_ops;
+               rtnl_lock();
                if (dev_set_mtu(dev, mesg->content.config.mtu))
                        pr_info("%s: change_mtu to %d failed\n",
                                dev->name, mesg->content.config.mtu);
+               rtnl_unlock();
                priv->is_proxy = mesg->content.config.is_proxy;
                break;
        case l_flush_tran_id:
index 52c43f9042209deaba0be22b549724ad28de1a77..fc1835c6bb4099e50e75f964826b0aacdd7200f0 100644 (file)
@@ -188,7 +188,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 
        /* Reached the end of the list, so insert after 'frag_entry_last'. */
        if (likely(frag_entry_last)) {
-               hlist_add_behind(&frag_entry_last->list, &frag_entry_new->list);
+               hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
                chain->size += skb->len - hdr_size;
                chain->timestamp = jiffies;
                ret = true;
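
The batman-adv change is a pure argument swap: hlist_add_behind() (the renamed hlist_add_after()) takes the node being inserted first and the existing anchor node second, and this caller still passed them the other way around. A stand-alone stand-in showing the convention:

#include <stdio.h>

struct node { int val; struct node *next; };

/* Same parameter order as the kernel helper: the new node first,
 * then the node it goes behind. */
static void add_behind(struct node *n, struct node *prev)
{
        n->next = prev->next;
        prev->next = n;
}

int main(void)
{
        struct node a = { 1, NULL }, b = { 2, NULL };

        add_behind(&b, &a);                     /* insert b behind a */
        printf("%d -> %d\n", a.val, a.next->val);       /* 1 -> 2 */
        return 0;
}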
index cb4459bd1d294d1901cc03b9c203fbcbf761f53f..76b7f5ee8f4c8a9d8e2b0369135e83d067a31afa 100644 (file)
@@ -643,7 +643,7 @@ static int fib6_commit_metrics(struct dst_entry *dst,
        if (dst->flags & DST_HOST) {
                mp = dst_metrics_write_ptr(dst);
        } else {
-               mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
                if (!mp)
                        return -ENOMEM;
                dst_init_metrics(dst, mp, 0);
index 6d537f03c0baa0450eefc5c22f496be085072cfd..0375009ddc0db39fd5a365db8cb5e4122219d864 100644 (file)
@@ -1444,7 +1444,7 @@ ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
 
                        list_del(&sdata->reserved_chanctx_list);
                        list_move(&sdata->assigned_chanctx_list,
-                                 &new_ctx->assigned_vifs);
+                                 &ctx->assigned_vifs);
                        sdata->reserved_chanctx = NULL;
 
                        ieee80211_vif_chanctx_reservation_complete(sdata);
index fe5cda0deb395db1d6a619e2145c22945ea05d74..5231652a95d90fa2c5d18193380c1a8b6c7dae62 100644 (file)
@@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
 static int make_writable(struct sk_buff *skb, int write_len)
 {
+       if (!pskb_may_pull(skb, write_len))
+               return -ENOMEM;
+
        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;
 
@@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 
        vlan_set_encap_proto(skb, vhdr);
        skb->mac_header += VLAN_HLEN;
+       if (skb_network_offset(skb) < ETH_HLEN)
+               skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_len(skb);
 
        return 0;
index 8d9f8042705ad82d8f11050bd3eb1951712eeaf7..93896d2092f67dc60a17c6a7c6b4abbb689b6cd9 100644 (file)
@@ -632,6 +632,7 @@ static void init_prb_bdqc(struct packet_sock *po,
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 
+       p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
        prb_setup_retire_blk_timer(po, tx_ring);
        prb_open_block(p1, pbd);
@@ -1942,6 +1943,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                        if ((int)snaplen < 0)
                                snaplen = 0;
                }
+       } else if (unlikely(macoff + snaplen >
+                           GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
+               u32 nval;
+
+               nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
+               pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
+                           snaplen, nval, macoff);
+               snaplen = nval;
+               if (unlikely((int)snaplen < 0)) {
+                       snaplen = 0;
+                       macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+               }
        }
        spin_lock(&sk->sk_receive_queue.lock);
        h.raw = packet_current_rx_frame(po, skb,
@@ -3783,6 +3796,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
                if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
                        goto out;
+               if (po->tp_version >= TPACKET_V3 &&
+                   (int)(req->tp_block_size -
+                         BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+                       goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                        po->tp_reserve))
                        goto out;
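
The af_packet hunks bound a TPACKET_V3 frame by the space actually left in a ring block (max_frame_len) and clamp oversized captures instead of writing past it. A toy version of the clamp, mirroring the arithmetic in tpacket_rcv() (the helper is invented for the demo; BLK_PLUS_PRIV() and the real ring geometry are not modelled):

#include <stdio.h>

static unsigned int clamp_snaplen(unsigned int snaplen, unsigned int *macoff,
                                  unsigned int max_frame_len)
{
        if (*macoff + snaplen > max_frame_len) {
                snaplen = max_frame_len - *macoff;
                if ((int)snaplen < 0) {         /* header alone overflows */
                        snaplen = 0;
                        *macoff = max_frame_len;
                }
        }
        return snaplen;
}

int main(void)
{
        unsigned int macoff = 4000;

        printf("%u\n", clamp_snaplen(2000, &macoff, 4096));     /* 96 */
        return 0;
}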
index eb9580a6b25ff4474a7af54900efbe2579325d00..cdddf6a303996b1a512a7a3896362acc31635f0e 100644 (file)
@@ -29,6 +29,7 @@ struct tpacket_kbdq_core {
        char            *pkblk_start;
        char            *pkblk_end;
        int             kblk_size;
+       unsigned int    max_frame_len;
        unsigned int    knum_blocks;
        uint64_t        knxt_seq_num;
        char            *prev;
index ead526467ccae574d6afddb7a5146fc7507e68a9..762a04bb8f6dcbe4627b579ff4081a4cf92adb11 100644 (file)
@@ -159,7 +159,6 @@ struct cbq_sched_data {
        struct cbq_class        *tx_borrowed;
        int                     tx_len;
        psched_time_t           now;            /* Cached timestamp */
-       psched_time_t           now_rt;         /* Cached real time */
        unsigned int            pmask;
 
        struct hrtimer          delay_timer;
@@ -353,12 +352,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
        int toplevel = q->toplevel;
 
        if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
-               psched_time_t now;
-               psched_tdiff_t incr;
-
-               now = psched_get_time();
-               incr = now - q->now_rt;
-               now = q->now + incr;
+               psched_time_t now = psched_get_time();
 
                do {
                        if (cl->undertime < now) {
@@ -700,8 +694,13 @@ cbq_update(struct cbq_sched_data *q)
        struct cbq_class *this = q->tx_class;
        struct cbq_class *cl = this;
        int len = q->tx_len;
+       psched_time_t now;
 
        q->tx_class = NULL;
+       /* Time integrator. We calculate EOS time
+        * by adding expected packet transmission time.
+        */
+       now = q->now + L2T(&q->link, len);
 
        for ( ; cl; cl = cl->share) {
                long avgidle = cl->avgidle;
@@ -717,7 +716,7 @@ cbq_update(struct cbq_sched_data *q)
                 *      idle = (now - last) - last_pktlen/rate
                 */
 
-               idle = q->now - cl->last;
+               idle = now - cl->last;
                if ((unsigned long)idle > 128*1024*1024) {
                        avgidle = cl->maxidle;
                } else {
@@ -761,7 +760,7 @@ cbq_update(struct cbq_sched_data *q)
                        idle -= L2T(&q->link, len);
                        idle += L2T(cl, len);
 
-                       cl->undertime = q->now + idle;
+                       cl->undertime = now + idle;
                } else {
                        /* Underlimit */
 
@@ -771,7 +770,8 @@ cbq_update(struct cbq_sched_data *q)
                        else
                                cl->avgidle = avgidle;
                }
-               cl->last = q->now;
+               if ((s64)(now - cl->last) > 0)
+                       cl->last = now;
        }
 
        cbq_update_toplevel(q, this, q->tx_borrowed);
@@ -943,31 +943,13 @@ cbq_dequeue(struct Qdisc *sch)
        struct sk_buff *skb;
        struct cbq_sched_data *q = qdisc_priv(sch);
        psched_time_t now;
-       psched_tdiff_t incr;
 
        now = psched_get_time();
-       incr = now - q->now_rt;
-
-       if (q->tx_class) {
-               psched_tdiff_t incr2;
-               /* Time integrator. We calculate EOS time
-                * by adding expected packet transmission time.
-                * If real time is greater, we warp artificial clock,
-                * so that:
-                *
-                * cbq_time = max(real_time, work);
-                */
-               incr2 = L2T(&q->link, q->tx_len);
-               q->now += incr2;
+
+       if (q->tx_class)
                cbq_update(q);
-               if ((incr -= incr2) < 0)
-                       incr = 0;
-               q->now += incr;
-       } else {
-               if (now > q->now)
-                       q->now = now;
-       }
-       q->now_rt = now;
+
+       q->now = now;
 
        for (;;) {
                q->wd_expires = 0;
@@ -1223,7 +1205,6 @@ cbq_reset(struct Qdisc *sch)
        hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
-       q->now_rt = q->now;
 
        for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
                q->active[prio] = NULL;
@@ -1407,7 +1388,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        q->delay_timer.function = cbq_undelay;
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
-       q->now_rt = q->now;
 
        cbq_link_class(&q->link);
 
index 06a9ee6b2d3a577a73b62028a711f5f801fe13f0..a88b8524846eb0b6a03d7f05f2e8910efd432305 100644 (file)
@@ -813,6 +813,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                else {
                        dst_release(transport->dst);
                        transport->dst = NULL;
+                       ulp_notify = false;
                }
 
                spc_state = SCTP_ADDR_UNREACHABLE;
@@ -1244,7 +1245,7 @@ static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
 {
        u8 score_curr, score_best;
 
-       if (best == NULL)
+       if (best == NULL || curr == best)
                return curr;
 
        score_curr = sctp_trans_score(curr);
@@ -1355,14 +1356,11 @@ static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
                trans_sec = trans_pri;
 
        /* If we failed to find a usable transport, just camp on the
-        * primary or retran, even if they are inactive, if possible
-        * pick a PF iff it's the better choice.
+        * active or pick a PF iff it's the better choice.
         */
        if (trans_pri == NULL) {
-               trans_pri = sctp_trans_elect_best(asoc->peer.primary_path,
-                                                 asoc->peer.retran_path);
-               trans_pri = sctp_trans_elect_best(trans_pri, trans_pf);
-               trans_sec = asoc->peer.primary_path;
+               trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
+               trans_sec = trans_pri;
        }
 
        /* Set the active and retran transports. */
index 3f93454592b6dfdfc305e0c233c087d19d9985ab..3087da39ee47b2acf4f1d5b20babfb15a308d0be 100644 (file)
@@ -179,9 +179,12 @@ static inline int tipc_port_importance(struct tipc_port *port)
        return msg_importance(&port->phdr);
 }
 
-static inline void tipc_port_set_importance(struct tipc_port *port, int imp)
+static inline int tipc_port_set_importance(struct tipc_port *port, int imp)
 {
+       if (imp > TIPC_CRITICAL_IMPORTANCE)
+               return -EINVAL;
        msg_set_importance(&port->phdr, (u32)imp);
+       return 0;
 }
 
 #endif
index 7d423ee10897ca6978cb3030cdaae6f0c81814a1..ff8c8118d56e216a62172f0d60e92bf892660ee3 100644 (file)
@@ -1973,7 +1973,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
 
        switch (opt) {
        case TIPC_IMPORTANCE:
-               tipc_port_set_importance(port, value);
+               res = tipc_port_set_importance(port, value);
                break;
        case TIPC_SRC_DROPPABLE:
                if (sock->type != SOCK_STREAM)
index 16a07cfa4d3469d0a386cc1264afb07c6241bcea..70bea942b4135b6a607bdb8ded0f50f0142aba40 100755 (executable)
@@ -2085,6 +2085,7 @@ sub dump_function($$) {
     $prototype =~ s/^noinline +//;
     $prototype =~ s/__init +//;
     $prototype =~ s/__init_or_module +//;
+    $prototype =~ s/__meminit +//;
     $prototype =~ s/__must_check +//;
     $prototype =~ s/__weak +//;
     my $define = $prototype =~ s/^#\s*define\s+//; #ak added
index a3386d119425eb8367194063470efe017eef622b..bed745c8b1a30d47a173fd7d96322aebb2d09c9c 100644 (file)
@@ -173,7 +173,7 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
                 * Use filesystem name if filesystem does not support rename()
                 * operation.
                 */
-               if (!inode->i_op->rename)
+               if (!inode->i_op->rename && !inode->i_op->rename2)
                        goto prepend_filesystem_name;
        }
        /* Prepend device name. */
@@ -282,7 +282,8 @@ char *tomoyo_realpath_from_path(struct path *path)
                 * Get local name for filesystems without rename() operation
                 * or dentry without vfsmount.
                 */
-               if (!path->mnt || !inode->i_op->rename)
+               if (!path->mnt ||
+                   (!inode->i_op->rename && !inode->i_op->rename2))
                        pos = tomoyo_get_local_path(path->dentry, buf,
                                                    buf_len - 1);
                /* Get absolute name for the rest. */
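
The same rename/rename2 test now appears in both tomoyo call sites; a small helper
along these lines (hypothetical, not part of the patch) would keep the two in sync:

    /* Hypothetical helper, not in the patch: true if the filesystem
     * implements either rename() flavour. */
    static inline bool tomoyo_inode_has_rename(const struct inode *inode)
    {
            return inode->i_op->rename || inode->i_op->rename2;
    }
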
index 051d55b05521216d31a1d011d58560744e930672..9f404e965ea26366e6ad9ec65e4946263650fd37 100644 (file)
@@ -684,7 +684,7 @@ int snd_info_card_free(struct snd_card *card)
  * snd_info_get_line - read one line from the procfs buffer
  * @buffer: the procfs buffer
  * @line: the buffer to store
- * @len: the max. buffer size - 1
+ * @len: the max. buffer size
  *
  * Reads one line from the buffer and stores the string.
  *
@@ -704,7 +704,7 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
                        buffer->stop = 1;
                if (c == '\n')
                        break;
-               if (len) {
+               if (len > 1) {
                        len--;
                        *line++ = c;
                }
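
With @len now documented as the full buffer size, the copy loop has to leave one
byte free for the terminating NUL, hence the switch from "if (len)" to
"if (len > 1)". A minimal sketch of the same bounded-copy pattern, with a
hypothetical next_char() standing in for the procfs buffer reads:

    /* Sketch only: len is the full size of line[], so copy while more
     * than one byte remains and always NUL-terminate. */
    static void read_line(char *line, int len, int (*next_char)(void))
    {
            int c;

            while ((c = next_char()) >= 0 && c != '\n') {
                    if (len > 1) {
                            len--;
                            *line++ = c;
                    }
            }
            *line = '\0';
    }
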
index 4560ca0e5651ac6c515c589efa18fc8d308b1910..2c6fd80e0bd1fd0042848bdc3e68bd2734c4a0c5 100644 (file)
@@ -142,11 +142,11 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
        },
        [SNDRV_PCM_FORMAT_DSD_U8] = {
                .width = 8, .phys = 8, .le = 1, .signd = 0,
-               .silence = {},
+               .silence = { 0x69 },
        },
        [SNDRV_PCM_FORMAT_DSD_U16_LE] = {
                .width = 16, .phys = 16, .le = 1, .signd = 0,
-               .silence = {},
+               .silence = { 0x69, 0x69 },
        },
        /* FIXME: the following three formats are not defined properly yet */
        [SNDRV_PCM_FORMAT_MPEG] = {
index f2e34e3f27eef6336d5798ae634b5f5ecc8aed70..5851249f11d912a318af26b9fc2ac6d933efe492 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #ifndef CT20K1REG_H
-#define CT20k1REG_H
+#define CT20K1REG_H
 
 /* 20k1 registers */
 #define        DSPXRAM_START                   0x000000
 #define I2SD_R    0x19L
 
 #endif /* CT20K1REG_H */
-
-
index 07e760937d3c0fc83bc7214f775a3a621173bdc9..8371274aa8116b90fe14dc03c712aac48b2c2897 100644 (file)
@@ -20,7 +20,7 @@
  */
 
 #ifndef __CA0132_REGS_H
-#define __CA0312_REGS_H
+#define __CA0132_REGS_H
 
 #define DSP_CHIP_OFFSET                0x100000
 #define DSP_DBGCNTL_MODULE_OFFSET      0xE30
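
Both header fixes above correct an include guard whose #define did not spell the
same macro as its #ifndef (CT20k1REG_H vs CT20K1REG_H, __CA0312_REGS_H vs
__CA0132_REGS_H); with the mismatch the guard macro is never defined, so a second
inclusion re-parses the whole header. The usual pattern, as a generic sketch:

    /* Generic include guard: the #define must name the same macro as the
     * #ifndef, otherwise the guard never takes effect. */
    #ifndef EXAMPLE_REGS_H
    #define EXAMPLE_REGS_H

    /* ... register definitions ... */

    #endif /* EXAMPLE_REGS_H */
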
index 36badba2dcec8bde02b67229e4bb8bcd1a3ad3fb..99d7d7fecaad09934090eb6575cfb43131f2dfb4 100644 (file)
@@ -50,6 +50,8 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec))
 
 #define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
+#define is_cherryview(codec) ((codec)->vendor_id == 0x80862883)
+#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
 
 struct hdmi_spec_per_cvt {
        hda_nid_t cvt_nid;
@@ -1459,7 +1461,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
                            mux_idx);
 
        /* configure unused pins to choose other converters */
-       if (is_haswell_plus(codec) || is_valleyview(codec))
+       if (is_haswell_plus(codec) || is_valleyview_plus(codec))
                intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
 
        snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
@@ -1598,7 +1600,8 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
                 *   and this can make HW reset converter selection on a pin.
                 */
                if (eld->eld_valid && !old_eld_valid && per_pin->setup) {
-                       if (is_haswell_plus(codec) || is_valleyview(codec)) {
+                       if (is_haswell_plus(codec) ||
+                               is_valleyview_plus(codec)) {
                                intel_verify_pin_cvt_connect(codec, per_pin);
                                intel_not_share_assigned_cvt(codec, pin_nid,
                                                        per_pin->mux_idx);
@@ -1779,7 +1782,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
        bool non_pcm;
        int pinctl;
 
-       if (is_haswell_plus(codec) || is_valleyview(codec)) {
+       if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
                /* Verify pin:cvt selections to avoid silent audio after S3.
                 * After S3, the audio driver restores pin:cvt selections
                 * but this can happen before gfx is ready and such selection
@@ -2330,9 +2333,8 @@ static int patch_generic_hdmi(struct hda_codec *codec)
                intel_haswell_fixup_enable_dp12(codec);
        }
 
-       if (is_haswell(codec) || is_valleyview(codec)) {
+       if (is_haswell_plus(codec) || is_valleyview_plus(codec))
                codec->depop_delay = 0;
-       }
 
        if (hdmi_parse_codec(codec) < 0) {
                codec->spec = NULL;
index 6b38ec3c6e575bd2ee219630ad74412ba434f3b3..d446ac3137b37ac6f3512f8fa7830b537632bca8 100644 (file)
@@ -181,6 +181,8 @@ static void alc_fix_pll(struct hda_codec *codec)
                            spec->pll_coef_idx);
        val = snd_hda_codec_read(codec, spec->pll_nid, 0,
                                 AC_VERB_GET_PROC_COEF, 0);
+       if (val == -1)
+               return;
        snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
                            spec->pll_coef_idx);
        snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF,
@@ -2806,6 +2808,8 @@ static void alc286_shutup(struct hda_codec *codec)
 static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
 {
        int val = alc_read_coef_idx(codec, 0x04);
+       if (val == -1)
+               return;
        if (power_up)
                val |= 1 << 11;
        else
@@ -3264,6 +3268,15 @@ static int alc269_resume(struct hda_codec *codec)
        snd_hda_codec_resume_cache(codec);
        alc_inv_dmic_sync(codec, true);
        hda_call_check_power_status(codec, 0x01);
+
+       /* On some machines the BIOS clears the codec GPIO data when entering
+        * suspend and does not restore it after resume, so restore it here
+        * in the driver.
+        */
+       if (spec->gpio_led)
+               snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_SET_GPIO_DATA,
+                           spec->gpio_led);
+
        if (spec->has_alc5505_dsp)
                alc5505_dsp_resume(codec);
 
@@ -4395,6 +4408,7 @@ enum {
        ALC292_FIXUP_TPT440_DOCK,
        ALC283_FIXUP_BXBT2807_MIC,
        ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
+       ALC282_FIXUP_ASPIRE_V5_PINS,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -4842,6 +4856,22 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained_before = true,
                .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
        },
+       [ALC282_FIXUP_ASPIRE_V5_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x12, 0x90a60130 },
+                       { 0x14, 0x90170110 },
+                       { 0x17, 0x40000008 },
+                       { 0x18, 0x411111f0 },
+                       { 0x19, 0x411111f0 },
+                       { 0x1a, 0x411111f0 },
+                       { 0x1b, 0x411111f0 },
+                       { 0x1d, 0x40f89b2d },
+                       { 0x1e, 0x411111f0 },
+                       { 0x21, 0x0321101f },
+                       { },
+               },
+       },
 
 };
 
@@ -4853,6 +4883,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+       SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -5311,27 +5342,30 @@ static void alc269_fill_coef(struct hda_codec *codec)
        if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
                val = alc_read_coef_idx(codec, 0x04);
                /* Power up output pin */
-               alc_write_coef_idx(codec, 0x04, val | (1<<11));
+               if (val != -1)
+                       alc_write_coef_idx(codec, 0x04, val | (1<<11));
        }
 
        if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
                val = alc_read_coef_idx(codec, 0xd);
-               if ((val & 0x0c00) >> 10 != 0x1) {
+               if (val != -1 && (val & 0x0c00) >> 10 != 0x1) {
                        /* Capless ramp up clock control */
                        alc_write_coef_idx(codec, 0xd, val | (1<<10));
                }
                val = alc_read_coef_idx(codec, 0x17);
-               if ((val & 0x01c0) >> 6 != 0x4) {
+               if (val != -1 && (val & 0x01c0) >> 6 != 0x4) {
                        /* Class D power on reset */
                        alc_write_coef_idx(codec, 0x17, val | (1<<7));
                }
        }
 
        val = alc_read_coef_idx(codec, 0xd); /* Class D */
-       alc_write_coef_idx(codec, 0xd, val | (1<<14));
+       if (val != -1)
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
 
        val = alc_read_coef_idx(codec, 0x4); /* HP */
-       alc_write_coef_idx(codec, 0x4, val | (1<<11));
+       if (val != -1)
+               alc_write_coef_idx(codec, 0x4, val | (1<<11));
 }
 
 /*
index bd41ee4da078c6c7808364db38d344eab4151a75..2c71f16bd6613fc3e07ca63c4a5b1d8f85efa874 100644 (file)
@@ -1278,6 +1278,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
        else
                rates = &arizona_48k_bclk_rates[0];
 
+       wl = snd_pcm_format_width(params_format(params));
+
        if (tdm_slots) {
                arizona_aif_dbg(dai, "Configuring for %d %d bit TDM slots\n",
                                tdm_slots, tdm_width);
@@ -1285,6 +1287,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
                channels = tdm_slots;
        } else {
                bclk_target = snd_soc_params_to_bclk(params);
+               tdm_width = wl;
        }
 
        if (chan_limit && chan_limit < channels) {
@@ -1319,8 +1322,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
        arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n",
                        rates[bclk], rates[bclk] / lrclk);
 
-       wl = snd_pcm_format_width(params_format(params));
-       frame = wl << ARIZONA_AIF1TX_WL_SHIFT | wl;
+       frame = wl << ARIZONA_AIF1TX_WL_SHIFT | tdm_width;
 
        reconfig = arizona_aif_cfg_changed(codec, base, bclk, lrclk, frame);
 
index 163ec3855fd4027e01a1067fa6842688f0175f4b..0c8aefab404ca15cabfaaf43a36d1885983476ed 100644 (file)
@@ -259,13 +259,13 @@ static const struct soc_enum pcm512x_veds =
                        pcm512x_ramp_step_text);
 
 static const struct snd_kcontrol_new pcm512x_controls[] = {
-SOC_DOUBLE_R_TLV("Playback Digital Volume", PCM512x_DIGITAL_VOLUME_2,
+SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
                 PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
 SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
               PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
 SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
               PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
-SOC_DOUBLE("Playback Digital Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
+SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
           PCM512x_RQMR_SHIFT, 1, 1),
 
 SOC_SINGLE("Deemphasis Switch", PCM512x_DSP, PCM512x_DEMP_SHIFT, 1, 1),
index c28508da34cf3ba4354ba6d54de685a03d112ca4..6a6b2ff7d7d73daf4b6742242af4247e5585ebcc 100644 (file)
@@ -403,7 +403,8 @@ out:
        return ret;
 }
 
-static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
+static int __davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
+                                     int div, bool explicit)
 {
        struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
 
@@ -420,7 +421,8 @@ static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div
                               ACLKXDIV(div - 1), ACLKXDIV_MASK);
                mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG,
                               ACLKRDIV(div - 1), ACLKRDIV_MASK);
-               mcasp->bclk_div = div;
+               if (explicit)
+                       mcasp->bclk_div = div;
                break;
 
        case 2:         /* BCLK/LRCLK ratio */
@@ -434,6 +436,12 @@ static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div
        return 0;
 }
 
+static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
+                                   int div)
+{
+       return __davinci_mcasp_set_clkdiv(dai, div_id, div, 1);
+}
+
 static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id,
                                    unsigned int freq, int dir)
 {
@@ -738,7 +746,7 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
                                 "Inaccurate BCLK: %u Hz / %u != %u Hz\n",
                                 mcasp->sysclk_freq, div, bclk_freq);
                }
-               davinci_mcasp_set_clkdiv(cpu_dai, 1, div);
+               __davinci_mcasp_set_clkdiv(cpu_dai, 1, div, 0);
        }
 
        ret = mcasp_common_hw_param(mcasp, substream->stream,
index f54a8fc992913ee837ee8c2914599274982bbba2..f3012b645b51fcfdcdbc5c2e60b8f1d5db11724f 100644 (file)
@@ -49,7 +49,6 @@ config SND_SOC_FSL_ESAI
        tristate "Enhanced Serial Audio Interface (ESAI) module support"
        select REGMAP_MMIO
        select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
-       select SND_SOC_FSL_UTILS
        help
          Say Y if you want to add Enhanced Synchronous Audio Interface
          (ESAI) support for the Freescale CPUs.
index 72d154e7dd03c3695860dd4da8b00fd6bc2ff3c5..a3b29ed84963459baada6cc9d8ab742f696ee44e 100644 (file)
@@ -18,7 +18,6 @@
 
 #include "fsl_esai.h"
 #include "imx-pcm.h"
-#include "fsl_utils.h"
 
 #define FSL_ESAI_RATES         SNDRV_PCM_RATE_8000_192000
 #define FSL_ESAI_FORMATS       (SNDRV_PCM_FMTBIT_S8 | \
@@ -607,7 +606,6 @@ static struct snd_soc_dai_ops fsl_esai_dai_ops = {
        .hw_params = fsl_esai_hw_params,
        .set_sysclk = fsl_esai_set_dai_sysclk,
        .set_fmt = fsl_esai_set_dai_fmt,
-       .xlate_tdm_slot_mask = fsl_asoc_xlate_tdm_slot_mask,
        .set_tdm_slot = fsl_esai_set_dai_tdm_slot,
 };
 
index 42edc6f4fc4a839d49140710adcf191f19b7e4a4..03d0a166b6359243a12e046aa64264dc647f4cdd 100644 (file)
@@ -246,8 +246,8 @@ static struct sst_acpi_desc sst_acpi_broadwell_desc = {
 };
 
 static struct sst_acpi_mach baytrail_machines[] = {
-       { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-i2s_master" },
-       { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-i2s_master" },
+       { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master" },
+       { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master" },
        {}
 };
 
index 67673a2c0f415d9f120bf375a3f2a9ca722d7c9a..b4ad98c43e5c6d53a1654e75bc4577d82f15b305 100644 (file)
@@ -817,7 +817,7 @@ static struct sst_dsp_device byt_dev = {
        .ops = &sst_byt_ops,
 };
 
-int sst_byt_dsp_suspend_noirq(struct device *dev, struct sst_pdata *pdata)
+int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata)
 {
        struct sst_byt *byt = pdata->dsp;
 
@@ -826,14 +826,6 @@ int sst_byt_dsp_suspend_noirq(struct device *dev, struct sst_pdata *pdata)
        sst_byt_drop_all(byt);
        dev_dbg(byt->dev, "dsp in reset\n");
 
-       return 0;
-}
-EXPORT_SYMBOL_GPL(sst_byt_dsp_suspend_noirq);
-
-int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata)
-{
-       struct sst_byt *byt = pdata->dsp;
-
        dev_dbg(byt->dev, "free all blocks and unload fw\n");
        sst_fw_unload(byt->fw);
 
index 06a4d202689b3780e13d37a55594862735c0d129..8faff6dcf25d0c1328c799cf40b78620aa3c59d9 100644 (file)
@@ -66,7 +66,6 @@ int sst_byt_get_dsp_position(struct sst_byt *byt,
 int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata);
 void sst_byt_dsp_free(struct device *dev, struct sst_pdata *pdata);
 struct sst_dsp *sst_byt_get_dsp(struct sst_byt *byt);
-int sst_byt_dsp_suspend_noirq(struct device *dev, struct sst_pdata *pdata);
 int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata);
 int sst_byt_dsp_boot(struct device *dev, struct sst_pdata *pdata);
 int sst_byt_dsp_wait_for_ready(struct device *dev, struct sst_pdata *pdata);
index 599401c0c6551d59b1166359e0df5a45e74c8f8d..eab1c7d8518782b01e517e562c6dd7f9eb686d5c 100644 (file)
@@ -59,6 +59,9 @@ struct sst_byt_priv_data {
 
        /* DAI data */
        struct sst_byt_pcm_data pcm[BYT_PCM_COUNT];
+
+       /* set when the stream context must be restored after suspend */
+       bool restore_stream;
 };
 
 /* this may get called several times by oss emulation */
@@ -184,7 +187,10 @@ static int sst_byt_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                sst_byt_stream_start(byt, pcm_data->stream, 0);
                break;
        case SNDRV_PCM_TRIGGER_RESUME:
-               schedule_work(&pcm_data->work);
+               if (pdata->restore_stream)
+                       schedule_work(&pcm_data->work);
+               else
+                       sst_byt_stream_resume(byt, pcm_data->stream);
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                sst_byt_stream_resume(byt, pcm_data->stream);
@@ -193,6 +199,7 @@ static int sst_byt_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                sst_byt_stream_stop(byt, pcm_data->stream);
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
+               pdata->restore_stream = false;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                sst_byt_stream_pause(byt, pcm_data->stream);
                break;
@@ -404,26 +411,10 @@ static const struct snd_soc_component_driver byt_dai_component = {
 };
 
 #ifdef CONFIG_PM
-static int sst_byt_pcm_dev_suspend_noirq(struct device *dev)
-{
-       struct sst_pdata *sst_pdata = dev_get_platdata(dev);
-       int ret;
-
-       dev_dbg(dev, "suspending noirq\n");
-
-       /* at this point all streams will be stopped and context saved */
-       ret = sst_byt_dsp_suspend_noirq(dev, sst_pdata);
-       if (ret < 0) {
-               dev_err(dev, "failed to suspend %d\n", ret);
-               return ret;
-       }
-
-       return ret;
-}
-
 static int sst_byt_pcm_dev_suspend_late(struct device *dev)
 {
        struct sst_pdata *sst_pdata = dev_get_platdata(dev);
+       struct sst_byt_priv_data *priv_data = dev_get_drvdata(dev);
        int ret;
 
        dev_dbg(dev, "suspending late\n");
@@ -434,34 +425,30 @@ static int sst_byt_pcm_dev_suspend_late(struct device *dev)
                return ret;
        }
 
+       priv_data->restore_stream = true;
+
        return ret;
 }
 
 static int sst_byt_pcm_dev_resume_early(struct device *dev)
 {
        struct sst_pdata *sst_pdata = dev_get_platdata(dev);
+       int ret;
 
        dev_dbg(dev, "resume early\n");
 
        /* load fw and boot DSP */
-       return sst_byt_dsp_boot(dev, sst_pdata);
-}
-
-static int sst_byt_pcm_dev_resume(struct device *dev)
-{
-       struct sst_pdata *sst_pdata = dev_get_platdata(dev);
-
-       dev_dbg(dev, "resume\n");
+       ret = sst_byt_dsp_boot(dev, sst_pdata);
+       if (ret)
+               return ret;
 
        /* wait for FW to finish booting */
        return sst_byt_dsp_wait_for_ready(dev, sst_pdata);
 }
 
 static const struct dev_pm_ops sst_byt_pm_ops = {
-       .suspend_noirq = sst_byt_pcm_dev_suspend_noirq,
        .suspend_late = sst_byt_pcm_dev_suspend_late,
        .resume_early = sst_byt_pcm_dev_resume_early,
-       .resume = sst_byt_pcm_dev_resume,
 };
 
 #define SST_BYT_PM_OPS (&sst_byt_pm_ops)
index 0109f6c2334e528e757b0f128fbf2dddbd3f29de..a8e0974330749f86c894ce520ff43d7c4d0960cc 100644 (file)
@@ -765,9 +765,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
                          SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
                          SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
 
-#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-                           SNDRV_PCM_FMTBIT_S24_LE |   \
-                           SNDRV_PCM_FMTBIT_S32_LE)
+#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
        .startup        = pxa_ssp_startup,
index 8348352dc2c62c484a1efba6f04c290367b93864..177bd8639ef93e6b97a20e1cd022224e62cf14bf 100644 (file)
@@ -2860,12 +2860,14 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        unsigned int reg_val, val;
-       int ret = 0;
 
-       if (e->reg != SND_SOC_NOPM)
-               ret = soc_dapm_read(dapm, e->reg, &reg_val);
-       else
+       if (e->reg != SND_SOC_NOPM) {
+               int ret = soc_dapm_read(dapm, e->reg, &reg_val);
+               if (ret)
+                       return ret;
+       } else {
                reg_val = dapm_kcontrol_get_value(kcontrol);
+       }
 
        val = (reg_val >> e->shift_l) & e->mask;
        ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val);
@@ -2875,7 +2877,7 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
                ucontrol->value.enumerated.item[1] = val;
        }
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_double);
 
index bf06577fea51c22ab944edb9560e56f01aae2f94..5819a2708d7edd5823d9e5885a6b7c3796b387ad 100644 (file)
@@ -526,8 +526,10 @@ static int assign_guest_irq(struct kvm *kvm,
                dev->irq_requested_type |= guest_irq_type;
                if (dev->ack_notifier.gsi != -1)
                        kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
-       } else
+       } else {
                kvm_free_irq_source_id(kvm, dev->irq_source_id);
+               dev->irq_source_id = -1;
+       }
 
        return r;
 }
index 0df7d4b34dfec96345d359bf08562309d8607f40..714b949323120aee855dd7e57db549cac31e8183 100644 (file)
@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
        return pfn;
 }
 
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+       unsigned long i;
+
+       for (i = 0; i < npages; ++i)
+               kvm_release_pfn_clean(pfn + i);
+}
+
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
        gfn_t gfn, end_gfn;
@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%llx\n", pfn);
+                       kvm_unpin_pages(kvm, pfn, page_size);
                        goto unmap_pages;
                }
 
@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
        return 0;
 
 unmap_pages:
-       kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
+       kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
        return r;
 }
 
@@ -266,14 +275,6 @@ out_unlock:
        return r;
 }
 
-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
-{
-       unsigned long i;
-
-       for (i = 0; i < npages; ++i)
-               kvm_release_pfn_clean(pfn + i);
-}
-
 static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
 {