Merge branch 'kvm-ppc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus...
author	Paolo Bonzini <pbonzini@redhat.com>
Fri, 11 Dec 2015 11:26:47 +0000 (12:26 +0100)
committer	Paolo Bonzini <pbonzini@redhat.com>
Fri, 11 Dec 2015 11:26:47 +0000 (12:26 +0100)
452 files changed:
Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/armada-38x.dtsi
arch/arm64/net/bpf_jit_comp.c
arch/blackfin/kernel/perf_event.c
arch/mn10300/Kconfig
arch/sh/kernel/perf_event.c
arch/sparc/kernel/perf_event.c
arch/tile/kernel/perf_event.c
arch/um/Makefile
arch/um/drivers/net_user.c
arch/um/kernel/signal.c
arch/x86/boot/boot.h
arch/x86/boot/video-mode.c
arch/x86/boot/video.c
arch/x86/entry/entry_64.S
arch/x86/include/asm/page_types.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/x86_init.h
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_cqm.c
arch/x86/kernel/cpu/perf_event_intel_lbr.c
arch/x86/kernel/irq_work.c
arch/x86/kernel/pmem.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/mpx.c
arch/x86/pci/bus_numa.c
arch/x86/um/signal.c
block/blk-cgroup.c
block/blk-core.c
block/blk-merge.c
block/blk-settings.c
block/blk-sysfs.c
block/partition-generic.c
crypto/algif_aead.c
crypto/algif_skcipher.c
drivers/acpi/Kconfig
drivers/acpi/nfit.c
drivers/acpi/nfit.h
drivers/acpi/pci_root.c
drivers/ata/ahci.c
drivers/ata/ahci_mvebu.c
drivers/ata/libahci.c
drivers/ata/libata-eh.c
drivers/ata/sata_fsl.c
drivers/ata/sata_sil.c
drivers/base/power/domain.c
drivers/base/power/domain_governor.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/clk/clk-gpio.c
drivers/clk/clk-qoriq.c
drivers/clk/clk-scpi.c
drivers/clk/imx/clk-pllv1.c
drivers/clk/imx/clk-pllv2.c
drivers/clk/mmp/clk-mmp2.c
drivers/clk/mmp/clk-pxa168.c
drivers/clk/mmp/clk-pxa910.c
drivers/clk/sunxi/clk-a10-pll2.c
drivers/clk/ti/clk-816x.c
drivers/clk/ti/clkt_dpll.c
drivers/clk/ti/divider.c
drivers/clk/ti/fapll.c
drivers/clk/ti/mux.c
drivers/cpufreq/cpufreq.c
drivers/crypto/nx/nx-aes-ccm.c
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/talitos.c
drivers/gpio/gpio-74xx-mmio.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-palmas.c
drivers/gpio/gpio-syscon.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-drm.h
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/imx/ipuv3-plane.h
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_agp.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg.c
drivers/hid/usbhid/hid-quirks.c
drivers/isdn/hisax/config.c
drivers/isdn/hisax/hfc_pci.c
drivers/isdn/hisax/hfc_sx.c
drivers/isdn/hisax/q931.c
drivers/lightnvm/core.c
drivers/lightnvm/gennvm.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can.c
drivers/net/can/cc770/cc770.c
drivers/net/can/flexcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/m_can/m_can.c
drivers/net/can/pch_can.c
drivers/net/can/rcar_can.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/sun4i_can.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/usb_8dev.c
drivers/net/can/xilinx_can.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/alx/reg.h
drivers/net/ethernet/aurora/Kconfig [new file with mode: 0644]
drivers/net/ethernet/aurora/Makefile [new file with mode: 0644]
drivers/net/ethernet/aurora/nb8800.c [new file with mode: 0644]
drivers/net/ethernet/aurora/nb8800.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/ti/cpsw-common.c
drivers/net/macvtap.c
drivers/net/phy/broadcom.c
drivers/net/phy/phy.c
drivers/net/tun.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vrf.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/x25_asy.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
drivers/nvme/host/Makefile
drivers/nvme/host/lightnvm.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/of/address.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/of_reserved_mem.c
drivers/pci/host/pcie-altera.c
drivers/pci/msi.c
drivers/pci/pci-driver.c
drivers/pinctrl/Kconfig
drivers/pinctrl/freescale/pinctrl-imx1-core.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
drivers/pinctrl/sh-pfc/pfc-sh7734.c
drivers/remoteproc/remoteproc_core.c
drivers/remoteproc/remoteproc_debugfs.c
drivers/scsi/Kconfig
drivers/scsi/advansys.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/mpt3sas/Kconfig
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/st.c
drivers/spi/spi-bcm63xx.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-pl022.c
drivers/spi/spi.c
drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
drivers/staging/lustre/lustre/libcfs/module.c
drivers/vfio/Kconfig
drivers/vfio/pci/vfio_pci.c
drivers/vfio/platform/vfio_platform.c
drivers/vfio/platform/vfio_platform_common.c
drivers/vfio/vfio.c
drivers/vhost/vhost.c
drivers/virtio/virtio.c
drivers/virtio/virtio_ring.c
fs/9p/vfs_inode.c
fs/direct-io.c
fs/dlm/lowcomms.c
fs/ext4/crypto.c
fs/ext4/ext4.h
fs/ext4/symlink.c
fs/ext4/sysfs.c
fs/jbd2/transaction.c
fs/namei.c
fs/overlayfs/copy_up.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
include/asm-generic/tlb.h
include/drm/drmP.h
include/linux/acpi.h
include/linux/bitops.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/cpufreq.h
include/linux/dns_resolver.h
include/linux/ipv6.h
include/linux/jump_label.h
include/linux/libata.h
include/linux/lightnvm.h
include/linux/lockdep.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of_irq.h
include/linux/perf_event.h
include/linux/proportions.h
include/linux/uprobes.h
include/linux/vfio.h
include/net/af_unix.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/mac80211.h
include/net/ndisc.h
include/net/sch_generic.h
include/net/sctp/structs.h
include/net/sock.h
include/scsi/scsi_host.h
include/sound/soc-dapm.h
include/uapi/linux/vfio.h
include/video/imx-ipu-v3.h
kernel/bpf/arraymap.c
kernel/bpf/hashtab.c
kernel/bpf/inode.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/cgroup_pids.c
kernel/cpuset.c
kernel/events/callchain.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/events/uprobes.c
kernel/fork.c
kernel/irq_work.c
kernel/jump_label.c
kernel/locking/lockdep.c
kernel/locking/lockdep_proc.c
kernel/sched/clock.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/wait.c
kernel/trace/ring_buffer.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
lib/btree.c
lib/proportions.c
mm/memcontrol.c
mm/page-writeback.c
net/bluetooth/af_bluetooth.c
net/bluetooth/smp.c
net/caif/caif_socket.c
net/core/datagram.c
net/core/neighbour.c
net/core/netclassid_cgroup.c
net/core/netprio_cgroup.c
net/core/scm.c
net/core/sock.c
net/core/stream.c
net/dccp/ipv6.c
net/dccp/proto.c
net/decnet/af_decnet.c
net/dns_resolver/dns_query.c
net/hsr/hsr_device.c
net/ipv4/igmp.c
net/ipv4/ipmr.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ndisc.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/iucv/af_iucv.c
net/l2tp/l2tp_ip6.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh_pathtbl.c
net/mac80211/scan.c
net/nfc/llcp_sock.c
net/openvswitch/dp_notify.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/rds/connection.c
net/rds/send.c
net/rxrpc/ar-ack.c
net/rxrpc/ar-output.c
net/sched/sch_api.c
net/sched/sch_generic.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sctp/ipv6.c
net/sctp/socket.c
net/socket.c
net/sunrpc/xprtsock.c
net/tipc/link.c
net/tipc/socket.c
net/tipc/udp_media.c
net/unix/af_unix.c
scripts/link-vmlinux.sh
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/soc/codecs/arizona.c
sound/soc/codecs/es8328.c
sound/soc/codecs/nau8825.c
sound/soc/codecs/rl6231.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5670.h
sound/soc/codecs/rt5677.c
sound/soc/codecs/wm8960.c
sound/soc/codecs/wm8962.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl_sai.c
sound/soc/intel/Kconfig
sound/soc/intel/skylake/skl-topology.c
sound/soc/rockchip/rockchip_spdif.c
sound/soc/rockchip/rockchip_spdif.h
sound/soc/sh/rcar/gen.c
sound/soc/sh/rcar/src.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-ops.c
sound/soc/soc-topology.c
sound/soc/sti/uniperif_player.c
sound/soc/sti/uniperif_reader.c
sound/soc/sunxi/sun4i-codec.c
tools/testing/nvdimm/test/nfit.c
tools/virtio/linux/kernel.h
tools/virtio/linux/virtio.h
tools/virtio/linux/virtio_config.h

index f5a8ca29aff06e84d49e3c75caf125b35c660055..aeea50c84e921fb301fc7fe9fd2cd6f295c7139c 100644 (file)
@@ -8,6 +8,11 @@ Required properties:
 - phy-mode: See ethernet.txt file in the same directory
 - clocks: a pointer to the reference clock for this device.
 
+Optional properties:
+- tx-csum-limit: maximum MTU supported by the port that allows TX checksumming.
+  The value is given in bytes. If not specified, the default is 1600B for
+  "marvell,armada-370-neta" and 9800B for others.
+
 Example:
 
 ethernet@d0070000 {
@@ -15,6 +20,7 @@ ethernet@d0070000 {
        reg = <0xd0070000 0x2500>;
        interrupts = <8>;
        clocks = <&gate_clk 4>;
+       tx-csum-limit = <9800>;
        status = "okay";
        phy = <&phy0>;
        phy-mode = "rgmii-id";
index cba790b42f23d5980088588d01d97084cd8423d2..38df53f828e1ea9036957d93d3cf744164e2f3c1 100644 (file)
@@ -318,7 +318,7 @@ M:  Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
 W:     https://01.org/linux-acpi
 S:     Supported
-F:     drivers/acpi/video.c
+F:     drivers/acpi/acpi_video.c
 
 ACPI WMI DRIVER
 L:     platform-driver-x86@vger.kernel.org
@@ -1847,7 +1847,7 @@ S:        Supported
 F:     drivers/net/wireless/ath/ath6kl/
 
 WILOCITY WIL6210 WIRELESS DRIVER
-M:     Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
+M:     Maya Erez <qca_merez@qca.qualcomm.com>
 L:     linux-wireless@vger.kernel.org
 L:     wil6210@qca.qualcomm.com
 S:     Supported
@@ -8286,7 +8286,7 @@ F:        include/linux/delayacct.h
 F:     kernel/delayacct.c
 
 PERFORMANCE EVENTS SUBSYSTEM
-M:     Peter Zijlstra <a.p.zijlstra@chello.nl>
+M:     Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     Arnaldo Carvalho de Melo <acme@kernel.org>
 L:     linux-kernel@vger.kernel.org
@@ -9427,8 +9427,10 @@ F:       include/scsi/sg.h
 
 SCSI SUBSYSTEM
 M:     "James E.J. Bottomley" <JBottomley@odin.com>
-L:     linux-scsi@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
+M:     "Martin K. Petersen" <martin.petersen@oracle.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+L:     linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/
 F:     include/scsi/
@@ -10903,9 +10905,9 @@ S:      Maintained
 F:     drivers/media/tuners/tua9001*
 
 TULIP NETWORK DRIVERS
-M:     Grant Grundler <grundler@parisc-linux.org>
 L:     netdev@vger.kernel.org
-S:     Maintained
+L:     linux-parisc@vger.kernel.org
+S:     Orphan
 F:     drivers/net/ethernet/dec/tulip/
 
 TUN/TAP driver
index 904a1d65df302160853af36b76122c71870874b3..d644f6e92cf653be39a981d7ca6b021084c7eaa5 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
index c6a0e9d7f1a9bd0409b31bb4272bfbcb68f3b093..e8b7f67267723241730c8279902b82b8340d8118 100644 (file)
                                reg = <0x70000 0x4000>;
                                interrupts-extended = <&mpic 8>;
                                clocks = <&gateclk 4>;
+                               tx-csum-limit = <9800>;
                                status = "disabled";
                        };
 
index d6a53ef2350be81a090c07ab7d6df409d99c6a37..b162ad70effcfeacbc7a304d569b222717f495d7 100644 (file)
@@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 /* Stack must be multiples of 16B */
 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
 
+#define _STACK_SIZE \
+       (MAX_BPF_STACK \
+        + 4 /* extra for skb_copy_bits buffer */)
+
+#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
+
 static void build_prologue(struct jit_ctx *ctx)
 {
        const u8 r6 = bpf2a64[BPF_REG_6];
@@ -150,10 +156,6 @@ static void build_prologue(struct jit_ctx *ctx)
        const u8 rx = bpf2a64[BPF_REG_X];
        const u8 tmp1 = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
-       int stack_size = MAX_BPF_STACK;
-
-       stack_size += 4; /* extra for skb_copy_bits buffer */
-       stack_size = STACK_ALIGN(stack_size);
 
        /*
         * BPF prog stack layout
@@ -165,12 +167,13 @@ static void build_prologue(struct jit_ctx *ctx)
         *                        | ... | callee saved registers
         *                        +-----+
         *                        |     | x25/x26
-        * BPF fp register => -80:+-----+
+        * BPF fp register => -80:+-----+ <= (BPF_FP)
         *                        |     |
         *                        | ... | BPF prog stack
         *                        |     |
-        *                        |     |
-        * current A64_SP =>      +-----+
+        *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
+        *                        |RSVD | JIT scratchpad
+        * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
         *                        |     |
         *                        | ... | Function call stack
         *                        |     |
@@ -196,7 +199,7 @@ static void build_prologue(struct jit_ctx *ctx)
        emit(A64_MOV(1, fp, A64_SP), ctx);
 
        /* Set up function call stack */
-       emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+       emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
 
        /* Clear registers A and X */
        emit_a64_mov_i64(ra, 0, ctx);
@@ -213,13 +216,9 @@ static void build_epilogue(struct jit_ctx *ctx)
        const u8 fp = bpf2a64[BPF_REG_FP];
        const u8 tmp1 = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
-       int stack_size = MAX_BPF_STACK;
-
-       stack_size += 4; /* extra for skb_copy_bits buffer */
-       stack_size = STACK_ALIGN(stack_size);
 
        /* We're done with BPF stack */
-       emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
+       emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
 
        /* Restore fp (x25) and x26 */
        emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@ -591,7 +590,25 @@ emit_cond_jmp:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_DW:
-               goto notyet;
+               /* Load imm to a register then store it */
+               ctx->tmp_used = 1;
+               emit_a64_mov_i(1, tmp2, off, ctx);
+               emit_a64_mov_i(1, tmp, imm, ctx);
+               switch (BPF_SIZE(code)) {
+               case BPF_W:
+                       emit(A64_STR32(tmp, dst, tmp2), ctx);
+                       break;
+               case BPF_H:
+                       emit(A64_STRH(tmp, dst, tmp2), ctx);
+                       break;
+               case BPF_B:
+                       emit(A64_STRB(tmp, dst, tmp2), ctx);
+                       break;
+               case BPF_DW:
+                       emit(A64_STR64(tmp, dst, tmp2), ctx);
+                       break;
+               }
+               break;
 
        /* STX: *(size *)(dst + off) = src */
        case BPF_STX | BPF_MEM | BPF_W:
@@ -658,7 +675,7 @@ emit_cond_jmp:
                        return -EINVAL;
                }
                emit_a64_mov_i64(r3, size, ctx);
-               emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
+               emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
                emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
                emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
                emit(A64_MOV(1, A64_FP, A64_SP), ctx);
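For reference, a minimal standalone sketch (not kernel code) of what the new STACK_SIZE constant evaluates to: MAX_BPF_STACK is 512 bytes in this kernel, the JIT reserves 4 extra bytes as the skb_copy_bits() scratch buffer, and STACK_ALIGN rounds the sum up to the required 16-byte multiple, giving 528:

    #include <stdio.h>

    #define MAX_BPF_STACK   512                     /* bytes, per this kernel */
    #define STACK_ALIGN(sz) (((sz) + 15) & ~15)     /* round up to 16B multiple */
    #define _STACK_SIZE     (MAX_BPF_STACK + 4)     /* + skb_copy_bits buffer */
    #define STACK_SIZE      STACK_ALIGN(_STACK_SIZE)

    int main(void)
    {
            printf("STACK_SIZE = %d\n", STACK_SIZE);    /* prints 528 */
            return 0;
    }

Hoisting the computation into a constant also lets the new BPF_ST case and the load-helper call above use the same value without recomputing it in both prologue and epilogue.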
index 1e9c8b0bf48666cfba1ee19d8731eb71cf68a77e..170d786807c460eda342f4383dc379e5279a215d 100644 (file)
@@ -14,7 +14,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * ppc:
index 4434b54e1d87c4e10e705ff5b7641d4f52c9616c..78ae5552fdb89cca3c6a5ff4290a2581fbe95079 100644 (file)
@@ -1,6 +1,7 @@
 config MN10300
        def_bool y
        select HAVE_OPROFILE
+       select HAVE_UID16
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
        select HAVE_ARCH_TRACEHOOK
@@ -37,9 +38,6 @@ config HIGHMEM
 config NUMA
        def_bool n
 
-config UID16
-       def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
index 7cfd7f153966719c9201a66ff6043c3e48a47a93..4dca18347ee9a40949d55af11ae6adf07d05ca4b 100644 (file)
@@ -10,7 +10,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * ppc:
index b0da5aedb336c643baf9163c010c6c679ce6bbdc..3091267c5cc3d30dba854f152ff8d9bbc06c65a2 100644 (file)
@@ -9,7 +9,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/perf_event.h>
index bb509cee3b598807eeae1aca6056e7adaed59e19..8767060d70fb32eb2dfc0a7da1010233973d78ea 100644 (file)
@@ -21,7 +21,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  */
index 25ed4098640eecf5fc23ae5a8a494b7275badb4a..e3abe6f3156d3fbacca4003c072954619c7e1bb1 100644 (file)
@@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
 # The wrappers will select whether using "malloc" or the kernel allocator.
 LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
 
-LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
+LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
 
 # Used by link-vmlinux.sh which has special support for um link
 export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
index e697a4136707e13c609a5e51529f87631896b0c6..e9f8445861dc73639bb32742bb5f6fae327a8b4c 100644 (file)
@@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)
 
 char *split_if_spec(char *str, ...)
 {
-       char **arg, *end;
+       char **arg, *end, *ret = NULL;
        va_list ap;
 
        va_start(ap, str);
        while ((arg = va_arg(ap, char **)) != NULL) {
                if (*str == '\0')
-                       return NULL;
+                       goto out;
                end = strchr(str, ',');
                if (end != str)
                        *arg = str;
                if (end == NULL)
-                       return NULL;
+                       goto out;
                *end++ = '\0';
                str = end;
        }
+       ret = str;
+out:
        va_end(ap);
-       return str;
+       return ret;
 }
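The rewrite above exists so that va_end() is reached on every path, including the early exits that used to return NULL directly; leaving a va_start() unmatched is undefined behavior on some ABIs. A standalone sketch of the same single-exit pattern (hypothetical helper, not the driver code):

    #include <stdarg.h>
    #include <stddef.h>

    /* Funnel every return through one label so va_end() always runs. */
    static char *first_of(char *str, ...)
    {
            char **arg, *ret = NULL;
            va_list ap;

            va_start(ap, str);
            arg = va_arg(ap, char **);
            if (arg == NULL)
                    goto out;           /* early exit still reaches va_end() */
            *arg = str;
            ret = str;
    out:
            va_end(ap);
            return ret;
    }

    int main(void)
    {
            char buf[] = "eth0";
            char *dev;

            return first_of(buf, &dev, (char **)NULL) ? 0 : 1;
    }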
index 57acbd67d85dbd4051cd3c534ceff2a300ce9906..fc8be0e3a4ff879a2cf9fddc8e017dfa974ffc95 100644 (file)
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
        struct ksignal ksig;
        int handled_sig = 0;
 
-       while (get_signal(&ksig)) {
+       if (get_signal(&ksig)) {
                handled_sig = 1;
                /* Whee!  Actually deliver the signal.  */
                handle_signal(&ksig, regs);
index 0033e96c3f09c165e5745ca410048335de2106d7..9011a88353ded70ece09718a04349464ee8467ec 100644 (file)
@@ -23,7 +23,6 @@
 #include <stdarg.h>
 #include <linux/types.h>
 #include <linux/edd.h>
-#include <asm/boot.h>
 #include <asm/setup.h>
 #include "bitops.h"
 #include "ctype.h"
index aa8a96b052e30263d60afc6b81a60e387fc55773..95c7a818c0ed6929800f72fbb5d507fbc4539195 100644 (file)
@@ -19,6 +19,8 @@
 #include "video.h"
 #include "vesa.h"
 
+#include <uapi/asm/boot.h>
+
 /*
  * Common variables
  */
index 05111bb8d018e38c71ae278ee0a78ccff76e34e2..77780e386e9b224ef8ec5421af644abc65d1eebd 100644 (file)
@@ -13,6 +13,8 @@
  * Select video mode
  */
 
+#include <uapi/asm/boot.h>
+
 #include "boot.h"
 #include "video.h"
 #include "vesa.h"
index 53616ca0324440d6cba482c3855433b5a63b4ef9..a55697d19824727fda8306c566bb99cb4ba6d980 100644 (file)
@@ -509,6 +509,17 @@ END(irq_entries_start)
         * tracking that we're in kernel mode.
         */
        SWAPGS
+
+       /*
+        * We need to tell lockdep that IRQs are off.  We can't do this until
+        * we fix gsbase, and we should do it before enter_from_user_mode
+        * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
+        * the simplest way to handle it is to just call it twice if
+        * we enter from user mode.  There's no reason to optimize this since
+        * TRACE_IRQS_OFF is a no-op if lockdep is off.
+        */
+       TRACE_IRQS_OFF
+
 #ifdef CONFIG_CONTEXT_TRACKING
        call enter_from_user_mode
 #endif
@@ -1049,12 +1060,18 @@ ENTRY(error_entry)
        SWAPGS
 
 .Lerror_entry_from_usermode_after_swapgs:
+       /*
+        * We need to tell lockdep that IRQs are off.  We can't do this until
+        * we fix gsbase, and we should do it before enter_from_user_mode
+        * (which can take locks).
+        */
+       TRACE_IRQS_OFF
 #ifdef CONFIG_CONTEXT_TRACKING
        call enter_from_user_mode
 #endif
+       ret
 
 .Lerror_entry_done:
-
        TRACE_IRQS_OFF
        ret
 
index c5b7fb2774d08e17bad6966933e2d753c6de92ec..cc071c6f7d4da2847b3df2c4cbf7666edde5675b 100644 (file)
@@ -9,19 +9,21 @@
 #define PAGE_SIZE      (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK      (~(PAGE_SIZE-1))
 
+#define PMD_PAGE_SIZE          (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK          (~(PMD_PAGE_SIZE-1))
+
+#define PUD_PAGE_SIZE          (_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK          (~(PUD_PAGE_SIZE-1))
+
 #define __PHYSICAL_MASK                ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
 #define __VIRTUAL_MASK         ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
    virtual addresses are 32-bits but physical addresses are larger
    (ie, 32-bit PAE). */
 #define PHYSICAL_PAGE_MASK     (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-#define PMD_PAGE_SIZE          (_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK          (~(PMD_PAGE_SIZE-1))
-
-#define PUD_PAGE_SIZE          (_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK          (~(PUD_PAGE_SIZE-1))
+#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
 
 #define HPAGE_SHIFT            PMD_SHIFT
 #define HPAGE_SIZE             (_AC(1,UL) << HPAGE_SHIFT)
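The signed cast matters on 32-bit PAE, where long is 32 bits but physical addresses are wider: sign-extending the page mask preserves the high physical bits that plain zero extension would drop. A standalone illustration (not kernel code), using a 12-bit page shift and a 36-bit physical space:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* 32-bit PAE-like setup: 32-bit mask, 36-bit physical space. */
            uint32_t page_mask = ~((1U << 12) - 1);         /* 0xfffff000 */
            uint64_t phys_mask = (1ULL << 36) - 1;

            /* Zero-extending the mask drops physical bits 32..35... */
            uint64_t wrong = (uint64_t)page_mask & phys_mask;
            /* ...sign-extending (as PHYSICAL_*_PAGE_MASK does) keeps them. */
            uint64_t right = (uint64_t)(int64_t)(int32_t)page_mask & phys_mask;

            printf("%#llx vs %#llx\n",                  /* fffff000 vs ffffff000 */
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }

Moving the PMD/PUD definitions above __PHYSICAL_MASK is what allows the two new PHYSICAL_*_PAGE_MASK macros to sit alongside PHYSICAL_PAGE_MASK; with them in place, the pud/pmd flag masks in the next hunk reduce to the complement of the corresponding pfn mask.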
index dd5b0aa9dd2f93a01b554029ebc243aa56ae91d3..a471cadb9630e7b8f46340139efaf4f467a24aff 100644 (file)
@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 static inline pudval_t pud_pfn_mask(pud_t pud)
 {
        if (native_pud_val(pud) & _PAGE_PSE)
-               return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+               return PHYSICAL_PUD_PAGE_MASK;
        else
                return PTE_PFN_MASK;
 }
 
 static inline pudval_t pud_flags_mask(pud_t pud)
 {
-       if (native_pud_val(pud) & _PAGE_PSE)
-               return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
-       else
-               return ~PTE_PFN_MASK;
+       return ~pud_pfn_mask(pud);
 }
 
 static inline pudval_t pud_flags(pud_t pud)
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
 static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
 {
        if (native_pmd_val(pmd) & _PAGE_PSE)
-               return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+               return PHYSICAL_PMD_PAGE_MASK;
        else
                return PTE_PFN_MASK;
 }
 
 static inline pmdval_t pmd_flags_mask(pmd_t pmd)
 {
-       if (native_pmd_val(pmd) & _PAGE_PSE)
-               return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
-       else
-               return ~PTE_PFN_MASK;
+       return ~pmd_pfn_mask(pmd);
 }
 
 static inline pmdval_t pmd_flags(pmd_t pmd)
index 48d34d28f5a60543bc72471e2a1931fcf13dc04e..cd0fc0cc78bc34f1d378c844baf6007ce60c1797 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_PLATFORM_H
 #define _ASM_X86_PLATFORM_H
 
-#include <asm/pgtable_types.h>
 #include <asm/bootparam.h>
 
 struct mpc_bus;
index 7fc27f1cca586a1752d95fa73c612db03db78437..b3e94ef461fddcea5c6d5696eb012b6c9c89edc8 100644 (file)
@@ -698,3 +698,4 @@ int __init microcode_init(void)
        return error;
 
 }
+late_initcall(microcode_init);
index 4562cf070c279d5edeb0e18ae94c8bff94166dd8..2bf79d7c97dfb8848b1e7b060a66ddedb4605a1a 100644 (file)
@@ -5,7 +5,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
index 499f533dd3ccbd22bb84423649e031f2759080ca..d0e35ebb2adb1d34b526fcb04b2bc192c643fd55 100644 (file)
@@ -5,7 +5,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
-                         INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+                         INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
 
 
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
        u64 lbr_from[MAX_LBR_ENTRIES];
        u64 lbr_to[MAX_LBR_ENTRIES];
        u64 lbr_info[MAX_LBR_ENTRIES];
+       int tos;
        int lbr_callstack_users;
        int lbr_stack_state;
 };
index f63360be22387d4fb4cb30728f1834ee4cbd6228..e2a430021e46e71eb2904af74ae5a7d50653d048 100644 (file)
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-       INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+       INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
index 377e8f8ed39186ad4ef57b33264592ed8459a037..a316ca96f1b639d8a0f58f616c1c62d4da61bfdf 100644 (file)
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
        if (event->attach_state & PERF_ATTACH_TASK)
-               return perf_cgroup_from_task(event->hw.target);
+               return perf_cgroup_from_task(event->hw.target, event->ctx);
 
        return event->cgrp;
 }
index bfd0b717e944ce012b7a0abe19c89f5294d284e1..659f01e165d57520f33b09ba68818f72cd206a4d 100644 (file)
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
        }
 
        mask = x86_pmu.lbr_nr - 1;
-       tos = intel_pmu_lbr_tos();
+       tos = task_ctx->tos;
        for (i = 0; i < tos; i++) {
                lbr_idx = (tos - i) & mask;
                wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
+       wrmsrl(x86_pmu.lbr_tos, tos);
        task_ctx->lbr_stack_state = LBR_NONE;
 }
 
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
+       task_ctx->tos = tos;
        task_ctx->lbr_stack_state = LBR_VALID;
 }
 
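The underlying bug: saved LBR entries were written back relative to the current hardware top-of-stack (intel_pmu_lbr_tos()) instead of the TOS captured at save time, so after a context switch the ring index and the restored entries could disagree. Recording tos in x86_perf_task_context and writing it back on restore keeps them paired; a toy model of the idea (hypothetical names, not the MSR code):

    #include <stdio.h>

    #define NR 4                    /* toy stack depth (real LBRs have 8-32) */

    struct task_ctx { int from[NR]; int tos; };

    static int msr_from[NR];        /* stands in for the LBR FROM MSRs */
    static int msr_tos;             /* stands in for the LBR TOS MSR   */

    static void lbr_save(struct task_ctx *tc)
    {
            int i, mask = NR - 1, tos = msr_tos;

            for (i = 0; i < NR; i++)
                    tc->from[i] = msr_from[(tos - i) & mask];
            tc->tos = tos;          /* the fix: remember TOS at save time */
    }

    static void lbr_restore(const struct task_ctx *tc)
    {
            int i, mask = NR - 1, tos = tc->tos;    /* not the live msr_tos */

            for (i = 0; i < NR; i++)
                    msr_from[(tos - i) & mask] = tc->from[i];
            msr_tos = tos;          /* write TOS back so indices line up */
    }

    int main(void)
    {
            struct task_ctx tc;

            msr_tos = 2; msr_from[2] = 42;
            lbr_save(&tc);
            msr_tos = 0;            /* another task moved the hardware TOS */
            lbr_restore(&tc);
            printf("%d %d\n", msr_tos, msr_from[2]);    /* 2 42 */
            return 0;
    }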
index dc5fa6a1e8d640aa8fc407ee3035feb0a1778451..3512ba607361403e587f417cbce2775cdec428a1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * x86 specific code for irq_work
  *
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/kernel.h>
index 4f00b63d7ff33bea8f0016eaa8393d8cb6972fad..14415aff18136524cfa3d4c9b07bdde6ee0d6f54 100644 (file)
@@ -4,10 +4,22 @@
  */
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/ioport.h>
+
+static int found(u64 start, u64 end, void *data)
+{
+       return 1;
+}
 
 static __init int register_e820_pmem(void)
 {
+       char *pmem = "Persistent Memory (legacy)";
        struct platform_device *pdev;
+       int rc;
+
+       rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+       if (rc <= 0)
+               return 0;
 
        /*
         * See drivers/nvdimm/e820.c for the implementation, this is
index 29db25f9a745ee11d23867cc974438817a6e5b15..d2bbe343fda74a87307675b3a65704e2cc96769b 100644 (file)
@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
        if (efi_enabled(EFI_BOOT))
                efi_apply_memmap_quirks();
 #endif
-
-       microcode_init();
 }
 
 #ifdef CONFIG_X86_32
index b7ffb7c00075787532ceec10b6aed0028ea01b5c..cb6282c3638ffbd32bcb33663d8cefc17eac8a8e 100644 (file)
@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
        signal_setup_done(failed, ksig, stepping);
 }
 
-#ifdef CONFIG_X86_32
-#define NR_restart_syscall     __NR_restart_syscall
-#else /* !CONFIG_X86_32 */
-#define NR_restart_syscall     \
-       test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-#endif /* CONFIG_X86_32 */
+static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+       return __NR_restart_syscall;
+#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
+       return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
+               __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+}
 
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
                        break;
 
                case -ERESTART_RESTARTBLOCK:
-                       regs->ax = NR_restart_syscall;
+                       regs->ax = get_nr_restart_syscall(regs);
                        regs->ip -= 2;
                        break;
                }
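In the x32 case the restart number must keep __X32_SYSCALL_BIT (bit 30), which every x32 system-call number carries in orig_ax; OR-ing that bit back into __NR_restart_syscall makes the restarted call dispatch through the x32 path. A standalone sketch with the x86-64 ABI constants (the example orig_ax value is hypothetical):

    #include <stdio.h>

    #define __X32_SYSCALL_BIT       0x40000000UL    /* bit 30, x86-64 ABI */
    #define __NR_restart_syscall    219             /* native x86-64 number */

    int main(void)
    {
            unsigned long orig_ax = __X32_SYSCALL_BIT | 514; /* some x32 call */
            unsigned long nr = __NR_restart_syscall |
                               (orig_ax & __X32_SYSCALL_BIT);

            printf("restart nr = %#lx\n", nr);      /* 0x400000db */
            return 0;
    }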
index 892ee2e5ecbce417df506715f7b28d28c403ef91..fbabe4fcc7fbb71802c756289ac01df9b11599ec 100644 (file)
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
  */
 #define UDELAY_10MS_DEFAULT 10000
 
-static unsigned int init_udelay = INT_MAX;
+static unsigned int init_udelay = UINT_MAX;
 
 static int __init cpu_init_udelay(char *str)
 {
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
 static void __init smp_quirk_init_udelay(void)
 {
        /* if cmdline changed it from default, leave it alone */
-       if (init_udelay != INT_MAX)
+       if (init_udelay != UINT_MAX)
                return;
 
        /* if modern processor, use no delay */
        if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
-           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
                init_udelay = 0;
-
+               return;
+       }
        /* else, use legacy delay */
        init_udelay = UDELAY_10MS_DEFAULT;
 }
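Two things changed here: the "not set on the command line" sentinel now has the same type as the variable (init_udelay is unsigned int, hence UINT_MAX), and the added early return keeps the legacy 10ms default from overwriting the zero delay just chosen for modern CPUs. The resulting control flow, as a standalone sketch with a hypothetical helper:

    #include <limits.h>
    #include <stdio.h>

    static unsigned int init_udelay = UINT_MAX;     /* UINT_MAX means "unset" */

    /* Hypothetical stand-in for smp_quirk_init_udelay(). */
    static void quirk_init_udelay(int modern_cpu)
    {
            if (init_udelay != UINT_MAX)    /* cmdline set it: leave alone */
                    return;

            if (modern_cpu) {
                    init_udelay = 0;
                    return;                 /* the added early return */
            }

            init_udelay = 10000;            /* legacy 10ms default */
    }

    int main(void)
    {
            quirk_init_udelay(1);
            printf("init_udelay = %u\n", init_udelay);  /* 0, not 10000 */
            return 0;
    }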
index 83a1c643f9a50fd3fa0ffc041c40d9f4d25aa5b5..899c40f826dd9a5f10c57e0758153cdbb1d9f1a6 100644 (file)
@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
        struct kvm_run *kvm_run = vcpu->run;
        u32 exit_code = svm->vmcb->control.exit_code;
 
+       trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
+
        if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
                vcpu->arch.cr0 = svm->vmcb->save.cr0;
        if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-       trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
-
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_handle_nmi(&svm->vcpu);
 
index af823a388c1994ba244e3ef0098f1a578408101e..6b5605607849d42d15d096e0eb24c4db4814a241 100644 (file)
@@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
        u32 exit_reason = vmx->exit_reason;
        u32 vectoring_info = vmx->idt_vectoring_info;
 
+       trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
+
        /*
         * Flush logged GPAs PML buffer, this will make dirty_bitmap more
         * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        vmx->loaded_vmcs->launched = 1;
 
        vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-       trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
 
        /*
         * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
index eed32283d22cc0cafbff75e8f2a246887d4cbb5f..b84ba4b17757aed24d0385ae7211b80985b61eb6 100644 (file)
@@ -6515,6 +6515,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (req_immediate_exit)
                smp_send_reschedule(vcpu->cpu);
 
+       trace_kvm_entry(vcpu->vcpu_id);
+       wait_lapic_expire(vcpu);
        __kvm_guest_enter();
 
        if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6527,8 +6529,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
 
-       trace_kvm_entry(vcpu->vcpu_id);
-       wait_lapic_expire(vcpu);
        kvm_x86_ops->run(vcpu);
 
        /*
index 1202d5ca2fb582d1a71cad1c85f65b61b907c441..b2fd67da1701433d9168ebf1d0a908330544bd7d 100644 (file)
@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
        switch (type) {
        case REG_TYPE_RM:
                regno = X86_MODRM_RM(insn->modrm.value);
-               if (X86_REX_B(insn->rex_prefix.value) == 1)
+               if (X86_REX_B(insn->rex_prefix.value))
                        regno += 8;
                break;
 
        case REG_TYPE_INDEX:
                regno = X86_SIB_INDEX(insn->sib.value);
-               if (X86_REX_X(insn->rex_prefix.value) == 1)
+               if (X86_REX_X(insn->rex_prefix.value))
                        regno += 8;
                break;
 
        case REG_TYPE_BASE:
                regno = X86_SIB_BASE(insn->sib.value);
-               if (X86_REX_B(insn->rex_prefix.value) == 1)
+               if (X86_REX_B(insn->rex_prefix.value))
                        regno += 8;
                break;
 
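The "== 1" tests could never fire for REX.X: assuming the in-place bit masks used by the x86 instruction decoder's accessors (X86_REX_B keeps bit 0 in place, X86_REX_X bit 1), X86_REX_X() yields 0 or 2, so comparing against 1 silently ignored the extension bit, while testing for non-zero works for all three. A minimal illustration:

    #include <stdio.h>

    #define X86_REX_X(rex)  ((rex) & 2)     /* bit kept in place, not shifted */
    #define X86_REX_B(rex)  ((rex) & 1)

    int main(void)
    {
            unsigned char rex = 0x42;       /* REX prefix with X set */

            printf("X86_REX_X == 1 -> %d (buggy test)\n", X86_REX_X(rex) == 1);
            printf("X86_REX_X != 0 -> %d (fixed test)\n", X86_REX_X(rex) != 0);
            return 0;
    }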
index 7bcf06a7cd12069e9e4f5c2f13066e1551ba8a27..6eb3c8af96e23678f16378858d05d435e12199fd 100644 (file)
@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
        if (!found)
                pci_add_resource(resources, &info->busn);
 
-       list_for_each_entry(root_res, &info->resources, list) {
-               struct resource *res;
-               struct resource *root;
+       list_for_each_entry(root_res, &info->resources, list)
+               pci_add_resource(resources, &root_res->res);
 
-               res = &root_res->res;
-               pci_add_resource(resources, res);
-               if (res->flags & IORESOURCE_IO)
-                       root = &ioport_resource;
-               else
-                       root = &iomem_resource;
-               insert_resource(root, res);
-       }
        return;
 
 default_resources:
index 06934a8a48724dc5fc6600b49178e9573e7558c5..e5f854ce2d72404237067031a09297c8f0d3f424 100644 (file)
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
                if (err)
                        return 1;
 
-               err = convert_fxsr_from_user(&fpx, sc.fpstate);
+               err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
                if (err)
                        return 1;
 
@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
        {
                struct user_i387_struct fp;
 
-               err = copy_from_user(&fp, sc.fpstate,
+               err = copy_from_user(&fp, (void *)sc.fpstate,
                                     sizeof(struct user_i387_struct));
                if (err)
                        return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
 #endif
 #undef PUTREG
        sc.oldmask = mask;
-       sc.fpstate = to_fp;
+       sc.fpstate = (unsigned long)to_fp;
 
        err = copy_to_user(to, &sc, sizeof(struct sigcontext));
        if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
        struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
        sigset_t set;
        struct sigcontext __user *sc = &frame->sc;
-       unsigned long __user *oldmask = &sc->oldmask;
-       unsigned long __user *extramask = frame->extramask;
        int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
 
-       if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
-           copy_from_user(&set.sig[1], extramask, sig_size))
+       if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
+           copy_from_user(&set.sig[1], frame->extramask, sig_size))
                goto segfault;
 
        set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 {
        struct rt_sigframe __user *frame;
        int err = 0, sig = ksig->sig;
+       unsigned long fp_to;
 
        frame = (struct rt_sigframe __user *)
                round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
        err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
        err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
                               set->sig[0]);
-       err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+
+       fp_to = (unsigned long)&frame->fpstate;
+
+       err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
        if (sizeof(*set) == 16) {
                err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
                err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
index 5bcdfc10c23a6340367c4b9781496a49b5c81efe..5a37188b559fba8feb1da48b00d5290c3a6a09b6 100644 (file)
@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkcg_can_attach(struct cgroup_subsys_state *css,
-                           struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_taskset *tset)
 {
        struct task_struct *task;
+       struct cgroup_subsys_state *dst_css;
        struct io_context *ioc;
        int ret = 0;
 
        /* task_lock() is needed to avoid races with exit_io_context() */
-       cgroup_taskset_for_each(task, tset) {
+       cgroup_taskset_for_each(task, dst_css, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
index 5131993b23a1a2b35670adcbd4ab7d569a29a3d2..a0af4043dda2bbe953b3550c07ac40f88148958b 100644 (file)
@@ -2114,7 +2114,8 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
 EXPORT_SYMBOL(submit_bio);
 
 /**
- * blk_rq_check_limits - Helper function to check a request for the queue limit
+ * blk_cloned_rq_check_limits - Helper function to check a cloned request
+ *                              for the new queue limits
  * @q:  the queue
  * @rq: the request being checked
  *
@@ -2125,20 +2126,13 @@ EXPORT_SYMBOL(submit_bio);
  *    after it is inserted to @q, it should be checked against @q before
  *    the insertion using this generic function.
  *
- *    This function should also be useful for request stacking drivers
- *    in some cases below, so export this function.
  *    Request stacking drivers like request-based dm may change the queue
- *    limits while requests are in the queue (e.g. dm's table swapping).
- *    Such request stacking drivers should check those requests against
- *    the new queue limits again when they dispatch those requests,
- *    although such checkings are also done against the old queue limits
- *    when submitting requests.
+ *    limits when retrying requests on other queues. Those requests need
+ *    to be checked against the new queue limits again during dispatch.
  */
-int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+static int blk_cloned_rq_check_limits(struct request_queue *q,
+                                     struct request *rq)
 {
-       if (!rq_mergeable(rq))
-               return 0;
-
        if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
@@ -2158,7 +2152,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(blk_rq_check_limits);
 
 /**
  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
@@ -2170,7 +2163,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
        unsigned long flags;
        int where = ELEVATOR_INSERT_BACK;
 
-       if (blk_rq_check_limits(q, rq))
+       if (blk_cloned_rq_check_limits(q, rq))
                return -EIO;
 
        if (rq->rq_disk &&
index 41a55ba0d78e8a97b5707a9194cce3b6ebd1c811..e01405a3e8b3f51ce0424a844fadb3304bda5e44 100644 (file)
@@ -103,6 +103,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
                        bvprv = bv;
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;
+
+                       if (nsegs == 1 && seg_size > front_seg_size)
+                               front_seg_size = seg_size;
                        continue;
                }
 new_segment:
index 7d8f129a1516b408d8ebd827e65ffd6d688b2df8..dd49735839789167d427dc8b64da71c6b21f08f5 100644 (file)
@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-       lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+       lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
+               BLK_SAFE_MAX_SECTORS;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
+       lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
- * @limits: the queue limits
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    the device driver based upon the capabilities of the I/O
  *    controller.
  *
+ *    max_dev_sectors is a hard limit imposed by the storage device for
+ *    READ/WRITE requests. It is set by the disk driver.
+ *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
+       struct queue_limits *limits = &q->limits;
+       unsigned int max_sectors;
+
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
        }
 
        limits->max_hw_sectors = max_hw_sectors;
-       limits->max_sectors = min_t(unsigned int, max_hw_sectors,
-                                   BLK_DEF_MAX_SECTORS);
-}
-EXPORT_SYMBOL(blk_limits_max_hw_sectors);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
- * @max_hw_sectors:  max hardware sectors in the usual 512b unit
- *
- * Description:
- *    See description for blk_limits_max_hw_sectors().
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
-       blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+       max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+       max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
+       limits->max_sectors = max_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+       t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
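Taken together, blk_queue_max_hw_sectors() now clamps the soft limit by both the controller and the device: max_sectors = min(min_not_zero(max_hw_sectors, max_dev_sectors), BLK_DEF_MAX_SECTORS), and blk_stack_limits() propagates max_dev_sectors the same way as the other limits. A standalone sketch with example values (BLK_DEF_MAX_SECTORS is 2560 in this kernel series):

    #include <stdio.h>

    #define BLK_DEF_MAX_SECTORS 2560u       /* block-layer default soft cap */

    static unsigned int min_not_zero(unsigned int a, unsigned int b)
    {
            if (a == 0) return b;
            if (b == 0) return a;
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int max_hw_sectors  = 8192;    /* controller limit (example) */
            unsigned int max_dev_sectors = 4096;    /* device limit (example) */
            unsigned int max_sectors;

            max_sectors = min_not_zero(max_hw_sectors, max_dev_sectors);
            if (max_sectors > BLK_DEF_MAX_SECTORS)
                    max_sectors = BLK_DEF_MAX_SECTORS;

            printf("max_sectors = %u\n", max_sectors);  /* 2560 */
            return 0;
    }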
index 565b8dac578297edf327e7451dedfe80a75e5751..e140cc487ce11349ff1917e4e866eccf0a18106c 100644 (file)
@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
        if (ret < 0)
                return ret;
 
+       max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
+                                        q->limits.max_dev_sectors >> 1);
+
        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;
 
index 3b030157ec85c45faedd520b6993cd440254d763..746935a5973ca6c76b8f66cfff8d58ce50566994 100644 (file)
@@ -397,7 +397,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
        struct hd_struct *part;
        int res;
 
-       if (bdev->bd_part_count)
+       if (bdev->bd_part_count || bdev->bd_super)
                return -EBUSY;
        res = invalidate_partition(disk, 0);
        if (res)
index 0aa6fdfb448a8c4081e06aa9dcb041433dc280a5..6d4d4569447ee080ef44eb7c8c17d782bec23103 100644 (file)
@@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
        if (flags & MSG_DONTWAIT)
                return -EAGAIN;
 
-       set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+       sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
        for (;;) {
                if (signal_pending(current))
@@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
        }
        finish_wait(sk_sleep(sk), &wait);
 
-       clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
        return err;
 }
index af31a0ee4057370593536cb9f8cc0a4343ab91b8..ca9efe17db1ac4e9e2806528ea28d2d87a954b8f 100644 (file)
@@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
        if (flags & MSG_DONTWAIT)
                return -EAGAIN;
 
-       set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+       sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        for (;;) {
                if (signal_pending(current))
@@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
                return -EAGAIN;
        }
 
-       set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+       sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
        for (;;) {
                if (signal_pending(current))
@@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
        }
        finish_wait(sk_sleep(sk), &wait);
 
-       clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
        return err;
 }
index 25dbb76c02ccb0fd1d9b56d9956acb115d2e8419..5eef4cb4f70e6995f2d623268d1f8cd2daab63c2 100644 (file)
@@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED
        bool
 
 config ACPI_DEBUGGER
-       bool "In-kernel debugger (EXPERIMENTAL)"
+       bool "AML debugger interface (EXPERIMENTAL)"
        select ACPI_DEBUG
        help
-         Enable in-kernel debugging facilities: statistics, internal
+         Enable in-kernel debugging of AML facilities: statistics, internal
          object dump, single step control method execution.
          This is still under development, currently enabling this only
          results in the compilation of the ACPICA debugger files.
index f7dab53b352ae0f70838d622ba32da62eb381d83..e7ed39bab97d5d0a97d03919bd615eb5fba4105e 100644 (file)
@@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_system_address *spa)
 {
+       size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
        struct device *dev = acpi_desc->dev;
        struct nfit_spa *nfit_spa;
 
        list_for_each_entry(nfit_spa, &prev->spas, list) {
-               if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+               if (memcmp(nfit_spa->spa, spa, length) == 0) {
                        list_move_tail(&nfit_spa->list, &acpi_desc->spas);
                        return true;
                }
@@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_memory_map *memdev)
 {
+       size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
        struct device *dev = acpi_desc->dev;
        struct nfit_memdev *nfit_memdev;
 
        list_for_each_entry(nfit_memdev, &prev->memdevs, list)
-               if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
+               if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
                        list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
                        return true;
                }
@@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_control_region *dcr)
 {
+       size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
        struct device *dev = acpi_desc->dev;
        struct nfit_dcr *nfit_dcr;
 
        list_for_each_entry(nfit_dcr, &prev->dcrs, list)
-               if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) {
+               if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
                        list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
                        return true;
                }
@@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_data_region *bdw)
 {
+       size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
        struct device *dev = acpi_desc->dev;
        struct nfit_bdw *nfit_bdw;
 
        list_for_each_entry(nfit_bdw, &prev->bdws, list)
-               if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
+               if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
                        list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
                        return true;
                }
@@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_interleave *idt)
 {
+       size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
        struct device *dev = acpi_desc->dev;
        struct nfit_idt *nfit_idt;
 
        list_for_each_entry(nfit_idt, &prev->idts, list)
-               if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) {
+               if (memcmp(nfit_idt->idt, idt, length) == 0) {
                        list_move_tail(&nfit_idt->list, &acpi_desc->idts);
                        return true;
                }
@@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_flush_address *flush)
 {
+       size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
        struct device *dev = acpi_desc->dev;
        struct nfit_flush *nfit_flush;
 
        list_for_each_entry(nfit_flush, &prev->flushes, list)
-               if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) {
+               if (memcmp(nfit_flush->flush, flush, length) == 0) {
                        list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
                        return true;
                }
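
Each add_* helper above now bounds its duplicate check by the length the firmware actually reported. min_t() (from <linux/kernel.h>) casts both operands to the named type before taking the minimum, so a sub-table shorter than the structure Linux defines can no longer send memcmp() past the end of the firmware buffer:

    size_t length = min_t(size_t, sizeof(*spa), spa->header.length);

    /* compare only the bytes both sides are guaranteed to have */
    if (memcmp(nfit_spa->spa, spa, length) == 0)
            reuse_existing(nfit_spa);  /* hypothetical stand-in for the list_move_tail() above */
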
@@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev,
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-       return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
+       return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
 }
 static DEVICE_ATTR_RO(revision);
 
@@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
 
        data = (u8 *) acpi_desc->nfit;
        end = data + sz;
-       data += sizeof(struct acpi_table_nfit);
        while (!IS_ERR_OR_NULL(data))
                data = add_table(acpi_desc, &prev, data, end);
 
@@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev)
                return PTR_ERR(acpi_desc);
        }
 
-       acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
+       /*
+        * Save the acpi header for later and then skip it,
+        * making nfit point to the first nfit table header.
+        */
+       acpi_desc->acpi_header = *tbl;
+       acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
+       sz -= sizeof(struct acpi_table_nfit);
 
        /* Evaluate _FIT and override with that if present */
        status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
        if (ACPI_SUCCESS(status) && buf.length > 0) {
-               acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-               sz = buf.length;
+               union acpi_object *obj;
+               /*
+                * Adjust for the acpi_object header of the _FIT
+                */
+               obj = buf.pointer;
+               if (obj->type == ACPI_TYPE_BUFFER) {
+                       acpi_desc->nfit =
+                               (struct acpi_nfit_header *)obj->buffer.pointer;
+                       sz = obj->buffer.length;
+               } else
+                       dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
+                                __func__, (int) obj->type);
        }
 
        rc = acpi_nfit_init(acpi_desc, sz);
@@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 {
        struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-       struct acpi_table_nfit *nfit_saved;
+       struct acpi_nfit_header *nfit_saved;
+       union acpi_object *obj;
        struct device *dev = &adev->dev;
        acpi_status status;
        int ret;
@@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
        }
 
        nfit_saved = acpi_desc->nfit;
-       acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-       ret = acpi_nfit_init(acpi_desc, buf.length);
-       if (!ret) {
-               /* Merge failed, restore old nfit, and exit */
-               acpi_desc->nfit = nfit_saved;
-               dev_err(dev, "failed to merge updated NFIT\n");
+       obj = buf.pointer;
+       if (obj->type == ACPI_TYPE_BUFFER) {
+               acpi_desc->nfit =
+                       (struct acpi_nfit_header *)obj->buffer.pointer;
+               ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
+               if (ret) {
+                       /* Merge failed, restore old nfit, and exit */
+                       acpi_desc->nfit = nfit_saved;
+                       dev_err(dev, "failed to merge updated NFIT\n");
+               }
+       } else {
+               /* Bad _FIT, restore old nfit */
+               dev_err(dev, "Invalid _FIT\n");
        }
        kfree(buf.pointer);
 
index 2ea5c0797c8f4575c090a34352ceba14d9a5af40..3d549a3836590bb9dd55f6dd14dd8619698597ab 100644 (file)
@@ -96,7 +96,8 @@ struct nfit_mem {
 
 struct acpi_nfit_desc {
        struct nvdimm_bus_descriptor nd_desc;
-       struct acpi_table_nfit *nfit;
+       struct acpi_table_header acpi_header;
+       struct acpi_nfit_header *nfit;
        struct mutex spa_map_mutex;
        struct mutex init_mutex;
        struct list_head spa_maps;
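
The layout these nfit changes rely on: ACPICA's struct acpi_table_nfit is just the common ACPI header plus a reserved u32, with the NFIT sub-tables following immediately. acpi_desc->nfit therefore now points at the first sub-table (the saved acpi_header keeps revision_show() working), and a _FIT method hands back the same sub-table stream wrapped in an ACPI_TYPE_BUFFER object. Condensed from the hunks above:

    /* static table: skip the ACPI header (and reserved word) */
    acpi_desc->acpi_header = *tbl;                 /* kept for revision_show() */
    acpi_desc->nfit = (void *)tbl + sizeof(struct acpi_table_nfit);
    sz -= sizeof(struct acpi_table_nfit);

    /* _FIT: the buffer payload already starts at the first sub-table */
    if (obj->type == ACPI_TYPE_BUFFER) {
            acpi_desc->nfit = (struct acpi_nfit_header *)obj->buffer.pointer;
            sz = obj->buffer.length;
    }
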
index 850d7bf0c873fb64af77ada90e971a733527e2bd..ae3fe4e642035b2d51b2e3f6c4d93e68a6cb1bcc 100644 (file)
@@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
                else
                        continue;
 
+               /*
+                * Some legacy x86 host bridge drivers use iomem_resource and
+                * ioport_resource as default resource pool, skip it.
+                */
+               if (res == root)
+                       continue;
+
                conflict = insert_resource_conflict(root, res);
                if (conflict) {
                        dev_info(&info->bridge->dev,
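
The new check covers legacy x86 host-bridge drivers that hand back the global iomem_resource/ioport_resource as their resource pool: inserting a root resource into itself cannot succeed, so those entries are simply skipped. Condensed:

    if (res == root)        /* res may literally be &iomem_resource or &ioport_resource */
            continue;

    conflict = insert_resource_conflict(root, res);
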
index ff02bb4218fcaae3bbf60821e6d41f81b5a0e40d..cdfbcc54821fd6ea3a5dd6a11c969f12055c7e24 100644 (file)
@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
        { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
        { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
-       { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
-       { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
-       { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
-       { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
-       { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
-       { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
-       { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
-       { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
-       { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
-       { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
        { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
        { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
        { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
        { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
        { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
+       { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
+       { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index 8490d37aee2a466809c2634e51e8b96386e2eecb..f7a7fa81740e8f7e7f1daa0c6d6e48d344496ceb 100644 (file)
@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
        writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
 {
        return ahci_platform_suspend_host(&pdev->dev);
@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
 
        return ahci_platform_resume_host(&pdev->dev);
 }
+#else
+#define ahci_mvebu_suspend NULL
+#define ahci_mvebu_resume NULL
+#endif
 
 static const struct ata_port_info ahci_mvebu_port_info = {
        .flags     = AHCI_FLAG_COMMON,
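
The #ifdef above fixes builds without CONFIG_PM_SLEEP, where the ahci_platform suspend/resume helpers this driver calls are presumably not compiled in: the callbacks drop out and their names become NULL, so the driver structure's unconditional .suspend/.resume initializers stay valid. The same idiom, sketched with hypothetical names:

    #ifdef CONFIG_PM_SLEEP
    static int foo_suspend(struct platform_device *pdev, pm_message_t state);
    static int foo_resume(struct platform_device *pdev);
    #else
    #define foo_suspend NULL    /* the core treats NULL as "no callback" */
    #define foo_resume NULL
    #endif

    static struct platform_driver foo_driver = {
            .suspend = foo_suspend,
            .resume  = foo_resume,
            /* ... */
    };
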
index 096064cd6c52b1b0f72bf710d1a56893bd6bff9c..4665512dae44d99e9a5af194812094498d5888cf 100644 (file)
@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
        ata_tf_to_fis(tf, pmp, is_cmd, fis);
        ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
 
+       /* set port value for softreset of Port Multiplier */
+       if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
+               tmp = readl(port_mmio + PORT_FBS);
+               tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+               tmp |= pmp << PORT_FBS_DEV_OFFSET;
+               writel(tmp, port_mmio + PORT_FBS);
+               pp->fbs_last_dev = pmp;
+       }
+
        /* issue & wait */
        writel(1, port_mmio + PORT_CMD_ISSUE);
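
With FIS-based switching enabled, the device field of PORT_FBS selects which port-multiplier port the next host-to-device FIS addresses, so it has to be reprogrammed before a softreset whenever the target port changed; fbs_last_dev caches the value to avoid redundant register writes. The new block, annotated:

    if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
            tmp = readl(port_mmio + PORT_FBS);
            tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC); /* clear device field */
            tmp |= pmp << PORT_FBS_DEV_OFFSET;          /* address the target PMP port */
            writel(tmp, port_mmio + PORT_FBS);
            pp->fbs_last_dev = pmp;                     /* cache to skip rewrites */
    }
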
 
index cb0508af1459ac43f4aa26f1a16d94134bd9d0bc..961acc788f4490cea48abb4e215cb144d8fc5e93 100644 (file)
@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
                               u8 page, void *buf, unsigned int sectors)
 {
+       unsigned long ap_flags = dev->link->ap->flags;
        struct ata_taskfile tf;
        unsigned int err_mask;
        bool dma = false;
 
        DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
 
+       /*
+        * Return error without actually issuing the command on controllers
+        * which e.g. lockup on a read log page.
+        */
+       if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
+               return AC_ERR_DEV;
+
 retry:
        ata_tf_init(dev, &tf);
        if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
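
ata_read_log_page() now fails fast with AC_ERR_DEV when the port sets ATA_FLAG_NO_LOG_PAGE, for controllers that lock up if a READ LOG page is ever issued; the sata_fsl hunk below is the first user. A driver opts out through its port flags, sketched with hypothetical names:

    static const struct ata_port_info foo_port_info = {
            /* ATA_FLAG_NO_LOG_PAGE: never issue READ LOG PAGE to this HBA */
            .flags    = ATA_FLAG_SATA | ATA_FLAG_NO_LOG_PAGE,
            .pio_mask = ATA_PIO4,
            .port_ops = &foo_ops,   /* hypothetical */
    };
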
index 5389579c51204cf336f2689052ccf29c48b8af98..a723ae92978310f64e65eeef44509337a027c8a5 100644 (file)
@@ -45,7 +45,8 @@ enum {
        SATA_FSL_MAX_PRD_DIRECT = 16,   /* Direct PRDT entries */
 
        SATA_FSL_HOST_FLAGS     = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
-                               ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
+                                  ATA_FLAG_PMP | ATA_FLAG_NCQ |
+                                  ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
 
        SATA_FSL_MAX_CMDS       = SATA_FSL_QUEUE_DEPTH,
        SATA_FSL_CMD_HDR_SIZE   = 16,   /* 4 DWORDS */
index dea6edcbf145c3d1eaf45d8265ae7f971d6c808c..29bcff086bcedd548f90a15fd67829c7b21a4de9 100644 (file)
@@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
        unsigned int n, quirks = 0;
        unsigned char model_num[ATA_ID_PROD_LEN + 1];
 
+       /* This controller doesn't support trim */
+       dev->horkage |= ATA_HORKAGE_NOTRIM;
+
        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
 
        for (n = 0; sil_blacklist[n].product; n++)
index e03b1ad25a906334078490786dbd1406ac9ca76b..167418e73445a4f69b372f9205f65ca43f525c9d 100644 (file)
@@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev)
        }
 
        pd = of_genpd_get_from_provider(&pd_args);
+       of_node_put(pd_args.np);
        if (IS_ERR(pd)) {
                dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
                        __func__, PTR_ERR(pd));
-               of_node_put(dev->of_node);
                return -EPROBE_DEFER;
        }
 
@@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev)
        if (ret < 0) {
                dev_err(dev, "failed to add to PM domain %s: %d",
                        pd->name, ret);
-               of_node_put(dev->of_node);
                goto out;
        }
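
The genpd fix pairs the OF reference correctly: pd_args.np is returned by of_parse_phandle_with_args() with a reference held, which must be dropped exactly once after the provider lookup; the removed calls instead dropped references on dev->of_node that this function never took. The intended pairing:

    /* takes a reference on pd_args.np */
    ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
                                     "#power-domain-cells", 0, &pd_args);
    if (ret < 0)
            return ret;

    pd = of_genpd_get_from_provider(&pd_args);
    of_node_put(pd_args.np);        /* release it exactly once, success or failure */
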
 
index e60dd12e23aaee75a2a0d90ba0ba720c28c769e7..1e937ac5f45661b6e2af770e1ab9633e91ad7bd6 100644 (file)
@@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
                struct gpd_timing_data *td;
                s64 constraint_ns;
 
-               if (!pdd->dev->driver)
-                       continue;
-
                /*
                 * Check if the device is allowed to be off long enough for the
                 * domain to turn off and on (that's how much time it will
index 5c8ba5484d86b14edaaf20cfd215591950826a76..0c3940ec5e62c070b393a5f187ac22674e262c3b 100644 (file)
@@ -18,6 +18,7 @@ struct nullb_cmd {
        struct bio *bio;
        unsigned int tag;
        struct nullb_queue *nq;
+       struct hrtimer timer;
 };
 
 struct nullb_queue {
@@ -49,17 +50,6 @@ static int null_major;
 static int nullb_indexes;
 static struct kmem_cache *ppa_cache;
 
-struct completion_queue {
-       struct llist_head list;
-       struct hrtimer timer;
-};
-
-/*
- * These are per-cpu for now, they will need to be configured by the
- * complete_queues parameter and appropriately mapped.
- */
-static DEFINE_PER_CPU(struct completion_queue, completion_queues);
-
 enum {
        NULL_IRQ_NONE           = 0,
        NULL_IRQ_SOFTIRQ        = 1,
@@ -142,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
 device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
 
-static int completion_nsec = 10000;
-module_param(completion_nsec, int, S_IRUGO);
+static unsigned long completion_nsec = 10000;
+module_param(completion_nsec, ulong, S_IRUGO);
 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
 
 static int hw_queue_depth = 64;
@@ -180,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
        put_tag(cmd->nq, cmd->tag);
 }
 
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
+
 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
 {
        struct nullb_cmd *cmd;
@@ -190,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
                cmd = &nq->cmds[tag];
                cmd->tag = tag;
                cmd->nq = nq;
+               if (irqmode == NULL_IRQ_TIMER) {
+                       hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
+                                    HRTIMER_MODE_REL);
+                       cmd->timer.function = null_cmd_timer_expired;
+               }
                return cmd;
        }
 
@@ -220,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
 
 static void end_cmd(struct nullb_cmd *cmd)
 {
+       struct request_queue *q = NULL;
+
        switch (queue_mode)  {
        case NULL_Q_MQ:
                blk_mq_end_request(cmd->rq, 0);
@@ -230,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd)
                break;
        case NULL_Q_BIO:
                bio_endio(cmd->bio);
-               break;
+               goto free_cmd;
        }
 
+       if (cmd->rq)
+               q = cmd->rq->q;
+
+       /* Restart queue if needed, as we are freeing a tag */
+       if (q && !q->mq_ops && blk_queue_stopped(q)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(q->queue_lock, flags);
+               if (blk_queue_stopped(q))
+                       blk_start_queue(q);
+               spin_unlock_irqrestore(q->queue_lock, flags);
+       }
+free_cmd:
        free_cmd(cmd);
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
 {
-       struct completion_queue *cq;
-       struct llist_node *entry;
-       struct nullb_cmd *cmd;
-
-       cq = &per_cpu(completion_queues, smp_processor_id());
-
-       while ((entry = llist_del_all(&cq->list)) != NULL) {
-               entry = llist_reverse_order(entry);
-               do {
-                       struct request_queue *q = NULL;
-
-                       cmd = container_of(entry, struct nullb_cmd, ll_list);
-                       entry = entry->next;
-                       if (cmd->rq)
-                               q = cmd->rq->q;
-                       end_cmd(cmd);
-
-                       if (q && !q->mq_ops && blk_queue_stopped(q)) {
-                               spin_lock(q->queue_lock);
-                               if (blk_queue_stopped(q))
-                                       blk_start_queue(q);
-                               spin_unlock(q->queue_lock);
-                       }
-               } while (entry);
-       }
+       end_cmd(container_of(timer, struct nullb_cmd, timer));
 
        return HRTIMER_NORESTART;
 }
 
 static void null_cmd_end_timer(struct nullb_cmd *cmd)
 {
-       struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
-
-       cmd->ll_list.next = NULL;
-       if (llist_add(&cmd->ll_list, &cq->list)) {
-               ktime_t kt = ktime_set(0, completion_nsec);
-
-               hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
-       }
+       ktime_t kt = ktime_set(0, completion_nsec);
 
-       put_cpu();
+       hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
 }
 
 static void null_softirq_done_fn(struct request *rq)
@@ -376,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 
+       if (irqmode == NULL_IRQ_TIMER) {
+               hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+               cmd->timer.function = null_cmd_timer_expired;
+       }
        cmd->rq = bd->rq;
        cmd->nq = hctx->driver_data;
 
@@ -813,19 +798,6 @@ static int __init null_init(void)
 
        mutex_init(&lock);
 
-       /* Initialize a separate list for each CPU for issuing softirqs */
-       for_each_possible_cpu(i) {
-               struct completion_queue *cq = &per_cpu(completion_queues, i);
-
-               init_llist_head(&cq->list);
-
-               if (irqmode != NULL_IRQ_TIMER)
-                       continue;
-
-               hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-               cq->timer.function = null_cmd_timer_expired;
-       }
-
        null_major = register_blkdev(0, "nullb");
        if (null_major < 0)
                return null_major;
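
The null_blk rework replaces one shared per-cpu hrtimer draining an llist of pending completions with a timer embedded in each command, so every command simply completes completion_nsec after being armed and the drain loop disappears. The embedded-timer idiom, condensed from the hunks above:

    static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
    {
            /* container_of() maps the embedded timer back to its command */
            end_cmd(container_of(timer, struct nullb_cmd, timer));
            return HRTIMER_NORESTART;
    }

    static void null_cmd_end_timer(struct nullb_cmd *cmd)
    {
            /* arm this command's own timer: no shared state, no batching */
            hrtimer_start(&cmd->timer, ktime_set(0, completion_nsec),
                          HRTIMER_MODE_REL);
    }
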
index 235708c7c46eee709acb110acb95a28d85ebc946..81ea69fee7ca183313b8e8322833062262279187 100644 (file)
@@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work)
                goto err_rq;
        }
        img_request->rq = rq;
+       snapc = NULL; /* img_request consumes a ref */
 
        if (op_type == OBJ_OP_DISCARD)
                result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
index 55fe9020459f2c4bdcec710a90220b2ea32e54db..4cc72fa017c7bbb02c4027e3bf44bc4d102d3849 100644 (file)
@@ -1230,14 +1230,14 @@ static int smi_start_processing(void       *send_info,
 
        new_smi->intf = intf;
 
-       /* Try to claim any interrupts. */
-       if (new_smi->irq_setup)
-               new_smi->irq_setup(new_smi);
-
        /* Set up the timer that drives the interface. */
        setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
+       /* Try to claim any interrupts. */
+       if (new_smi->irq_setup)
+               new_smi->irq_setup(new_smi);
+
        /*
         * Check if the user forcefully enabled the daemon.
         */
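
The ipmi_si reorder closes a startup race: once irq_setup() runs, the interrupt handler may fire immediately and reference si_timer, so the timer must already be initialized and armed by then. Condensed:

    /* arm the driving timer before any interrupt can reference it */
    setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
    smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

    if (new_smi->irq_setup)
            new_smi->irq_setup(new_smi);    /* interrupts may fire from here on */
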
index 10819e2484145ebae0ef9b1092098d1bbc7848d1..335322dc403f48fcf773028d232e5c772f3cd8b5 100644 (file)
@@ -209,6 +209,8 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
 
 struct clk_gpio_delayed_register_data {
        const char *gpio_name;
+       int num_parents;
+       const char **parent_names;
        struct device_node *node;
        struct mutex lock;
        struct clk *clk;
@@ -222,8 +224,6 @@ static struct clk *of_clk_gpio_delayed_register_get(
 {
        struct clk_gpio_delayed_register_data *data = _data;
        struct clk *clk;
-       const char **parent_names;
-       int i, num_parents;
        int gpio;
        enum of_gpio_flags of_flags;
 
@@ -248,26 +248,14 @@ static struct clk *of_clk_gpio_delayed_register_get(
                return ERR_PTR(gpio);
        }
 
-       num_parents = of_clk_get_parent_count(data->node);
-
-       parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
-       if (!parent_names) {
-               clk = ERR_PTR(-ENOMEM);
-               goto out;
-       }
-
-       for (i = 0; i < num_parents; i++)
-               parent_names[i] = of_clk_get_parent_name(data->node, i);
-
-       clk = data->clk_register_get(data->node->name, parent_names,
-                       num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
+       clk = data->clk_register_get(data->node->name, data->parent_names,
+                       data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
        if (IS_ERR(clk))
                goto out;
 
        data->clk = clk;
 out:
        mutex_unlock(&data->lock);
-       kfree(parent_names);
 
        return clk;
 }
@@ -296,11 +284,24 @@ static void __init of_gpio_clk_setup(struct device_node *node,
                                unsigned gpio, bool active_low))
 {
        struct clk_gpio_delayed_register_data *data;
+       const char **parent_names;
+       int i, num_parents;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return;
 
+       num_parents = of_clk_get_parent_count(node);
+
+       parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
+       if (!parent_names)
+               return;
+
+       for (i = 0; i < num_parents; i++)
+               parent_names[i] = of_clk_get_parent_name(node, i);
+
+       data->num_parents = num_parents;
+       data->parent_names = parent_names;
        data->node = node;
        data->gpio_name = gpio_name;
        data->clk_register_get = clk_register_get;
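
clk-gpio now looks the parent names up once at setup time and caches them in the delayed-register data, instead of rebuilding and freeing the array on every clk_get. One caveat visible in the hunk above: the kcalloc() failure path returns without freeing data. A sketch of the setup-time caching with that leak closed:

    parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
    if (!parent_names) {
            kfree(data);            /* don't leak the registration data */
            return;
    }

    for (i = 0; i < num_parents; i++)
            parent_names[i] = of_clk_get_parent_name(node, i);

    data->num_parents = num_parents;
    data->parent_names = parent_names;
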
index 1ab0fb81c6a0ef7746d77427c488bdb84235011a..7bc1c4527ae48d0baad2ebb4e141385e695ff8f4 100644 (file)
@@ -778,8 +778,10 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
         */
        clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
        div = get_pll_div(cg, hwc, clksel);
-       if (!div)
+       if (!div) {
+               kfree(hwc);
                return NULL;
+       }
 
        pct80_rate = clk_get_rate(div->clk);
        pct80_rate *= 8;
index 0b501a9fef92b96810655b952870c7da58400c2c..cd0f2726f5e0dd0da33b06e4518f02fc231cf1f7 100644 (file)
@@ -292,6 +292,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
                ret = scpi_clk_add(dev, child, match);
                if (ret) {
                        scpi_clocks_remove(pdev);
+                       of_node_put(child);
                        return ret;
                }
        }
index 8564e4342c7d1fa115c1b5f9043bf6bbb8d6e70d..82fe3662b5f6d43e5bf4d8cc1acb3ea3b29048b9 100644 (file)
@@ -52,7 +52,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
        struct clk_pllv1 *pll = to_clk_pllv1(hw);
-       long long ll;
+       unsigned long long ull;
        int mfn_abs;
        unsigned int mfi, mfn, mfd, pd;
        u32 reg;
@@ -94,16 +94,16 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
        rate = parent_rate * 2;
        rate /= pd + 1;
 
-       ll = (unsigned long long)rate * mfn_abs;
+       ull = (unsigned long long)rate * mfn_abs;
 
-       do_div(ll, mfd + 1);
+       do_div(ull, mfd + 1);
 
        if (mfn_is_negative(pll, mfn))
-               ll = -ll;
+               ull = (rate * mfi) - ull;
+       else
+               ull = (rate * mfi) + ull;
 
-       ll = (rate * mfi) + ll;
-
-       return ll;
+       return ull;
 }
 
 static struct clk_ops clk_pllv1_ops = {
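
The signed-to-unsigned changes here (and in the pllv2, clkt_dpll and fapll hunks below) all serve do_div(): it divides a 64-bit value in place, returns the remainder, and requires an unsigned operand, so the sign of the fractional term is now applied explicitly after the division. In sketch form, with illustrative names:

    u64 frac = (u64)rate * mfn_abs;     /* fractional numerator, always positive */

    do_div(frac, mfd + 1);              /* frac /= (mfd + 1); remainder is discarded */

    if (mfn_negative)                   /* sign handled outside the division */
            result = (rate * mfi) - frac;
    else
            result = (rate * mfi) + frac;
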
index b18f875eac6acadd7e0ae27644fe8af0df240af6..4aeda56ce37279006a5cb8be93f9146d7e3d56d7 100644 (file)
@@ -79,7 +79,7 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
 {
        long mfi, mfn, mfd, pdf, ref_clk;
        unsigned long dbl;
-       s64 temp;
+       u64 temp;
 
        dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
 
@@ -98,8 +98,9 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
        temp = (u64) ref_clk * abs(mfn);
        do_div(temp, mfd + 1);
        if (mfn < 0)
-               temp = -temp;
-       temp = (ref_clk * mfi) + temp;
+               temp = (ref_clk * mfi) - temp;
+       else
+               temp = (ref_clk * mfi) + temp;
 
        return temp;
 }
@@ -126,7 +127,7 @@ static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
 {
        u32 reg;
        long mfi, pdf, mfn, mfd = 999999;
-       s64 temp64;
+       u64 temp64;
        unsigned long quad_parent_rate;
 
        quad_parent_rate = 4 * parent_rate;
index 09d2832fbd7821a56e53fe8dc22ab16548fa3f1c..71fd29348f28b778faacba038cbcc94a887d08ba 100644 (file)
@@ -9,6 +9,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
index 93e967c0f972f954ef1f7af9bb2f90e0c3241cae..75244915df05f92aad3272887e3f5b55b10ba3be 100644 (file)
@@ -9,6 +9,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
index 993abcdb32cce825fdb1d2ea835cb69f3ecf94fc..37ba04ba13686c4f986b6559e7175eb005509e67 100644 (file)
@@ -9,6 +9,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
index 5484c31ec5683c6f92a0c4eeab2ceba1a782e562..0ee1f363e4be154749d4b37a07e215dbd705d76e 100644 (file)
 
 #define SUN4I_PLL2_OUTPUTS             4
 
-struct sun4i_pll2_data {
-       u32     post_div_offset;
-       u32     pre_div_flags;
-};
-
 static DEFINE_SPINLOCK(sun4i_a10_pll2_lock);
 
 static void __init sun4i_pll2_setup(struct device_node *node,
-                                   struct sun4i_pll2_data *data)
+                                   int post_div_offset)
 {
        const char *clk_name = node->name, *parent;
        struct clk **clks, *base_clk, *prediv_clk;
@@ -76,7 +71,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
                                          parent, 0, reg,
                                          SUN4I_PLL2_PRE_DIV_SHIFT,
                                          SUN4I_PLL2_PRE_DIV_WIDTH,
-                                         data->pre_div_flags,
+                                         CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
                                          &sun4i_a10_pll2_lock);
        if (!prediv_clk) {
                pr_err("Couldn't register the prediv clock\n");
@@ -127,7 +122,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
         */
        val = readl(reg);
        val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT);
-       val |= (SUN4I_PLL2_POST_DIV_VALUE - data->post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
+       val |= (SUN4I_PLL2_POST_DIV_VALUE - post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
        writel(val, reg);
 
        of_property_read_string_index(node, "clock-output-names",
@@ -191,25 +186,17 @@ err_unmap:
        iounmap(reg);
 }
 
-static struct sun4i_pll2_data sun4i_a10_pll2_data = {
-       .pre_div_flags  = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
-};
-
 static void __init sun4i_a10_pll2_setup(struct device_node *node)
 {
-       sun4i_pll2_setup(node, &sun4i_a10_pll2_data);
+       sun4i_pll2_setup(node, 0);
 }
 
 CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk",
               sun4i_a10_pll2_setup);
 
-static struct sun4i_pll2_data sun5i_a13_pll2_data = {
-       .post_div_offset        = 1,
-};
-
 static void __init sun5i_a13_pll2_setup(struct device_node *node)
 {
-       sun4i_pll2_setup(node, &sun5i_a13_pll2_data);
+       sun4i_pll2_setup(node, 1);
 }
 
 CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk",
index 1dfad0c712cd87c63eb2dabecda3be0e17ba22be..2a5d84fdddc5d56c0f47fe56aa03683569a5e929 100644 (file)
@@ -20,6 +20,8 @@ static struct ti_dt_clk dm816x_clks[] = {
        DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
        DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
        DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+       DT_CLK(NULL, "timer_32k_ck", "sysclk18_ck"),
+       DT_CLK(NULL, "timer_ext_ck", "tclkin_ck"),
        DT_CLK(NULL, "mpu_ck", "mpu_ck"),
        DT_CLK(NULL, "timer1_fck", "timer1_fck"),
        DT_CLK(NULL, "timer2_fck", "timer2_fck"),
index 9023ca9caf84dde6b02c7ffacdded5244b16795d..b5cc6f66ae5df5e1725dc21ebfd3b18ca5eac1fa 100644 (file)
@@ -240,7 +240,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
  */
 unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
 {
-       long long dpll_clk;
+       u64 dpll_clk;
        u32 dpll_mult, dpll_div, v;
        struct dpll_data *dd;
 
@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
        dpll_div = v & dd->div1_mask;
        dpll_div >>= __ffs(dd->div1_mask);
 
-       dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult;
+       dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult;
        do_div(dpll_clk, dpll_div + 1);
 
        return dpll_clk;
index 5b1726829e6df038de9e8502471cbb6b70414fd5..df2558350fc1d6be681d818c361339acd2afaba3 100644 (file)
@@ -214,7 +214,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 {
        struct clk_divider *divider;
        unsigned int div, value;
-       unsigned long flags = 0;
        u32 val;
 
        if (!hw || !rate)
@@ -228,9 +227,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
        if (value > div_mask(divider))
                value = div_mask(divider);
 
-       if (divider->lock)
-               spin_lock_irqsave(divider->lock, flags);
-
        if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
                val = div_mask(divider) << (divider->shift + 16);
        } else {
@@ -240,9 +236,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
        val |= value << divider->shift;
        ti_clk_ll_ops->clk_writel(val, divider->reg);
 
-       if (divider->lock)
-               spin_unlock_irqrestore(divider->lock, flags);
-
        return 0;
 }
 
@@ -256,8 +249,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
                                     const char *parent_name,
                                     unsigned long flags, void __iomem *reg,
                                     u8 shift, u8 width, u8 clk_divider_flags,
-                                    const struct clk_div_table *table,
-                                    spinlock_t *lock)
+                                    const struct clk_div_table *table)
 {
        struct clk_divider *div;
        struct clk *clk;
@@ -288,7 +280,6 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        div->shift = shift;
        div->width = width;
        div->flags = clk_divider_flags;
-       div->lock = lock;
        div->hw.init = &init;
        div->table = table;
 
@@ -421,7 +412,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup)
 
        clk = _register_divider(NULL, setup->name, div->parent,
                                flags, (void __iomem *)reg, div->bit_shift,
-                               width, div_flags, table, NULL);
+                               width, div_flags, table);
 
        if (IS_ERR(clk))
                kfree(table);
@@ -584,8 +575,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
                goto cleanup;
 
        clk = _register_divider(NULL, node->name, parent_name, flags, reg,
-                               shift, width, clk_divider_flags, table,
-                               NULL);
+                               shift, width, clk_divider_flags, table);
 
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
index f4b2e9888bdf3c3079b0854b1c04c65a5201c5db..66a0d0ed8b55064ac7a888a1d2e6441d3021ceb1 100644 (file)
@@ -168,7 +168,7 @@ static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
 {
        struct fapll_data *fd = to_fapll(hw);
        u32 fapll_n, fapll_p, v;
-       long long rate;
+       u64 rate;
 
        if (ti_fapll_clock_is_bypass(fd))
                return parent_rate;
@@ -314,7 +314,7 @@ static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
 {
        struct fapll_synth *synth = to_synth(hw);
        u32 synth_div_m;
-       long long rate;
+       u64 rate;
 
        /* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
        /* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
        if (!synth->div)
index 69f08a1d047d8672a758e441fcd718bbf64bb946..dab9ba88b9d6d2b1b97e2472694bbb588997d8de 100644 (file)
@@ -69,7 +69,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 {
        struct clk_mux *mux = to_clk_mux(hw);
        u32 val;
-       unsigned long flags = 0;
 
        if (mux->table) {
                index = mux->table[index];
@@ -81,9 +80,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
                        index++;
        }
 
-       if (mux->lock)
-               spin_lock_irqsave(mux->lock, flags);
-
        if (mux->flags & CLK_MUX_HIWORD_MASK) {
                val = mux->mask << (mux->shift + 16);
        } else {
@@ -93,9 +89,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
        val |= index << mux->shift;
        ti_clk_ll_ops->clk_writel(val, mux->reg);
 
-       if (mux->lock)
-               spin_unlock_irqrestore(mux->lock, flags);
-
        return 0;
 }
 
@@ -109,7 +102,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
                                 const char **parent_names, u8 num_parents,
                                 unsigned long flags, void __iomem *reg,
                                 u8 shift, u32 mask, u8 clk_mux_flags,
-                                u32 *table, spinlock_t *lock)
+                                u32 *table)
 {
        struct clk_mux *mux;
        struct clk *clk;
@@ -133,7 +126,6 @@ static struct clk *_register_mux(struct device *dev, const char *name,
        mux->shift = shift;
        mux->mask = mask;
        mux->flags = clk_mux_flags;
-       mux->lock = lock;
        mux->table = table;
        mux->hw.init = &init;
 
@@ -175,7 +167,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup)
 
        return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
                             flags, (void __iomem *)reg, mux->bit_shift, mask,
-                            mux_flags, NULL, NULL);
+                            mux_flags, NULL);
 }
 
 /**
@@ -227,8 +219,7 @@ static void of_mux_clk_setup(struct device_node *node)
        mask = (1 << fls(mask)) - 1;
 
        clk = _register_mux(NULL, node->name, parent_names, num_parents,
-                           flags, reg, shift, mask, clk_mux_flags, NULL,
-                           NULL);
+                           flags, reg, shift, mask, clk_mux_flags, NULL);
 
        if (!IS_ERR(clk))
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
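
With the spinlocks gone, both TI set_rate/set_parent paths are a plain read-modify-write, or, when the HIWORD flag is set, a single write in which the upper 16 bits act as a hardware write-enable mask for the lower 16 so no read is needed at all. The two strategies, condensed from the divider hunk (the read in the else branch is paraphrased from the surrounding file):

    if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
            /* write-enable mask in the top half-word: no read needed */
            val = div_mask(divider) << (divider->shift + 16);
    } else {
            val = ti_clk_ll_ops->clk_readl(divider->reg);
            val &= ~(div_mask(divider) << divider->shift);
    }
    val |= value << divider->shift;
    ti_clk_ll_ops->clk_writel(val, divider->reg);
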
index a83c995a62dfed9173823289d4cc3bbd3199cf12..8412ce5f93a712a03bfa81df25bee238a299d242 100644 (file)
@@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
 
        new_policy.governor = gov;
 
-       /* Use the default policy if its valid. */
-       if (cpufreq_driver->setpolicy)
-               cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
-
+       /* Use the default policy if there is no last_policy. */
+       if (cpufreq_driver->setpolicy) {
+               if (policy->last_policy)
+                       new_policy.policy = policy->last_policy;
+               else
+                       cpufreq_parse_governor(gov->name, &new_policy.policy,
+                                              NULL);
+       }
        /* set default policy */
        return cpufreq_set_policy(policy, &new_policy);
 }
@@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu)
                if (has_target())
                        strncpy(policy->last_governor, policy->governor->name,
                                CPUFREQ_NAME_LEN);
+               else
+                       policy->last_policy = policy->policy;
        } else if (cpu == policy->cpu) {
                /* Nominate new CPU */
                policy->cpu = cpumask_any(policy->cpus);
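
setpolicy drivers such as intel_pstate have no governor name to stash in last_governor across an offline/online cycle, so the patch mirrors that mechanism with a numeric policy->last_policy: saved when the CPU goes down, preferred over the parsed default when it comes back up. The decision, condensed:

    if (cpufreq_driver->setpolicy) {
            if (policy->last_policy)        /* restored from a previous offline */
                    new_policy.policy = policy->last_policy;
            else                            /* first init: parse the default */
                    cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
    }
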
index 73ef499227881e6fd5c71c65d00f486a1f173dd4..7038f364acb51934f51b5dfd6053b5ec6545ddfa 100644 (file)
@@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request   *req,
                processed += to_process;
        } while (processed < nbytes);
 
-       rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
+       rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
                    authsize) ? -EBADMSG : 0;
 out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
index eee624f589b6545a310d081e772d5924094ed70d..abd465f479c433641f3e5024cfe6442d6b68024d 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -418,7 +419,7 @@ mac:
                        itag, req->src, req->assoclen + nbytes,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
                        SCATTERWALK_FROM_SG);
-               rc = memcmp(itag, otag,
+               rc = crypto_memneq(itag, otag,
                            crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
                     -EBADMSG : 0;
        }
index 46f531e19ccf07e97af05c221a5bfbb0d060d0a7..b6f9f42e2985b476ecc63ac16f648535be0cddc2 100644 (file)
@@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
                } else
                        oicv = (char *)&edesc->link_tbl[0];
 
-               err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
+               err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
        }
 
        kfree(edesc);
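
All three crypto changes above address the same timing side channel: memcmp() returns as soon as a byte differs, so the comparison time reveals how many authentication-tag bytes matched. crypto_memneq(), declared in <crypto/algapi.h>, touches every byte regardless and only reveals equal/not-equal:

    #include <crypto/algapi.h>

    /* constant-time tag check: runtime independent of where tags diverge */
    if (crypto_memneq(computed_tag, received_tag, authsize))
            return -EBADMSG;
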
index 6ed7c0fb3378eac88af03b8c125d0a7c9749f3db..6b186829087c4b3f4746ebebb1e327df58486d15 100644 (file)
@@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static int mmio_74xx_gpio_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *of_id =
-               of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+       const struct of_device_id *of_id;
        struct mmio_74xx_gpio_priv *priv;
        struct resource *res;
        void __iomem *dat;
        int err;
 
+       of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+       if (!of_id)
+               return -ENODEV;
+
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
index 56d2d026e62e42bf6fec7db5ad2aa25aebd970f3..f7fbb46d5d797a32d9d35c119adb3df2166cc37e 100644 (file)
@@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
        /* MPUIO is a bit different, reading IRQ status clears it */
        if (bank->is_mpuio) {
                irqc->irq_ack = dummy_irq_chip.irq_ack;
-               irqc->irq_mask = irq_gc_mask_set_bit;
-               irqc->irq_unmask = irq_gc_mask_clr_bit;
                if (!bank->regs->wkup_en)
                        irqc->irq_set_wake = NULL;
        }
index 171a6389f9ce086ffdf74f6c50523a77e205b473..52b447c071cbaa2e160bf5a67fc511ae26d6a55d 100644 (file)
@@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
        const struct palmas_device_data *dev_data;
 
        match = of_match_device(of_palmas_gpio_match, &pdev->dev);
+       if (!match)
+               return -ENODEV;
        dev_data = match->data;
        if (!dev_data)
                dev_data = &palmas_dev_data;
index 045a952576c708e253de29438abaec95640989f2..7b25fdf64802339967ab541bb5ecd5bb876779d7 100644 (file)
@@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
 static int syscon_gpio_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev);
+       const struct of_device_id *of_id;
        struct syscon_gpio_priv *priv;
        struct device_node *np = dev->of_node;
        int ret;
 
+       of_id = of_match_device(syscon_gpio_ids, dev);
+       if (!of_id)
+               return -ENODEV;
+
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
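
The 74xx-mmio, palmas and syscon probes share one fix: of_match_device() returns NULL when the device was not bound through the OF match table, so dereferencing the result unconditionally can oops; each probe now bails out with -ENODEV first. The pattern, with a hypothetical match table:

    of_id = of_match_device(foo_of_match, &pdev->dev);
    if (!of_id)
            return -ENODEV;         /* not probed via a DT match */
    dev_data = of_id->data;
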
index 027e5f47dd28738f04e6347f1807c6b28b34e618..896bf29776b093ee34efad4d4d29d073fda184b6 100644 (file)
@@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
 }
 #endif
 
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_gpio_show(struct seq_file *s, void *unused)
+{
+       int i;
+       int j;
+
+       for (i = 0; i < tegra_gpio_bank_count; i++) {
+               for (j = 0; j < 4; j++) {
+                       int gpio = tegra_gpio_compose(i, j, 0);
+                       seq_printf(s,
+                               "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+                               i, j,
+                               tegra_gpio_readl(GPIO_CNF(gpio)),
+                               tegra_gpio_readl(GPIO_OE(gpio)),
+                               tegra_gpio_readl(GPIO_OUT(gpio)),
+                               tegra_gpio_readl(GPIO_IN(gpio)),
+                               tegra_gpio_readl(GPIO_INT_STA(gpio)),
+                               tegra_gpio_readl(GPIO_INT_ENB(gpio)),
+                               tegra_gpio_readl(GPIO_INT_LVL(gpio)));
+               }
+       }
+       return 0;
+}
+
+static int dbg_gpio_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dbg_gpio_show, &inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+       .open           = dbg_gpio_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static void tegra_gpio_debuginit(void)
+{
+       (void) debugfs_create_file("tegra_gpio", S_IRUGO,
+                                       NULL, NULL, &debug_fops);
+}
+
+#else
+
+static inline void tegra_gpio_debuginit(void)
+{
+}
+
+#endif
+
 static struct irq_chip tegra_gpio_irq_chip = {
        .name           = "GPIO",
        .irq_ack        = tegra_gpio_irq_ack,
@@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
                        spin_lock_init(&bank->lvl_lock[j]);
        }
 
+       tegra_gpio_debuginit();
+
        return 0;
 }
 
@@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void)
        return platform_driver_register(&tegra_gpio_driver);
 }
 postcore_initcall(tegra_gpio_init);
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static int dbg_gpio_show(struct seq_file *s, void *unused)
-{
-       int i;
-       int j;
-
-       for (i = 0; i < tegra_gpio_bank_count; i++) {
-               for (j = 0; j < 4; j++) {
-                       int gpio = tegra_gpio_compose(i, j, 0);
-                       seq_printf(s,
-                               "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
-                               i, j,
-                               tegra_gpio_readl(GPIO_CNF(gpio)),
-                               tegra_gpio_readl(GPIO_OE(gpio)),
-                               tegra_gpio_readl(GPIO_OUT(gpio)),
-                               tegra_gpio_readl(GPIO_IN(gpio)),
-                               tegra_gpio_readl(GPIO_INT_STA(gpio)),
-                               tegra_gpio_readl(GPIO_INT_ENB(gpio)),
-                               tegra_gpio_readl(GPIO_INT_LVL(gpio)));
-               }
-       }
-       return 0;
-}
-
-static int dbg_gpio_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, dbg_gpio_show, &inode->i_private);
-}
-
-static const struct file_operations debug_fops = {
-       .open           = dbg_gpio_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int __init tegra_gpio_debuginit(void)
-{
-       (void) debugfs_create_file("tegra_gpio", S_IRUGO,
-                                       NULL, NULL, &debug_fops);
-       return 0;
-}
-late_initcall(tegra_gpio_debuginit);
-#endif
index a18f00fc1bb87544cc59182e6f4a5515464a2f29..2a91f3287e3b7e52f2f6e16d108af07d4343be0b 100644 (file)
@@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
                for (i = 0; i != chip->ngpio; ++i) {
                        struct gpio_desc *gpio = &chip->desc[i];
 
-                       if (!gpio->name)
+                       if (!gpio->name || !name)
                                continue;
 
                        if (!strcmp(gpio->name, name)) {
index 251b14736de92b7ff429d01f9d257c3b918b6547..5a5f04d0902d662548ebae9f82525e1363e8457c 100644 (file)
@@ -539,6 +539,7 @@ struct amdgpu_bo {
        /* Constant after initialization */
        struct amdgpu_device            *adev;
        struct drm_gem_object           gem_base;
+       struct amdgpu_bo                *parent;
 
        struct ttm_bo_kmap_obj          dma_buf_vmap;
        pid_t                           pid;
@@ -955,6 +956,8 @@ struct amdgpu_vm {
        struct amdgpu_vm_id     ids[AMDGPU_MAX_RINGS];
        /* for interval tree */
        spinlock_t              it_lock;
+       /* protecting freed */
+       spinlock_t              freed_lock;
 };
 
 struct amdgpu_vm_manager {
index 1d44d508d4d4f50b249104b4aabc9a6cbab83e61..4f352ec9dec4e2fb7c3bb5bd062e674291fc8d5a 100644 (file)
@@ -222,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                                }
 
                                p->uf.bo = gem_to_amdgpu_bo(gobj);
+                               amdgpu_bo_ref(p->uf.bo);
+                               drm_gem_object_unreference_unlocked(gobj);
                                p->uf.offset = fence_data->offset;
                        } else {
                                ret = -EINVAL;
@@ -487,7 +489,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
                        amdgpu_ib_free(parser->adev, &parser->ibs[i]);
        kfree(parser->ibs);
        if (parser->uf.bo)
-               drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+               amdgpu_bo_unref(&parser->uf.bo);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -776,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
                        amdgpu_ib_free(job->adev, &job->ibs[i]);
        kfree(job->ibs);
        if (job->uf.bo)
-               drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+               amdgpu_bo_unref(&job->uf.bo);
        return 0;
 }
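
Both amdgpu_cs hunks (and the display hunks below) convert user-fence and old-scanout buffer pinning from GEM object references to direct BO references, so every teardown path releases the reference type that was actually taken. The pattern after a GEM lookup, condensed:

    p->uf.bo = gem_to_amdgpu_bo(gobj);
    amdgpu_bo_ref(p->uf.bo);                    /* hold our own BO reference */
    drm_gem_object_unreference_unlocked(gobj);  /* drop the lookup's GEM reference */

    /* ...and on every teardown path: */
    amdgpu_bo_unref(&p->uf.bo);
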
 
index e173a5a02f0d8052bb6c76203b188b6cf231d787..5580d3420c3a368815364d8eccce3e1421ce6a12 100644 (file)
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
        struct drm_crtc *crtc = &amdgpuCrtc->base;
        unsigned long flags;
        unsigned i;
+       int vpos, hpos, stat, min_udelay;
+       struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
        amdgpu_flip_wait_fence(adev, &work->excl);
        for (i = 0; i < work->shared_count; ++i)
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
        /* We borrow the event spin lock for protecting flip_status */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
+       /* If this happens to execute within the "virtually extended" vblank
+        * interval before the start of the real vblank interval then it needs
+        * to delay programming the mmio flip until the real vblank is entered.
+        * This prevents completing a flip too early due to the way we fudge
+        * our vblank counter and vblank timestamps in order to work around the
+        * problem that the hw fires vblank interrupts before actual start of
+        * vblank (when line buffer refilling is done for a frame). It
+        * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
+        * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
+        *
+        * In practice this won't execute very often unless on very fast
+        * machines because the time window for this to happen is very small.
+        */
+       for (;;) {
+               /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+                * start in hpos, and to the "fudged earlier" vblank start in
+                * vpos.
+                */
+               stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
+                                                 GET_DISTANCE_TO_VBLANKSTART,
+                                                 &vpos, &hpos, NULL, NULL,
+                                                 &crtc->hwmode);
+
+               if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+                   (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+                   !(vpos >= 0 && hpos <= 0))
+                       break;
+
+               /* Sleep at least until estimated real start of hw vblank */
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+               min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+               usleep_range(min_udelay, 2 * min_udelay);
+               spin_lock_irqsave(&crtc->dev->event_lock, flags);
+       }
+
        /* do the flip (mmio) */
        adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
        /* set the flip status */
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
        } else
                DRM_ERROR("failed to reserve buffer after flip\n");
 
-       drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+       amdgpu_bo_unref(&work->old_rbo);
        kfree(work->shared);
        kfree(work);
 }
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
        obj = old_amdgpu_fb->obj;
 
        /* take a reference to the old object */
-       drm_gem_object_reference(obj);
        work->old_rbo = gem_to_amdgpu_bo(obj);
+       amdgpu_bo_ref(work->old_rbo);
 
        new_amdgpu_fb = to_amdgpu_framebuffer(fb);
        obj = new_amdgpu_fb->obj;
@@ -222,7 +259,7 @@ pflip_cleanup:
        amdgpu_bo_unreserve(new_rbo);
 
 cleanup:
-       drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+       amdgpu_bo_unref(&work->old_rbo);
        fence_put(work->excl);
        for (i = 0; i < work->shared_count; ++i)
                fence_put(work->shared[i]);
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * \param dev Device to query.
  * \param pipe Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver internal use only also supports these flags:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                vbl_end = 0;
        }
 
+       /* Called from driver internal vblank counter query code? */
+       if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+           /* Caller wants distance from real vbl_start in *hpos */
+           *hpos = *vpos - vbl_start;
+       }
+
+       /* Fudge vblank to start a few scanlines earlier to handle the
+        * problem that vblank irqs fire a few scanlines before start
+        * of vblank. Some driver internal callers need the true vblank
+        * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+        *
+        * The cause of the "early" vblank irq is that the irq is triggered
+        * by the line buffer logic when the line buffer read position enters
+        * the vblank, whereas our crtc scanout position naturally lags the
+        * line buffer read position.
+        */
+       if (!(flags & USE_REAL_VBLANKSTART))
+               vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
        /* Test scanout position against vblank region. */
        if ((*vpos < vbl_start) && (*vpos >= vbl_end))
                in_vbl = false;
 
+       /* In vblank? */
+       if (in_vbl)
+           ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+       /* Called from driver internal vblank counter query code? */
+       if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+               /* Caller wants distance from fudged earlier vbl_start */
+               *vpos -= vbl_start;
+               return ret;
+       }
+
        /* Check if inside vblank area and apply corrective offsets:
         * vpos will then be >=0 in video scanout area, but negative
         * within vblank area, counting down the number of lines until
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        /* Correct for shifted end of vbl at vbl_end. */
        *vpos = *vpos - vbl_end;
 
-       /* In vblank? */
-       if (in_vbl)
-               ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
-       /* Is vpos outside nominal vblank area, but less than
-        * 1/100 of a frame height away from start of vblank?
-        * If so, assume this isn't a massively delayed vblank
-        * interrupt, but a vblank interrupt that fired a few
-        * microseconds before true start of vblank. Compensate
-        * by adding a full frame duration to the final timestamp.
-        * Happens, e.g., on ATI R500, R600.
-        *
-        * We only do this if DRM_CALLED_FROM_VBLIRQ.
-        */
-       if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-               vbl_start = mode->crtc_vdisplay;
-               vtotal = mode->crtc_vtotal;
-
-               if (vbl_start - *vpos < vtotal / 100) {
-                       *vpos -= vtotal;
-
-                       /* Signal this correction as "applied". */
-                       ret |= 0x8;
-               }
-       }
-
        return ret;
 }
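A worked example of the two distance outputs: with lb_vblank_lead_lines == 7, a scanout position 5 lines past the fudged (early) vblank start is still 2 lines short of the real one, so the query returns vpos == 5 and hpos == -2. A minimal sketch of a driver-internal caller, assuming only the flag semantics documented above:

    int vpos, hpos, stat;

    /* Ask for distances to vblank start instead of the raw scanout position. */
    stat = amdgpu_get_crtc_scanoutpos(dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
                                      &vpos, &hpos, NULL, NULL,
                                      &adev->mode_info.crtcs[pipe]->base.hwmode);
    if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) ==
        (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) && vpos >= 0) {
        /* Scanout is at or past the fudged vblank start. */
    }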
 
index fc32fc01a64ba8f1d68ae8621af29195287881c8..f6ea4b43a60c59b144bc5ff9916d96af38a59b24 100644 (file)
@@ -235,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;
 
-       if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-                  !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+       if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+            !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+            !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
 
                /* if we want to write to it we must require anonymous
                   memory and install a MMU notifier */
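Spelled out, the corrected condition only lets this block reject writeable userptr requests that are not both anonymous-only and MMU-notifier registered; read-only requests now bypass it entirely. A minimal restatement of the predicate (the error value is illustrative, the actual rejection path is elided from this hunk):

    bool writeable = !(args->flags & AMDGPU_GEM_USERPTR_READONLY);
    bool anon_only = args->flags & AMDGPU_GEM_USERPTR_ANONONLY;
    bool notifier  = args->flags & AMDGPU_GEM_USERPTR_REGISTER;

    if (writeable && !(anon_only && notifier))
        return -EACCES;  /* illustrative error code */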
index 1618e2294a16056171458998ed65e62a83583774..e23843f4d877be7813a1574db0f855847f9aa591 100644 (file)
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
 {
        struct amdgpu_device *adev = dev->dev_private;
+       int vpos, hpos, stat;
+       u32 count;
 
        if (pipe >= adev->mode_info.num_crtc) {
                DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }
 
-       return amdgpu_display_vblank_get_counter(adev, pipe);
+       /* The hw increments its frame counter at start of vsync, not at start
+        * of vblank, as is required by DRM core vblank counter handling.
+        * Cook the hw count here to make it appear to the caller as if it
+        * incremented at start of vblank. We measure distance to start of
+        * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+        * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+        * result by 1 to give the proper appearance to the caller.
+        */
+       if (adev->mode_info.crtcs[pipe]) {
+               /* Repeat readout if needed to provide stable result if
+                * we cross start of vsync during the queries.
+                */
+               do {
+                       count = amdgpu_display_vblank_get_counter(adev, pipe);
+                       /* Ask amdgpu_get_crtc_scanoutpos to return vpos as
+                        * distance to start of vblank, instead of regular
+                        * vertical scanout pos.
+                        */
+                       stat = amdgpu_get_crtc_scanoutpos(
+                               dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
+                               &vpos, &hpos, NULL, NULL,
+                               &adev->mode_info.crtcs[pipe]->base.hwmode);
+               } while (count != amdgpu_display_vblank_get_counter(adev, pipe));
+
+               if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+                   (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+                       DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+               } else {
+                       DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+                                     pipe, vpos);
+
+                       /* Bump counter if we are at >= leading edge of vblank,
+                        * but before vsync where vpos would turn negative and
+                        * the hw counter really increments.
+                        */
+                       if (vpos >= 0)
+                               count++;
+               }
+       } else {
+               /* Fallback to use value as is. */
+               count = amdgpu_display_vblank_get_counter(adev, pipe);
+               DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+       }
+
+       return count;
 }
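The repeat-readout loop above is the usual stable-snapshot idiom: sample the counter, take the dependent measurement, then re-read and retry if the counter moved in between. Reduced to its essence, assuming a monotonically increasing hardware counter:

    u32 count;

    do {
        count = read_counter();          /* first sample */
        take_measurement();              /* must be consistent with 'count' */
    } while (count != read_counter());   /* retry if the counter moved */

read_counter() and take_measurement() are placeholders for the amdgpu_display_vblank_get_counter() and scanout-position queries used above.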
 
 /**
index b62c1710cab6b0fc00e33013ad741c97f9583ed2..064ebb3470748619e0b43ff7bfe2671a58c50bbd 100644 (file)
@@ -407,6 +407,7 @@ struct amdgpu_crtc {
        u32 line_time;
        u32 wm_low;
        u32 wm_high;
+       u32 lb_vblank_lead_lines;
        struct drm_display_mode hw_mode;
 };
 
@@ -528,6 +529,10 @@ struct amdgpu_framebuffer {
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
                                ((em) == ATOM_ENCODER_MODE_DP_MST))
 
+/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART           (1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART    (1 << 31)
+
 void amdgpu_link_encoder_connector(struct drm_device *dev);
 
 struct drm_connector *
index 0d524384ff79c3b49d2dbaa1489d8cbff231d80a..c3ce103b6a33b2ef3a0a6f7f14877e18150ca405 100644 (file)
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        list_del_init(&bo->list);
        mutex_unlock(&bo->adev->gem.mutex);
        drm_gem_object_release(&bo->gem_base);
+       amdgpu_bo_unref(&bo->parent);
        kfree(bo->metadata);
        kfree(bo);
 }
index d4bac5f49939e1a780e5897754779fde9d1733d6..8a1752ff3d8e55a1d8b8ab3061f5c9c425058d0f 100644 (file)
@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
        uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
        int r;
 
-       if (gtt->userptr)
-               amdgpu_ttm_tt_pin_userptr(ttm);
-
+       if (gtt->userptr) {
+               r = amdgpu_ttm_tt_pin_userptr(ttm);
+               if (r) {
+                       DRM_ERROR("failed to pin userptr\n");
+                       return r;
+               }
+       }
        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
        if (mem && mem->mem_type != TTM_PL_SYSTEM)
                flags |= AMDGPU_PTE_VALID;
 
-       if (mem && mem->mem_type == TTM_PL_TT)
+       if (mem && mem->mem_type == TTM_PL_TT) {
                flags |= AMDGPU_PTE_SYSTEM;
 
-       if (!ttm || ttm->caching_state == tt_cached)
-               flags |= AMDGPU_PTE_SNOOPED;
+               if (ttm->caching_state == tt_cached)
+                       flags |= AMDGPU_PTE_SNOOPED;
+       }
 
        if (adev->asic_type >= CHIP_TOPAZ)
                flags |= AMDGPU_PTE_EXECUTABLE;
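The restructured test makes the dependency explicit: AMDGPU_PTE_SNOOPED only applies to GTT (system-memory) placements, so it is now nested under the TTM_PL_TT check instead of being set for any cached tt. The resulting combinations, as implied by the code above:

    /* placement       caching      extra PTE flags
     * TTM_PL_SYSTEM   any          (none; mapping not VALID)
     * TTM_PL_VRAM     n/a          VALID
     * TTM_PL_TT       uncached     VALID | SYSTEM
     * TTM_PL_TT       tt_cached    VALID | SYSTEM | SNOOPED
     */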
index ae037e5b6ad016d6ee70545b814d082da84f22c3..b53d273eb7a1003a719fff9d1bd32a8b4fa20b78 100644 (file)
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
        struct amdgpu_bo_va_mapping *mapping;
        int r;
 
+       spin_lock(&vm->freed_lock);
        while (!list_empty(&vm->freed)) {
                mapping = list_first_entry(&vm->freed,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);
-
+               spin_unlock(&vm->freed_lock);
                r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
                kfree(mapping);
                if (r)
                        return r;
 
+               spin_lock(&vm->freed_lock);
        }
+       spin_unlock(&vm->freed_lock);
+
        return 0;
 
 }
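The freed_lock dance above is the standard pattern for draining a spinlock-protected list when the per-entry work may sleep or allocate: detach one entry under the lock, drop the lock for the heavyweight update, then retake it before testing the list again. In outline, with hypothetical names:

    spin_lock(&lock);
    while (!list_empty(&head)) {
        entry = list_first_entry(&head, struct entry, list);
        list_del(&entry->list);
        spin_unlock(&lock);

        process(entry);        /* may sleep; runs without the lock */

        spin_lock(&lock);
    }
    spin_unlock(&lock);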
@@ -1079,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                if (r)
                        goto error_free;
 
+               /* Keep a reference to the page table to avoid freeing
+                * them up in the wrong order.
+                */
+               pt->parent = amdgpu_bo_ref(vm->page_directory);
+
                r = amdgpu_vm_clear_bo(adev, pt);
                if (r) {
                        amdgpu_bo_unref(&pt);
@@ -1150,10 +1159,13 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
        spin_unlock(&vm->it_lock);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-       if (valid)
+       if (valid) {
+               spin_lock(&vm->freed_lock);
                list_add(&mapping->list, &vm->freed);
-       else
+               spin_unlock(&vm->freed_lock);
+       } else {
                kfree(mapping);
+       }
 
        return 0;
 }
@@ -1186,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                interval_tree_remove(&mapping->it, &vm->va);
                spin_unlock(&vm->it_lock);
                trace_amdgpu_vm_bo_unmap(bo_va, mapping);
+               spin_lock(&vm->freed_lock);
                list_add(&mapping->list, &vm->freed);
+               spin_unlock(&vm->freed_lock);
        }
        list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
                list_del(&mapping->list);
@@ -1247,6 +1261,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        INIT_LIST_HEAD(&vm->cleared);
        INIT_LIST_HEAD(&vm->freed);
        spin_lock_init(&vm->it_lock);
+       spin_lock_init(&vm->freed_lock);
        pd_size = amdgpu_vm_directory_size(adev);
        pd_entries = amdgpu_vm_num_pdes(adev);
 
index cb0f7747e3dcac2dfa923d43fc0dd8574968d575..4dcc8fba579203297ece7849ab8a861ecbe33f10 100644 (file)
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
        u32 pixel_period;
        u32 line_time = 0;
        u32 latency_watermark_a = 0, latency_watermark_b = 0;
-       u32 tmp, wm_mask;
+       u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
        if (amdgpu_crtc->base.enabled && num_heads && mode) {
                pixel_period = 1000000 / (u32)mode->clock;
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                }
+               lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }
 
        /* select wm A */
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
        amdgpu_crtc->line_time = line_time;
        amdgpu_crtc->wm_high = latency_watermark_a;
        amdgpu_crtc->wm_low = latency_watermark_b;
+       /* Save number of lines the linebuffer leads before the scanout */
+       amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
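The new lead-line count is just the line-buffer depth expressed in scanlines of the current mode, rounded up. With illustrative numbers:

    /* e.g. lb_size = 3840 pixels, crtc_hdisplay = 1920 pixels per line */
    lb_vblank_lead_lines = DIV_ROUND_UP(3840, 1920);    /* == 2 lines */

amdgpu_get_crtc_scanoutpos() later subtracts this value from vbl_start to model the early vblank irq, as seen in the display hunk above.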
 
 /**
index 5af3721851d67e99b521328babc38c811a7e2c09..8f1e51128b33882d5680731b9afb08fcf1b1023c 100644 (file)
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
        u32 pixel_period;
        u32 line_time = 0;
        u32 latency_watermark_a = 0, latency_watermark_b = 0;
-       u32 tmp, wm_mask;
+       u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
        if (amdgpu_crtc->base.enabled && num_heads && mode) {
                pixel_period = 1000000 / (u32)mode->clock;
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                }
+               lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }
 
        /* select wm A */
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
        amdgpu_crtc->line_time = line_time;
        amdgpu_crtc->wm_high = latency_watermark_a;
        amdgpu_crtc->wm_low = latency_watermark_b;
+       /* Save number of lines the linebuffer leads before the scanout */
+       amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**
index 4f7b49a6dc500c431ba9aa52727232156f60bda1..42d954dc436d03206c48aafe7655966870dfc931 100644 (file)
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
        u32 pixel_period;
        u32 line_time = 0;
        u32 latency_watermark_a = 0, latency_watermark_b = 0;
-       u32 tmp, wm_mask;
+       u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
        if (amdgpu_crtc->base.enabled && num_heads && mode) {
                pixel_period = 1000000 / (u32)mode->clock;
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                }
+               lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }
 
        /* select wm A */
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
        amdgpu_crtc->line_time = line_time;
        amdgpu_crtc->wm_high = latency_watermark_a;
        amdgpu_crtc->wm_low = latency_watermark_b;
+       /* Save number of lines the linebuffer leads before the scanout */
+       amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**
index 7427d8cd4c43fae6068c8a3455ac98f3627ff044..ed8abb58a785ef3efaba87a806e7bcdef7188a1b 100644 (file)
@@ -513,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        WREG32(mmVM_L2_CNTL3, tmp);
        /* setup context0 */
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(adev->dummy_page.addr >> 12));
index cb0e50ebb5285cef90b4a2879cd3cd800129c296..d390284408144c923d8571da0df70b3fab38879c 100644 (file)
@@ -657,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        WREG32(mmVM_L2_CNTL4, tmp);
        /* setup context0 */
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+       WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(adev->dummy_page.addr >> 12));
index 651129f2ec1d1eac5228d0e0e4c8f0095ad5a35a..3a4820e863ecbbc8ee003c47f3ccc088804f4f53 100644 (file)
@@ -288,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  */
 static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
+       struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;
 
@@ -302,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 
        /* first job wakes up scheduler */
        if (first)
-               amd_sched_wakeup(sched_job->sched);
+               amd_sched_wakeup(sched);
 
        return added;
 }
@@ -318,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
        struct amd_sched_entity *entity = sched_job->s_entity;
 
+       trace_amd_sched_job(sched_job);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
-       trace_amd_sched_job(sched_job);
 }
 
 /**
index 9362609df38ae4077ae934d96f5098ffaabd59dd..7dd6728dd092e23af63516295be42331bcc1f3c8 100644 (file)
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                goto out_unlock;
        }
 
+       if (!file_priv->allowed_master) {
+               ret = drm_new_set_master(dev, file_priv);
+               goto out_unlock;
+       }
+
        file_priv->minor->master = drm_master_get(file_priv->master);
        file_priv->is_master = 1;
        if (dev->driver->master_set) {
index c59ce4d0ef75f4e878ad5964c90e86afd2467456..6b5625e6611975735a1bd4c218edd3cfa9ec5e69 100644 (file)
@@ -125,6 +125,60 @@ static int drm_cpu_valid(void)
        return 1;
 }
 
+/**
+ * drm_new_set_master - Allocate a new master object and become master for the
+ * associated master realm.
+ *
+ * @dev: The associated device.
+ * @fpriv: File private identifying the client.
+ *
+ * This function must be called with dev->master_mutex held.
+ * Returns negative error code on failure. Zero on success.
+ */
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+       struct drm_master *old_master;
+       int ret;
+
+       lockdep_assert_held_once(&dev->master_mutex);
+
+       /* create a new master */
+       fpriv->minor->master = drm_master_create(fpriv->minor);
+       if (!fpriv->minor->master)
+               return -ENOMEM;
+
+       /* take another reference for the copy in the local file priv */
+       old_master = fpriv->master;
+       fpriv->master = drm_master_get(fpriv->minor->master);
+
+       if (dev->driver->master_create) {
+               ret = dev->driver->master_create(dev, fpriv->master);
+               if (ret)
+                       goto out_err;
+       }
+       if (dev->driver->master_set) {
+               ret = dev->driver->master_set(dev, fpriv, true);
+               if (ret)
+                       goto out_err;
+       }
+
+       fpriv->is_master = 1;
+       fpriv->allowed_master = 1;
+       fpriv->authenticated = 1;
+       if (old_master)
+               drm_master_put(&old_master);
+
+       return 0;
+
+out_err:
+       /* drop both references and restore old master on failure */
+       drm_master_put(&fpriv->minor->master);
+       drm_master_put(&fpriv->master);
+       fpriv->master = old_master;
+
+       return ret;
+}
+
 /**
  * Called whenever a process opens /dev/drm.
  *
@@ -189,35 +243,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
        mutex_lock(&dev->master_mutex);
        if (drm_is_primary_client(priv) && !priv->minor->master) {
                /* create a new master */
-               priv->minor->master = drm_master_create(priv->minor);
-               if (!priv->minor->master) {
-                       ret = -ENOMEM;
+               ret = drm_new_set_master(dev, priv);
+               if (ret)
                        goto out_close;
-               }
-
-               priv->is_master = 1;
-               /* take another reference for the copy in the local file priv */
-               priv->master = drm_master_get(priv->minor->master);
-               priv->authenticated = 1;
-
-               if (dev->driver->master_create) {
-                       ret = dev->driver->master_create(dev, priv->master);
-                       if (ret) {
-                               /* drop both references if this fails */
-                               drm_master_put(&priv->minor->master);
-                               drm_master_put(&priv->master);
-                               goto out_close;
-                       }
-               }
-               if (dev->driver->master_set) {
-                       ret = dev->driver->master_set(dev, priv, true);
-                       if (ret) {
-                               /* drop both references if this fails */
-                               drm_master_put(&priv->minor->master);
-                               drm_master_put(&priv->master);
-                               goto out_close;
-                       }
-               }
        } else if (drm_is_primary_client(priv)) {
                /* get a reference to the master */
                priv->master = drm_master_get(priv->minor->master);
index 2151ea551d3bd6526d850754b79ece388a9279df..607f493ae80132890ad97422f58f9cdf95eefc75 100644 (file)
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev,
                struct drm_pending_vblank_event *e,
                unsigned long seq, struct timeval *now)
 {
-       WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+       assert_spin_locked(&dev->event_lock);
+
        e->event.sequence = seq;
        e->event.tv_sec = now->tv_sec;
        e->event.tv_usec = now->tv_usec;
@@ -992,6 +993,57 @@ static void send_vblank_event(struct drm_device *dev,
                                         e->event.sequence);
 }
 
+/**
+ * drm_arm_vblank_event - arm vblank event after pageflip
+ * @dev: DRM device
+ * @pipe: CRTC index
+ * @e: the event to prepare to send
+ *
+ * Many drivers need to generate a vblank event for the very next vblank
+ * interrupt, e.g. when the page flip interrupt fires as soon as the flip is
+ * armed rather than when it actually executes within the following vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * The caller must hold the event lock and a vblank reference for the event
+ * @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the legacy version of drm_crtc_arm_vblank_event().
+ */
+void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+                         struct drm_pending_vblank_event *e)
+{
+       assert_spin_locked(&dev->event_lock);
+
+       e->pipe = pipe;
+       e->event.sequence = drm_vblank_count(dev, pipe);
+       list_add_tail(&e->base.link, &dev->vblank_event_list);
+}
+EXPORT_SYMBOL(drm_arm_vblank_event);
+
+/**
+ * drm_crtc_arm_vblank_event - arm vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Many drivers need to generate a vblank event for the very next vblank
+ * interrupt, e.g. when the page flip interrupt fires as soon as the flip is
+ * armed rather than when it actually executes within the following vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * The caller must hold the event lock and a vblank reference for the event
+ * @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the native KMS version of drm_arm_vblank_event().
+ */
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+                              struct drm_pending_vblank_event *e)
+{
+       drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
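A typical consumer is a flip-completion handler on hardware whose flip interrupt fires when the flip is armed rather than when it executes (the nouveau conversion later in this series is one such case). A minimal sketch, with flip_completes_later as a hypothetical hardware-dependent condition:

    spin_lock_irqsave(&dev->event_lock, flags);
    if (flip_completes_later)
        /* queue for delivery on the next vblank */
        drm_arm_vblank_event(dev, pipe, event);
    else
        drm_send_vblank_event(dev, pipe, event);  /* deliver right away */
    spin_unlock_irqrestore(&dev->event_lock, flags);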
+
 /**
  * drm_send_vblank_event - helper to send vblank event after pageflip
  * @dev: DRM device
index a3b22bdacd44f539a81429d7e1946d86d8cb4f78..8aab974b0564c4a8bef46a6a05d5c200831d70f6 100644 (file)
@@ -2734,6 +2734,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
                return "AUX_C";
        case POWER_DOMAIN_AUX_D:
                return "AUX_D";
+       case POWER_DOMAIN_GMBUS:
+               return "GMBUS";
        case POWER_DOMAIN_INIT:
                return "INIT";
        default:
index 95bb27de774f8fb2c5bb8cc83567a45eb83a8e43..a01e51581c4c466a000e2aa6b22f42e97ec31b85 100644 (file)
@@ -199,6 +199,7 @@ enum intel_display_power_domain {
        POWER_DOMAIN_AUX_B,
        POWER_DOMAIN_AUX_C,
        POWER_DOMAIN_AUX_D,
+       POWER_DOMAIN_GMBUS,
        POWER_DOMAIN_INIT,
 
        POWER_DOMAIN_NUM,
index 91bb1fc27420f5a58f9fc2ca962a4b452f9cb7e6..32e6aade62238be511237c9ffad1933e18e561cb 100644 (file)
@@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        if (i915_gem_request_completed(req, true))
                return 0;
 
-       timeout_expire = timeout ?
-               jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
+       timeout_expire = 0;
+       if (timeout) {
+               if (WARN_ON(*timeout < 0))
+                       return -EINVAL;
+
+               if (*timeout == 0)
+                       return -ETIME;
+
+               timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+       }
 
        if (INTEL_INFO(dev_priv)->gen >= 6)
                gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
index 40a10b25956c2a26fc75500d2e10a77b639e43fb..f010391b87f5cd0803c7bc994f8b44fd68a5e9b6 100644 (file)
@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                }
 
                /* check for L-shaped memory aka modified enhanced addressing */
-               if (IS_GEN4(dev)) {
-                       uint32_t ddc2 = I915_READ(DCC2);
-
-                       if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-                               dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+               if (IS_GEN4(dev) &&
+                   !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+                       swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+                       swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
                }
 
                if (dcc == 0xffffffff) {
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                 * matching, which was the case for the swizzling required in
                 * the table above, or from the 1-ch value being less than
                 * the minimum size of a rank.
+                *
+                * Reports indicate that the swizzling actually
+                * varies depending upon page placement inside the
+                * channels, i.e. we see swizzled pages where the
+                * banks of memory are paired and unswizzled on the
+                * uneven portion, so leave that as unknown.
                 */
-               if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-                       swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-               } else {
+               if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                        swizzle_y = I915_BIT_6_SWIZZLE_9;
                }
        }
 
+       if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+           swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+               /* Userspace likes to explode if it sees unknown swizzling,
+                * so lie. We will finish the lie when reporting through
+                * the get-tiling-ioctl by reporting the physical swizzle
+                * mode as unknown instead.
+                *
+                * As we don't strictly know what the swizzling is, it may be
+                * bit17 dependent, and so we need to also prevent the pages
+                * from being moved.
+                */
+               dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+       }
+
        dev_priv->mm.bit_6_swizzle_x = swizzle_x;
        dev_priv->mm.bit_6_swizzle_y = swizzle_y;
 }
index 71860f8680f9fb95efae6ea310be6ef450d25e57..22e86d2e408d9493e6c2ea6a60e689a5c03934a9 100644 (file)
@@ -5194,11 +5194,31 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_2_LANES;
        default:
-               WARN_ON_ONCE(1);
+               MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
 }
 
+static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
+{
+       switch (port) {
+       case PORT_A:
+               return POWER_DOMAIN_AUX_A;
+       case PORT_B:
+               return POWER_DOMAIN_AUX_B;
+       case PORT_C:
+               return POWER_DOMAIN_AUX_C;
+       case PORT_D:
+               return POWER_DOMAIN_AUX_D;
+       case PORT_E:
+               /* FIXME: Check VBT for actual wiring of PORT E */
+               return POWER_DOMAIN_AUX_D;
+       default:
+               MISSING_CASE(port);
+               return POWER_DOMAIN_AUX_A;
+       }
+}
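Call sites then bracket AUX and PPS register access with a get/put on the returned domain, exactly as the intel_dp.c conversions below do. The pattern, in outline:

    enum intel_display_power_domain power_domain;

    power_domain = intel_display_port_aux_power_domain(encoder);
    intel_display_power_get(dev_priv, power_domain);
    /* ... touch DP AUX / panel power sequencer registers ... */
    intel_display_power_put(dev_priv, power_domain);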
+
 #define for_each_power_domain(domain, mask)                            \
        for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
                if ((1 << (domain)) & (mask))
@@ -5230,6 +5250,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
        }
 }
 
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
+{
+       struct drm_device *dev = intel_encoder->base.dev;
+       struct intel_digital_port *intel_dig_port;
+
+       switch (intel_encoder->type) {
+       case INTEL_OUTPUT_UNKNOWN:
+       case INTEL_OUTPUT_HDMI:
+               /*
+                * Only DDI platforms should ever use these output types.
+                * We can get here after the HDMI detect code has already set
+                * the type of the shared encoder. Since we can't be sure what
+                * the status of the given connectors is, play it safe and run
+                * the DP detection too.
+                */
+               WARN_ON_ONCE(!HAS_DDI(dev));
+       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_EDP:
+               intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+               return port_to_aux_power_domain(intel_dig_port->port);
+       case INTEL_OUTPUT_DP_MST:
+               intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+               return port_to_aux_power_domain(intel_dig_port->port);
+       default:
+               MISSING_CASE(intel_encoder->type);
+               return POWER_DOMAIN_AUX_A;
+       }
+}
+
 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -12460,7 +12510,6 @@ intel_pipe_config_compare(struct drm_device *dev,
        if (INTEL_INFO(dev)->gen < 8) {
                PIPE_CONF_CHECK_M_N(dp_m_n);
 
-               PIPE_CONF_CHECK_I(has_drrs);
                if (current_config->has_drrs)
                        PIPE_CONF_CHECK_M_N(dp_m2_n2);
        } else
index 09bdd94ca3ba435b452ef4593620b01fd59456d5..78b8ec84d576da24de0f96c56fabaf5d380ddea5 100644 (file)
@@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp)
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
-       power_domain = intel_display_port_power_domain(encoder);
+       power_domain = intel_display_port_aux_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);
 
        mutex_lock(&dev_priv->pps_mutex);
@@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
 
        mutex_unlock(&dev_priv->pps_mutex);
 
-       power_domain = intel_display_port_power_domain(encoder);
+       power_domain = intel_display_port_aux_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -816,8 +816,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 
        intel_dp_check_edp(intel_dp);
 
-       intel_aux_display_runtime_get(dev_priv);
-
        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
@@ -926,7 +924,6 @@ done:
        ret = recv_bytes;
 out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-       intel_aux_display_runtime_put(dev_priv);
 
        if (vdd)
                edp_panel_vdd_off(intel_dp, false);
@@ -1784,7 +1781,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;
 
-       power_domain = intel_display_port_power_domain(intel_encoder);
+       power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);
 
        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
@@ -1874,7 +1871,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;
 
-       power_domain = intel_display_port_power_domain(intel_encoder);
+       power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -2025,7 +2022,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        wait_panel_off(intel_dp);
 
        /* We got a reference when we enabled the VDD. */
-       power_domain = intel_display_port_power_domain(intel_encoder);
+       power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -4765,26 +4762,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
        intel_dp->has_audio = false;
 }
 
-static enum intel_display_power_domain
-intel_dp_power_get(struct intel_dp *dp)
-{
-       struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
-       enum intel_display_power_domain power_domain;
-
-       power_domain = intel_display_port_power_domain(encoder);
-       intel_display_power_get(to_i915(encoder->base.dev), power_domain);
-
-       return power_domain;
-}
-
-static void
-intel_dp_power_put(struct intel_dp *dp,
-                  enum intel_display_power_domain power_domain)
-{
-       struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
-       intel_display_power_put(to_i915(encoder->base.dev), power_domain);
-}
-
 static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
@@ -4808,7 +4785,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
                return connector_status_disconnected;
        }
 
-       power_domain = intel_dp_power_get(intel_dp);
+       power_domain = intel_display_port_aux_power_domain(intel_encoder);
+       intel_display_power_get(to_i915(dev), power_domain);
 
        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp))
@@ -4853,7 +4831,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        }
 
 out:
-       intel_dp_power_put(intel_dp, power_domain);
+       intel_display_power_put(to_i915(dev), power_domain);
        return status;
 }
 
@@ -4862,6 +4840,7 @@ intel_dp_force(struct drm_connector *connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
        enum intel_display_power_domain power_domain;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -4871,11 +4850,12 @@ intel_dp_force(struct drm_connector *connector)
        if (connector->status != connector_status_connected)
                return;
 
-       power_domain = intel_dp_power_get(intel_dp);
+       power_domain = intel_display_port_aux_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
 
        intel_dp_set_edid(intel_dp);
 
-       intel_dp_power_put(intel_dp, power_domain);
+       intel_display_power_put(dev_priv, power_domain);
 
        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
@@ -5091,7 +5071,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
         * indefinitely.
         */
        DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-       power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+       power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
        intel_display_power_get(dev_priv, power_domain);
 
        edp_panel_vdd_schedule_off(intel_dp);
@@ -5153,7 +5133,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        enum intel_display_power_domain power_domain;
        enum irqreturn ret = IRQ_NONE;
 
-       if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+       if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
+           intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -5172,7 +5153,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                      port_name(intel_dig_port->port),
                      long_hpd ? "long" : "short");
 
-       power_domain = intel_display_port_power_domain(intel_encoder);
+       power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);
 
        if (long_hpd) {
index 0598932ce6235b623df9ff6a62d74e100087f78e..f2a1142bff34310a40abd262a59106bffdecba6f 100644 (file)
@@ -1169,6 +1169,8 @@ void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
                                 struct intel_crtc_state *pipe_config);
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
@@ -1377,8 +1379,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
index 9eafa191cee2e33c34942006ece7b0b8374cbc8f..81cdd9ff38922397171e91558136ae0765229106 100644 (file)
@@ -1335,21 +1335,17 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-       struct intel_encoder *intel_encoder =
-               &hdmi_to_dig_port(intel_hdmi)->base;
-       enum intel_display_power_domain power_domain;
        struct edid *edid = NULL;
        bool connected = false;
 
-       power_domain = intel_display_port_power_domain(intel_encoder);
-       intel_display_power_get(dev_priv, power_domain);
+       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
        if (force)
                edid = drm_get_edid(connector,
                                    intel_gmbus_get_adapter(dev_priv,
                                    intel_hdmi->ddc_bus));
 
-       intel_display_power_put(dev_priv, power_domain);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
        to_intel_connector(connector)->detect_edid = edid;
        if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1383,6 +1379,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
 
+       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
        while (!live_status && --retry) {
                live_status = intel_digital_port_connected(dev_priv,
                                hdmi_to_dig_port(intel_hdmi));
@@ -1402,6 +1400,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        } else
                status = connector_status_disconnected;
 
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
        return status;
 }
 
index 1369fc41d03913f217894c700fa220465362b975..8324654037b65577fd4bdc65484576e9ceaf432a 100644 (file)
@@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
        int i = 0, inc, try = 0;
        int ret = 0;
 
-       intel_aux_display_runtime_get(dev_priv);
+       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
        mutex_lock(&dev_priv->gmbus_mutex);
 
        if (bus->force_bit) {
@@ -595,7 +595,9 @@ timeout:
 
 out:
        mutex_unlock(&dev_priv->gmbus_mutex);
-       intel_aux_display_runtime_put(dev_priv);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
        return ret;
 }
 
index d89c1d0aa1b74793a4328c861c312a91709092e7..7e23d65c9b246ac6672ad432c0f4f4f38bab6254 100644 (file)
@@ -362,6 +362,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
        BIT(POWER_DOMAIN_AUX_C) |                       \
        BIT(POWER_DOMAIN_AUDIO) |                       \
        BIT(POWER_DOMAIN_VGA) |                         \
+       BIT(POWER_DOMAIN_GMBUS) |                       \
        BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (                \
        BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
@@ -1483,6 +1484,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        BIT(POWER_DOMAIN_AUX_B) |                       \
        BIT(POWER_DOMAIN_AUX_C) |                       \
        BIT(POWER_DOMAIN_AUX_D) |                       \
+       BIT(POWER_DOMAIN_GMBUS) |                       \
        BIT(POWER_DOMAIN_INIT))
 #define HSW_DISPLAY_POWER_DOMAINS (                            \
        (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
@@ -1845,6 +1847,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
        i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
                                                     i915.disable_power_well);
 
+       BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
+
        mutex_init(&power_domains->lock);
 
        /*
@@ -2063,36 +2067,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
        power_domains->initializing = false;
 }
 
-/**
- * intel_aux_display_runtime_get - grab an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function grabs a power domain reference for the auxiliary power domain
- * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
- * parents are powered up. Therefore users should only grab a reference to the
- * innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_aux_display_runtime_put() to release the reference again.
- */
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-{
-       intel_runtime_pm_get(dev_priv);
-}
-
-/**
- * intel_aux_display_runtime_put - release an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function drops the auxiliary power domain reference obtained by
- * intel_aux_display_runtime_get() and might power down the corresponding
- * hardware block right away if this is the last reference.
- */
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
-       intel_runtime_pm_put(dev_priv);
-}
-
 /**
  * intel_runtime_pm_get - grab a runtime pm reference
  * @dev_priv: i915 device instance
index 64f16ea779ef397346c9f75aa946096083225f04..7b990b4e96d234bcdc965572383092e3d9b303a0 100644 (file)
@@ -63,8 +63,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
 #if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
        struct imx_drm_device *imxdrm = drm->dev_private;
 
-       if (imxdrm->fbhelper)
-               drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
+       drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
 #endif
 }
 
@@ -340,7 +339,7 @@ err_kms:
  * imx_drm_add_crtc - add a new crtc
  */
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
-               struct imx_drm_crtc **new_crtc,
+               struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
                const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
                struct device_node *port)
 {
@@ -379,7 +378,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
        drm_crtc_helper_add(crtc,
                        imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
-       drm_crtc_init(drm, crtc,
+       drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
                        imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
 
        return 0;
index 28e776d8d9d270259f0df5b270897cdb8aec0e24..83284b4d4be1ef86ebefbb3276ef41bf3d5eb6ef 100644 (file)
@@ -9,6 +9,7 @@ struct drm_display_mode;
 struct drm_encoder;
 struct drm_fbdev_cma;
 struct drm_framebuffer;
+struct drm_plane;
 struct imx_drm_crtc;
 struct platform_device;
 
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs {
 };
 
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
-               struct imx_drm_crtc **new_crtc,
+               struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
                const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
                struct device_node *port);
 int imx_drm_remove_crtc(struct imx_drm_crtc *);
index e671ad3694166041ce76c4eaa57539897eba81d1..f9597146dc674a041e89133143286063d07739f2 100644 (file)
@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = {
        { .compatible = "fsl,imx53-tve", },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
 
 static struct platform_driver imx_tve_driver = {
        .probe          = imx_tve_probe,
index 7bc8301faffffff69194f20ca9d64b8e398f6478..4ab841eebee19dc099758f0018d9cf5030b475c0 100644 (file)
@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
 
        spin_lock_irqsave(&drm->event_lock, flags);
        if (ipu_crtc->page_flip_event)
-               drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
+               drm_crtc_send_vblank_event(&ipu_crtc->base,
+                                          ipu_crtc->page_flip_event);
        ipu_crtc->page_flip_event = NULL;
        imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
        spin_unlock_irqrestore(&drm->event_lock, flags);
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
        int dp = -EINVAL;
        int ret;
-       int id;
 
        ret = ipu_get_resources(ipu_crtc, pdata);
        if (ret) {
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
                return ret;
        }
 
+       if (pdata->dp >= 0)
+               dp = IPU_DP_FLOW_SYNC_BG;
+       ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+                                           DRM_PLANE_TYPE_PRIMARY);
+       if (IS_ERR(ipu_crtc->plane[0])) {
+               ret = PTR_ERR(ipu_crtc->plane[0]);
+               goto err_put_resources;
+       }
+
        ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
-                       &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
+                       &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
+                       ipu_crtc->dev->of_node);
        if (ret) {
                dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
                goto err_put_resources;
        }
 
-       if (pdata->dp >= 0)
-               dp = IPU_DP_FLOW_SYNC_BG;
-       id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
-       ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
-                                           pdata->dma[0], dp, BIT(id), true);
        ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
        if (ret) {
                dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 
        /* If this crtc is using the DP, add an overlay plane */
        if (pdata->dp >= 0 && pdata->dma[1] > 0) {
-               ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
-                                                   pdata->dma[1],
-                                                   IPU_DP_FLOW_SYNC_FG,
-                                                   BIT(id), false);
+               ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
+                                               IPU_DP_FLOW_SYNC_FG,
+                                               drm_crtc_mask(&ipu_crtc->base),
+                                               DRM_PLANE_TYPE_OVERLAY);
                if (IS_ERR(ipu_crtc->plane[1]))
                        ipu_crtc->plane[1] = NULL;
        }
@@ -407,28 +412,6 @@ err_put_resources:
        return ret;
 }
 
-static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
-                                                 int port_id)
-{
-       struct device_node *port;
-       int id, ret;
-
-       port = of_get_child_by_name(parent, "port");
-       while (port) {
-               ret = of_property_read_u32(port, "reg", &id);
-               if (!ret && id == port_id)
-                       return port;
-
-               do {
-                       port = of_get_next_child(parent, port);
-                       if (!port)
-                               return NULL;
-               } while (of_node_cmp(port->name, "port"));
-       }
-
-       return NULL;
-}
-
 static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
 {
        struct ipu_client_platformdata *pdata = dev->platform_data;
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = {
 static int ipu_drm_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct ipu_client_platformdata *pdata = dev->platform_data;
        int ret;
 
        if (!dev->platform_data)
                return -EINVAL;
 
-       if (!dev->of_node) {
-               /* Associate crtc device with the corresponding DI port node */
-               dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
-                                                     pdata->di + 2);
-               if (!dev->of_node) {
-                       dev_err(dev, "missing port@%d node in %s\n",
-                               pdata->di + 2, dev->parent->of_node->full_name);
-                       return -ENODEV;
-               }
-       }
-
        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;
index 575f4c84388f0c64f54d2cdf598064011cfffd6e..e2ff410bab744442e5c6cb2e1bc27cbba58ee7f2 100644 (file)
@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = {
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
                                 int dma, int dp, unsigned int possible_crtcs,
-                                bool priv)
+                                enum drm_plane_type type)
 {
        struct ipu_plane *ipu_plane;
        int ret;
@@ -399,10 +399,9 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
        ipu_plane->dma = dma;
        ipu_plane->dp_flow = dp;
 
-       ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs,
-                            &ipu_plane_funcs, ipu_plane_formats,
-                            ARRAY_SIZE(ipu_plane_formats),
-                            priv);
+       ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
+                                      &ipu_plane_funcs, ipu_plane_formats,
+                                      ARRAY_SIZE(ipu_plane_formats), type);
        if (ret) {
                DRM_ERROR("failed to initialize plane\n");
                kfree(ipu_plane);
index 9b5eff18f5b826b722d97b8e763dbd3017197aee..3a443b413c60caa9734883f7aaa95a69bb7f3e29 100644 (file)
@@ -34,7 +34,7 @@ struct ipu_plane {
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
                                 int dma, int dp, unsigned int possible_crtcs,
-                                bool priv);
+                                enum drm_plane_type type);
 
 /* Init IDMAC, DMFC, DP */
 int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
index b4deb9cf9d71613fa49562c4619cf8ffec5a33f3..2e9b9f1b5cd2bc8e37f96a66ea2da2d2e61c2ba6 100644 (file)
@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 
        if (imxpd->panel && imxpd->panel->funcs &&
            imxpd->panel->funcs->get_modes) {
+               struct drm_display_info *di = &connector->display_info;
+
                num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
+               if (!imxpd->bus_format && di->num_bus_formats)
+                       imxpd->bus_format = di->bus_formats[0];
                if (num_modes > 0)
                        return num_modes;
        }
index db6bc676054519b08f3dbe6e45a23782362c9376..64c8d932d5f19de27aa6f66c4e5371f5484cdd7c 100644 (file)
@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
        struct drm_device *dev = drm->dev;
        struct nouveau_page_flip_state *s;
        unsigned long flags;
-       int crtcid = -1;
 
        spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 
        s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
        if (s->event) {
-               /* Vblank timestamps/counts are only correct on >= NV-50 */
-               if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
-                       crtcid = s->crtc;
+               if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+                       drm_arm_vblank_event(dev, s->crtc, s->event);
+               } else {
+                       drm_send_vblank_event(dev, s->crtc, s->event);
 
-               drm_send_vblank_event(dev, crtcid, s->event);
+                       /* Give up ownership of vblank for page-flipped crtc */
+                       drm_vblank_put(dev, s->crtc);
+               }
+       } else {
+               /* Give up ownership of vblank for page-flipped crtc */
+               drm_vblank_put(dev, s->crtc);
        }
-
-       /* Give up ownership of vblank for page-flipped crtc */
-       drm_vblank_put(dev, s->crtc);
 
        list_del(&s->head);
        if (ps)
index 248953d2fdb742d69111193f62638c0a95f179d5..0154db43860c7ec6ca9d0aeb828650a88e91a718 100644 (file)
@@ -8472,7 +8472,7 @@ restart_ih:
        if (queue_dp)
                schedule_work(&rdev->dp_work);
        if (queue_hotplug)
-               schedule_work(&rdev->hotplug_work);
+               schedule_delayed_work(&rdev->hotplug_work, 0);
        if (queue_reset) {
                rdev->needs_reset = true;
                wake_up_all(&rdev->fence_queue);
@@ -9630,6 +9630,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
                    (rdev->disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                }
+
+               /* Save number of lines the linebuffer leads before the scanout */
+               radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }
 
        /* select wm A */
index 7f33767d7ed65f4a966cb2b3e01b250846cca340..2ad462896896b017402688352428082b6925df7b 100644 (file)
@@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
                c.full = dfixed_div(c, a);
                priority_b_mark = dfixed_trunc(c);
                priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+               /* Save number of lines the linebuffer leads before the scanout */
+               radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }
 
        /* select wm A */
@@ -5344,7 +5347,7 @@ restart_ih:
        if (queue_dp)
                schedule_work(&rdev->dp_work);
        if (queue_hotplug)
-               schedule_work(&rdev->hotplug_work);
+               schedule_delayed_work(&rdev->hotplug_work, 0);
        if (queue_hdmi)
                schedule_work(&rdev->audio_work);
        if (queue_thermal && rdev->pm.dpm_enabled)
index 238b13f045c180480ef4f49983bbc19b76016729..9e7e2bf03b815647451622d69c6cbce2d03582de 100644 (file)
@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev)
                status = r100_irq_ack(rdev);
        }
        if (queue_hotplug)
-               schedule_work(&rdev->hotplug_work);
+               schedule_delayed_work(&rdev->hotplug_work, 0);
        if (rdev->msi_enabled) {
                switch (rdev->family) {
                case CHIP_RS400:
@@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
        uint32_t pixel_bytes1 = 0;
        uint32_t pixel_bytes2 = 0;
 
+       /* Guess line buffer size to be 8192 pixels */
+       u32 lb_size = 8192;
+
        if (!rdev->mode_info.mode_config_initialized)
                return;
 
@@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev)
                DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
                          (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
        }
+
+       /* Save number of lines the linebuffer leads before the scanout */
+       if (mode1)
+               rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+       if (mode2)
+               rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
 }
 
 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
index 4ea5b10ff5f412fb0fc2b161c8fe936ab154bd2f..cc2fdf0be37a600e313d239e07520331229a0035 100644 (file)
@@ -4276,7 +4276,7 @@ restart_ih:
                WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
-               schedule_work(&rdev->hotplug_work);
+               schedule_delayed_work(&rdev->hotplug_work, 0);
        if (queue_hdmi)
                schedule_work(&rdev->audio_work);
        if (queue_thermal && rdev->pm.dpm_enabled)
index b6cbd816537e7e69bc920482961cdb47b6dfd68d..87db64983ea8c18a0305eeedf1e25f18cffc87ae 100644 (file)
@@ -2414,7 +2414,7 @@ struct radeon_device {
        struct r600_ih ih; /* r6/700 interrupt ring */
        struct radeon_rlc rlc;
        struct radeon_mec mec;
-       struct work_struct hotplug_work;
+       struct delayed_work hotplug_work;
        struct work_struct dp_work;
        struct work_struct audio_work;
        int num_crtc; /* number of crtcs */
index fe994aac3b0403357d40e9b29f46e8818be454bf..c77d349c561c6960e79d0b9338d7368302e1d188 100644 (file)
@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
        /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
        { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
                PCI_VENDOR_ID_IBM, 0x0550, 1},
+       /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+               PCI_VENDOR_ID_IBM, 0x054d, 1},
        /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
        { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
                PCI_VENDOR_ID_IBM, 0x0530, 1},
index 5a2cafb4f1bc595220e603ed1942e65bf24b20b7..340f3f549f295314788fbf9b3052880b8095a7f1 100644 (file)
@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
        if (r < 0)
                return connector_status_disconnected;
 
+       if (radeon_connector->detected_hpd_without_ddc) {
+               force = true;
+               radeon_connector->detected_hpd_without_ddc = false;
+       }
+
        if (!force && radeon_check_hpd_status_unchanged(connector)) {
                ret = connector->status;
                goto exit;
        }
 
-       if (radeon_connector->ddc_bus)
+       if (radeon_connector->ddc_bus) {
                dret = radeon_ddc_probe(radeon_connector, false);
+
+               /* Sometimes the pins required for the DDC probe on DVI
+                * connectors don't make contact at the same time that the ones
+                * for HPD do. If the DDC probe fails even though we had an HPD
+                * signal, try again later */
+               if (!dret && !force &&
+                   connector->status != connector_status_connected) {
+                       DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+                       radeon_connector->detected_hpd_without_ddc = true;
+                       schedule_delayed_work(&rdev->hotplug_work,
+                                             msecs_to_jiffies(1000));
+                       goto exit;
+               }
+       }
        if (dret) {
                radeon_connector->detected_by_load = false;
                radeon_connector_free_edid(connector);
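
(radeon, DVI detect) The retry covers connectors whose HPD pin makes contact before the DDC pins do: the detect pass right after the hotplug interrupt sees HPD asserted but an unanswered DDC probe, so instead of downgrading the status it flags the connector and re-runs the hotplug work a second later; the flag then forces a full probe past the "HPD unchanged" shortcut. A condensed sketch of that state machine, with the surrounding locking and EDID handling elided and the hunk's own names reused:

    /* Sketch (kernel C): one detect() pass with the HPD/DDC debounce. */
    static enum drm_connector_status
    dvi_detect_sketch(struct radeon_connector *rc, struct drm_connector *conn,
                      struct radeon_device *rdev, bool force)
    {
            bool dret;

            if (rc->detected_hpd_without_ddc) {     /* retry fired */
                    force = true;                   /* force a full probe */
                    rc->detected_hpd_without_ddc = false;
            }

            dret = radeon_ddc_probe(rc, false);
            if (!dret && !force && conn->status != connector_status_connected) {
                    /* HPD says "plugged", DDC not up yet: retry in 1s */
                    rc->detected_hpd_without_ddc = true;
                    schedule_delayed_work(&rdev->hotplug_work,
                                          msecs_to_jiffies(1000));
                    return conn->status;            /* keep old status */
            }

            return dret ? connector_status_connected
                        : connector_status_disconnected;
    }
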
index a8d9927ed9eb9657d89e34c84481c9c9c9e3513f..1eca0acac016f314cd6960b9dda3067bcc04e83c 100644 (file)
@@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
         * to complete in this vblank?
         */
        if (update_pending &&
-           (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
+           (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
+                                                              crtc_id,
+                                                              USE_REAL_VBLANKSTART,
                                                               &vpos, &hpos, NULL, NULL,
                                                               &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
            ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
@@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
        struct drm_crtc *crtc = &radeon_crtc->base;
        unsigned long flags;
        int r;
+       int vpos, hpos, stat, min_udelay;
+       struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
         down_read(&rdev->exclusive_lock);
        if (work->fence) {
@@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work)
        /* set the proper interrupt */
        radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
 
+       /* If this happens to execute within the "virtually extended" vblank
+        * interval before the start of the real vblank interval then it needs
+        * to delay programming the mmio flip until the real vblank is entered.
+        * This prevents completing a flip too early due to the way we fudge
+        * our vblank counter and vblank timestamps in order to work around the
+        * problem that the hw fires vblank interrupts before actual start of
+        * vblank (when line buffer refilling is done for a frame). It
+        * complements the fudging logic in radeon_get_crtc_scanoutpos() for
+        * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
+        *
+        * In practice this won't execute very often unless on very fast
+        * machines because the time window for this to happen is very small.
+        */
+       for (;;) {
+               /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+                * start in hpos, and to the "fudged earlier" vblank start in
+                * vpos.
+                */
+               stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
+                                                 GET_DISTANCE_TO_VBLANKSTART,
+                                                 &vpos, &hpos, NULL, NULL,
+                                                 &crtc->hwmode);
+
+               if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+                   (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+                   !(vpos >= 0 && hpos <= 0))
+                       break;
+
+               /* Sleep at least until estimated real start of hw vblank */
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+               min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+               usleep_range(min_udelay, 2 * min_udelay);
+               spin_lock_irqsave(&crtc->dev->event_lock, flags);
+       }
+
        /* do the flip (mmio) */
        radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
 
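(radeon, flip delay) The loop sleeps through the software-extended head of vblank: GET_DISTANCE_TO_VBLANKSTART reports vpos >= 0 once the fudged vblank has begun and hpos <= 0 while the real one has not, and each pass sleeps roughly the remaining (-hpos + 1) scanlines at linedur_ns per line, clamped to at least 5 us per line; usleep_range(min, 2 * min) gives the scheduler some slack. A check of the per-iteration arithmetic:

    #include <stdio.h>

    /* Mirrors the hunk's min_udelay computation: sleep long enough to
     * cover the -hpos scanlines still separating us from real vblank. */
    static unsigned min_udelay(int hpos, int linedur_ns)
    {
            int per_line_us = linedur_ns / 1000;

            if (per_line_us < 5)    /* the max(..., 5) clamp */
                    per_line_us = 5;
            return (unsigned)((-hpos + 1) * per_line_us);
    }

    int main(void)
    {
            /* assumption: ~14.8 us/line (1080p@60), 3 lines to go */
            printf("sleep >= %u us\n", min_udelay(-3, 14800)); /* 56 */
            return 0;
    }
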
@@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ *              For driver-internal use only, the following flags are also supported:
+ *
+ *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+ *              of a fudged earlier start of vblank.
+ *
+ *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ *              fudged earlier start of vblank in *vpos and the distance
+ *              to true start of vblank in *hpos.
+ *
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                vbl_end = 0;
        }
 
+       /* Called from driver internal vblank counter query code? */
+       if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+               /* Caller wants distance from real vbl_start in *hpos */
+               *hpos = *vpos - vbl_start;
+       }
+
+       /* Fudge vblank to start a few scanlines earlier to handle the
+        * problem that vblank irqs fire a few scanlines before start
+        * of vblank. Some driver internal callers need the true vblank
+        * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+        *
+        * The cause of the "early" vblank irq is that the irq is triggered
+        * by the line buffer logic when the line buffer read position enters
+        * the vblank, whereas our crtc scanout position naturally lags the
+        * line buffer read position.
+        */
+       if (!(flags & USE_REAL_VBLANKSTART))
+               vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
        /* Test scanout position against vblank region. */
        if ((*vpos < vbl_start) && (*vpos >= vbl_end))
                in_vbl = false;
 
+       /* In vblank? */
+       if (in_vbl)
+               ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+       /* Called from driver internal vblank counter query code? */
+       if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+               /* Caller wants distance from fudged earlier vbl_start */
+               *vpos -= vbl_start;
+               return ret;
+       }
+
        /* Check if inside vblank area and apply corrective offsets:
         * vpos will then be >=0 in video scanout area, but negative
         * within vblank area, counting down the number of lines until
@@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        /* Correct for shifted end of vbl at vbl_end. */
        *vpos = *vpos - vbl_end;
 
-       /* In vblank? */
-       if (in_vbl)
-               ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
-       /* Is vpos outside nominal vblank area, but less than
-        * 1/100 of a frame height away from start of vblank?
-        * If so, assume this isn't a massively delayed vblank
-        * interrupt, but a vblank interrupt that fired a few
-        * microseconds before true start of vblank. Compensate
-        * by adding a full frame duration to the final timestamp.
-        * Happens, e.g., on ATI R500, R600.
-        *
-        * We only do this if DRM_CALLED_FROM_VBLIRQ.
-        */
-       if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-               vbl_start = mode->crtc_vdisplay;
-               vtotal = mode->crtc_vtotal;
-
-               if (vbl_start - *vpos < vtotal / 100) {
-                       *vpos -= vtotal;
-
-                       /* Signal this correction as "applied". */
-                       ret |= 0x8;
-               }
-       }
-
        return ret;
 }
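
(radeon, scanoutpos) After this rework the function supports three coordinate conventions, selected by flags: by default vbl_start is pulled lb_vblank_lead_lines earlier so the reported position agrees with when the hardware actually raises its vblank interrupt; USE_REAL_VBLANKSTART skips that fudge for callers (power management, flip completion) that need the true hardware window; and GET_DISTANCE_TO_VBLANKSTART returns *vpos relative to the fudged start and *hpos relative to the real one instead of screen coordinates. Illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
            /* assumption: 1080 active lines, 2-line linebuffer lead,
             * CRTC currently on scanline 1079 */
            int vbl_start = 1080, lead = 2, vpos_raw = 1079;

            /* GET_DISTANCE_TO_VBLANKSTART semantics: */
            printf("hpos=%d vpos=%d\n",
                   vpos_raw - vbl_start,           /* -1: before real start */
                   vpos_raw - (vbl_start - lead)); /* +1: fudged vblank on  */
            return 0;
    }
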
index 171d3e43c30cc02257df75645c6c77f0a726de33..979f3bf65f2c474f231a26eb47af963d4436be0f 100644 (file)
@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
 static void radeon_hotplug_work_func(struct work_struct *work)
 {
        struct radeon_device *rdev = container_of(work, struct radeon_device,
-                                                 hotplug_work);
+                                                 hotplug_work.work);
        struct drm_device *dev = rdev->ddev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
                }
        }
 
-       INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+       INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
        INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
        INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
 
@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
        r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
        if (r) {
                rdev->irq.installed = false;
-               flush_work(&rdev->hotplug_work);
+               flush_delayed_work(&rdev->hotplug_work);
                return r;
        }
 
@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
                rdev->irq.installed = false;
                if (rdev->msi_enabled)
                        pci_disable_msi(rdev->pdev);
-               flush_work(&rdev->hotplug_work);
+               flush_delayed_work(&rdev->hotplug_work);
        }
 }
 
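(radeon, irq) Turning hotplug_work into a delayed_work is what makes the one-second DVI re-probe possible; schedule_delayed_work(..., 0) queues exactly like schedule_work() did, and the handler's container_of() must now go through the embedded .work member. A minimal sketch of the conversion pattern:

    #include <linux/workqueue.h>

    /* Sketch (kernel C): mydev/hotplug_fn are stand-ins. */
    struct mydev {
            struct delayed_work hotplug_work;
    };

    static void hotplug_fn(struct work_struct *work)
    {
            /* delayed_work embeds a work_struct named .work */
            struct mydev *d = container_of(work, struct mydev,
                                           hotplug_work.work);
            (void)d;
    }

    static void setup_sketch(struct mydev *d)
    {
            INIT_DELAYED_WORK(&d->hotplug_work, hotplug_fn);
            schedule_delayed_work(&d->hotplug_work, 0); /* == schedule_work */
            flush_delayed_work(&d->hotplug_work);
    }
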
index 0ec6fcca16d3b12b898d8c9c036ab330cdc47226..d290a8a09036e9d28aa78332f7f2d22ee46ec0e2 100644 (file)
@@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
  */
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
 {
+       int vpos, hpos, stat;
+       u32 count;
        struct radeon_device *rdev = dev->dev_private;
 
        if (crtc < 0 || crtc >= rdev->num_crtc) {
@@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
                return -EINVAL;
        }
 
-       return radeon_get_vblank_counter(rdev, crtc);
+       /* The hw increments its frame counter at start of vsync, not at start
+        * of vblank, as is required by DRM core vblank counter handling.
+        * Cook the hw count here to make it appear to the caller as if it
+        * incremented at start of vblank. We measure distance to start of
+        * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+        * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+        * result by 1 to give the proper appearance to the caller.
+        */
+       if (rdev->mode_info.crtcs[crtc]) {
+               /* Repeat readout if needed to provide stable result if
+                * we cross start of vsync during the queries.
+                */
+               do {
+                       count = radeon_get_vblank_counter(rdev, crtc);
+                       /* Ask radeon_get_crtc_scanoutpos to return vpos as
+                        * distance to start of vblank, instead of regular
+                        * vertical scanout pos.
+                        */
+                       stat = radeon_get_crtc_scanoutpos(
+                               dev, crtc, GET_DISTANCE_TO_VBLANKSTART,
+                               &vpos, &hpos, NULL, NULL,
+                               &rdev->mode_info.crtcs[crtc]->base.hwmode);
+               } while (count != radeon_get_vblank_counter(rdev, crtc));
+
+               if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+                   (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+                       DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+               } else {
+                       DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+                                     crtc, vpos);
+
+                       /* Bump counter if we are at >= leading edge of vblank,
+                        * but before vsync where vpos would turn negative and
+                        * the hw counter really increments.
+                        */
+                       if (vpos >= 0)
+                               count++;
+               }
+       } else {
+               /* Fallback to use value as is. */
+               count = radeon_get_vblank_counter(rdev, crtc);
+               DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+       }
+
+       return count;
 }
 
 /**
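
(radeon, vblank counter) Two details keep the cooked counter stable: the hardware count is read before and after the scanout-position query and the pair is retried until both reads match, so a vsync edge cannot slip in between; and the +1 bump applies exactly in the window where the fudged vblank has started but the hardware counter (which increments at vsync) has not yet moved. The read-retry idiom in isolation:

    /* Sketch: sample a counter plus a related position coherently,
     * by re-reading the counter until it did not change meanwhile. */
    static unsigned stable_sample(unsigned (*get_count)(void),
                                  int (*get_pos)(void), int *pos)
    {
            unsigned count;

            do {
                    count = get_count();
                    *pos = get_pos();       /* may straddle an increment */
            } while (count != get_count()); /* ...then try again */

            return count;
    }
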
index 830e171c3a9e35c108f69acd2d4d45386033cd8b..bba112628b478476b0685dc8416ef4badc170d46 100644 (file)
@@ -367,6 +367,7 @@ struct radeon_crtc {
        u32 line_time;
        u32 wm_low;
        u32 wm_high;
+       u32 lb_vblank_lead_lines;
        struct drm_display_mode hw_mode;
        enum radeon_output_csc output_csc;
 };
@@ -553,6 +554,7 @@ struct radeon_connector {
        void *con_priv;
        bool dac_load_detect;
        bool detected_by_load; /* if the connection status was determined by load */
+       bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
        uint16_t connector_object_id;
        struct radeon_hpd hpd;
        struct radeon_router router;
@@ -686,6 +688,9 @@ struct atom_voltage_table
        struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
 };
 
+/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART           (1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART    (1 << 31)
 
 extern void
 radeon_add_atom_connector(struct drm_device *dev,
index f4f03dcc153049358b7167695c27fff60af55d46..59abebd6b5dc42d4941131adb51b76ab677d3fd4 100644 (file)
@@ -1756,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
         */
        for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
                if (rdev->pm.active_crtcs & (1 << crtc)) {
-                       vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0,
+                       vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
+                                                               crtc,
+                                                               USE_REAL_VBLANKSTART,
                                                                &vpos, &hpos, NULL, NULL,
                                                                &rdev->mode_info.crtcs[crtc]->base.hwmode);
                        if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
index 97a904835759f7876b2b69be104493cf302554a7..6244f4e44e9a541d15d5a76ac36beff8822c3145 100644 (file)
@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev)
                status = rs600_irq_ack(rdev);
        }
        if (queue_hotplug)
-               schedule_work(&rdev->hotplug_work);
+               schedule_delayed_work(&rdev->hotplug_work, 0);
        if (queue_hdmi)
                schedule_work(&rdev->audio_work);
        if (rdev->msi_enabled) {
index 516ca27cfa12847e904bd9f8ecb8047db53bdba5..6bc44c24e837b30e39970f0a74e64eab4be88294 100644 (file)
@@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
 {
        u32 tmp;
 
+       /* Guess line buffer size to be 8192 pixels */
+       u32 lb_size = 8192;
+
        /*
         * Line Buffer Setup
         * There is a single line buffer shared by both display controllers.
@@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
        }
        WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
+
+       /* Save number of lines the linebuffer leads before the scanout */
+       if (mode1)
+               rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+       if (mode2)
+               rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
 }
 
 struct rs690_watermark {
index 07037e32dea327f935ef39e0bdafea8fb640b39a..f878d6962da58b4aed296c22e485846bc14183c4 100644 (file)
@@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
                c.full = dfixed_div(c, a);
                priority_b_mark = dfixed_trunc(c);
                priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+               /* Save number of lines the linebuffer leads before the scanout */
+               radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }
 
        /* select wm A */
@@ -6848,7 +6851,7 @@ restart_ih:
        if (queue_dp)
                schedule_work(&rdev->dp_work);
        if (queue_hotplug)
-               schedule_work(&rdev->hotplug_work);
+               schedule_delayed_work(&rdev->hotplug_work, 0);
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
index 8caea0a33dd84560be30f18b882dc8c4b3daed17..d908321b94ced4ffe10936f4c7ef37fc518d127a 100644 (file)
@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
        vma->vm_flags &= ~VM_PFNMAP;
+       vma->vm_pgoff = 0;
 
        ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
                             obj->size, &rk_obj->dma_attrs);
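
(rockchip) DRM mmap offsets are fake offsets used only to look up the GEM object, but dma_mmap_attrs() interprets vma->vm_pgoff as a page offset into the buffer being mapped, so without the reset the mapping would start vm_pgoff pages into (or past) the allocation. A sketch of the fixed path, using the 4.4-era struct dma_attrs API and assuming drm_gem_mmap() already performed the lookup:

    /* Sketch (kernel C): map a CMA-backed GEM object to userspace. */
    static int gem_mmap_sketch(struct drm_gem_object *obj,
                               struct vm_area_struct *vma,
                               void *kvaddr, dma_addr_t dma_addr,
                               struct dma_attrs *attrs)
    {
            vma->vm_flags &= ~VM_PFNMAP;
            vma->vm_pgoff = 0;  /* fake offset was consumed by the lookup */

            return dma_mmap_attrs(obj->dev->dev, vma, kvaddr, dma_addr,
                                  obj->size, attrs);
    }
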
index 5d8ae5e49c440f98afda4c02d6aea8e335c78c82..03c47eeadc812ba649e68ad97270d5c512efea86 100644 (file)
@@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = {
          .data = &rk3288_vop },
        {},
 };
+MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
 
 static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
 {
@@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane,
        val = (dest.y2 - dest.y1 - 1) << 16;
        val |= (dest.x2 - dest.x1 - 1) & 0xffff;
        VOP_WIN_SET(vop, win, dsp_info, val);
-       val = (dsp_sty - 1) << 16;
-       val |= (dsp_stx - 1) & 0xffff;
+       val = dsp_sty << 16;
+       val |= dsp_stx & 0xffff;
        VOP_WIN_SET(vop, win, dsp_st, val);
        VOP_WIN_SET(vop, win, rb_swap, rb_swap);
 
@@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win,
 
        if (state->event) {
                spin_lock_irqsave(&drm->event_lock, flags);
-               drm_send_vblank_event(drm, -1, state->event);
+               drm_crtc_send_vblank_event(crtc, state->event);
                spin_unlock_irqrestore(&drm->event_lock, flags);
        }
 
@@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop)
                return PTR_ERR(vop->dclk);
        }
 
-       ret = clk_prepare(vop->hclk);
-       if (ret < 0) {
-               dev_err(vop->dev, "failed to prepare hclk\n");
-               return ret;
-       }
-
        ret = clk_prepare(vop->dclk);
        if (ret < 0) {
                dev_err(vop->dev, "failed to prepare dclk\n");
-               goto err_unprepare_hclk;
+               return ret;
        }
 
-       ret = clk_prepare(vop->aclk);
+       /* Enable both the hclk and aclk to setup the vop */
+       ret = clk_prepare_enable(vop->hclk);
        if (ret < 0) {
-               dev_err(vop->dev, "failed to prepare aclk\n");
+               dev_err(vop->dev, "failed to prepare/enable hclk\n");
                goto err_unprepare_dclk;
        }
 
-       /*
-        * enable hclk, so that we can config vop register.
-        */
-       ret = clk_enable(vop->hclk);
+       ret = clk_prepare_enable(vop->aclk);
        if (ret < 0) {
-               dev_err(vop->dev, "failed to prepare aclk\n");
-               goto err_unprepare_aclk;
+               dev_err(vop->dev, "failed to prepare/enable aclk\n");
+               goto err_disable_hclk;
        }
+
        /*
         * do hclk_reset, reset all vop registers.
         */
@@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop)
        if (IS_ERR(ahb_rst)) {
                dev_err(vop->dev, "failed to get ahb reset\n");
                ret = PTR_ERR(ahb_rst);
-               goto err_disable_hclk;
+               goto err_disable_aclk;
        }
        reset_control_assert(ahb_rst);
        usleep_range(10, 20);
@@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop)
        if (IS_ERR(vop->dclk_rst)) {
                dev_err(vop->dev, "failed to get dclk reset\n");
                ret = PTR_ERR(vop->dclk_rst);
-               goto err_unprepare_aclk;
+               goto err_disable_aclk;
        }
        reset_control_assert(vop->dclk_rst);
        usleep_range(10, 20);
        reset_control_deassert(vop->dclk_rst);
 
        clk_disable(vop->hclk);
+       clk_disable(vop->aclk);
 
        vop->is_enabled = false;
 
        return 0;
 
+err_disable_aclk:
+       clk_disable_unprepare(vop->aclk);
 err_disable_hclk:
-       clk_disable(vop->hclk);
-err_unprepare_aclk:
-       clk_unprepare(vop->aclk);
+       clk_disable_unprepare(vop->hclk);
 err_unprepare_dclk:
        clk_unprepare(vop->dclk);
-err_unprepare_hclk:
-       clk_unprepare(vop->hclk);
        return ret;
 }
 
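(rockchip vop) The rework collapses separate clk_prepare()/clk_enable() steps into clk_prepare_enable() for the two clocks that must actually run during setup, and straightens the unwind ladder so each error label undoes exactly what had succeeded, in reverse order. The general shape:

    #include <linux/clk.h>

    /* Sketch: take two clocks with a reverse-order error unwind. */
    static int enable_clocks_sketch(struct clk *hclk, struct clk *aclk)
    {
            int ret;

            ret = clk_prepare_enable(hclk); /* prepare + enable in one */
            if (ret)
                    return ret;             /* nothing to undo yet */

            ret = clk_prepare_enable(aclk);
            if (ret)
                    clk_disable_unprepare(hclk); /* undo only step one */

            return ret;
    }
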
index f545913a56c7a972fe71dea32a20972f5440f5fb..578fe0a9324cd9bd710e3d9ef2d393e8311d3cb2 100644 (file)
@@ -412,7 +412,7 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
        .save = virtio_gpu_conn_save,
        .restore = virtio_gpu_conn_restore,
        .detect = virtio_gpu_conn_detect,
-       .fill_modes = drm_helper_probe_single_connector_modes,
+       .fill_modes = drm_helper_probe_single_connector_modes_nomerge,
        .destroy = virtio_gpu_conn_destroy,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
index ba47b30d28fad4f3d187393a44a17bd9605f6ad4..f2e13eb8339ffc1287110e247e0e54e922b7d72a 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/of_device.h>
+#include <linux/of_graph.h>
 
 #include <drm/drm_fourcc.h>
 
@@ -993,11 +994,25 @@ static void platform_device_unregister_children(struct platform_device *pdev)
 struct ipu_platform_reg {
        struct ipu_client_platformdata pdata;
        const char *name;
-       int reg_offset;
 };
 
+/* These must be in the order of the corresponding device tree port nodes */
 static const struct ipu_platform_reg client_reg[] = {
        {
+               .pdata = {
+                       .csi = 0,
+                       .dma[0] = IPUV3_CHANNEL_CSI0,
+                       .dma[1] = -EINVAL,
+               },
+               .name = "imx-ipuv3-camera",
+       }, {
+               .pdata = {
+                       .csi = 1,
+                       .dma[0] = IPUV3_CHANNEL_CSI1,
+                       .dma[1] = -EINVAL,
+               },
+               .name = "imx-ipuv3-camera",
+       }, {
                .pdata = {
                        .di = 0,
                        .dc = 5,
@@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = {
                        .dma[1] = -EINVAL,
                },
                .name = "imx-ipuv3-crtc",
-       }, {
-               .pdata = {
-                       .csi = 0,
-                       .dma[0] = IPUV3_CHANNEL_CSI0,
-                       .dma[1] = -EINVAL,
-               },
-               .reg_offset = IPU_CM_CSI0_REG_OFS,
-               .name = "imx-ipuv3-camera",
-       }, {
-               .pdata = {
-                       .csi = 1,
-                       .dma[0] = IPUV3_CHANNEL_CSI1,
-                       .dma[1] = -EINVAL,
-               },
-               .reg_offset = IPU_CM_CSI1_REG_OFS,
-               .name = "imx-ipuv3-camera",
        },
 };
 
@@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
        for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
                const struct ipu_platform_reg *reg = &client_reg[i];
                struct platform_device *pdev;
-               struct resource res;
-
-               if (reg->reg_offset) {
-                       memset(&res, 0, sizeof(res));
-                       res.flags = IORESOURCE_MEM;
-                       res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
-                       res.end = res.start + PAGE_SIZE - 1;
-                       pdev = platform_device_register_resndata(dev, reg->name,
-                               id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
-               } else {
-                       pdev = platform_device_register_data(dev, reg->name,
-                               id++, &reg->pdata, sizeof(reg->pdata));
+
+               pdev = platform_device_alloc(reg->name, id++);
+               if (!pdev) {
+                       ret = -ENOMEM;
+                       goto err_register;
+               }
+
+               pdev->dev.parent = dev;
+
+               /* Associate subdevice with the corresponding port node */
+               pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
+               if (!pdev->dev.of_node) {
+                       dev_err(dev, "missing port@%d node in %s\n", i,
+                               dev->of_node->full_name);
+                       ret = -ENODEV;
+                       goto err_register;
                }
 
-               if (IS_ERR(pdev)) {
-                       ret = PTR_ERR(pdev);
+               ret = platform_device_add_data(pdev, &reg->pdata,
+                                              sizeof(reg->pdata));
+               if (!ret)
+                       ret = platform_device_add(pdev);
+               if (ret) {
+                       platform_device_put(pdev);
                        goto err_register;
                }
        }
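
(ipu-v3) Switching from the one-shot platform_device_register_*() helpers to the alloc/add sequence lets the driver attach the matching device-tree port node before the device is added; the entries are matched positionally to port@<i> via of_graph_get_port_by_id(), which is why the array comment above insists on device-tree ordering. The alloc/add/put pattern in isolation:

    #include <linux/of_graph.h>
    #include <linux/platform_device.h>

    /* Sketch (kernel C): register one child bound to an OF graph port;
     * name/pdata/port stand in for the loop's per-entry values. */
    static int add_client_sketch(struct device *dev, const char *name,
                                 int id, const void *pdata,
                                 size_t pdata_size, int port)
    {
            struct platform_device *pdev;
            int ret;

            pdev = platform_device_alloc(name, id);
            if (!pdev)
                    return -ENOMEM;

            pdev->dev.parent = dev;
            pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, port);
            if (!pdev->dev.of_node) {
                    ret = -ENODEV;
                    goto err_put;
            }

            ret = platform_device_add_data(pdev, pdata, pdata_size);
            if (!ret)
                    ret = platform_device_add(pdev);
            if (ret)
                    goto err_put;
            return 0;

    err_put:
            platform_device_put(pdev);      /* frees the unadded device */
            return ret;
    }
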
index ac1feea51be365e3a4c81042b56be75fec26a6a1..9024a3de403253ad03955544dec063430523deb0 100644 (file)
 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST  0xc110
 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
 #define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306
+#define USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS 0xc24d
 #define USB_DEVICE_ID_LOGITECH_MOUSE_C01A      0xc01a
 #define USB_DEVICE_ID_LOGITECH_MOUSE_C05A      0xc05a
 #define USB_DEVICE_ID_LOGITECH_MOUSE_C06A      0xc06a
index c20ac76c0a8cb28ebbc8ea75827f3cfba5e2a787..c690fae02cf823d16b6d985d944b6bda143b6282 100644 (file)
@@ -665,8 +665,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
        struct lg_drv_data *drv_data;
        int ret;
 
-       /* Only work with the 1st interface (G29 presents multiple) */
-       if (iface_num != 0) {
+       /* The G29 only works with the 1st interface */
+       if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
+           (iface_num != 0)) {
                dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num);
                return -ENODEV;
        }
index 94bb137abe3281bfb023c7bc438ad8bcf0d716a4..2324520b006dc7cf24dd2af11a36780c3f989bfd 100644 (file)
@@ -84,6 +84,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
index b33f53b3ca93257a6ec34f409c46e5f1d7662172..bf04d2a3cf4afe4614765b2b3d08d32ec77571a6 100644 (file)
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
                                ptr--;
                                *ptr++ = '\n';
                                *ptr = 0;
-                               HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+                               HiSax_putstatus(cs, NULL, cs->dlog);
                        } else
                                HiSax_putstatus(cs, "LogEcho: ",
                                                "warning Frame too big (%d)",
index 4a48255281887e8c03f9dbacccf404f4f9c52476..90449e1e91e5a27924da01df6215dfa6c326651e 100644 (file)
@@ -901,7 +901,7 @@ Begin:
                                        ptr--;
                                        *ptr++ = '\n';
                                        *ptr = 0;
-                                       HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+                                       HiSax_putstatus(cs, NULL, cs->dlog);
                                } else
                                        HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
                        }
index b1fad81f0722e64b2e0d671215e8dc89c295f4a1..13b2151c10f54ff9fd5bcec76e1ceef2b99aa472 100644 (file)
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
                                        ptr--;
                                        *ptr++ = '\n';
                                        *ptr = 0;
-                                       HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+                                       HiSax_putstatus(cs, NULL, cs->dlog);
                                } else
                                        HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
                        }
index b420f8bd862e454df2cc08f4da84b8bc7ef76e57..ba4beb25d872dbc24d2cbe55a411dee28f219b47 100644 (file)
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
                dp--;
                *dp++ = '\n';
                *dp = 0;
-               HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+               HiSax_putstatus(cs, NULL, cs->dlog);
        } else
                HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
 }
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
        }
        if (finish) {
                *dp = 0;
-               HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+               HiSax_putstatus(cs, NULL, cs->dlog);
                return;
        }
        if ((0xfe & buf[0]) == PROTO_DIS_N0) {  /* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
                dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
        }
        *dp = 0;
-       HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+       HiSax_putstatus(cs, NULL, cs->dlog);
 }
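
(HiSax) Dropping the "%s" looks like it reintroduces a format-string hazard, but HiSax_putstatus() appears to treat its third argument as an already formatted string when the prefix is NULL, so the earlier blanket "%s" cleanup made these paths log the literal text "%s"; the hunks partially revert that cleanup for the NULL-prefix call sites only. For contrast, the rule the original cleanup enforced:

    #include <stdio.h>

    int main(void)
    {
            char dlog[] = "100% done";

            printf("%s\n", dlog);   /* safe: buffer treated as data */
            /* printf(dlog); would misparse the '%' as a conversion */
            return 0;
    }
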
index 5178645ac42bcb85ddd7130ee81eb07fa45c2818..86ce887b2ed6ab16bfe079300f46652fada2af13 100644 (file)
@@ -123,6 +123,26 @@ void nvm_unregister_mgr(struct nvmm_type *mt)
 }
 EXPORT_SYMBOL(nvm_unregister_mgr);
 
+/* register the device with a supported manager */
+static int register_mgr(struct nvm_dev *dev)
+{
+       struct nvmm_type *mt;
+       int ret = 0;
+
+       list_for_each_entry(mt, &nvm_mgrs, list) {
+               ret = mt->register_mgr(dev);
+               if (ret > 0) {
+                       dev->mt = mt;
+                       break; /* successfully initialized */
+               }
+       }
+
+       if (!ret)
+               pr_info("nvm: no compatible nvm manager found.\n");
+
+       return ret;
+}
+
 static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 {
        struct nvm_dev *dev;
@@ -221,7 +241,6 @@ static void nvm_free(struct nvm_dev *dev)
 
 static int nvm_init(struct nvm_dev *dev)
 {
-       struct nvmm_type *mt;
        int ret = -EINVAL;
 
        if (!dev->q || !dev->ops)
@@ -252,21 +271,13 @@ static int nvm_init(struct nvm_dev *dev)
                goto err;
        }
 
-       /* register with device with a supported manager */
-       list_for_each_entry(mt, &nvm_mgrs, list) {
-               ret = mt->register_mgr(dev);
-               if (ret < 0)
-                       goto err; /* initialization failed */
-               if (ret > 0) {
-                       dev->mt = mt;
-                       break; /* successfully initialized */
-               }
-       }
-
-       if (!ret) {
-               pr_info("nvm: no compatible manager found.\n");
+       down_write(&nvm_lock);
+       ret = register_mgr(dev);
+       up_write(&nvm_lock);
+       if (ret < 0)
+               goto err;
+       if (!ret)
                return 0;
-       }
 
        pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
                        dev->name, dev->sec_per_pg, dev->nr_planes,
@@ -308,6 +319,12 @@ int nvm_register(struct request_queue *q, char *disk_name,
        if (ret)
                goto err_init;
 
+       if (dev->ops->max_phys_sect > 256) {
+               pr_info("nvm: max sectors supported is 256.\n");
+               ret = -EINVAL;
+               goto err_init;
+       }
+
        if (dev->ops->max_phys_sect > 1) {
                dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
                                                                "ppalist");
@@ -316,10 +333,6 @@ int nvm_register(struct request_queue *q, char *disk_name,
                        ret = -ENOMEM;
                        goto err_init;
                }
-       } else if (dev->ops->max_phys_sect > 256) {
-               pr_info("nvm: max sectors supported is 256.\n");
-               ret = -EINVAL;
-               goto err_init;
        }
 
        down_write(&nvm_lock);
@@ -335,15 +348,17 @@ EXPORT_SYMBOL(nvm_register);
 
 void nvm_unregister(char *disk_name)
 {
-       struct nvm_dev *dev = nvm_find_nvm_dev(disk_name);
+       struct nvm_dev *dev;
 
+       down_write(&nvm_lock);
+       dev = nvm_find_nvm_dev(disk_name);
        if (!dev) {
                pr_err("nvm: could not find device %s to unregister\n",
                                                                disk_name);
+               up_write(&nvm_lock);
                return;
        }
 
-       down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);
 
@@ -361,38 +376,30 @@ static int nvm_create_target(struct nvm_dev *dev,
 {
        struct nvm_ioctl_create_simple *s = &create->conf.s;
        struct request_queue *tqueue;
-       struct nvmm_type *mt;
        struct gendisk *tdisk;
        struct nvm_tgt_type *tt;
        struct nvm_target *t;
        void *targetdata;
        int ret = 0;
 
+       down_write(&nvm_lock);
        if (!dev->mt) {
-               /* register with device with a supported NVM manager */
-               list_for_each_entry(mt, &nvm_mgrs, list) {
-                       ret = mt->register_mgr(dev);
-                       if (ret < 0)
-                               return ret; /* initialization failed */
-                       if (ret > 0) {
-                               dev->mt = mt;
-                               break; /* successfully initialized */
-                       }
-               }
-
-               if (!ret) {
-                       pr_info("nvm: no compatible nvm manager found.\n");
-                       return -ENODEV;
+               ret = register_mgr(dev);
+               if (!ret)
+                       ret = -ENODEV;
+               if (ret < 0) {
+                       up_write(&nvm_lock);
+                       return ret;
                }
        }
 
        tt = nvm_find_target_type(create->tgttype);
        if (!tt) {
                pr_err("nvm: target type %s not found\n", create->tgttype);
+               up_write(&nvm_lock);
                return -EINVAL;
        }
 
-       down_write(&nvm_lock);
        list_for_each_entry(t, &dev->online_targets, list) {
                if (!strcmp(create->tgtname, t->disk->disk_name)) {
                        pr_err("nvm: target name already exists.\n");
@@ -476,7 +483,9 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
        struct nvm_dev *dev;
        struct nvm_ioctl_create_simple *s;
 
+       down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
+       up_write(&nvm_lock);
        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
@@ -535,7 +544,9 @@ static int nvm_configure_show(const char *val)
                return -EINVAL;
        }
 
+       down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(devname);
+       up_write(&nvm_lock);
        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
@@ -680,8 +691,10 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
        info->tgtsize = tgt_iter;
        up_write(&nvm_lock);
 
-       if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info)))
+       if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
+               kfree(info);
                return -EFAULT;
+       }
 
        kfree(info);
        return 0;
@@ -724,8 +737,11 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
 
        devices->nr_devices = i;
 
-       if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices)))
+       if (copy_to_user(arg, devices,
+                        sizeof(struct nvm_ioctl_get_devices))) {
+               kfree(devices);
                return -EFAULT;
+       }
 
        kfree(devices);
        return 0;
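
(lightnvm) Both ioctl fixes plug the same leak: an early return -EFAULT after a failed copy_to_user() skipped the kfree() of the just-filled buffer. A single-exit variant of the same fix:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* Sketch (kernel C): copy a kmalloc'd reply to userspace without
     * leaking it on -EFAULT. */
    static long reply_to_user_sketch(void __user *arg, size_t len)
    {
            void *buf;
            long ret = 0;

            buf = kzalloc(len, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* ... fill buf ... */

            if (copy_to_user(arg, buf, len))
                    ret = -EFAULT;

            kfree(buf);     /* runs on success and failure alike */
            return ret;
    }
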
index e20e74ec6b9163dc401149030c7432b4cca12a22..35dde84b71e97a6e530c2058ca0b68cf574424b2 100644 (file)
@@ -75,7 +75,6 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
        struct nvm_block *blk;
        int i;
 
-       ppa = dev_to_generic_addr(gn->dev, ppa);
        lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
 
        for (i = 0; i < nr_blocks; i++) {
@@ -187,7 +186,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
                        ppa.g.lun = lun->vlun.id;
                        ppa = generic_to_dev_addr(dev, ppa);
 
-                       ret = dev->ops->get_bb_tbl(dev->q, ppa,
+                       ret = dev->ops->get_bb_tbl(dev, ppa,
                                                dev->blks_per_lun,
                                                gennvm_block_bb, gn);
                        if (ret)
@@ -207,6 +206,14 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
        return 0;
 }
 
+static void gennvm_free(struct nvm_dev *dev)
+{
+       gennvm_blocks_free(dev);
+       gennvm_luns_free(dev);
+       kfree(dev->mp);
+       dev->mp = NULL;
+}
+
 static int gennvm_register(struct nvm_dev *dev)
 {
        struct gen_nvm *gn;
@@ -234,16 +241,13 @@ static int gennvm_register(struct nvm_dev *dev)
 
        return 1;
 err:
-       kfree(gn);
+       gennvm_free(dev);
        return ret;
 }
 
 static void gennvm_unregister(struct nvm_dev *dev)
 {
-       gennvm_blocks_free(dev);
-       gennvm_luns_free(dev);
-       kfree(dev->mp);
-       dev->mp = NULL;
+       gennvm_free(dev);
 }
 
 static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
index 57dadd52b428a536d71f2364c006641e5e765083..1deb8ff90a89528e147b42353500d1610ff4a285 100644 (file)
@@ -501,8 +501,6 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
                        cf->data[2] |= CAN_ERR_PROT_FORM;
                else if (status & SER)
                        cf->data[2] |= CAN_ERR_PROT_STUFF;
-               else
-                       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
        }
 
        priv->can.state = state;
index 5d214d1353320856cf3de91d837b910df3f3de24..f91b094288dad3d86064f24a33a97ad58756f3ca 100644 (file)
@@ -962,7 +962,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
         * type of the last error to occur on the CAN bus
         */
        cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 
        switch (lec_type) {
        case LEC_STUFF_ERROR:
@@ -975,8 +974,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
                break;
        case LEC_ACK_ERROR:
                netdev_dbg(dev, "ack error\n");
-               cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
-                               CAN_ERR_PROT_LOC_ACK_DEL);
+               cf->data[3] = CAN_ERR_PROT_LOC_ACK;
                break;
        case LEC_BIT1_ERROR:
                netdev_dbg(dev, "bit1 error\n");
@@ -988,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
                break;
        case LEC_CRC_ERROR:
                netdev_dbg(dev, "CRC error\n");
-               cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
-                               CAN_ERR_PROT_LOC_CRC_DEL);
+               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                break;
        default:
                break;
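
(CAN) This and the following CAN driver hunks correct the same misuse: cf->data[3] carries a single error-location code, not a bitmask, so OR-ing two location values silently fabricates a third, and OR-ing in CAN_ERR_PROT_UNSPEC (0x00) was always a no-op, hence its removal. With the values from include/uapi/linux/can/error.h:

    #include <stdio.h>

    /* Location codes as defined in include/uapi/linux/can/error.h. */
    #define CAN_ERR_PROT_LOC_CRC_SEQ 0x08   /* CRC sequence  */
    #define CAN_ERR_PROT_LOC_CRC_DEL 0x18   /* CRC delimiter */
    #define CAN_ERR_PROT_LOC_ACK     0x19   /* ACK slot      */
    #define CAN_ERR_PROT_LOC_ACK_DEL 0x1B   /* ACK delimiter */

    int main(void)
    {
            /* 0x08 | 0x18 == 0x18: the CRC_SEQ report was lost */
            printf("0x%02x\n",
                   CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL);
            /* 0x19 | 0x1b == 0x1b: reported as ACK_DEL instead */
            printf("0x%02x\n",
                   CAN_ERR_PROT_LOC_ACK | CAN_ERR_PROT_LOC_ACK_DEL);
            return 0;
    }
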
index 70a8cbb29e75844a02bea49a937bf0c05a200910..1e37313054f3950ee30e6c6fccad874d9262013a 100644 (file)
@@ -578,7 +578,7 @@ static int cc770_err(struct net_device *dev, u8 status)
                                cf->data[2] |= CAN_ERR_PROT_BIT0;
                                break;
                        case STAT_LEC_CRC:
-                               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+                               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                                break;
                        }
                }
index 868fe945e35a39786bf15fb8b142c36a720179a5..41c0fc9f3b1465d9dbbde6b5c7798b1e40cf43ef 100644 (file)
@@ -535,13 +535,13 @@ static void do_bus_err(struct net_device *dev,
        if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
                netdev_dbg(dev, "ACK_ERR irq\n");
                cf->can_id |= CAN_ERR_ACK;
-               cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+               cf->data[3] = CAN_ERR_PROT_LOC_ACK;
                tx_errors = 1;
        }
        if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
                netdev_dbg(dev, "CRC_ERR irq\n");
                cf->data[2] |= CAN_ERR_PROT_BIT;
-               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                rx_errors = 1;
        }
        if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
index c1e85368a198bd898f853c5842c58fbb3b01b04b..5d04f5464faf29a8b1c99dcc8bae86becbd1c485 100644 (file)
@@ -1096,7 +1096,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
                        cf->data[2] |= CAN_ERR_PROT_STUFF;
                        break;
                default:
-                       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
                        cf->data[3] = ecc & ECC_SEG;
                        break;
                }
index ef655177bb5e5b62a7be1f821d08a88226ba7b84..39cf911f7a1e3c364d7818bdc6bcd162a6490951 100644 (file)
@@ -487,7 +487,6 @@ static int m_can_handle_lec_err(struct net_device *dev,
         * type of the last error to occur on the CAN bus
         */
        cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 
        switch (lec_type) {
        case LEC_STUFF_ERROR:
@@ -500,8 +499,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
                break;
        case LEC_ACK_ERROR:
                netdev_dbg(dev, "ack error\n");
-               cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
-                               CAN_ERR_PROT_LOC_ACK_DEL);
+               cf->data[3] = CAN_ERR_PROT_LOC_ACK;
                break;
        case LEC_BIT1_ERROR:
                netdev_dbg(dev, "bit1 error\n");
@@ -513,8 +511,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
                break;
        case LEC_CRC_ERROR:
                netdev_dbg(dev, "CRC error\n");
-               cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
-                               CAN_ERR_PROT_LOC_CRC_DEL);
+               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                break;
        default:
                break;
index e187ca783da0946def7585ff8d85ac76ea201e05..c1317889d3d8d97858604feeb63fa4f7cec8a368 100644 (file)
@@ -559,8 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
                stats->rx_errors++;
                break;
        case PCH_CRC_ERR:
-               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
-                              CAN_ERR_PROT_LOC_CRC_DEL;
+               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                priv->can.can_stats.bus_error++;
                stats->rx_errors++;
                break;
index 7bd54191f962a4ad3079eabdcefb0d001a14bf0b..bc46be39549d2ab317862ba376ccb063be65b7be 100644 (file)
@@ -241,17 +241,16 @@ static void rcar_can_error(struct net_device *ndev)
                u8 ecsr;
 
                netdev_dbg(priv->ndev, "Bus error interrupt:\n");
-               if (skb) {
+               if (skb)
                        cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-                       cf->data[2] = CAN_ERR_PROT_UNSPEC;
-               }
+
                ecsr = readb(&priv->regs->ecsr);
                if (ecsr & RCAR_CAN_ECSR_ADEF) {
                        netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
                        tx_errors++;
                        writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
                        if (skb)
-                               cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+                               cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
                }
                if (ecsr & RCAR_CAN_ECSR_BE0F) {
                        netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
@@ -272,7 +271,7 @@ static void rcar_can_error(struct net_device *ndev)
                        rx_errors++;
                        writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
                        if (skb)
-                               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+                               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                }
                if (ecsr & RCAR_CAN_ECSR_AEF) {
                        netdev_dbg(priv->ndev, "ACK Error\n");
@@ -280,7 +279,7 @@ static void rcar_can_error(struct net_device *ndev)
                        writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
                        if (skb) {
                                cf->can_id |= CAN_ERR_ACK;
-                               cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+                               cf->data[3] = CAN_ERR_PROT_LOC_ACK;
                        }
                }
                if (ecsr & RCAR_CAN_ECSR_FEF) {
index 7b92e911a6168badb3e30f8fc55b2e6fdd0f61dc..8dda3b703d39a1e82ce7fc6a63707e883d235313 100644 (file)
@@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev)
        priv->write_reg(priv, SJA1000_RXERR, 0x0);
        priv->read_reg(priv, SJA1000_ECC);
 
+       /* clear interrupt flags */
+       priv->read_reg(priv, SJA1000_IR);
+
        /* leave reset mode */
        set_normal_mode(dev);
 }
@@ -446,7 +449,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
                        cf->data[2] |= CAN_ERR_PROT_STUFF;
                        break;
                default:
-                       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
                        cf->data[3] = ecc & ECC_SEG;
                        break;
                }
index d9a42c6467836cdf3aa00adc1d5a4f059b97469e..68ef0a4cd82153cd65699ab0a4d80a5a201393e6 100644 (file)
@@ -575,7 +575,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
                                cf->data[2] |= CAN_ERR_PROT_STUFF;
                                break;
                        default:
-                               cf->data[2] |= CAN_ERR_PROT_UNSPEC;
                                cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE)
                                               >> 16;
                                break;
index cf345cbfe8198ef23ee328fc2eb67f5841751ee1..680d1ff07a55ddd60ceb09eb42bb98faaa42ad9f 100644 (file)
@@ -722,7 +722,6 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
        if (err_status & HECC_BUS_ERROR) {
                ++priv->can.can_stats.bus_error;
                cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-               cf->data[2] |= CAN_ERR_PROT_UNSPEC;
                if (err_status & HECC_CANES_FE) {
                        hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
                        cf->data[2] |= CAN_ERR_PROT_FORM;
@@ -737,13 +736,11 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
                }
                if (err_status & HECC_CANES_CRCE) {
                        hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
-                       cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
-                                       CAN_ERR_PROT_LOC_CRC_DEL;
+                       cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                }
                if (err_status & HECC_CANES_ACKE) {
                        hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
-                       cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
-                                       CAN_ERR_PROT_LOC_ACK_DEL;
+                       cf->data[3] = CAN_ERR_PROT_LOC_ACK;
                }
        }
 
index 2d390384ef3bb3d3845fcf6102bef70715f1dd21..fc5b75675cd8b6dbebbbc79d357001676900fefa 100644 (file)
@@ -377,7 +377,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
                        cf->data[2] |= CAN_ERR_PROT_STUFF;
                        break;
                default:
-                       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
                        cf->data[3] = ecc & SJA1000_ECC_SEG;
                        break;
                }
index 0e5a4493ba4fee6d3c4fb5626a676f18802a6ef3..113e64fcd73be9a16635f0029ed3c6d0c2a17306 100644 (file)
@@ -282,7 +282,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
                                cf->data[2] |= CAN_ERR_PROT_STUFF;
                                break;
                        default:
-                               cf->data[2] |= CAN_ERR_PROT_UNSPEC;
                                cf->data[3] = ecc & SJA1000_ECC_SEG;
                                break;
                        }
index 8b17a9065b0b193a0c5e5a93048c637f0f7fbad3..022bfa13ebfa0c85491cfc2cb17a3437c01f0205 100644 (file)
@@ -944,10 +944,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
                        cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
 
                        if (es->leaf.error_factor & M16C_EF_ACKE)
-                               cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
+                               cf->data[3] = CAN_ERR_PROT_LOC_ACK;
                        if (es->leaf.error_factor & M16C_EF_CRCE)
-                               cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
-                                               CAN_ERR_PROT_LOC_CRC_DEL);
+                               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                        if (es->leaf.error_factor & M16C_EF_FORME)
                                cf->data[2] |= CAN_ERR_PROT_FORM;
                        if (es->leaf.error_factor & M16C_EF_STFE)
index de95b1ccba3e3b6d4d00e313acb280cd178a000d..a731720f1d132501d12d6241ccee833ae2e61787 100644 (file)
@@ -401,9 +401,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
                tx_errors = 1;
                break;
        case USB_8DEV_STATUSMSG_CRC:
-               cf->data[2] |= CAN_ERR_PROT_UNSPEC;
-               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
-                              CAN_ERR_PROT_LOC_CRC_DEL;
+               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                rx_errors = 1;
                break;
        case USB_8DEV_STATUSMSG_BIT0:
index fc55e8e0351dfe5f3ca436d8aed64bf6d660db84..51670b322409b6606736751dae23433e1d165500 100644 (file)
@@ -608,17 +608,15 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
 
        /* Check for error interrupt */
        if (isr & XCAN_IXR_ERROR_MASK) {
-               if (skb) {
+               if (skb)
                        cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-                       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
-               }
 
                /* Check for Ack error interrupt */
                if (err_status & XCAN_ESR_ACKER_MASK) {
                        stats->tx_errors++;
                        if (skb) {
                                cf->can_id |= CAN_ERR_ACK;
-                               cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+                               cf->data[3] = CAN_ERR_PROT_LOC_ACK;
                        }
                }
 
@@ -654,8 +652,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
                        stats->rx_errors++;
                        if (skb) {
                                cf->can_id |= CAN_ERR_PROT;
-                               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
-                                               CAN_ERR_PROT_LOC_CRC_DEL;
+                               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
                        }
                }
                priv->can.can_stats.bus_error++;
index 955d06b9cdba75e82fa87deb26fbb37e08140f87..31c5e476fd648fff18a9a4701ba0b214656e45db 100644 (file)
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
+source "drivers/net/ethernet/aurora/Kconfig"
 source "drivers/net/ethernet/cadence/Kconfig"
 source "drivers/net/ethernet/adi/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
index 4a2ee98738f04ef6eda76d776176dba22a9a7ef0..071f84eb6f3f8c8f0b7add3e31d1873609dc4486 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
 obj-$(CONFIG_NET_VENDOR_ARC) += arc/
 obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
+obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
 obj-$(CONFIG_NET_CADENCE) += cadence/
 obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
index 991412ce6f48fbbf6fdfd6255003a0aa90e67e01..9147a0107c44034f015beba83cfc150398c1ac93 100644 (file)
@@ -450,12 +450,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       pdata->ring_ops->wr_cmd(tx_ring, count);
        skb_tx_timestamp(skb);
 
        pdata->stats.tx_packets++;
        pdata->stats.tx_bytes += skb->len;
 
+       pdata->ring_ops->wr_cmd(tx_ring, count);
        return NETDEV_TX_OK;
 }
 
@@ -688,10 +688,10 @@ static int xgene_enet_open(struct net_device *ndev)
        mac_ops->tx_enable(pdata);
        mac_ops->rx_enable(pdata);
 
+       xgene_enet_napi_enable(pdata);
        ret = xgene_enet_register_irq(ndev);
        if (ret)
                return ret;
-       xgene_enet_napi_enable(pdata);
 
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                phy_start(pdata->phy_dev);
@@ -715,13 +715,13 @@ static int xgene_enet_close(struct net_device *ndev)
        else
                cancel_delayed_work_sync(&pdata->link_work);
 
-       xgene_enet_napi_disable(pdata);
-       xgene_enet_free_irq(ndev);
-       xgene_enet_process_ring(pdata->rx_ring, -1);
-
        mac_ops->tx_disable(pdata);
        mac_ops->rx_disable(pdata);
 
+       xgene_enet_free_irq(ndev);
+       xgene_enet_napi_disable(pdata);
+       xgene_enet_process_ring(pdata->rx_ring, -1);
+
        return 0;
 }
 
@@ -1474,15 +1474,15 @@ static int xgene_enet_probe(struct platform_device *pdev)
        }
        ndev->hw_features = ndev->features;
 
-       ret = register_netdev(ndev);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
-               netdev_err(ndev, "Failed to register netdev\n");
+               netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
        }
 
-       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+       ret = register_netdev(ndev);
        if (ret) {
-               netdev_err(ndev, "No usable DMA configuration\n");
+               netdev_err(ndev, "Failed to register netdev\n");
                goto err;
        }
 
@@ -1490,14 +1490,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
        if (ret)
                goto err;
 
-       xgene_enet_napi_add(pdata);
        mac_ops = pdata->mac_ops;
-       if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+       if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
                ret = xgene_enet_mdio_config(pdata);
-       else
+               if (ret)
+                       goto err;
+       } else {
                INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
+       }
 
-       return ret;
+       xgene_enet_napi_add(pdata);
+       return 0;
 err:
        unregister_netdev(ndev);
        free_netdev(ndev);
index c8af3ce3ea38d16d4c470ec5773b2d4f088b0f15..bd377a6b067d4e7cc2e4640514d6910a03167450 100644 (file)
@@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
index af006b44b2a66ec3fd0892672e6a61a9ca362cc3..0959e6824cb635f0262524db9729988986193fb4 100644 (file)
@@ -37,6 +37,7 @@
 
 #define ALX_DEV_ID_AR8161                              0x1091
 #define ALX_DEV_ID_E2200                               0xe091
+#define ALX_DEV_ID_E2400                               0xe0a1
 #define ALX_DEV_ID_AR8162                              0x1090
 #define ALX_DEV_ID_AR8171                              0x10A1
 #define ALX_DEV_ID_AR8172                              0x10A0
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
new file mode 100644 (file)
index 0000000..a3c7106
--- /dev/null
@@ -0,0 +1,20 @@
+config NET_VENDOR_AURORA
+       bool "Aurora VLSI devices"
+       help
+         If you have a network (Ethernet) device belonging to this class,
+         say Y.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         questions about Aurora devices. If you say Y, you will be asked
+         for your specific device in the following questions.
+
+if NET_VENDOR_AURORA
+
+config AURORA_NB8800
+       tristate "Aurora AU-NB8800 support"
+       select PHYLIB
+       help
+         Support for the AU-NB8800 gigabit Ethernet controller.
+
+endif
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile
new file mode 100644 (file)
index 0000000..6cb528a
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_AURORA_NB8800) += nb8800.o
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
new file mode 100644 (file)
index 0000000..ecc4a33
--- /dev/null
@@ -0,0 +1,1552 @@
+/*
+ * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
+ *
+ * Mostly rewritten, based on driver from Sigma Designs.  Original
+ * copyright notice below.
+ *
+ *
+ * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
+ *
+ * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/dma-mapping.h>
+#include <linux/phy.h>
+#include <linux/cache.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <asm/barrier.h>
+
+#include "nb8800.h"
+
+static void nb8800_tx_done(struct net_device *dev);
+static int nb8800_dma_stop(struct net_device *dev);
+
+static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
+{
+       return readb_relaxed(priv->base + reg);
+}
+
+static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
+{
+       return readl_relaxed(priv->base + reg);
+}
+
+static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
+{
+       writeb_relaxed(val, priv->base + reg);
+}
+
+static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
+{
+       writew_relaxed(val, priv->base + reg);
+}
+
+static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
+{
+       writel_relaxed(val, priv->base + reg);
+}
+
+static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
+                               u32 mask, u32 val)
+{
+       u32 old = nb8800_readb(priv, reg);
+       u32 new = (old & ~mask) | (val & mask);
+
+       if (new != old)
+               nb8800_writeb(priv, reg, new);
+}
+
+static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
+                               u32 mask, u32 val)
+{
+       u32 old = nb8800_readl(priv, reg);
+       u32 new = (old & ~mask) | (val & mask);
+
+       if (new != old)
+               nb8800_writel(priv, reg, new);
+}
+
+static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
+                              bool set)
+{
+       nb8800_maskb(priv, reg, bits, set ? bits : 0);
+}
+
+static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
+{
+       nb8800_maskb(priv, reg, bits, bits);
+}
+
+static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
+{
+       nb8800_maskb(priv, reg, bits, 0);
+}
+
+static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
+                              bool set)
+{
+       nb8800_maskl(priv, reg, bits, set ? bits : 0);
+}
+
+static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
+{
+       nb8800_maskl(priv, reg, bits, bits);
+}
+
+static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
+{
+       nb8800_maskl(priv, reg, bits, 0);
+}
+
+static int nb8800_mdio_wait(struct mii_bus *bus)
+{
+       struct nb8800_priv *priv = bus->priv;
+       u32 val;
+
+       return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
+                                        val, !(val & MDIO_CMD_GO), 1, 1000);
+}
+
+static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
+{
+       struct nb8800_priv *priv = bus->priv;
+       int err;
+
+       err = nb8800_mdio_wait(bus);
+       if (err)
+               return err;
+
+       nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
+       udelay(10);
+       nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);
+
+       return nb8800_mdio_wait(bus);
+}
+
+static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+       struct nb8800_priv *priv = bus->priv;
+       u32 val;
+       int err;
+
+       err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
+       if (err)
+               return err;
+
+       val = nb8800_readl(priv, NB8800_MDIO_STS);
+       if (val & MDIO_STS_ERR)
+               return 0xffff;
+
+       return val & 0xffff;
+}
+
+static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+       u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
+               MDIO_CMD_DATA(val) | MDIO_CMD_WR;
+
+       return nb8800_mdio_cmd(bus, cmd);
+}
+
+static void nb8800_mac_tx(struct net_device *dev, bool enable)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
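+       /* Wait for the tx DMA channel (TCR_EN) to go idle before
+        * touching the MAC TX_EN bit.
+        */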
+       while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
+               cpu_relax();
+
+       nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
+}
+
+static void nb8800_mac_rx(struct net_device *dev, bool enable)
+{
+       nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
+}
+
+static void nb8800_mac_af(struct net_device *dev, bool enable)
+{
+       nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
+}
+
+static void nb8800_start_rx(struct net_device *dev)
+{
+       nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
+}
+
+static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
+       struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
+       int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
+       dma_addr_t dma_addr;
+       struct page *page;
+       unsigned long offset;
+       void *data;
+
+       data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
+       if (!data)
+               return -ENOMEM;
+
+       page = virt_to_head_page(data);
+       offset = data - page_address(page);
+
+       dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
+                               DMA_FROM_DEVICE);
+
+       if (dma_mapping_error(&dev->dev, dma_addr)) {
+               skb_free_frag(data);
+               return -ENOMEM;
+       }
+
+       rxb->page = page;
+       rxb->offset = offset;
+       rxd->desc.s_addr = dma_addr;
+
+       return 0;
+}
+
+static void nb8800_receive(struct net_device *dev, unsigned int i,
+                          unsigned int len)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
+       struct page *page = priv->rx_bufs[i].page;
+       int offset = priv->rx_bufs[i].offset;
+       void *data = page_address(page) + offset;
+       dma_addr_t dma = rxd->desc.s_addr;
+       struct sk_buff *skb;
+       unsigned int size;
+       int err;
+
+       size = len <= RX_COPYBREAK ? len : RX_COPYHDR;
+
+       skb = napi_alloc_skb(&priv->napi, size);
+       if (!skb) {
+               netdev_err(dev, "rx skb allocation failed\n");
+               dev->stats.rx_dropped++;
+               return;
+       }
+
+       if (len <= RX_COPYBREAK) {
+               dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
+               memcpy(skb_put(skb, len), data, len);
+               dma_sync_single_for_device(&dev->dev, dma, len,
+                                          DMA_FROM_DEVICE);
+       } else {
+               err = nb8800_alloc_rx(dev, i, true);
+               if (err) {
+                       netdev_err(dev, "rx buffer allocation failed\n");
+                       dev->stats.rx_dropped++;
+                       return;
+               }
+
+               dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
+               memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                               offset + RX_COPYHDR, len - RX_COPYHDR,
+                               RX_BUF_SIZE);
+       }
+
+       skb->protocol = eth_type_trans(skb, dev);
+       napi_gro_receive(&priv->napi, skb);
+}
+
+static void nb8800_rx_error(struct net_device *dev, u32 report)
+{
+       if (report & RX_LENGTH_ERR)
+               dev->stats.rx_length_errors++;
+
+       if (report & RX_FCS_ERR)
+               dev->stats.rx_crc_errors++;
+
+       if (report & RX_FIFO_OVERRUN)
+               dev->stats.rx_fifo_errors++;
+
+       if (report & RX_ALIGNMENT_ERROR)
+               dev->stats.rx_frame_errors++;
+
+       dev->stats.rx_errors++;
+}
+
+static int nb8800_poll(struct napi_struct *napi, int budget)
+{
+       struct net_device *dev = napi->dev;
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct nb8800_rx_desc *rxd;
+       unsigned int last = priv->rx_eoc;
+       unsigned int next;
+       int work = 0;
+
+       nb8800_tx_done(dev);
+
+again:
+       while (work < budget) {
+               struct nb8800_rx_buf *rxb;
+               unsigned int len;
+
+               next = (last + 1) % RX_DESC_COUNT;
+
+               rxb = &priv->rx_bufs[next];
+               rxd = &priv->rx_descs[next];
+
+               if (!rxd->report)
+                       break;
+
+               len = RX_BYTES_TRANSFERRED(rxd->report);
+
+               if (IS_RX_ERROR(rxd->report))
+                       nb8800_rx_error(dev, rxd->report);
+               else
+                       nb8800_receive(dev, next, len);
+
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
+
+               if (rxd->report & RX_MULTICAST_PKT)
+                       dev->stats.multicast++;
+
+               rxd->report = 0;
+               last = next;
+               work++;
+       }
+
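+       /* Move the end-of-chain marker past the descriptors we just
+        * consumed and restart rx DMA in case it stopped at the old EOC.
+        */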
+       if (work) {
+               priv->rx_descs[last].desc.config |= DESC_EOC;
+               wmb();  /* ensure new EOC is written before clearing old */
+               priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
+               priv->rx_eoc = last;
+               nb8800_start_rx(dev);
+       }
+
+       if (work < budget) {
+               nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
+
+               /* If a packet arrived after we last checked but
+                * before writing RX_ITR, the interrupt will be
+                * delayed, so we retrieve it now.
+                */
+               if (priv->rx_descs[next].report)
+                       goto again;
+
+               napi_complete_done(napi, work);
+       }
+
+       return work;
+}
+
+static void __nb8800_tx_dma_start(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct nb8800_tx_buf *txb;
+       u32 txc_cr;
+
+       txb = &priv->tx_bufs[priv->tx_queue];
+       if (!txb->ready)
+               return;
+
+       txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
+       if (txc_cr & TCR_EN)
+               return;
+
+       nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
+       wmb();          /* ensure desc addr is written before starting DMA */
+       nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);
+
+       priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
+}
+
+static void nb8800_tx_dma_start(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       spin_lock_irq(&priv->tx_lock);
+       __nb8800_tx_dma_start(dev);
+       spin_unlock_irq(&priv->tx_lock);
+}
+
+static void nb8800_tx_dma_start_irq(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       spin_lock(&priv->tx_lock);
+       __nb8800_tx_dma_start(dev);
+       spin_unlock(&priv->tx_lock);
+}
+
+static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct nb8800_tx_desc *txd;
+       struct nb8800_tx_buf *txb;
+       struct nb8800_dma_desc *desc;
+       dma_addr_t dma_addr;
+       unsigned int dma_len;
+       unsigned int align;
+       unsigned int next;
+
+       if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
+               netif_stop_queue(dev);
+               return NETDEV_TX_BUSY;
+       }
+
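+       /* Buffers handed to the DMA engine are split at an 8-byte
+        * boundary: any unaligned leading bytes are copied into the
+        * descriptor's embedded buffer and sent as a separate
+        * descriptor (see the align block below).
+        */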
+       align = (8 - (uintptr_t)skb->data) & 7;
+
+       dma_len = skb->len - align;
+       dma_addr = dma_map_single(&dev->dev, skb->data + align,
+                                 dma_len, DMA_TO_DEVICE);
+
+       if (dma_mapping_error(&dev->dev, dma_addr)) {
+               netdev_err(dev, "tx dma mapping error\n");
+               kfree_skb(skb);
+               dev->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
+
+       if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
+               netif_stop_queue(dev);
+               skb->xmit_more = 0;
+       }
+
+       next = priv->tx_next;
+       txb = &priv->tx_bufs[next];
+       txd = &priv->tx_descs[next];
+       desc = &txd->desc[0];
+
+       next = (next + 1) % TX_DESC_COUNT;
+
+       if (align) {
+               memcpy(txd->buf, skb->data, align);
+
+               desc->s_addr =
+                       txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
+               desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
+               desc->config = DESC_BTS(2) | DESC_DS | align;
+
+               desc++;
+       }
+
+       desc->s_addr = dma_addr;
+       desc->n_addr = priv->tx_bufs[next].dma_desc;
+       desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
+
+       if (!skb->xmit_more)
+               desc->config |= DESC_EOC;
+
+       txb->skb = skb;
+       txb->dma_addr = dma_addr;
+       txb->dma_len = dma_len;
+
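+       /* While xmit_more is set, descriptors are collected into one
+        * chain; the chain is marked ready and handed to the DMA engine
+        * only when the last skb of the burst arrives.
+        */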
+       if (!priv->tx_chain) {
+               txb->chain_len = 1;
+               priv->tx_chain = txb;
+       } else {
+               priv->tx_chain->chain_len++;
+       }
+
+       netdev_sent_queue(dev, skb->len);
+
+       priv->tx_next = next;
+
+       if (!skb->xmit_more) {
+               smp_wmb();
+               priv->tx_chain->ready = true;
+               priv->tx_chain = NULL;
+               nb8800_tx_dma_start(dev);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static void nb8800_tx_error(struct net_device *dev, u32 report)
+{
+       if (report & TX_LATE_COLLISION)
+               dev->stats.collisions++;
+
+       if (report & TX_PACKET_DROPPED)
+               dev->stats.tx_dropped++;
+
+       if (report & TX_FIFO_UNDERRUN)
+               dev->stats.tx_fifo_errors++;
+
+       dev->stats.tx_errors++;
+}
+
+static void nb8800_tx_done(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       unsigned int limit = priv->tx_next;
+       unsigned int done = priv->tx_done;
+       unsigned int packets = 0;
+       unsigned int len = 0;
+
+       while (done != limit) {
+               struct nb8800_tx_desc *txd = &priv->tx_descs[done];
+               struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
+               struct sk_buff *skb;
+
+               if (!txd->report)
+                       break;
+
+               skb = txb->skb;
+               len += skb->len;
+
+               dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
+                                DMA_TO_DEVICE);
+
+               if (IS_TX_ERROR(txd->report)) {
+                       nb8800_tx_error(dev, txd->report);
+                       kfree_skb(skb);
+               } else {
+                       consume_skb(skb);
+               }
+
+               dev->stats.tx_packets++;
+               dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
+               dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);
+
+               txb->skb = NULL;
+               txb->ready = false;
+               txd->report = 0;
+
+               done = (done + 1) % TX_DESC_COUNT;
+               packets++;
+       }
+
+       if (packets) {
+               smp_mb__before_atomic();
+               atomic_add(packets, &priv->tx_free);
+               netdev_completed_queue(dev, packets, len);
+               netif_wake_queue(dev);
+               priv->tx_done = done;
+       }
+}
+
+static irqreturn_t nb8800_irq(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct nb8800_priv *priv = netdev_priv(dev);
+       irqreturn_t ret = IRQ_NONE;
+       u32 val;
+
+       /* tx interrupt */
+       val = nb8800_readl(priv, NB8800_TXC_SR);
+       if (val) {
+               nb8800_writel(priv, NB8800_TXC_SR, val);
+
+               if (val & TSR_DI)
+                       nb8800_tx_dma_start_irq(dev);
+
+               if (val & TSR_TI)
+                       napi_schedule_irqoff(&priv->napi);
+
+               if (unlikely(val & TSR_DE))
+                       netdev_err(dev, "TX DMA error\n");
+
+               /* should never happen with automatic status retrieval */
+               if (unlikely(val & TSR_TO))
+                       netdev_err(dev, "TX Status FIFO overflow\n");
+
+               ret = IRQ_HANDLED;
+       }
+
+       /* rx interrupt */
+       val = nb8800_readl(priv, NB8800_RXC_SR);
+       if (val) {
+               nb8800_writel(priv, NB8800_RXC_SR, val);
+
+               if (likely(val & (RSR_RI | RSR_DI))) {
+                       nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
+                       napi_schedule_irqoff(&priv->napi);
+               }
+
+               if (unlikely(val & RSR_DE))
+                       netdev_err(dev, "RX DMA error\n");
+
+               /* should never happen with automatic status retrieval */
+               if (unlikely(val & RSR_RO))
+                       netdev_err(dev, "RX Status FIFO overflow\n");
+
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
+static void nb8800_mac_config(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       bool gigabit = priv->speed == SPEED_1000;
+       u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
+       u32 mac_mode = 0;
+       u32 slot_time;
+       u32 phy_clk;
+       u32 ict;
+
+       if (!priv->duplex)
+               mac_mode |= HALF_DUPLEX;
+
+       if (gigabit) {
+               if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+                       mac_mode |= RGMII_MODE;
+
+               mac_mode |= GMAC_MODE;
+               phy_clk = 125000000;
+
+               /* Should be 512 but register is only 8 bits */
+               slot_time = 255;
+       } else {
+               phy_clk = 25000000;
+               slot_time = 128;
+       }
+
+       ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));
+
+       nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
+       nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
+       nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
+}
+
+static void nb8800_pause_config(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       u32 rxcr;
+
+       if (priv->pause_aneg) {
+               if (!phydev || !phydev->link)
+                       return;
+
+               priv->pause_rx = phydev->pause;
+               priv->pause_tx = phydev->pause ^ phydev->asym_pause;
+       }
+
+       nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);
+
+       rxcr = nb8800_readl(priv, NB8800_RXC_CR);
+       if (!!(rxcr & RCR_FL) == priv->pause_tx)
+               return;
+
+       if (netif_running(dev)) {
+               napi_disable(&priv->napi);
+               netif_tx_lock_bh(dev);
+               nb8800_dma_stop(dev);
+               nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
+               nb8800_start_rx(dev);
+               netif_tx_unlock_bh(dev);
+               napi_enable(&priv->napi);
+       } else {
+               nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
+       }
+}
+
+static void nb8800_link_reconfigure(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       int change = 0;
+
+       if (phydev->link) {
+               if (phydev->speed != priv->speed) {
+                       priv->speed = phydev->speed;
+                       change = 1;
+               }
+
+               if (phydev->duplex != priv->duplex) {
+                       priv->duplex = phydev->duplex;
+                       change = 1;
+               }
+
+               if (change)
+                       nb8800_mac_config(dev);
+
+               nb8800_pause_config(dev);
+       }
+
+       if (phydev->link != priv->link) {
+               priv->link = phydev->link;
+               change = 1;
+       }
+
+       if (change)
+               phy_print_status(priv->phydev);
+}
+
+static void nb8800_update_mac_addr(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);
+
+       for (i = 0; i < ETH_ALEN; i++)
+               nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
+}
+
+static int nb8800_set_mac_address(struct net_device *dev, void *addr)
+{
+       struct sockaddr *sock = addr;
+
+       if (netif_running(dev))
+               return -EBUSY;
+
+       ether_addr_copy(dev->dev_addr, sock->sa_data);
+       nb8800_update_mac_addr(dev);
+
+       return 0;
+}
+
+static void nb8800_mc_init(struct net_device *dev, int val)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       nb8800_writeb(priv, NB8800_MC_INIT, val);
+       readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
+                                 1, 1000);
+}
+
+static void nb8800_set_rx_mode(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct netdev_hw_addr *ha;
+       int i;
+
+       if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+               nb8800_mac_af(dev, false);
+               return;
+       }
+
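+       /* Enable the address filter, clear the multicast table, then
+        * load each multicast address in turn.
+        */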
+       nb8800_mac_af(dev, true);
+       nb8800_mc_init(dev, 0);
+
+       netdev_for_each_mc_addr(ha, dev) {
+               for (i = 0; i < ETH_ALEN; i++)
+                       nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);
+
+               nb8800_mc_init(dev, 0xff);
+       }
+}
+
+#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
+#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
+
+static void nb8800_dma_free(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       unsigned int i;
+
+       if (priv->rx_bufs) {
+               for (i = 0; i < RX_DESC_COUNT; i++)
+                       if (priv->rx_bufs[i].page)
+                               put_page(priv->rx_bufs[i].page);
+
+               kfree(priv->rx_bufs);
+               priv->rx_bufs = NULL;
+       }
+
+       if (priv->tx_bufs) {
+               for (i = 0; i < TX_DESC_COUNT; i++)
+                       kfree_skb(priv->tx_bufs[i].skb);
+
+               kfree(priv->tx_bufs);
+               priv->tx_bufs = NULL;
+       }
+
+       if (priv->rx_descs) {
+               dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
+                                 priv->rx_desc_dma);
+               priv->rx_descs = NULL;
+       }
+
+       if (priv->tx_descs) {
+               dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
+                                 priv->tx_desc_dma);
+               priv->tx_descs = NULL;
+       }
+}
+
+static void nb8800_dma_reset(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct nb8800_rx_desc *rxd;
+       struct nb8800_tx_desc *txd;
+       unsigned int i;
+
+       for (i = 0; i < RX_DESC_COUNT; i++) {
+               dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);
+
+               rxd = &priv->rx_descs[i];
+               rxd->desc.n_addr = rx_dma + sizeof(*rxd);
+               rxd->desc.r_addr =
+                       rx_dma + offsetof(struct nb8800_rx_desc, report);
+               rxd->desc.config = priv->rx_dma_config;
+               rxd->report = 0;
+       }
+
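+       /* Close the rx ring: the last descriptor points back to the
+        * first and carries the end-of-chain flag.
+        */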
+       rxd->desc.n_addr = priv->rx_desc_dma;
+       rxd->desc.config |= DESC_EOC;
+
+       priv->rx_eoc = RX_DESC_COUNT - 1;
+
+       for (i = 0; i < TX_DESC_COUNT; i++) {
+               struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
+               dma_addr_t r_dma = txb->dma_desc +
+                       offsetof(struct nb8800_tx_desc, report);
+
+               txd = &priv->tx_descs[i];
+               txd->desc[0].r_addr = r_dma;
+               txd->desc[1].r_addr = r_dma;
+               txd->report = 0;
+       }
+
+       priv->tx_next = 0;
+       priv->tx_queue = 0;
+       priv->tx_done = 0;
+       atomic_set(&priv->tx_free, TX_DESC_COUNT);
+
+       nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);
+
+       wmb();          /* ensure all setup is written before starting */
+}
+
+static int nb8800_dma_init(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       unsigned int n_rx = RX_DESC_COUNT;
+       unsigned int n_tx = TX_DESC_COUNT;
+       unsigned int i;
+       int err;
+
+       priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
+                                           &priv->rx_desc_dma, GFP_KERNEL);
+       if (!priv->rx_descs)
+               goto err_out;
+
+       priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
+       if (!priv->rx_bufs)
+               goto err_out;
+
+       for (i = 0; i < n_rx; i++) {
+               err = nb8800_alloc_rx(dev, i, false);
+               if (err)
+                       goto err_out;
+       }
+
+       priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
+                                           &priv->tx_desc_dma, GFP_KERNEL);
+       if (!priv->tx_descs)
+               goto err_out;
+
+       priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
+       if (!priv->tx_bufs)
+               goto err_out;
+
+       for (i = 0; i < n_tx; i++)
+               priv->tx_bufs[i].dma_desc =
+                       priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);
+
+       nb8800_dma_reset(dev);
+
+       return 0;
+
+err_out:
+       nb8800_dma_free(dev);
+
+       return -ENOMEM;
+}
+
+static int nb8800_dma_stop(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
+       struct nb8800_tx_desc *txd = &priv->tx_descs[0];
+       int retry = 5;
+       u32 txcr;
+       u32 rxcr;
+       int err;
+       unsigned int i;
+
+       /* wait for tx to finish */
+       err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
+                                       !(txcr & TCR_EN) &&
+                                       priv->tx_done == priv->tx_next,
+                                       1000, 1000000);
+       if (err)
+               return err;
+
+       /* The rx DMA only stops if it reaches the end of chain.
+        * To make this happen, we set the EOC flag on all rx
+        * descriptors, put the device in loopback mode, and send
+        * a few dummy frames.  The interrupt handler will ignore
+        * these since NAPI is disabled and no real frames are in
+        * the tx queue.
+        */
+
+       for (i = 0; i < RX_DESC_COUNT; i++)
+               priv->rx_descs[i].desc.config |= DESC_EOC;
+
+       txd->desc[0].s_addr =
+               txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
+       txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
+       memset(txd->buf, 0, sizeof(txd->buf));
+
+       nb8800_mac_af(dev, false);
+       nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
+
+       do {
+               nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
+               wmb();  /* ensure desc addr is written before starting DMA */
+               nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);
+
+               err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
+                                               rxcr, !(rxcr & RCR_EN),
+                                               1000, 100000);
+       } while (err && --retry);
+
+       nb8800_mac_af(dev, true);
+       nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
+       nb8800_dma_reset(dev);
+
+       return retry ? 0 : -ETIMEDOUT;
+}
+
+static void nb8800_pause_adv(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       u32 adv = 0;
+
+       if (!priv->phydev)
+               return;
+
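+       /* Standard pause advertisement encoding: rx pause sets both
+        * Pause and Asym_Pause; tx pause toggles Asym_Pause.
+        */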
+       if (priv->pause_rx)
+               adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+       if (priv->pause_tx)
+               adv ^= ADVERTISED_Asym_Pause;
+
+       priv->phydev->supported |= adv;
+       priv->phydev->advertising |= adv;
+}
+
+static int nb8800_open(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       int err;
+
+       /* clear any pending interrupts */
+       nb8800_writel(priv, NB8800_RXC_SR, 0xf);
+       nb8800_writel(priv, NB8800_TXC_SR, 0xf);
+
+       err = nb8800_dma_init(dev);
+       if (err)
+               return err;
+
+       err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
+       if (err)
+               goto err_free_dma;
+
+       nb8800_mac_rx(dev, true);
+       nb8800_mac_tx(dev, true);
+
+       priv->phydev = of_phy_connect(dev, priv->phy_node,
+                                     nb8800_link_reconfigure, 0,
+                                     priv->phy_mode);
+       if (!priv->phydev)
+               goto err_free_irq;
+
+       nb8800_pause_adv(dev);
+
+       netdev_reset_queue(dev);
+       napi_enable(&priv->napi);
+       netif_start_queue(dev);
+
+       nb8800_start_rx(dev);
+       phy_start(priv->phydev);
+
+       return 0;
+
+err_free_irq:
+       free_irq(dev->irq, dev);
+err_free_dma:
+       nb8800_dma_free(dev);
+
+       return err;
+}
+
+static int nb8800_stop(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       phy_stop(priv->phydev);
+
+       netif_stop_queue(dev);
+       napi_disable(&priv->napi);
+
+       nb8800_dma_stop(dev);
+       nb8800_mac_rx(dev, false);
+       nb8800_mac_tx(dev, false);
+
+       phy_disconnect(priv->phydev);
+       priv->phydev = NULL;
+
+       free_irq(dev->irq, dev);
+
+       nb8800_dma_free(dev);
+
+       return 0;
+}
+
+static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static const struct net_device_ops nb8800_netdev_ops = {
+       .ndo_open               = nb8800_open,
+       .ndo_stop               = nb8800_stop,
+       .ndo_start_xmit         = nb8800_xmit,
+       .ndo_set_mac_address    = nb8800_set_mac_address,
+       .ndo_set_rx_mode        = nb8800_set_rx_mode,
+       .ndo_do_ioctl           = nb8800_ioctl,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       if (!priv->phydev)
+               return -ENODEV;
+
+       return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       if (!priv->phydev)
+               return -ENODEV;
+
+       return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int nb8800_nway_reset(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       if (!priv->phydev)
+               return -ENODEV;
+
+       return genphy_restart_aneg(priv->phydev);
+}
+
+static void nb8800_get_pauseparam(struct net_device *dev,
+                                 struct ethtool_pauseparam *pp)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       pp->autoneg = priv->pause_aneg;
+       pp->rx_pause = priv->pause_rx;
+       pp->tx_pause = priv->pause_tx;
+}
+
+static int nb8800_set_pauseparam(struct net_device *dev,
+                                struct ethtool_pauseparam *pp)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       priv->pause_aneg = pp->autoneg;
+       priv->pause_rx = pp->rx_pause;
+       priv->pause_tx = pp->tx_pause;
+
+       nb8800_pause_adv(dev);
+
+       if (!priv->pause_aneg)
+               nb8800_pause_config(dev);
+       else if (priv->phydev)
+               phy_start_aneg(priv->phydev);
+
+       return 0;
+}
+
+static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
+       "rx_bytes_ok",
+       "rx_frames_ok",
+       "rx_undersize_frames",
+       "rx_fragment_frames",
+       "rx_64_byte_frames",
+       "rx_127_byte_frames",
+       "rx_255_byte_frames",
+       "rx_511_byte_frames",
+       "rx_1023_byte_frames",
+       "rx_max_size_frames",
+       "rx_oversize_frames",
+       "rx_bad_fcs_frames",
+       "rx_broadcast_frames",
+       "rx_multicast_frames",
+       "rx_control_frames",
+       "rx_pause_frames",
+       "rx_unsup_control_frames",
+       "rx_align_error_frames",
+       "rx_overrun_frames",
+       "rx_jabber_frames",
+       "rx_bytes",
+       "rx_frames",
+
+       "tx_bytes_ok",
+       "tx_frames_ok",
+       "tx_64_byte_frames",
+       "tx_127_byte_frames",
+       "tx_255_byte_frames",
+       "tx_511_byte_frames",
+       "tx_1023_byte_frames",
+       "tx_max_size_frames",
+       "tx_oversize_frames",
+       "tx_broadcast_frames",
+       "tx_multicast_frames",
+       "tx_control_frames",
+       "tx_pause_frames",
+       "tx_underrun_frames",
+       "tx_single_collision_frames",
+       "tx_multi_collision_frames",
+       "tx_deferred_collision_frames",
+       "tx_late_collision_frames",
+       "tx_excessive_collision_frames",
+       "tx_bytes",
+       "tx_frames",
+       "tx_collisions",
+};
+
+#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
+
+static int nb8800_get_sset_count(struct net_device *dev, int sset)
+{
+       if (sset == ETH_SS_STATS)
+               return NB8800_NUM_STATS;
+
+       return -EOPNOTSUPP;
+}
+
+static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
+{
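+       /* A failed read is reported as all ones, as from an absent PHY */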
+       if (sset == ETH_SS_STATS)
+               memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
+}
+
+static u32 nb8800_read_stat(struct net_device *dev, int index)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+
+       nb8800_writeb(priv, NB8800_STAT_INDEX, index);
+
+       return nb8800_readl(priv, NB8800_STAT_DATA);
+}
+
+static void nb8800_get_ethtool_stats(struct net_device *dev,
+                                    struct ethtool_stats *estats, u64 *st)
+{
+       unsigned int i;
+       u32 rx, tx;
+
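+       /* The first half of the stat names are rx counters; setting
+        * bit 7 of the stat index selects the tx counterpart.
+        */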
+       for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
+               rx = nb8800_read_stat(dev, i);
+               tx = nb8800_read_stat(dev, i | 0x80);
+               st[i] = rx;
+               st[i + NB8800_NUM_STATS / 2] = tx;
+       }
+}
+
+static const struct ethtool_ops nb8800_ethtool_ops = {
+       .get_settings           = nb8800_get_settings,
+       .set_settings           = nb8800_set_settings,
+       .nway_reset             = nb8800_nway_reset,
+       .get_link               = ethtool_op_get_link,
+       .get_pauseparam         = nb8800_get_pauseparam,
+       .set_pauseparam         = nb8800_set_pauseparam,
+       .get_sset_count         = nb8800_get_sset_count,
+       .get_strings            = nb8800_get_strings,
+       .get_ethtool_stats      = nb8800_get_ethtool_stats,
+};
+
+static int nb8800_hw_init(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       u32 val;
+
+       val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
+       nb8800_writeb(priv, NB8800_TX_CTL1, val);
+
+       /* Collision retry count */
+       nb8800_writeb(priv, NB8800_TX_CTL2, 5);
+
+       val = RX_PAD_STRIP | RX_AF_EN;
+       nb8800_writeb(priv, NB8800_RX_CTL, val);
+
+       /* Chosen by fair dice roll */
+       nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);
+
+       /* TX cycles per deferral period */
+       nb8800_writeb(priv, NB8800_TX_SDP, 12);
+
+       /* The following three threshold values have been
+        * experimentally determined for good results.
+        */
+
+       /* RX/TX FIFO threshold for partial empty (64-bit entries) */
+       nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);
+
+       /* RX/TX FIFO threshold for partial full (64-bit entries) */
+       nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);
+
+       /* Buffer size for transmit (64-bit entries) */
+       nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);
+
+       /* Configure tx DMA */
+
+       val = nb8800_readl(priv, NB8800_TXC_CR);
+       val &= TCR_LE;          /* keep endian setting */
+       val |= TCR_DM;          /* DMA descriptor mode */
+       val |= TCR_RS;          /* automatically store tx status */
+       val |= TCR_DIE;         /* interrupt on DMA chain completion */
+       val |= TCR_TFI(7);      /* interrupt after 7 frames transmitted */
+       val |= TCR_BTS(2);      /* 32-byte bus transaction size */
+       nb8800_writel(priv, NB8800_TXC_CR, val);
+
+       /* TX complete interrupt after 10 ms or 7 frames (see above) */
+       val = clk_get_rate(priv->clk) / 100;
+       nb8800_writel(priv, NB8800_TX_ITR, val);
+
+       /* Configure rx DMA */
+
+       val = nb8800_readl(priv, NB8800_RXC_CR);
+       val &= RCR_LE;          /* keep endian setting */
+       val |= RCR_DM;          /* DMA descriptor mode */
+       val |= RCR_RS;          /* automatically store rx status */
+       val |= RCR_DIE;         /* interrupt at end of DMA chain */
+       val |= RCR_RFI(7);      /* interrupt after 7 frames received */
+       val |= RCR_BTS(2);      /* 32-byte bus transaction size */
+       nb8800_writel(priv, NB8800_RXC_CR, val);
+
+       /* The rx interrupt can fire before the DMA has completed
+        * unless a small delay is added.  50 us is hopefully enough.
+        */
+       priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;
+
+       /* In NAPI poll mode we want to disable interrupts, but the
+        * hardware does not permit this.  Delay 10 ms instead.
+        */
+       priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;
+
+       nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
+
+       priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;
+
+       /* Flow control settings */
+
+       /* Pause time of 0.1 ms */
+       val = 100000 / 512;
+       nb8800_writeb(priv, NB8800_PQ1, val >> 8);
+       nb8800_writeb(priv, NB8800_PQ2, val & 0xff);
+
+       /* Auto-negotiate by default */
+       priv->pause_aneg = true;
+       priv->pause_rx = true;
+       priv->pause_tx = true;
+
+       nb8800_mc_init(dev, 0);
+
+       return 0;
+}
+
+static int nb8800_tangox_init(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       u32 pad_mode = PAD_MODE_MII;
+
+       switch (priv->phy_mode) {
+       case PHY_INTERFACE_MODE_MII:
+       case PHY_INTERFACE_MODE_GMII:
+               pad_mode = PAD_MODE_MII;
+               break;
+
+       case PHY_INTERFACE_MODE_RGMII:
+               pad_mode = PAD_MODE_RGMII;
+               break;
+
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+               break;
+
+       default:
+               dev_err(dev->dev.parent, "unsupported phy mode %s\n",
+                       phy_modes(priv->phy_mode));
+               return -EINVAL;
+       }
+
+       nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);
+
+       return 0;
+}
+
+static int nb8800_tangox_reset(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       int clk_div;
+
+       nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
+       usleep_range(1000, 10000);
+       nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);
+
+       wmb();          /* ensure reset is cleared before proceeding */
+
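+       /* Program the MDIO clock divider so that MDC stays within
+        * MAX_MDC_CLOCK.
+        */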
+       clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
+       nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);
+
+       return 0;
+}
+
+static const struct nb8800_ops nb8800_tangox_ops = {
+       .init   = nb8800_tangox_init,
+       .reset  = nb8800_tangox_reset,
+};
+
+static int nb8800_tango4_init(struct net_device *dev)
+{
+       struct nb8800_priv *priv = netdev_priv(dev);
+       int err;
+
+       err = nb8800_tangox_init(dev);
+       if (err)
+               return err;
+
+       /* On tango4 interrupt on DMA completion per frame works and gives
+        * better performance despite generating more rx interrupts.
+        */
+
+       /* Disable unnecessary interrupt on rx completion */
+       nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));
+
+       /* Request interrupt on descriptor DMA completion */
+       priv->rx_dma_config |= DESC_ID;
+
+       return 0;
+}
+
+static const struct nb8800_ops nb8800_tango4_ops = {
+       .init   = nb8800_tango4_init,
+       .reset  = nb8800_tangox_reset,
+};
+
+static const struct of_device_id nb8800_dt_ids[] = {
+       {
+               .compatible = "aurora,nb8800",
+       },
+       {
+               .compatible = "sigma,smp8642-ethernet",
+               .data = &nb8800_tangox_ops,
+       },
+       {
+               .compatible = "sigma,smp8734-ethernet",
+               .data = &nb8800_tango4_ops,
+       },
+       { }
+};
+
+static int nb8800_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       const struct nb8800_ops *ops = NULL;
+       struct nb8800_priv *priv;
+       struct resource *res;
+       struct net_device *dev;
+       struct mii_bus *bus;
+       const unsigned char *mac;
+       void __iomem *base;
+       int irq;
+       int ret;
+
+       match = of_match_device(nb8800_dt_ids, &pdev->dev);
+       if (match)
+               ops = match->data;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(&pdev->dev, "No IRQ\n");
+               return -EINVAL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);
+
+       dev = alloc_etherdev(sizeof(*priv));
+       if (!dev)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       priv = netdev_priv(dev);
+       priv->base = base;
+
+       priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+       if (priv->phy_mode < 0)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
+
+       priv->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(priv->clk)) {
+               dev_err(&pdev->dev, "failed to get clock\n");
+               ret = PTR_ERR(priv->clk);
+               goto err_free_dev;
+       }
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               goto err_free_dev;
+
+       spin_lock_init(&priv->tx_lock);
+
+       if (ops && ops->reset) {
+               ret = ops->reset(dev);
+               if (ret)
+                       goto err_free_dev;
+       }
+
+       bus = devm_mdiobus_alloc(&pdev->dev);
+       if (!bus) {
+               ret = -ENOMEM;
+               goto err_disable_clk;
+       }
+
+       bus->name = "nb8800-mii";
+       bus->read = nb8800_mdio_read;
+       bus->write = nb8800_mdio_write;
+       bus->parent = &pdev->dev;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
+                (unsigned long)res->start);
+       bus->priv = priv;
+
+       ret = of_mdiobus_register(bus, pdev->dev.of_node);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register MII bus\n");
+               goto err_disable_clk;
+       }
+
+       priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+       if (!priv->phy_node) {
+               dev_err(&pdev->dev, "no PHY specified\n");
+               ret = -ENODEV;
+               goto err_free_bus;
+       }
+
+       priv->mii_bus = bus;
+
+       ret = nb8800_hw_init(dev);
+       if (ret)
+               goto err_free_bus;
+
+       if (ops && ops->init) {
+               ret = ops->init(dev);
+               if (ret)
+                       goto err_free_bus;
+       }
+
+       dev->netdev_ops = &nb8800_netdev_ops;
+       dev->ethtool_ops = &nb8800_ethtool_ops;
+       dev->flags |= IFF_MULTICAST;
+       dev->irq = irq;
+
+       mac = of_get_mac_address(pdev->dev.of_node);
+       if (mac)
+               ether_addr_copy(dev->dev_addr, mac);
+
+       if (!is_valid_ether_addr(dev->dev_addr))
+               eth_hw_addr_random(dev);
+
+       nb8800_update_mac_addr(dev);
+
+       netif_carrier_off(dev);
+
+       ret = register_netdev(dev);
+       if (ret) {
+               netdev_err(dev, "failed to register netdev\n");
+               goto err_free_dma;
+       }
+
+       netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);
+
+       netdev_info(dev, "MAC address %pM\n", dev->dev_addr);
+
+       return 0;
+
+err_free_dma:
+       nb8800_dma_free(dev);
+err_free_bus:
+       mdiobus_unregister(bus);
+err_disable_clk:
+       clk_disable_unprepare(priv->clk);
+err_free_dev:
+       free_netdev(dev);
+
+       return ret;
+}
+
+static int nb8800_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct nb8800_priv *priv = netdev_priv(ndev);
+
+       unregister_netdev(ndev);
+
+       mdiobus_unregister(priv->mii_bus);
+
+       clk_disable_unprepare(priv->clk);
+
+       nb8800_dma_free(ndev);
+       free_netdev(ndev);
+
+       return 0;
+}
+
+static struct platform_driver nb8800_driver = {
+       .driver = {
+               .name           = "nb8800",
+               .of_match_table = nb8800_dt_ids,
+       },
+       .probe  = nb8800_probe,
+       .remove = nb8800_remove,
+};
+
+module_platform_driver(nb8800_driver);
+
+MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
+MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
new file mode 100644 (file)
index 0000000..e5adbc2
--- /dev/null
@@ -0,0 +1,316 @@
+#ifndef _NB8800_H_
+#define _NB8800_H_
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/bitops.h>
+
+#define RX_DESC_COUNT                  256
+#define TX_DESC_COUNT                  256
+
+#define NB8800_DESC_LOW                        4
+
+#define RX_BUF_SIZE                    1552
+
+#define RX_COPYBREAK                   256
+#define RX_COPYHDR                     128
+
+#define MAX_MDC_CLOCK                  2500000
+
+/* Stargate Solutions SSN8800 core registers */
+#define NB8800_TX_CTL1                 0x000
+#define TX_TPD                         BIT(5)
+#define TX_APPEND_FCS                  BIT(4)
+#define TX_PAD_EN                      BIT(3)
+#define TX_RETRY_EN                    BIT(2)
+#define TX_EN                          BIT(0)
+
+#define NB8800_TX_CTL2                 0x001
+
+#define NB8800_RX_CTL                  0x004
+#define RX_BC_DISABLE                  BIT(7)
+#define RX_RUNT                                BIT(6)
+#define RX_AF_EN                       BIT(5)
+#define RX_PAUSE_EN                    BIT(3)
+#define RX_SEND_CRC                    BIT(2)
+#define RX_PAD_STRIP                   BIT(1)
+#define RX_EN                          BIT(0)
+
+#define NB8800_RANDOM_SEED             0x008
+#define NB8800_TX_SDP                  0x14
+#define NB8800_TX_TPDP1                        0x18
+#define NB8800_TX_TPDP2                        0x19
+#define NB8800_SLOT_TIME               0x1c
+
+#define NB8800_MDIO_CMD                        0x020
+#define MDIO_CMD_GO                    BIT(31)
+#define MDIO_CMD_WR                    BIT(26)
+#define MDIO_CMD_ADDR(x)               ((x) << 21)
+#define MDIO_CMD_REG(x)                        ((x) << 16)
+#define MDIO_CMD_DATA(x)               ((x) <<  0)
+
+#define NB8800_MDIO_STS                        0x024
+#define MDIO_STS_ERR                   BIT(31)
+
+#define NB8800_MC_ADDR(i)              (0x028 + (i))
+#define NB8800_MC_INIT                 0x02e
+#define NB8800_UC_ADDR(i)              (0x03c + (i))
+
+#define NB8800_MAC_MODE                        0x044
+#define RGMII_MODE                     BIT(7)
+#define HALF_DUPLEX                    BIT(4)
+#define BURST_EN                       BIT(3)
+#define LOOPBACK_EN                    BIT(2)
+#define GMAC_MODE                      BIT(0)
+
+#define NB8800_IC_THRESHOLD            0x050
+#define NB8800_PE_THRESHOLD            0x051
+#define NB8800_PF_THRESHOLD            0x052
+#define NB8800_TX_BUFSIZE              0x054
+#define NB8800_FIFO_CTL                        0x056
+#define NB8800_PQ1                     0x060
+#define NB8800_PQ2                     0x061
+#define NB8800_SRC_ADDR(i)             (0x06a + (i))
+#define NB8800_STAT_DATA               0x078
+#define NB8800_STAT_INDEX              0x07c
+#define NB8800_STAT_CLEAR              0x07d
+
+#define NB8800_SLEEP_MODE              0x07e
+#define SLEEP_MODE                     BIT(0)
+
+#define NB8800_WAKEUP                  0x07f
+#define WAKEUP                         BIT(0)
+
+/* Aurora NB8800 host interface registers */
+#define NB8800_TXC_CR                  0x100
+#define TCR_LK                         BIT(12)
+#define TCR_DS                         BIT(11)
+#define TCR_BTS(x)                     (((x) & 0x7) << 8)
+#define TCR_DIE                                BIT(7)
+#define TCR_TFI(x)                     (((x) & 0x7) << 4)
+#define TCR_LE                         BIT(3)
+#define TCR_RS                         BIT(2)
+#define TCR_DM                         BIT(1)
+#define TCR_EN                         BIT(0)
+
+#define NB8800_TXC_SR                  0x104
+#define TSR_DE                         BIT(3)
+#define TSR_DI                         BIT(2)
+#define TSR_TO                         BIT(1)
+#define TSR_TI                         BIT(0)
+
+#define NB8800_TX_SAR                  0x108
+#define NB8800_TX_DESC_ADDR            0x10c
+
+#define NB8800_TX_REPORT_ADDR          0x110
+#define TX_BYTES_TRANSFERRED(x)                (((x) >> 16) & 0xffff)
+#define TX_FIRST_DEFERRAL              BIT(7)
+#define TX_EARLY_COLLISIONS(x)         (((x) >> 3) & 0xf)
+#define TX_LATE_COLLISION              BIT(2)
+#define TX_PACKET_DROPPED              BIT(1)
+#define TX_FIFO_UNDERRUN               BIT(0)
+#define IS_TX_ERROR(r)                 ((r) & 0x07)
+
+#define NB8800_TX_FIFO_SR              0x114
+#define NB8800_TX_ITR                  0x118
+
+#define NB8800_RXC_CR                  0x200
+#define RCR_FL                         BIT(13)
+#define RCR_LK                         BIT(12)
+#define RCR_DS                         BIT(11)
+#define RCR_BTS(x)                     (((x) & 7) << 8)
+#define RCR_DIE                                BIT(7)
+#define RCR_RFI(x)                     (((x) & 7) << 4)
+#define RCR_LE                         BIT(3)
+#define RCR_RS                         BIT(2)
+#define RCR_DM                         BIT(1)
+#define RCR_EN                         BIT(0)
+
+#define NB8800_RXC_SR                  0x204
+#define RSR_DE                         BIT(3)
+#define RSR_DI                         BIT(2)
+#define RSR_RO                         BIT(1)
+#define RSR_RI                         BIT(0)
+
+#define NB8800_RX_SAR                  0x208
+#define NB8800_RX_DESC_ADDR            0x20c
+
+#define NB8800_RX_REPORT_ADDR          0x210
+#define RX_BYTES_TRANSFERRED(x)                (((x) >> 16) & 0xFFFF)
+#define RX_MULTICAST_PKT               BIT(9)
+#define RX_BROADCAST_PKT               BIT(8)
+#define RX_LENGTH_ERR                  BIT(7)
+#define RX_FCS_ERR                     BIT(6)
+#define RX_RUNT_PKT                    BIT(5)
+#define RX_FIFO_OVERRUN                        BIT(4)
+#define RX_LATE_COLLISION              BIT(3)
+#define RX_ALIGNMENT_ERROR             BIT(2)
+#define RX_ERROR_MASK                  0xfc
+#define IS_RX_ERROR(r)                 ((r) & RX_ERROR_MASK)
+
+#define NB8800_RX_FIFO_SR              0x214
+#define NB8800_RX_ITR                  0x218
+
+/* Sigma Designs SMP86xx additional registers */
+#define NB8800_TANGOX_PAD_MODE         0x400
+#define PAD_MODE_MASK                  0x7
+#define PAD_MODE_MII                   0x0
+#define PAD_MODE_RGMII                 0x1
+#define PAD_MODE_GTX_CLK_INV           BIT(3)
+#define PAD_MODE_GTX_CLK_DELAY         BIT(4)
+
+#define NB8800_TANGOX_MDIO_CLKDIV      0x420
+#define NB8800_TANGOX_RESET            0x424
+
+/* Hardware DMA descriptor */
+struct nb8800_dma_desc {
+       u32                             s_addr; /* start address */
+       u32                             n_addr; /* next descriptor address */
+       u32                             r_addr; /* report address */
+       u32                             config;
+} __aligned(8);
+
+#define DESC_ID                                BIT(23)
+#define DESC_EOC                       BIT(22)
+#define DESC_EOF                       BIT(21)
+#define DESC_LK                                BIT(20)
+#define DESC_DS                                BIT(19)
+#define DESC_BTS(x)                    (((x) & 0x7) << 16)
+
+/* DMA descriptor and associated data for rx.
+ * Allocated from coherent memory.
+ */
+struct nb8800_rx_desc {
+       /* DMA descriptor */
+       struct nb8800_dma_desc          desc;
+
+       /* Status report filled in by hardware */
+       u32                             report;
+};
+
+/* Address of buffer on rx ring */
+struct nb8800_rx_buf {
+       struct page                     *page;
+       unsigned long                   offset;
+};
+
+/* DMA descriptors and associated data for tx.
+ * Allocated from coherent memory.
+ */
+struct nb8800_tx_desc {
+       /* DMA descriptor.  The second descriptor is used if packet
+        * data is unaligned.
+        */
+       struct nb8800_dma_desc          desc[2];
+
+       /* Status report filled in by hardware */
+       u32                             report;
+
+       /* Bounce buffer for initial unaligned part of packet */
+       u8                              buf[8] __aligned(8);
+};
+
+/* Packet in tx queue */
+struct nb8800_tx_buf {
+       /* Currently queued skb */
+       struct sk_buff                  *skb;
+
+       /* DMA address of the first descriptor */
+       dma_addr_t                      dma_desc;
+
+       /* DMA address of packet data */
+       dma_addr_t                      dma_addr;
+
+       /* Length of DMA mapping, less than skb->len if alignment
+        * buffer is used.
+        */
+       unsigned int                    dma_len;
+
+       /* Number of packets in chain starting here */
+       unsigned int                    chain_len;
+
+       /* Packet chain ready to be submitted to hardware */
+       bool                            ready;
+};
+
+struct nb8800_priv {
+       struct napi_struct              napi;
+
+       void __iomem                    *base;
+
+       /* RX DMA descriptors */
+       struct nb8800_rx_desc           *rx_descs;
+
+       /* RX buffers referenced by DMA descriptors */
+       struct nb8800_rx_buf            *rx_bufs;
+
+       /* Current end of chain */
+       u32                             rx_eoc;
+
+       /* Value for rx interrupt time register in NAPI interrupt mode */
+       u32                             rx_itr_irq;
+
+       /* Value for rx interrupt time register in NAPI poll mode */
+       u32                             rx_itr_poll;
+
+       /* Value for config field of rx DMA descriptors */
+       u32                             rx_dma_config;
+
+       /* TX DMA descriptors */
+       struct nb8800_tx_desc           *tx_descs;
+
+       /* TX packet queue */
+       struct nb8800_tx_buf            *tx_bufs;
+
+       /* Number of free tx queue entries */
+       atomic_t                        tx_free;
+
+       /* First free tx queue entry */
+       u32                             tx_next;
+
+       /* Next buffer to transmit */
+       u32                             tx_queue;
+
+       /* Start of current packet chain */
+       struct nb8800_tx_buf            *tx_chain;
+
+       /* Next buffer to reclaim */
+       u32                             tx_done;
+
+       /* Lock for DMA activation */
+       spinlock_t                      tx_lock;
+
+       struct mii_bus                  *mii_bus;
+       struct device_node              *phy_node;
+       struct phy_device               *phydev;
+
+       /* PHY connection type from DT */
+       int                             phy_mode;
+
+       /* Current link status */
+       int                             speed;
+       int                             duplex;
+       int                             link;
+
+       /* Pause settings */
+       bool                            pause_aneg;
+       bool                            pause_rx;
+       bool                            pause_tx;
+
+       /* DMA base address of rx descriptors, see rx_descs above */
+       dma_addr_t                      rx_desc_dma;
+
+       /* DMA base address of tx descriptors, see tx_descs above */
+       dma_addr_t                      tx_desc_dma;
+
+       struct clk                      *clk;
+};
+
+struct nb8800_ops {
+       int                             (*init)(struct net_device *dev);
+       int                             (*reset)(struct net_device *dev);
+};
+
+#endif /* _NB8800_H_ */
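
The nb8800_dma_desc layout above describes a hardware-walked list: s_addr points at the payload, n_addr at the successor, r_addr at a status slot the DMA engine writes back, and DESC_EOC in config marks the end of the chain. A minimal sketch of linking the rx ring, assuming (as rx_descs/rx_desc_dma suggest) that the descriptors live in coherent memory at bus address desc_dma -- a hypothetical helper, not part of this patch:

static void nb8800_link_rx_ring(struct nb8800_rx_desc *descs,
				dma_addr_t desc_dma, u32 config)
{
	int i;

	for (i = 0; i < RX_DESC_COUNT; i++) {
		u32 base = desc_dma + i * sizeof(*descs);

		/* next descriptor, wrapping at the end of the ring */
		descs[i].desc.n_addr = desc_dma +
			((i + 1) % RX_DESC_COUNT) * sizeof(*descs);
		/* the DMA engine writes the status word to r_addr */
		descs[i].desc.r_addr =
			base + offsetof(struct nb8800_rx_desc, report);
		descs[i].desc.config = config;
	}
	/* the engine stops at the end-of-chain mark, which the driver
	 * advances (rx_eoc above) as buffers are refilled
	 */
	descs[RX_DESC_COUNT - 1].desc.config |= DESC_EOC;
}
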
index c9b036789184e5cadc7a6e9892e1778c2b9b1582..2e611dc5f16210393852110c7dda5dadf4dfc560 100644 (file)
@@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
                DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
                return;
        }
-       bp->vxlan_dst_port--;
-       if (bp->vxlan_dst_port)
+       bp->vxlan_dst_port_count--;
+       if (bp->vxlan_dst_port_count)
                return;
 
        if (netif_running(bp->dev)) {
index db15c5ee09c53a528ea405961734dae927af0e06..bdf094fb6ef92062d62d428109e277ba9e9d15f5 100644 (file)
@@ -3625,6 +3625,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                pf->fw_fid = le16_to_cpu(resp->fid);
                pf->port_id = le16_to_cpu(resp->port_id);
                memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+               memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
                pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
                pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
                pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -3648,8 +3649,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 
                vf->fw_fid = le16_to_cpu(resp->fid);
                memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
-               if (!is_valid_ether_addr(vf->mac_addr))
-                       random_ether_addr(vf->mac_addr);
+               if (is_valid_ether_addr(vf->mac_addr))
+                       /* overwrite netdev dev_addr with admin VF MAC */
+                       memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+               else
+                       random_ether_addr(bp->dev->dev_addr);
 
                vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
                vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3880,6 +3884,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 #endif
 }
 
+static int bnxt_cfg_rx_mode(struct bnxt *);
+
 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 {
        int rc = 0;
@@ -3946,11 +3952,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
                bp->vnic_info[0].rx_mask |=
                                CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 
-       rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
-       if (rc) {
-               netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+       rc = bnxt_cfg_rx_mode(bp);
+       if (rc)
                goto err_out;
-       }
 
        rc = bnxt_hwrm_set_coal(bp);
        if (rc)
@@ -4865,7 +4869,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
        }
 }
 
-static void bnxt_cfg_rx_mode(struct bnxt *bp)
+static int bnxt_cfg_rx_mode(struct bnxt *bp)
 {
        struct net_device *dev = bp->dev;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
@@ -4914,6 +4918,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp)
                        netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
                                   rc);
                        vnic->uc_filter_count = i;
+                       return rc;
                }
        }
 
@@ -4922,6 +4927,8 @@ skip_uc:
        if (rc)
                netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
                           rc);
+
+       return rc;
 }
 
 static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -5212,13 +5219,27 @@ init_err:
 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
 {
        struct sockaddr *addr = p;
+       struct bnxt *bp = netdev_priv(dev);
+       int rc = 0;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
+#ifdef CONFIG_BNXT_SRIOV
+       if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
+               return -EADDRNOTAVAIL;
+#endif
+
+       if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+               return 0;
+
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       if (netif_running(dev)) {
+               bnxt_close_nic(bp, false, false);
+               rc = bnxt_open_nic(bp, false, false);
+       }
 
-       return 0;
+       return rc;
 }
 
 /* rtnl_lock held */
@@ -5686,15 +5707,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        bnxt_set_tpa_flags(bp);
        bnxt_set_ring_params(bp);
        dflt_rings = netif_get_num_default_rss_queues();
-       if (BNXT_PF(bp)) {
-               memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+       if (BNXT_PF(bp))
                bp->pf.max_irqs = max_irqs;
-       } else {
 #if defined(CONFIG_BNXT_SRIOV)
-               memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+       else
                bp->vf.max_irqs = max_irqs;
 #endif
-       }
        bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
        bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
        bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
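
One subtlety in the last hunk: the else arm is nested inside #if defined(CONFIG_BNXT_SRIOV), so when SR-IOV support is compiled out the preprocessor leaves a plain if statement with no else:

	/* what the compiler sees with CONFIG_BNXT_SRIOV unset */
	if (BNXT_PF(bp))
		bp->pf.max_irqs = max_irqs;
	/* the VF assignment simply vanishes; still well-formed C */
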
index f4cf6886106906d90bff221038b83914d9bbbd24..7a9af2887d8ed83f6a8672f8c1b6e18cd572c28a 100644 (file)
@@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp)
        if (!is_valid_ether_addr(resp->perm_mac_address))
                goto update_vf_mac_exit;
 
-       if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
-               goto update_vf_mac_exit;
-
-       memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+       if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+               memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+       /* overwrite netdev dev_addr with admin VF MAC */
        memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
 update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
index 88c1e1a834f8c44491c76269be7c6aa805906ab2..169059c92f80b776fb3901578ba56bb959624a07 100644 (file)
@@ -1682,6 +1682,8 @@ static void macb_init_hw(struct macb *bp)
        macb_set_hwaddr(bp);
 
        config = macb_mdc_clk_div(bp);
+       if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
+               config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
        config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
        config |= MACB_BIT(PAE);                /* PAuse Enable */
        config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
@@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev)
        /* Set MII management clock divider */
        val = macb_mdc_clk_div(bp);
        val |= macb_dbw(bp);
+       if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
+               val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
        macb_writel(bp, NCFGR, val);
 
        return 0;
index 6e1faea00ca829f5c9df34b45518d7e1a881c5c3..d83b0db7782194ad0695f0d2454a69ccf1d68220 100644 (file)
 /* GEM specific NCFGR bitfields. */
 #define GEM_GBE_OFFSET         10 /* Gigabit mode enable */
 #define GEM_GBE_SIZE           1
+#define GEM_PCSSEL_OFFSET      11
+#define GEM_PCSSEL_SIZE                1
 #define GEM_CLK_OFFSET         18 /* MDC clock division */
 #define GEM_CLK_SIZE           3
 #define GEM_DBW_OFFSET         21 /* Data bus width */
 #define GEM_DBW_SIZE           2
 #define GEM_RXCOEN_OFFSET      24
 #define GEM_RXCOEN_SIZE                1
+#define GEM_SGMIIEN_OFFSET     27
+#define GEM_SGMIIEN_SIZE       1
+
 
 /* Constants for data bus width. */
 #define GEM_DBW32              0 /* 32 bit AMBA AHB data bus width */
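
These _OFFSET/_SIZE pairs feed macb's bit and bitfield constructor macros; paraphrasing the existing definitions in this header (not part of the hunk):

#define GEM_BIT(name)					\
	(1 << GEM_##name##_OFFSET)
#define GEM_BF(name, value)				\
	(((value) & ((1 << GEM_##name##_SIZE) - 1))	\
	 << GEM_##name##_OFFSET)

So the GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL) added to NCFGR in macb.c above expands to (1 << 27) | (1 << 11).
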
index d3950b20feb9e2a62d5a13aab9b08e0683fb7365..39ca6744a4e68fe317f93e1318e729509c1b978e 100644 (file)
  * Calculated for SCLK of 700Mhz
  * value written should be a 1/16th of what is expected
  *
- * 1 tick per 0.05usec = value of 2.2
- * This 10% would be covered in CQ timer thresh value
+ * 1 tick per 0.025usec
  */
-#define NICPF_CLK_PER_INT_TICK         2
+#define NICPF_CLK_PER_INT_TICK         1
 
 /* Time to wait before we decide that a SQ is stuck.
  *
index c561fdcb79a730aeeb890c5a985b05cb873faa45..4b7fd63ae57c1d26ab54d9359297c56f35c6105c 100644 (file)
@@ -37,6 +37,7 @@ struct nicpf {
 #define        NIC_GET_BGX_FROM_VF_LMAC_MAP(map)       ((map >> 4) & 0xF)
 #define        NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)      (map & 0xF)
        u8                      vf_lmac_map[MAX_LMAC];
+       u8                      lmac_cnt;
        struct delayed_work     dwork;
        struct workqueue_struct *check_link;
        u8                      link[MAX_LMAC];
@@ -279,6 +280,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
        u64 lmac_credit;
 
        nic->num_vf_en = 0;
+       nic->lmac_cnt = 0;
 
        for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
                if (!(bgx_map & (1 << bgx)))
@@ -288,6 +290,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
                        nic->vf_lmac_map[next_bgx_lmac++] =
                                                NIC_SET_VF_LMAC_MAP(bgx, lmac);
                nic->num_vf_en += lmac_cnt;
+               nic->lmac_cnt += lmac_cnt;
 
                /* Program LMAC credits */
                lmac_credit = (1ull << 1); /* channel credit enable */
@@ -715,6 +718,13 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
        case NIC_MBOX_MSG_CFG_DONE:
                /* Last message of VF config msg sequence */
                nic->vf_enabled[vf] = true;
+               if (vf >= nic->lmac_cnt)
+                       goto unlock;
+
+               bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+               lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+               bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
                goto unlock;
        case NIC_MBOX_MSG_SHUTDOWN:
                /* First msg in VF teardown sequence */
@@ -722,6 +732,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
                if (vf >= nic->num_vf_en)
                        nic->sqs_used[vf - nic->num_vf_en] = false;
                nic->pqs_vf[vf] = 0;
+
+               if (vf >= nic->lmac_cnt)
+                       break;
+
+               bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+               lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+               bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
                break;
        case NIC_MBOX_MSG_ALLOC_SQS:
                nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -940,7 +958,7 @@ static void nic_poll_for_link(struct work_struct *work)
 
        mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
 
-       for (vf = 0; vf < nic->num_vf_en; vf++) {
+       for (vf = 0; vf < nic->lmac_cnt; vf++) {
                /* Poll only if VF is UP */
                if (!nic->vf_enabled[vf])
                        continue;
@@ -1074,8 +1092,7 @@ static void nic_remove(struct pci_dev *pdev)
 
        if (nic->check_link) {
                /* Destroy work Queue */
-               cancel_delayed_work(&nic->dwork);
-               flush_workqueue(nic->check_link);
+               cancel_delayed_work_sync(&nic->dwork);
                destroy_workqueue(nic->check_link);
        }
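
The hunk above swaps cancel_delayed_work() plus flush_workqueue() for cancel_delayed_work_sync(). The difference matters for self-rearming work such as nic_poll_for_link(): the plain cancel does not wait for an instance that is already running, which can then re-queue itself after the flush and race with destroy_workqueue(). A sketch of the safe ordering:

static void stop_link_poll(struct nicpf *nic)
{
	/* cancel, wait for a running handler to finish, and keep it
	 * from re-arming itself in the meantime
	 */
	cancel_delayed_work_sync(&nic->dwork);

	/* nothing can be pending or running on the queue now */
	destroy_workqueue(nic->check_link);
}
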
 
index af54c10945c25175e344ebf052e47e88c62219a3..a12b2e38cf61221fc4de44f6395eb60de76825a5 100644 (file)
@@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev,
 
        cmd->supported = 0;
        cmd->transceiver = XCVR_EXTERNAL;
+
+       if (!nic->link_up) {
+               cmd->duplex = DUPLEX_UNKNOWN;
+               ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+               return 0;
+       }
+
        if (nic->speed <= 1000) {
                cmd->port = PORT_MII;
                cmd->autoneg = AUTONEG_ENABLE;
@@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev,
        return 0;
 }
 
+static u32 nicvf_get_link(struct net_device *netdev)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+
+       return nic->link_up;
+}
+
 static void nicvf_get_drvinfo(struct net_device *netdev,
                              struct ethtool_drvinfo *info)
 {
@@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev,
 
 static const struct ethtool_ops nicvf_ethtool_ops = {
        .get_settings           = nicvf_get_settings,
-       .get_link               = ethtool_op_get_link,
+       .get_link               = nicvf_get_link,
        .get_drvinfo            = nicvf_get_drvinfo,
        .get_msglevel           = nicvf_get_msglevel,
        .set_msglevel           = nicvf_set_msglevel,
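
For contrast, the generic ethtool_op_get_link() being replaced reports only the carrier flag, roughly:

u32 ethtool_op_get_link(struct net_device *dev)
{
	return netif_carrier_ok(dev) ? 1 : 0;
}

Returning nic->link_up instead keeps ethtool in step with the mailbox-driven link state the VF tracks itself, which nicvf_stop() now also clears explicitly (next hunk).
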
index 7f709cbdcd87d2f0a0beedd338437218ee0af094..dde8dc720cd3f3b7d513776662a4f05b1241486d 100644 (file)
@@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev)
 
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(nic->netdev);
+       nic->link_up = false;
 
        /* Teardown secondary qsets first */
        if (!nic->sqs_mode) {
@@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev)
        nic->drv_stats.txq_stop = 0;
        nic->drv_stats.txq_wake = 0;
 
-       netif_carrier_on(netdev);
-       netif_tx_start_all_queues(netdev);
-
        return 0;
 cleanup:
        nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
index e404ea837727eada97159544ccaca9406c7dd940..206b6a71a545aff776cdd88809c2f59b2f9d1216 100644 (file)
@@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
-                             qidx, nic->cq_coalesce_usecs);
+                             qidx, CMP_QUEUE_TIMER_THRESH);
 }
 
 /* Configures transmit queue */
index fb4957d099144ca7ff62064b66be137dbbf6733e..033e8306e91c6942862ffe9dae0ca3836a8228f5 100644 (file)
@@ -76,7 +76,7 @@
 #define CMP_QSIZE              CMP_QUEUE_SIZE2
 #define CMP_QUEUE_LEN          (1ULL << (CMP_QSIZE + 10))
 #define CMP_QUEUE_CQE_THRESH   0
-#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
+#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
 
 #define RBDR_SIZE              RBDR_SIZE0
 #define RCV_BUF_COUNT          (1ULL << (RBDR_SIZE + 13))
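
As a cross-check, the two timer changes agree with each other: at SCLK = 700 MHz, one 0.025 us tick spans 700e6 * 0.025e-6 = 17.5 clock cycles, and since the register takes 1/16th of the cycle count, 17.5 / 16 ~= 1.09 truncates to the new NICPF_CLK_PER_INT_TICK of 1 in nic.h above; on the completion-queue side, 80 ticks * 0.025 us = 2 us, matching the '~2usec' annotation.
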
index 180aa9fabf4820df042f18cba8c39d5ce1668e9d..9df26c2263bcc37bdfced23e2db688c151942733 100644 (file)
@@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
 }
 EXPORT_SYMBOL(bgx_set_lmac_mac);
 
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+{
+       struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+       u64 cfg;
+
+       if (!bgx)
+               return;
+
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       if (enable)
+               cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+       else
+               cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+}
+EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+
 static void bgx_sgmii_change_link_state(struct lmac *lmac)
 {
        struct bgx *bgx = lmac->bgx;
@@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work)
                lmac->last_duplex = 1;
        } else {
                lmac->link_up = 0;
+               lmac->last_speed = SPEED_UNKNOWN;
+               lmac->last_duplex = DUPLEX_UNKNOWN;
        }
 
        if (lmac->last_link != lmac->link_up) {
@@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
        }
 
        /* Enable lmac */
-       bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
-                      CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+       bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
 
        /* Restore default cfg, in case low-level firmware changed it */
        bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
@@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
        lmac = &bgx->lmac[lmacid];
        if (lmac->check_link) {
                /* Destroy work queue */
-               cancel_delayed_work(&lmac->dwork);
-               flush_workqueue(lmac->check_link);
+               cancel_delayed_work_sync(&lmac->dwork);
                destroy_workqueue(lmac->check_link);
        }
 
@@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct bgx *bgx = NULL;
        u8 lmac;
 
+       /* Load octeon mdio driver */
+       octeon_mdiobus_force_mod_depencency();
+
        bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
        if (!bgx)
                return -ENOMEM;
index 07b7ec66c60db8dd398b7ff60321598c9317c4c9..149e179363a1e6dd71e0b7cbd00f26a5e5ef0116 100644 (file)
@@ -182,6 +182,8 @@ enum MCAST_MODE {
 #define BCAST_ACCEPT   1
 #define CAM_ACCEPT     1
 
+void octeon_mdiobus_force_mod_depencency(void);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
 void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
 unsigned bgx_get_map(int node);
 int bgx_get_lmac_count(int node, int bgx);
index ed41559bae771b8d58e49f21a46f7a79d9f20cac..b553409e04ad33ed50fafe3bdaa8de74d965e6eb 100644 (file)
@@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800;
 #elif defined(__mips__)
 static int csr0 = 0x00200000 | 0x4000;
 #else
-#warning Processor architecture undefined!
-static int csr0 = 0x00A00000 | 0x4800;
+static int csr0;
 #endif
 
 /* Operational parameters that usually are not changed. */
@@ -1982,6 +1981,12 @@ static int __init tulip_init (void)
        pr_info("%s", version);
 #endif
 
+       if (!csr0) {
+               pr_warn("tulip: unknown CPU architecture, using default csr0\n");
+               /* default to 8 longword cache line alignment */
+               csr0 = 0x00A00000 | 0x4800;
+       }
+
        /* copy module parms into globals */
        tulip_rx_copybreak = rx_copybreak;
        tulip_max_interrupt_work = max_interrupt_work;
index 9beb3d34d4bad9f81ec8c11c560150d8b4640ed8..3c0e4d5c5fef41a8fe1fe99c0a163d30cd74fc31 100644 (file)
@@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev)
 #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
        i |= 0x4800;
 #else
-#warning Processor architecture undefined
+       dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
        i |= 0x4800;
 #endif
        iowrite32(i, ioaddr + PCIBusCfg);
index ff76d4e9dc1ba5eab90413f82592a876092a8ddf..bee32a9d9876f02719a5c46be9b65ff145f76c3b 100644 (file)
@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE
        default y
        depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
                   M523x || M527x || M5272 || M528x || M520x || M532x || \
-                  ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
+                  ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
+                  ARCH_LAYERSCAPE
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
 
index 3e6b9b437497dba9431fdedd3cf03262c0af3b0f..7cf898455e60076d52c1031422f3d90a3a10f4a3 100644 (file)
@@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np,
        if (model && strcasecmp(model, "FEC")) {
                gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
                gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
-               if (gfar_irq(grp, TX)->irq == NO_IRQ ||
-                   gfar_irq(grp, RX)->irq == NO_IRQ ||
-                   gfar_irq(grp, ER)->irq == NO_IRQ)
+               if (!gfar_irq(grp, TX)->irq ||
+                   !gfar_irq(grp, RX)->irq ||
+                   !gfar_irq(grp, ER)->irq)
                        return -EINVAL;
        }
 
index 664d0c261269097bccae55c12dcd7abcab140d5b..b40fba929d650d282c23f5d96636806ef4797d48 100644 (file)
@@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 
        etsects->irq = platform_get_irq(dev, 0);
 
-       if (etsects->irq == NO_IRQ) {
+       if (etsects->irq < 0) {
                pr_err("irq not in device tree\n");
                goto no_node;
        }
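
The comparison being removed is non-portable: NO_IRQ is 0 on some architectures and -1 on others, while platform_get_irq() signals failure with a negative errno (including -EPROBE_DEFER). The canonical check:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagate the errno */

Note the different convention in the gianfar hunk just before this one: irq_of_parse_and_map() returns 0 on failure, hence the !irq tests there.
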
index 639263d5e833a04d19967f6df3cd20d8ce574a3b..7781e80896a60a59e12eec15582cc3743447e1dd 100644 (file)
@@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
                /* verify the skb head is not shared */
                err = skb_cow_head(skb, 0);
-               if (err)
+               if (err) {
+                       dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
+               }
 
                /* locate vlan header */
                vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
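
The added dev_kfree_skb() plugs a leak: NETDEV_TX_OK tells the stack the driver consumed the skb, so an error path that keeps that return value must free the buffer itself. The two valid contracts, side by side:

	if (skb_cow_head(skb, 0)) {
		dev_kfree_skb(skb);	/* consumed (dropped) */
		return NETDEV_TX_OK;
	}
	/* ...or leave the skb untouched and have the core retry:
	 *	return NETDEV_TX_BUSY;
	 * returning OK without freeing, as before, leaks the skb.
	 */
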
index e84c7f2634d3759805326707a29c33737bcad615..ed622fa29dfab61c448c0c446ad2cae467ee1df2 100644 (file)
@@ -36,7 +36,7 @@
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
-#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(1)
+#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
 #define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
 #define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
 #define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
@@ -62,6 +62,7 @@
 #define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
 #define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
 #define MVNETA_BASE_ADDR_ENABLE                 0x2290
+#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
 #define MVNETA_PORT_CONFIG                      0x2400
 #define      MVNETA_UNI_PROMISC_MODE            BIT(0)
 #define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
 
 #define MVNETA_INTR_ENABLE                       0x25b8
 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
-#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000  // note: neta says it's 0x000000FF
+#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
 
 #define MVNETA_RXQ_CMD                           0x2680
 #define      MVNETA_RXQ_DISABLE_SHIFT            8
 #define MVNETA_VLAN_TAG_LEN             4
 
 #define MVNETA_CPU_D_CACHE_LINE_SIZE    32
+#define MVNETA_TX_CSUM_DEF_SIZE                1600
 #define MVNETA_TX_CSUM_MAX_SIZE                9800
 #define MVNETA_ACC_MODE_EXT            1
 
@@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                }
 
                skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
-               if (!skb)
-                       goto err_drop_frame;
 
+               /* After the refill, the old buffer has to be unmapped
+                * regardless of whether the skb was built successfully.
+                */
                dma_unmap_single(dev->dev.parent, phys_addr,
                                 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
+               if (!skb)
+                       goto err_drop_frame;
+
                rcvd_pkts++;
                rcvd_bytes += rx_bytes;
 
@@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
        }
 
        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+       mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
 }
 
 /* Power up the port */
@@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev)
        char hw_mac_addr[ETH_ALEN];
        const char *mac_from;
        const char *managed;
+       int tx_csum_limit;
        int phy_mode;
        int err;
        int cpu;
@@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev)
                }
        }
 
-       if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
-               pp->tx_csum_limit = 1600;
+       if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
+               if (tx_csum_limit < 0 ||
+                   tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
+                       tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+                       dev_info(&pdev->dev,
+                                "Wrong TX csum limit in DT, set to %dB\n",
+                                MVNETA_TX_CSUM_DEF_SIZE);
+               }
+       } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
+               tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+       } else {
+               tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
+       }
+
+       pp->tx_csum_limit = tx_csum_limit;
 
        pp->tx_ring_size = MVNETA_MAX_TXD;
        pp->rx_ring_size = MVNETA_MAX_RXD;
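
The tx-csum-limit logic above is the usual optional-DT-property idiom: attempt of_property_read_u32(), validate, and otherwise fall back to a per-compatible default. Stripped to its shape as a hypothetical helper:

static u32 dt_u32_or_default(struct device_node *dn, const char *prop,
			     u32 def, u32 max)
{
	u32 val;

	if (of_property_read_u32(dn, prop, &val))
		return def;	/* property absent */
	if (val > max)
		return def;	/* present but out of range */
	return val;
}
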
index b159ef8303cc3e65d1e374367d19ca590d934901..057665180f13f3d9980eac2b807793675df16021 100644 (file)
@@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        /* Get platform resources */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if ((!res) || (irq < 0) || (irq >= NR_IRQS)) {
+       if (!res || irq < 0) {
                dev_err(&pdev->dev, "error getting resources.\n");
                ret = -ENXIO;
                goto err_exit;
index ee8d1ec61fabaffe081e777d70fd1b381990fbea..ed5da4d476684d502a7d83469ee83566635d3d46 100644 (file)
@@ -1225,7 +1225,7 @@ static int ravb_open(struct net_device *ndev)
        /* Device init */
        error = ravb_dmac_init(ndev);
        if (error)
-               goto out_free_irq;
+               goto out_free_irq2;
        ravb_emac_init(ndev);
 
        /* Initialise PTP Clock driver */
@@ -1243,9 +1243,11 @@ static int ravb_open(struct net_device *ndev)
 out_ptp_stop:
        /* Stop PTP Clock driver */
        ravb_ptp_stop(ndev);
+out_free_irq2:
+       if (priv->chip_id == RCAR_GEN3)
+               free_irq(priv->emac_irq, ndev);
 out_free_irq:
        free_irq(ndev->irq, ndev);
-       free_irq(priv->emac_irq, ndev);
 out_napi_off:
        napi_disable(&priv->napi[RAVB_NC]);
        napi_disable(&priv->napi[RAVB_BE]);
index 7f6f4a4fcc708973af0aa48418bedaad984ef5fd..58c05acc2aabbdf63419874605ae11af298471ca 100644 (file)
@@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
        if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
                const char *rs;
 
+               dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
+
                err = of_property_read_string(np, "st,tx-retime-src", &rs);
                if (err < 0) {
                        dev_warn(dev, "Use internal clock source\n");
-                       dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
-               } else if (!strcasecmp(rs, "clk_125")) {
-                       dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
-               } else if (!strcasecmp(rs, "txclk")) {
-                       dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
+               } else {
+                       if (!strcasecmp(rs, "clk_125"))
+                               dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
+                       else if (!strcasecmp(rs, "txclk"))
+                               dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
                }
-
                dwmac->speed = SPEED_1000;
        }
 
index 64d8aa4e0cad6420bcb1fa4cd827ad823ba3601d..3c6549aee11dee5cf13c92c90c5b4b7a8618f375 100644 (file)
@@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
-               else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+               else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
 }
@@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 
                        frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
 
+                       /*  check if frame_len fits the preallocated memory */
+                       if (frame_len > priv->dma_buf_sz) {
+                               priv->dev->stats.rx_length_errors++;
+                               break;
+                       }
+
                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
                         * Type frames (LLC/LLC-SNAP)
                         */
@@ -3102,6 +3108,7 @@ int stmmac_resume(struct net_device *ndev)
        init_dma_desc_rings(ndev, GFP_ATOMIC);
        stmmac_hw_setup(ndev, false);
        stmmac_init_tx_coalesce(priv);
+       stmmac_set_rx_mode(ndev);
 
        napi_enable(&priv->napi);
 
index ebf6abc4853f300392677614d6f45be0f95e8fca..bba670c42e3749483bf2218ad2da6b3b4c8d0587 100644 (file)
@@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus)
 
 #ifdef CONFIG_OF
        if (priv->device->of_node) {
-               int reset_gpio, active_low;
 
                if (data->reset_gpio < 0) {
                        struct device_node *np = priv->device->of_node;
@@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus)
                                                "snps,reset-active-low");
                        of_property_read_u32_array(np,
                                "snps,reset-delays-us", data->delays, 3);
-               }
 
-               reset_gpio = data->reset_gpio;
-               active_low = data->active_low;
+                       if (gpio_request(data->reset_gpio, "mdio-reset"))
+                               return 0;
+               }
 
-               if (!gpio_request(reset_gpio, "mdio-reset")) {
-                       gpio_direction_output(reset_gpio, active_low ? 1 : 0);
-                       if (data->delays[0])
-                               msleep(DIV_ROUND_UP(data->delays[0], 1000));
+               gpio_direction_output(data->reset_gpio,
+                                     data->active_low ? 1 : 0);
+               if (data->delays[0])
+                       msleep(DIV_ROUND_UP(data->delays[0], 1000));
 
-                       gpio_set_value(reset_gpio, active_low ? 0 : 1);
-                       if (data->delays[1])
-                               msleep(DIV_ROUND_UP(data->delays[1], 1000));
+               gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1);
+               if (data->delays[1])
+                       msleep(DIV_ROUND_UP(data->delays[1], 1000));
 
-                       gpio_set_value(reset_gpio, active_low ? 1 : 0);
-                       if (data->delays[2])
-                               msleep(DIV_ROUND_UP(data->delays[2], 1000));
-               }
+               gpio_set_value(data->reset_gpio, data->active_low ? 1 : 0);
+               if (data->delays[2])
+                       msleep(DIV_ROUND_UP(data->delays[2], 1000));
        }
 #endif
 
index c08be62bceba6f2e2eb59e0da7ac9c3bdbe61813..1562ab4151e192a079fc2a54dec7f8c101bcd109 100644 (file)
@@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
 
 int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
 {
+       if (of_machine_is_compatible("ti,dm8148"))
+               return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
        if (of_machine_is_compatible("ti,am33xx"))
                return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
 
index 54036ae0a388c9a34cbb92fc832980fc1e584dc2..0fc521941c718dbcdea487a18a900984ad176ea5 100644 (file)
@@ -498,7 +498,7 @@ static void macvtap_sock_write_space(struct sock *sk)
        wait_queue_head_t *wqueue;
 
        if (!sock_writeable(sk) ||
-           !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+           !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;
 
        wqueue = sk_sleep(sk);
@@ -585,7 +585,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
                mask |= POLLIN | POLLRDNORM;
 
        if (sock_writeable(&q->sk) ||
-           (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
+           (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
             sock_writeable(&q->sk)))
                mask |= POLLOUT | POLLWRNORM;
 
index 07a6119121c38922aa50b3241e95d4604435a051..3ce5d9514623ce210ba9f8b13a113b0ef4c91165 100644 (file)
@@ -614,7 +614,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
        { PHY_ID_BCM5461, 0xfffffff0 },
        { PHY_ID_BCM54616S, 0xfffffff0 },
        { PHY_ID_BCM5464, 0xfffffff0 },
-       { PHY_ID_BCM5482, 0xfffffff0 },
+       { PHY_ID_BCM5481, 0xfffffff0 },
        { PHY_ID_BCM5482, 0xfffffff0 },
        { PHY_ID_BCM50610, 0xfffffff0 },
        { PHY_ID_BCM50610M, 0xfffffff0 },
index 48ce6ef400fe7fdda6ae78a9a22943b2307c1c8b..47cd306dbb3c4909ae95feb586f0b89198126995 100644 (file)
@@ -448,7 +448,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
                mdiobus_write(phydev->bus, mii_data->phy_id,
                              mii_data->reg_num, val);
 
-               if (mii_data->reg_num == MII_BMCR &&
+               if (mii_data->phy_id == phydev->addr &&
+                   mii_data->reg_num == MII_BMCR &&
                    val & BMCR_RESET)
                        return phy_init_hw(phydev);
 
index b1878faea3974f6ae984763109edae78c4cd3342..f0db770e8b2f9655be8d3c3000a9395f945284e2 100644 (file)
@@ -1040,7 +1040,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
                mask |= POLLIN | POLLRDNORM;
 
        if (sock_writeable(sk) ||
-           (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+           (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
             sock_writeable(sk)))
                mask |= POLLOUT | POLLWRNORM;
 
@@ -1488,7 +1488,7 @@ static void tun_sock_write_space(struct sock *sk)
        if (!sock_writeable(sk))
                return;
 
-       if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+       if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;
 
        wqueue = sk_sleep(sk);
index a187f08113ecbb77612dd11a2611d4649d79c47d..3b1ba823776842445b5d5b6a709c58208c357f2b 100644 (file)
@@ -691,7 +691,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
 
 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
 {
-       const struct usb_cdc_union_desc *union_desc = NULL;
        struct cdc_ncm_ctx *ctx;
        struct usb_driver *driver;
        u8 *buf;
@@ -725,15 +724,16 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
        /* parse through descriptors associated with control interface */
        cdc_parse_cdc_header(&hdr, intf, buf, len);
 
-       ctx->data = usb_ifnum_to_if(dev->udev,
-                                   hdr.usb_cdc_union_desc->bSlaveInterface0);
+       if (hdr.usb_cdc_union_desc)
+               ctx->data = usb_ifnum_to_if(dev->udev,
+                                           hdr.usb_cdc_union_desc->bSlaveInterface0);
        ctx->ether_desc = hdr.usb_cdc_ether_desc;
        ctx->func_desc = hdr.usb_cdc_ncm_desc;
        ctx->mbim_desc = hdr.usb_cdc_mbim_desc;
        ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc;
 
        /* some buggy devices have an IAD but no CDC Union */
-       if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
+       if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
                ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
                dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
        }
index 34799eaace41bcdb93cfa4c1ed76af31ae052ef3..9a5be8b851867e53f7427ba31f7a089ad02562fd 100644 (file)
@@ -725,6 +725,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
+       {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
        {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},    /* Olivetti Olicard 100 */
        {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},    /* Olivetti Olicard 120 */
        {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)},    /* Olivetti Olicard 140 */
index d8838dedb7a4cd06f54e2fa8a522b6c4bb15091e..f94ab786088fc8fd7f848edeaedd4d2aff160606 100644 (file)
@@ -140,6 +140,12 @@ struct virtnet_info {
 
        /* CPU hot plug notifier */
        struct notifier_block nb;
+
+       /* Control VQ buffers: protected by the rtnl lock */
+       struct virtio_net_ctrl_hdr ctrl_hdr;
+       virtio_net_ctrl_ack ctrl_status;
+       u8 ctrl_promisc;
+       u8 ctrl_allmulti;
 };
 
 struct padded_vnet_hdr {
@@ -976,31 +982,30 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *out)
 {
        struct scatterlist *sgs[4], hdr, stat;
-       struct virtio_net_ctrl_hdr ctrl;
-       virtio_net_ctrl_ack status = ~0;
        unsigned out_num = 0, tmp;
 
        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
-       ctrl.class = class;
-       ctrl.cmd = cmd;
+       vi->ctrl_status = ~0;
+       vi->ctrl_hdr.class = class;
+       vi->ctrl_hdr.cmd = cmd;
        /* Add header */
-       sg_init_one(&hdr, &ctrl, sizeof(ctrl));
+       sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
        sgs[out_num++] = &hdr;
 
        if (out)
                sgs[out_num++] = out;
 
        /* Add return status. */
-       sg_init_one(&stat, &status, sizeof(status));
+       sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
        sgs[out_num] = &stat;
 
        BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
        virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
 
        if (unlikely(!virtqueue_kick(vi->cvq)))
-               return status == VIRTIO_NET_OK;
+               return vi->ctrl_status == VIRTIO_NET_OK;
 
        /* Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
@@ -1009,7 +1014,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
               !virtqueue_is_broken(vi->cvq))
                cpu_relax();
 
-       return status == VIRTIO_NET_OK;
+       return vi->ctrl_status == VIRTIO_NET_OK;
 }
 
 static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1151,7 +1156,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
-       u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct netdev_hw_addr *ha;
        int uc_count;
@@ -1163,22 +1167,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;
 
-       promisc = ((dev->flags & IFF_PROMISC) != 0);
-       allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+       vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
+       vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
 
-       sg_init_one(sg, &promisc, sizeof(promisc));
+       sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC, sg))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
-                        promisc ? "en" : "dis");
+                        vi->ctrl_promisc ? "en" : "dis");
 
-       sg_init_one(sg, &allmulti, sizeof(allmulti));
+       sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
-                        allmulti ? "en" : "dis");
+                        vi->ctrl_allmulti ? "en" : "dis");
 
        uc_count = netdev_uc_count(dev);
        mc_count = netdev_mc_count(dev);
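
The control-queue buffers move off the stack because they are handed to virtqueue_add_sgs() through scatterlists: a stack address is not guaranteed to remain valid or be safely translatable once virtio rings are mapped through the DMA API, whereas struct virtnet_info is kmalloc-backed and, per the comment added to the struct, serialized by the rtnl lock. The before/after shape, as fragments:

	/* before: device-visible data on the stack (unsafe) */
	u8 promisc = (dev->flags & IFF_PROMISC) != 0;
	sg_init_one(sg, &promisc, sizeof(promisc));

	/* after: buffer embedded in the long-lived private struct */
	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
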
index 899ea42881970bb7e579d091696ef9ca1f9aa896..417903715437ebda5b07a859effdca86a62d196b 100644 (file)
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
                                                &adapter->pdev->dev,
                                                rbi->skb->data, rbi->len,
                                                PCI_DMA_FROMDEVICE);
+                               if (dma_mapping_error(&adapter->pdev->dev,
+                                                     rbi->dma_addr)) {
+                                       dev_kfree_skb_any(rbi->skb);
+                                       rq->stats.rx_buf_alloc_failure++;
+                                       break;
+                               }
                        } else {
                                /* rx buffer skipped by the device */
                        }
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
                                                &adapter->pdev->dev,
                                                rbi->page, 0, PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
+                               if (dma_mapping_error(&adapter->pdev->dev,
+                                                     rbi->dma_addr)) {
+                                       put_page(rbi->page);
+                                       rq->stats.rx_buf_alloc_failure++;
+                                       break;
+                               }
                        } else {
                                /* rx buffers skipped by the device */
                        }
                        val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
                }
 
-               BUG_ON(rbi->dma_addr == 0);
                gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
                gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
                                           | val | rbi->len);
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 }
 
 
-static void
+static int
 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
                struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
                                skb->data + buf_offset, buf_size,
                                PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+                       return -EFAULT;
 
                tbi->len = buf_size;
 
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                        tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
                                                         buf_offset, buf_size,
                                                         DMA_TO_DEVICE);
+                       if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+                               return -EFAULT;
 
                        tbi->len = buf_size;
 
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
        /* set the last buf_info for the pkt */
        tbi->skb = skb;
        tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+
+       return 0;
 }
 
 
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
        }
 
        /* fill tx descs related to addr & len */
-       vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
+       if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
+               goto unlock_drop_pkt;
 
        /* setup the EOP desc */
        ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                struct vmxnet3_rx_buf_info *rbi;
                struct sk_buff *skb, *new_skb = NULL;
                struct page *new_page = NULL;
+               dma_addr_t new_dma_addr;
                int num_to_alloc;
                struct Vmxnet3_RxDesc *rxd;
                u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                skip_page_frags = true;
                                goto rcd_done;
                        }
+                       new_dma_addr = dma_map_single(&adapter->pdev->dev,
+                                                     new_skb->data, rbi->len,
+                                                     PCI_DMA_FROMDEVICE);
+                       if (dma_mapping_error(&adapter->pdev->dev,
+                                             new_dma_addr)) {
+                               dev_kfree_skb(new_skb);
+                               /* DMA mapping failed; do not hand this
+                                * skb to the stack.  Reuse the old buffer
+                                * and drop the existing packet.
+                                */
+                               rq->stats.rx_buf_alloc_failure++;
+                               ctx->skb = NULL;
+                               rq->stats.drop_total++;
+                               skip_page_frags = true;
+                               goto rcd_done;
+                       }
 
                        dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
                                         rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
                        /* Immediate refill */
                        rbi->skb = new_skb;
-                       rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
-                                                      rbi->skb->data, rbi->len,
-                                                      PCI_DMA_FROMDEVICE);
+                       rbi->dma_addr = new_dma_addr;
                        rxd->addr = cpu_to_le64(rbi->dma_addr);
                        rxd->len = rbi->len;
                        if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                        skip_page_frags = true;
                                        goto rcd_done;
                                }
+                               new_dma_addr = dma_map_page(&adapter->pdev->dev
+                                                       , rbi->page,
+                                                       0, PAGE_SIZE,
+                                                       PCI_DMA_FROMDEVICE);
+                               if (dma_mapping_error(&adapter->pdev->dev,
+                                                     new_dma_addr)) {
+                                       put_page(new_page);
+                                       rq->stats.rx_buf_alloc_failure++;
+                                       dev_kfree_skb(ctx->skb);
+                                       ctx->skb = NULL;
+                                       skip_page_frags = true;
+                                       goto rcd_done;
+                               }
 
                                dma_unmap_page(&adapter->pdev->dev,
                                               rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
                                /* Immediate refill */
                                rbi->page = new_page;
-                               rbi->dma_addr = dma_map_page(&adapter->pdev->dev
-                                                       , rbi->page,
-                                                       0, PAGE_SIZE,
-                                                       PCI_DMA_FROMDEVICE);
+                               rbi->dma_addr = new_dma_addr;
                                rxd->addr = cpu_to_le64(rbi->dma_addr);
                                rxd->len = rbi->len;
                        }
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev)
                                                        PCI_DMA_TODEVICE);
                        }
 
-                       if (new_table_pa) {
+                       if (!dma_mapping_error(&adapter->pdev->dev,
+                                              new_table_pa)) {
                                new_mode |= VMXNET3_RXM_MCAST;
                                rxConf->mfTablePA = cpu_to_le64(new_table_pa);
                        } else {
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
                                             sizeof(struct vmxnet3_adapter),
                                             PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+               dev_err(&pdev->dev, "Failed to map dma\n");
+               err = -EFAULT;
+               goto err_dma_map;
+       }
        adapter->shared = dma_alloc_coherent(
                                &adapter->pdev->dev,
                                sizeof(struct Vmxnet3_DriverShared),
@@ -3233,6 +3281,7 @@ err_alloc_queue_desc:
 err_alloc_shared:
        dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
                         sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+err_dma_map:
        free_netdev(netdev);
        return err;
 }
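
The pattern running through this vmxnet3 change is map-first, swap-later: the replacement buffer is DMA-mapped and checked with dma_mapping_error() before the old mapping is torn down and the descriptor rewritten, so a failed mapping is never handed to the device. A minimal sketch of that discipline, using hypothetical names (struct rx_buf, rx_refill) rather than the driver's own symbols:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct rx_buf {			/* hypothetical stand-in for the driver's buffer info */
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u32 len;
};

static int rx_refill(struct device *dev, struct rx_buf *rbi,
		     struct sk_buff *new_skb)
{
	dma_addr_t new_dma = dma_map_single(dev, new_skb->data, rbi->len,
					    DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, new_dma)) {
		dev_kfree_skb(new_skb);	/* drop the replacement; old buffer stays valid */
		return -ENOMEM;
	}

	/* Only now is it safe to release the old mapping and swap buffers. */
	dma_unmap_single(dev, rbi->dma_addr, rbi->len, DMA_FROM_DEVICE);
	rbi->skb = new_skb;
	rbi->dma_addr = new_dma;
	return 0;
}
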
index 92fa3e1ea65cca564907a43a78a859b6063f7f65..4f9748457f5a722658e5cfb01b6cbcc0f488d14f 100644 (file)
@@ -907,7 +907,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
 {
        struct net_vrf *vrf = netdev_priv(dev);
-       int err;
 
        if (!data || !data[IFLA_VRF_TABLE])
                return -EINVAL;
@@ -916,15 +915,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
 
        dev->priv_flags |= IFF_L3MDEV_MASTER;
 
-       err = register_netdevice(dev);
-       if (err < 0)
-               goto out_fail;
-
-       return 0;
-
-out_fail:
-       free_netdev(dev);
-       return err;
+       return register_netdevice(dev);
 }
 
 static size_t vrf_nl_getsize(const struct net_device *dev)
index e92aaf61590109430aa6b9b6cd100b7d35daf90b..89541cc90e877b476bf6d1fc973d4393bc01acc7 100644 (file)
@@ -1075,11 +1075,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
 
        used = pvc_is_used(pvc);
 
-       if (type == ARPHRD_ETHER) {
+       if (type == ARPHRD_ETHER)
                dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
                                   ether_setup);
-               dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-       } else
+       else
                dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
 
        if (!dev) {
@@ -1088,9 +1087,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
                return -ENOBUFS;
        }
 
-       if (type == ARPHRD_ETHER)
+       if (type == ARPHRD_ETHER) {
+               dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                eth_hw_addr_random(dev);
-       else {
+       } else {
                *(__be16*)dev->dev_addr = htons(dlci);
                dlci_to_q922(dev->broadcast, dlci);
        }
index 5c47b011a9d7f3000a27d4c81d3dc75e5f855129..cd39025d2abf5923f49e83adb08bb8de88373cb5 100644 (file)
@@ -549,16 +549,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty,
 
 static int x25_asy_open_tty(struct tty_struct *tty)
 {
-       struct x25_asy *sl = tty->disc_data;
+       struct x25_asy *sl;
        int err;
 
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
 
-       /* First make sure we're not already connected. */
-       if (sl && sl->magic == X25_ASY_MAGIC)
-               return -EEXIST;
-
        /* OK.  Find a free X.25 channel to use. */
        sl = x25_asy_alloc();
        if (sl == NULL)
index aa9bd92ac4edf7e5b78229a136ca499f8f7c15e3..0947cc271e693feaf73b3ae25d0478d2fa529ff1 100644 (file)
@@ -51,6 +51,7 @@ MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        {
                .id = QCA988X_HW_2_0_VERSION,
+               .dev_id = QCA988X_2_0_DEVICE_ID,
                .name = "qca988x hw2.0",
                .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
+               .dev_id = QCA6164_2_1_DEVICE_ID,
+               .name = "qca6164 hw2.1",
+               .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+               .uart_pin = 6,
+               .otp_exe_param = 0,
+               .channel_counters_freq_hz = 88000,
+               .max_probe_resp_desc_thres = 0,
+               .fw = {
+                       .dir = QCA6174_HW_2_1_FW_DIR,
+                       .fw = QCA6174_HW_2_1_FW_FILE,
+                       .otp = QCA6174_HW_2_1_OTP_FILE,
+                       .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+                       .board_size = QCA6174_BOARD_DATA_SZ,
+                       .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+               },
+       },
+       {
+               .id = QCA6174_HW_2_1_VERSION,
+               .dev_id = QCA6174_2_1_DEVICE_ID,
                .name = "qca6174 hw2.1",
                .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -86,6 +106,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        },
        {
                .id = QCA6174_HW_3_0_VERSION,
+               .dev_id = QCA6174_2_1_DEVICE_ID,
                .name = "qca6174 hw3.0",
                .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -103,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        },
        {
                .id = QCA6174_HW_3_2_VERSION,
+               .dev_id = QCA6174_2_1_DEVICE_ID,
                .name = "qca6174 hw3.2",
                .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -121,6 +143,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        },
        {
                .id = QCA99X0_HW_2_0_DEV_VERSION,
+               .dev_id = QCA99X0_2_0_DEVICE_ID,
                .name = "qca99x0 hw2.0",
                .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
@@ -139,10 +162,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        },
        {
                .id = QCA9377_HW_1_0_DEV_VERSION,
+               .dev_id = QCA9377_1_0_DEVICE_ID,
                .name = "qca9377 hw1.0",
                .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
-               .uart_pin = 7,
+               .uart_pin = 6,
                .otp_exe_param = 0,
+               .channel_counters_freq_hz = 88000,
+               .max_probe_resp_desc_thres = 0,
+               .fw = {
+                       .dir = QCA9377_HW_1_0_FW_DIR,
+                       .fw = QCA9377_HW_1_0_FW_FILE,
+                       .otp = QCA9377_HW_1_0_OTP_FILE,
+                       .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+                       .board_size = QCA9377_BOARD_DATA_SZ,
+                       .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+               },
+       },
+       {
+               .id = QCA9377_HW_1_1_DEV_VERSION,
+               .dev_id = QCA9377_1_0_DEVICE_ID,
+               .name = "qca9377 hw1.1",
+               .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+               .uart_pin = 6,
+               .otp_exe_param = 0,
+               .channel_counters_freq_hz = 88000,
+               .max_probe_resp_desc_thres = 0,
                .fw = {
                        .dir = QCA9377_HW_1_0_FW_DIR,
                        .fw = QCA9377_HW_1_0_FW_FILE,
@@ -1263,7 +1307,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
        for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
                hw_params = &ath10k_hw_params_list[i];
 
-               if (hw_params->id == ar->target_version)
+               if (hw_params->id == ar->target_version &&
+                   hw_params->dev_id == ar->dev_id)
                        break;
        }
 
index 018c64f4fd25b8cf1ab8db0f3b21e65962a5f45e..858d75f49a9ffe5b1ea9e511d834bd3a5815e3de 100644 (file)
@@ -636,6 +636,7 @@ struct ath10k {
 
        struct ath10k_hw_params {
                u32 id;
+               u16 dev_id;
                const char *name;
                u32 patch_load_addr;
                int uart_pin;
index 39966a05c1cc20f5b69e7111d5ddb711a070c5e9..713c2bcea17825f701b2be37a9cf82badef11caf 100644 (file)
 
 #define ATH10K_FW_DIR                  "ath10k"
 
+#define QCA988X_2_0_DEVICE_ID   (0x003c)
+#define QCA6164_2_1_DEVICE_ID   (0x0041)
+#define QCA6174_2_1_DEVICE_ID   (0x003e)
+#define QCA99X0_2_0_DEVICE_ID   (0x0040)
+#define QCA9377_1_0_DEVICE_ID   (0x0042)
+
 /* QCA988X 1.0 definitions (unsupported) */
 #define QCA988X_HW_1_0_CHIP_ID_REV     0x0
 
 #define QCA6174_HW_3_0_VERSION         0x05020000
 #define QCA6174_HW_3_2_VERSION         0x05030000
 
+/* QCA9377 target BMI version signatures */
+#define QCA9377_HW_1_0_DEV_VERSION     0x05020000
+#define QCA9377_HW_1_1_DEV_VERSION     0x05020001
+
 enum qca6174_pci_rev {
        QCA6174_PCI_REV_1_1 = 0x11,
        QCA6174_PCI_REV_1_3 = 0x13,
@@ -60,6 +70,11 @@ enum qca6174_chip_id_rev {
        QCA6174_HW_3_2_CHIP_ID_REV = 10,
 };
 
+enum qca9377_chip_id_rev {
+       QCA9377_HW_1_0_CHIP_ID_REV = 0x0,
+       QCA9377_HW_1_1_CHIP_ID_REV = 0x1,
+};
+
 #define QCA6174_HW_2_1_FW_DIR          "ath10k/QCA6174/hw2.1"
 #define QCA6174_HW_2_1_FW_FILE         "firmware.bin"
 #define QCA6174_HW_2_1_OTP_FILE                "otp.bin"
@@ -85,8 +100,6 @@ enum qca6174_chip_id_rev {
 #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
 
 /* QCA9377 1.0 definitions */
-#define QCA9377_HW_1_0_DEV_VERSION     0x05020001
-#define QCA9377_HW_1_0_CHIP_ID_REV     0x1
 #define QCA9377_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA9377/hw1.0"
 #define QCA9377_HW_1_0_FW_FILE         "firmware.bin"
 #define QCA9377_HW_1_0_OTP_FILE        "otp.bin"
index a7411fe90cc43ebcef7590538cb3d6cb73a6a13a..95a55405ebf0d1f8348cbfa54f567e780c7dfc13 100644 (file)
@@ -4225,7 +4225,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
 
 static u32 get_nss_from_chainmask(u16 chain_mask)
 {
-       if ((chain_mask & 0x15) == 0x15)
+       if ((chain_mask & 0xf) == 0xf)
                return 4;
        else if ((chain_mask & 0x7) == 0x7)
                return 3;
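
The old mask 0x15 is binary 10101 — it tests chains 0, 2 and 4 — so a normal 4-chain mask of 0xf could never satisfy it and the function fell through to 3 spatial streams. A self-contained illustration of the corrected checks (userspace demo, not driver code):

#include <assert.h>

static unsigned int get_nss(unsigned int chain_mask)
{
	if ((chain_mask & 0xf) == 0xf)	/* chains 0-3 all present */
		return 4;
	if ((chain_mask & 0x7) == 0x7)	/* chains 0-2 */
		return 3;
	if ((chain_mask & 0x3) == 0x3)	/* chains 0-1 */
		return 2;
	return 1;
}

int main(void)
{
	/* With the old test, (0xf & 0x15) == 0x5 != 0x15, so 0xf yielded 3. */
	assert(get_nss(0xf) == 4);
	assert(get_nss(0x7) == 3);
	return 0;
}
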
index 3fca200b986ca75baa0de2568d36460be709d834..930785a724e1e50705d55e6051d9f53c595ef5ed 100644 (file)
@@ -57,12 +57,6 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 #define ATH10K_PCI_TARGET_WAIT 3000
 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
 
-#define QCA988X_2_0_DEVICE_ID  (0x003c)
-#define QCA6164_2_1_DEVICE_ID  (0x0041)
-#define QCA6174_2_1_DEVICE_ID  (0x003e)
-#define QCA99X0_2_0_DEVICE_ID  (0x0040)
-#define QCA9377_1_0_DEVICE_ID  (0x0042)
-
 static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -92,7 +86,9 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
 
        { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+
        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
+       { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
 };
 
 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -111,8 +107,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 
-static const struct ce_attr host_ce_config_wlan[] = {
+static struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
@@ -128,7 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
-               .recv_cb = ath10k_pci_htc_rx_cb,
+               .recv_cb = ath10k_pci_htt_htc_rx_cb,
        },
 
        /* CE2: target->host WMI */
@@ -217,7 +214,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
 };
 
 /* Target firmware's Copy Engine configuration. */
-static const struct ce_pipe_config target_ce_config_wlan[] = {
+static struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = __cpu_to_le32(0),
@@ -330,7 +327,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
  * This table is derived from the CE_PCI TABLE, above.
  * It is passed to the Target at startup for use by firmware.
  */
-static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+static struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
@@ -1208,6 +1205,16 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
 }
 
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       /* CE4 polling needs to be done whenever the CE pipe that transports
+        * HTT Rx (target->host) is processed.
+        */
+       ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+       ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
 /* Called by lower (CE) layer when a send to HTT Target completes. */
 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
 {
@@ -2027,6 +2034,29 @@ static int ath10k_pci_init_config(struct ath10k *ar)
        return 0;
 }
 
+static void ath10k_pci_override_ce_config(struct ath10k *ar)
+{
+       struct ce_attr *attr;
+       struct ce_pipe_config *config;
+
+       /* For QCA6174 we're overriding the Copy Engine 5 configuration,
+        * since it is currently used for another feature.
+        */
+
+       /* Override Host's Copy Engine 5 configuration */
+       attr = &host_ce_config_wlan[5];
+       attr->src_sz_max = 0;
+       attr->dest_nentries = 0;
+
+       /* Override Target firmware's Copy Engine configuration */
+       config = &target_ce_config_wlan[5];
+       config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
+       config->nbytes_max = __cpu_to_le32(2048);
+
+       /* Map from service/endpoint to Copy Engine */
+       target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
+}
+
 static int ath10k_pci_alloc_pipes(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -3020,6 +3050,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                goto err_core_destroy;
        }
 
+       if (QCA_REV_6174(ar))
+               ath10k_pci_override_ce_config(ar);
+
        ret = ath10k_pci_alloc_pipes(ar);
        if (ret) {
                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
index 1a73c7a1da77d0e0fe16fbdc6fb866d6c65226ee..bf88ec3a65fab3e04aab4398185236bdb955126b 100644 (file)
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  17
+#define IWL7260_UCODE_API_MAX  19
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   13
index 0116e5a4c39389ac4123100bf1486d96c11ad10a..9bcc0bf937d8810f42da44656cf6bc7d11df4f5a 100644 (file)
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  17
+#define IWL8000_UCODE_API_MAX  19
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   13
index 85ae902df7c08d9d1d0ad4611e7947ace08bf0f7..29ae58ebf223fa82be3e072ea4ac000e62d54884 100644 (file)
@@ -309,9 +309,9 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
         * to transmit packets to the AP, i.e. the PTK.
         */
        if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
-               key->hw_key_idx = 0;
                mvm->ptk_ivlen = key->iv_len;
                mvm->ptk_icvlen = key->icv_len;
+               ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
        } else {
                /*
                 * firmware only supports TSC/RSC for a single key,
@@ -319,12 +319,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                 * with new ones -- this relies on mac80211 doing
                 * list_add_tail().
                 */
-               key->hw_key_idx = 1;
                mvm->gtk_ivlen = key->iv_len;
                mvm->gtk_icvlen = key->icv_len;
+               ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
        }
 
-       ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
        data->error = ret != 0;
 out_unlock:
        mutex_unlock(&mvm->mutex);
@@ -772,9 +771,6 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
         */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
 
-       /* We reprogram keys and shouldn't allocate new key indices */
-       memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
-
        mvm->ptk_ivlen = 0;
        mvm->ptk_icvlen = 0;
        mvm->ptk_ivlen = 0;
index 1fb684693040eac8130cf62c348b0bf96dd4f005..e88afac51c5d69fe5d6c8f4e032cdcf95f48a168 100644 (file)
@@ -2941,6 +2941,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
+       u8 key_offset;
 
        if (iwlwifi_mod_params.sw_crypto) {
                IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
@@ -3006,10 +3007,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                        break;
                }
 
+               /* in HW restart reuse the index, otherwise request a new one */
+               if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+                       key_offset = key->hw_key_idx;
+               else
+                       key_offset = STA_KEY_IDX_INVALID;
+
                IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
-               ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
-                                         test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
-                                                  &mvm->status));
+               ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
                if (ret) {
                        IWL_WARN(mvm, "set key failed\n");
                        /*
index 300a249486e4df2398738b62c53a7870f87a757b..354acbde088e9e6fad0927a9ef73787ab235558c 100644 (file)
@@ -1201,7 +1201,8 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
        return max_offs;
 }
 
-static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
+static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1218,8 +1219,21 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
         * station ID, then use AP's station ID.
         */
        if (vif->type == NL80211_IFTYPE_STATION &&
-           mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
-               return mvmvif->ap_sta_id;
+           mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+               u8 sta_id = mvmvif->ap_sta_id;
+
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                               lockdep_is_held(&mvm->mutex));
+               /*
+                * It is possible that the 'sta' parameter is NULL,
+                * for example when a GTK is removed - the sta_id will then
+                * be the AP ID, and no station was passed by mac80211.
+                */
+               if (IS_ERR_OR_NULL(sta))
+                       return IWL_MVM_STATION_COUNT;
+
+               return sta_id;
+       }
 
        return IWL_MVM_STATION_COUNT;
 }
@@ -1227,7 +1241,8 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
                                struct iwl_mvm_sta *mvm_sta,
                                struct ieee80211_key_conf *keyconf, bool mcast,
-                               u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
+                               u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+                               u8 key_offset)
 {
        struct iwl_mvm_add_sta_key_cmd cmd = {};
        __le16 key_flags;
@@ -1269,7 +1284,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
        if (mcast)
                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
 
-       cmd.key_offset = keyconf->hw_key_idx;
+       cmd.key_offset = key_offset;
        cmd.key_flags = key_flags;
        cmd.sta_id = sta_id;
 
@@ -1360,6 +1375,7 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
                                 struct ieee80211_key_conf *keyconf,
+                                u8 key_offset,
                                 bool mcast)
 {
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -1375,17 +1391,17 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
-                                          seq.tkip.iv32, p1k, 0);
+                                          seq.tkip.iv32, p1k, 0, key_offset);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
-                                          0, NULL, 0);
+                                          0, NULL, 0, key_offset);
                break;
        default:
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
-                                          0, NULL, 0);
+                                          0, NULL, 0, key_offset);
        }
 
        return ret;
@@ -1433,7 +1449,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
                        struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta,
                        struct ieee80211_key_conf *keyconf,
-                       bool have_key_offset)
+                       u8 key_offset)
 {
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
@@ -1443,7 +1459,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvm->mutex);
 
        /* Get the station id from the mvm local station table */
-       sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+       sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
        if (sta_id == IWL_MVM_STATION_COUNT) {
                IWL_ERR(mvm, "Failed to find station id\n");
                return -EINVAL;
@@ -1470,18 +1486,25 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
        if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
                return -EINVAL;
 
-       if (!have_key_offset) {
-               /*
-                * The D3 firmware hardcodes the PTK offset to 0, so we have to
-                * configure it there. As a result, this workaround exists to
-                * let the caller set the key offset (hw_key_idx), see d3.c.
-                */
-               keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
-               if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+       /* If the key_offset is not pre-assigned, we need to find a
+        * new offset to use.  In normal cases, the offset is not
+        * pre-assigned, but during HW_RESTART we want to reuse the
+        * same indices, so we pass them when this function is called.
+        *
+        * In D3 entry, we need to hardcode the indices (because the
+        * firmware hardcodes the PTK offset to 0).  In this case, we
+        * need to make sure we don't overwrite the hw_key_idx in the
+        * keyconf structure, because otherwise we cannot configure
+        * the original ones back when resuming.
+        */
+       if (key_offset == STA_KEY_IDX_INVALID) {
+               key_offset = iwl_mvm_set_fw_key_idx(mvm);
+               if (key_offset == STA_KEY_IDX_INVALID)
                        return -ENOSPC;
+               keyconf->hw_key_idx = key_offset;
        }
 
-       ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
+       ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
        if (ret) {
                __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
                goto end;
@@ -1495,7 +1518,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
         */
        if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
            keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
-               ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
+               ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
+                                           key_offset, !mcast);
                if (ret) {
                        __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
                        __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
@@ -1521,7 +1545,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvm->mutex);
 
        /* Get the station id from the mvm local station table */
-       sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+       sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
 
        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                      keyconf->keyidx, sta_id);
@@ -1547,24 +1571,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                return 0;
        }
 
-       /*
-        * It is possible that the 'sta' parameter is NULL, and thus
-        * there is a need to retrieve the sta from the local station table,
-        * for example when a GTK is removed (where the sta_id will then be
-        * the AP ID, and no station was passed by mac80211.)
-        */
-       if (!sta) {
-               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
-                                               lockdep_is_held(&mvm->mutex));
-               if (!sta) {
-                       IWL_ERR(mvm, "Invalid station id\n");
-                       return -EINVAL;
-               }
-       }
-
-       if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
-               return -EINVAL;
-
        ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
        if (ret)
                return ret;
@@ -1584,7 +1590,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
                             u16 *phase1key)
 {
        struct iwl_mvm_sta *mvm_sta;
-       u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+       u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
 
        if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
@@ -1602,7 +1608,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
 
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
-                            iv32, phase1key, CMD_ASYNC);
+                            iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
        rcu_read_unlock();
 }
 
index eedb215eba3f6efd08f2ca387de80e46ec39bfb2..0631cc0a6d3c908d71c93270a15679270272c6c7 100644 (file)
@@ -365,8 +365,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
                        struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta,
-                       struct ieee80211_key_conf *key,
-                       bool have_key_offset);
+                       struct ieee80211_key_conf *keyconf,
+                       u8 key_offset);
 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                           struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta,
index 644b58bc5226c52b3cdee0a24b9a392c25e8ac02..639761fb2bfb2f8a1dbd5925984e6ad0c15f9587 100644 (file)
@@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 8000 Series */
        {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
@@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
        {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
index 6e9418ed90c289bee5b7f2dfc478f847dfc7ca68..bbb789f8990b10944d0a8eed60c7700af2a4121e 100644 (file)
@@ -2272,7 +2272,7 @@ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       if (!rtlpci->int_clear)
+       if (rtlpci->int_clear)
                rtl8821ae_clear_interrupt(hw);/*clear it here first*/
 
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
index 8ee141a55bc5cc6b566e79dde58cdb05583e7fdf..142bdff4ed605b6e148cdbac588f80020b2c76c9 100644 (file)
@@ -448,7 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
-MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n");
+MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
 
index 219dc206fa5f24dc6fdffb3a2a6ec17df20d5281..a5fe239525868b1b86ec2d5337cfdb5e324424d6 100644 (file)
@@ -1,4 +1,5 @@
 
 obj-$(CONFIG_BLK_DEV_NVME)     += nvme.o
 
-nvme-y         += pci.o scsi.o lightnvm.o
+lightnvm-$(CONFIG_NVM) := lightnvm.o
+nvme-y         += pci.o scsi.o $(lightnvm-y)
index 9202d1a468d049f30284f39e6c2894e0711b3add..06c3364102350e7f90d3a357bfdfc54b728f724f 100644 (file)
@@ -22,8 +22,6 @@
 
 #include "nvme.h"
 
-#ifdef CONFIG_NVM
-
 #include <linux/nvme.h>
 #include <linux/bitops.h>
 #include <linux/lightnvm.h>
@@ -357,10 +355,11 @@ out:
        return ret;
 }
 
-static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
+static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                int nr_blocks, nvm_bb_update_fn *update_bbtbl,
                                void *priv)
 {
+       struct request_queue *q = nvmdev->q;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_dev *dev = ns->dev;
        struct nvme_nvm_command c = {};
@@ -404,6 +403,7 @@ static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
                goto out;
        }
 
+       ppa = dev_to_generic_addr(nvmdev, ppa);
        ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
        if (ret) {
                ret = -EINTR;
@@ -571,31 +571,27 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
        nvm_unregister(disk_name);
 }
 
+/* Move to a shared location when used in multiple places. */
+#define PCI_VENDOR_ID_CNEX 0x1d1d
+#define PCI_DEVICE_ID_CNEX_WL 0x2807
+#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
+
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
        struct nvme_dev *dev = ns->dev;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
-       if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 &&
+       if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
+                               pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
                                                        id->vs[0] == 0x1)
                return 1;
 
        /* CNEX Labs - PCI ID + Vendor specific bit */
-       if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 &&
+       if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
+                               pdev->device == PCI_DEVICE_ID_CNEX_WL &&
                                                        id->vs[0] == 0x1)
                return 1;
 
        return 0;
 }
-#else
-int nvme_nvm_register(struct request_queue *q, char *disk_name)
-{
-       return 0;
-}
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
-int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
-{
-       return 0;
-}
-#endif /* CONFIG_NVM */
index fdb4e5bad9ac73c59c67e70100aba411d124ab2e..044253dca30a433549a37ec7e28c3a6c161d38a5 100644 (file)
@@ -136,8 +136,22 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
 int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
 int nvme_sg_get_version_num(int __user *ip);
 
+#ifdef CONFIG_NVM
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
 int nvme_nvm_register(struct request_queue *q, char *disk_name);
 void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
+#else
+static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
+{
+       return 0;
+}
+
+static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}
+
+static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+       return 0;
+}
+#endif /* CONFIG_NVM */
 
 #endif /* _NVME_H */
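
Hoisting the #ifdef into the header is the standard kernel idiom for optional features: real prototypes live under the config symbol and static inline no-ops take their place otherwise, so every caller compiles unchanged whether or not the feature is built. The generic shape, with hypothetical names:

#ifdef CONFIG_FOO
int foo_register(struct request_queue *q, char *disk_name);
void foo_unregister(struct request_queue *q, char *disk_name);
#else
static inline int foo_register(struct request_queue *q, char *disk_name)
{
	return 0;	/* feature compiled out: succeed silently */
}

static inline void foo_unregister(struct request_queue *q, char *disk_name)
{
}
#endif
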
index f3b53af789efccbddfedc0b37319dcaa1c264bfd..9e294ff4e652a2bb8daa9e3f4a94cc09e8bb8b88 100644 (file)
@@ -2708,6 +2708,18 @@ static int nvme_dev_map(struct nvme_dev *dev)
        dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
        dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
        dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+       /*
+        * Temporary fix for the Apple controller found in the MacBook8,1 and
+        * some MacBook7,1 to avoid controller resets and data loss.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
+               dev->q_depth = 2;
+               dev_warn(dev->dev, "detected Apple NVMe controller, set "
+                       "queue depth=%u to work around controller resets\n",
+                       dev->q_depth);
+       }
+
        if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
                dev->cmb = nvme_map_cmb(dev);
 
index cd53fe4a0c8684900a94fb4bee76f9bd668e3837..9582c5703b3c905486da173d08a2f986883bda26 100644 (file)
@@ -485,9 +485,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
        int rone;
        u64 offset = OF_BAD_ADDR;
 
-       /* Normally, an absence of a "ranges" property means we are
+       /*
+        * Normally, an absence of a "ranges" property means we are
         * crossing a non-translatable boundary, and thus the addresses
-        * below the current not cannot be converted to CPU physical ones.
+        * below the current cannot be converted to CPU physical ones.
         * Unfortunately, while this is very clear in the spec, it's not
         * what Apple understood, and they do have things like /uni-n or
         * /ht nodes with no "ranges" property and a lot of perfectly
index d2430298a309a86d3f2110623d3728b9dedb228f..655f79db7899ffd0628714d51203847630a8075c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/initrd.h>
 #include <linux/memblock.h>
+#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/of_reserved_mem.h>
@@ -436,6 +437,8 @@ static void *kernel_tree_alloc(u64 size, u64 align)
        return kzalloc(size, GFP_KERNEL);
 }
 
+static DEFINE_MUTEX(of_fdt_unflatten_mutex);
+
 /**
  * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
  *
@@ -447,7 +450,9 @@ static void *kernel_tree_alloc(u64 size, u64 align)
 void of_fdt_unflatten_tree(const unsigned long *blob,
                        struct device_node **mynodes)
 {
+       mutex_lock(&of_fdt_unflatten_mutex);
        __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc);
+       mutex_unlock(&of_fdt_unflatten_mutex);
 }
 EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
 
@@ -1041,7 +1046,7 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
                                        phys_addr_t size, bool nomap)
 {
-       pr_err("Reserved memory not supported, ignoring range 0x%pa - 0x%pa%s\n",
+       pr_err("Reserved memory not supported, ignoring range %pa - %pa%s\n",
                  &base, &size, nomap ? " (nomap)" : "");
        return -ENOSYS;
 }
index 902b89be7217137726be7eb1ab19263c0372635a..4fa916dffc91924a0354c3c779ae5bf41cb79cec 100644 (file)
@@ -53,7 +53,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
  * Returns a pointer to the interrupt parent node, or NULL if the interrupt
  * parent could not be determined.
  */
-static struct device_node *of_irq_find_parent(struct device_node *child)
+struct device_node *of_irq_find_parent(struct device_node *child)
 {
        struct device_node *p;
        const __be32 *parp;
@@ -77,6 +77,7 @@ static struct device_node *of_irq_find_parent(struct device_node *child)
 
        return p;
 }
+EXPORT_SYMBOL_GPL(of_irq_find_parent);
 
 /**
  * of_irq_parse_raw - Low level interrupt tree parsing
index be77e75c587db95c55a51e556f228f97810e051c..1a3556a9e9ea126b451b2a3c68e0407135af242f 100644 (file)
@@ -206,7 +206,13 @@ static int __init __rmem_cmp(const void *a, const void *b)
 {
        const struct reserved_mem *ra = a, *rb = b;
 
-       return ra->base - rb->base;
+       if (ra->base < rb->base)
+               return -1;
+
+       if (ra->base > rb->base)
+               return 1;
+
+       return 0;
 }
 
 static void __init __rmem_check_for_overlap(void)
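
The dropped comparator returned `ra->base - rb->base`, which truncates a 64-bit phys_addr_t difference to int: deltas that are a multiple of 2^32 compare as equal, and other large deltas can flip sign, so sort() could misorder the reserved regions. A userspace demonstration of the truncation (the wrap shown is what common compilers do; the out-of-range conversion is formally implementation-defined):

#include <assert.h>
#include <stdint.h>

static int bad_cmp(uint64_t a, uint64_t b)
{
	return a - b;			/* 64-bit difference truncated to int */
}

static int good_cmp(uint64_t a, uint64_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

int main(void)
{
	uint64_t lo = 0, hi = 1ULL << 32;	/* regions 4 GiB apart */

	assert(bad_cmp(hi, lo) == 0);		/* truncated: reports "equal" */
	assert(good_cmp(hi, lo) == 1);		/* correct ordering */
	return 0;
}
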
index e5dda38bdde5e1735a50b4de9fcd2cd6307d13c5..99da549d5d06a067b58d3bbe08c39041a9d90605 100644 (file)
 #define TLP_CFG_DW2(bus, devfn, offset)        \
                                (((bus) << 24) | ((devfn) << 16) | (offset))
 #define TLP_REQ_ID(bus, devfn)         (((bus) << 8) | (devfn))
+#define TLP_COMP_STATUS(s)             (((s) >> 12) & 7)
 #define TLP_HDR_SIZE                   3
 #define TLP_LOOP                       500
+#define RP_DEVFN                       0
 
 #define INTX_NUM                       4
 
@@ -166,34 +168,41 @@ static bool altera_pcie_valid_config(struct altera_pcie *pcie,
 
 static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
 {
-       u8 loop;
+       int i;
        bool sop = 0;
        u32 ctrl;
        u32 reg0, reg1;
+       u32 comp_status = 1;
 
        /*
         * Minimum 2 loops to read TLP headers and 1 loop to read data
         * payload.
         */
-       for (loop = 0; loop < TLP_LOOP; loop++) {
+       for (i = 0; i < TLP_LOOP; i++) {
                ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
                if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
                        reg0 = cra_readl(pcie, RP_RXCPL_REG0);
                        reg1 = cra_readl(pcie, RP_RXCPL_REG1);
 
-                       if (ctrl & RP_RXCPL_SOP)
+                       if (ctrl & RP_RXCPL_SOP) {
                                sop = true;
+                               comp_status = TLP_COMP_STATUS(reg1);
+                       }
 
                        if (ctrl & RP_RXCPL_EOP) {
+                               if (comp_status)
+                                       return PCIBIOS_DEVICE_NOT_FOUND;
+
                                if (value)
                                        *value = reg0;
+
                                return PCIBIOS_SUCCESSFUL;
                        }
                }
                udelay(5);
        }
 
-       return -ENOENT;
+       return PCIBIOS_DEVICE_NOT_FOUND;
 }
 
 static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
@@ -233,7 +242,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
        else
                headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
 
-       headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn),
+       headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
                                        TLP_READ_TAG, byte_en);
        headers[2] = TLP_CFG_DW2(bus, devfn, where);
 
@@ -253,7 +262,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
        else
                headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
 
-       headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn),
+       headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
                                        TLP_WRITE_TAG, byte_en);
        headers[2] = TLP_CFG_DW2(bus, devfn, where);
 
@@ -458,7 +467,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
        struct device_node *node = dev->of_node;
 
        /* Setup INTx */
-       pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM,
+       pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1,
                                        &intx_domain_ops, pcie);
        if (!pcie->irq_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
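
TLP_COMP_STATUS() pulls the 3-bit Completion Status field out of the second header dword of the received completion, and a non-zero status — e.g. Unsupported Request when probing an absent device — is now reported as PCIBIOS_DEVICE_NOT_FOUND instead of being mistaken for valid config data. A decoding helper for illustration only (the status encodings are the standard PCIe values; the bit placement follows the driver's macro):

#include <linux/types.h>

#define TLP_COMP_STATUS(s)	(((s) >> 12) & 7)

static const char *comp_status_str(u32 reg1)
{
	switch (TLP_COMP_STATUS(reg1)) {
	case 0:
		return "Successful Completion";
	case 1:
		return "Unsupported Request";
	case 2:
		return "Configuration Request Retry Status";
	case 4:
		return "Completer Abort";
	default:
		return "Reserved";
	}
}
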
index 53e463244bb7e35ac368ff088a4976c2153fce3d..7eaa4c87fec71c8dd792ccffc919b322711d91dd 100644 (file)
@@ -54,7 +54,7 @@ static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
        struct irq_domain *domain;
 
        domain = pci_msi_get_domain(dev);
-       if (domain)
+       if (domain && irq_domain_is_hierarchy(domain))
                return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
 
        return arch_setup_msi_irqs(dev, nvec, type);
@@ -65,7 +65,7 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
        struct irq_domain *domain;
 
        domain = pci_msi_get_domain(dev);
-       if (domain)
+       if (domain && irq_domain_is_hierarchy(domain))
                pci_msi_domain_free_irqs(domain, dev);
        else
                arch_teardown_msi_irqs(dev);
index 4446fcb5effd347d87fb6314473297acc4558769..d7ffd66814bb51c14cbdf849fcd6a0a33f30dba7 100644 (file)
@@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev)
        pci_dev->state_saved = false;
        pci_dev->no_d3cold = false;
        error = pm->runtime_suspend(dev);
-       suspend_report_result(pm->runtime_suspend, error);
-       if (error)
+       if (error) {
+               /*
+                * -EBUSY and -EAGAIN are used to request the runtime PM core
+                * to schedule a new suspend, so log the event only with debug
+                * log level.
+                */
+               if (error == -EBUSY || error == -EAGAIN)
+                       dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
+                               pm->runtime_suspend, error);
+               else
+                       dev_err(dev, "can't suspend (%pf returned %d)\n",
+                               pm->runtime_suspend, error);
+
                return error;
+       }
        if (!pci_dev->d3cold_allowed)
                pci_dev->no_d3cold = true;
 
index b422e4ed73f4266f7a8d13db4ec1096395163fc3..312c78b27a3206c813c85912e14b570ffa43a0a6 100644 (file)
@@ -5,8 +5,6 @@
 config PINCTRL
        bool
 
-if PINCTRL
-
 menu "Pin controllers"
        depends on PINCTRL
 
@@ -274,5 +272,3 @@ config PINCTRL_TB10X
        select GPIOLIB
 
 endmenu
-
-endif
index 88a7fac11bd499f72c831b91f0f6c05bd29b19f7..acaf84cadca3fcac638656638a3ea8310011c6da 100644 (file)
@@ -538,8 +538,10 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
                func->groups[i] = child->name;
                grp = &info->groups[grp_index++];
                ret = imx1_pinctrl_parse_groups(child, grp, info, i++);
-               if (ret == -ENOMEM)
+               if (ret == -ENOMEM) {
+                       of_node_put(child);
                        return ret;
+               }
        }
 
        return 0;
@@ -582,8 +584,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
 
        for_each_child_of_node(np, child) {
                ret = imx1_pinctrl_parse_functions(child, info, ifunc++);
-               if (ret == -ENOMEM)
+               if (ret == -ENOMEM) {
+                       of_node_put(child);
                        return -ENOMEM;
+               }
        }
 
        return 0;
index f307f1d27d646fcd3954a9d497528fffbf16dc2b..5c717275a7fa805f370cdd68815e64f8982b142b 100644 (file)
@@ -747,7 +747,7 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
        reg_addr =  mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
        bit = BIT(offset & 0xf);
        regmap_read(pctl->regmap1, reg_addr, &read_val);
-       return !!(read_val & bit);
+       return !(read_val & bit);
 }
 
 static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -757,12 +757,8 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
        unsigned int read_val = 0;
        struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
 
-       if (mtk_gpio_get_direction(chip, offset))
-               reg_addr = mtk_get_port(pctl, offset) +
-                       pctl->devdata->dout_offset;
-       else
-               reg_addr = mtk_get_port(pctl, offset) +
-                       pctl->devdata->din_offset;
+       reg_addr = mtk_get_port(pctl, offset) +
+               pctl->devdata->din_offset;
 
        bit = BIT(offset & 0xf);
        regmap_read(pctl->regmap1, reg_addr, &read_val);
@@ -997,6 +993,7 @@ static struct gpio_chip mtk_gpio_chip = {
        .owner                  = THIS_MODULE,
        .request                = gpiochip_generic_request,
        .free                   = gpiochip_generic_free,
+       .get_direction          = mtk_gpio_get_direction,
        .direction_input        = mtk_gpio_direction_input,
        .direction_output       = mtk_gpio_direction_output,
        .get                    = mtk_gpio_get,
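
Two polarity details drive this fix: gpiolib's .get_direction returns 0 for output and 1 for input, while this controller's direction register uses a set bit for output, so the raw bit must be inverted with a single `!` (the old `!!` reported the opposite direction); and with .get_direction wired up, mtk_gpio_get() can always read the input-data register, which reflects the real pin level either way. A minimal sketch of the inversion (hypothetical helper):

#include <linux/bitops.h>
#include <linux/types.h>

/* dir_reg uses 1 = output; gpiolib wants 0 = output, 1 = input. */
static int demo_get_direction(u32 dir_reg, unsigned int offset)
{
	return !(dir_reg & BIT(offset & 0xf));
}
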
index d809c9eaa3231817512bdda858ad208d0f1138e4..19a3c3bc2f1f7213d31ea8f23ef56b815031a3a0 100644 (file)
@@ -672,7 +672,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        pctrl->dev = &pdev->dev;
-       pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+       pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
 
        pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
        if (!pctrl->regmap) {
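
of_device_get_match_data() hands back a pointer-sized value, and casting it to plain unsigned truncates the upper bits on LP64 targets (and trips GCC's pointer-to-int-size warning); unsigned long is the pointer-width integer to round-trip through. A compilable sketch with hypothetical driver names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-pinctrl", .data = (void *)44UL },
	{ }	/* sentinel */
};

static int demo_probe(struct platform_device *pdev)
{
	/* Round-trip through unsigned long, never unsigned int. */
	unsigned long npins = (unsigned long)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "controller has %lu pins\n", npins);
	return 0;
}
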
index 8982027de8e8b528f026f38d58ccdf692901635c..b868ef1766a0910e50dbfe43b80e650cea19739f 100644 (file)
@@ -763,7 +763,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        pctrl->dev = &pdev->dev;
-       pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+       pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
 
        pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
        if (!pctrl->regmap) {
index e7deb51de7dc4fd4c76bc1b7483ac2127e1da850..9842bb1067969701a8879b3de481f3da6d2349a7 100644 (file)
        PORT_GP_12(5, fn, sfx)
 
 #undef _GP_DATA
-#define _GP_DATA(bank, pin, name, sfx)                                 \
+#define _GP_DATA(bank, pin, name, sfx, cfg)                            \
        PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT)
 
-#define _GP_INOUTSEL(bank, pin, name, sfx)     name##_IN, name##_OUT
-#define _GP_INDT(bank, pin, name, sfx)         name##_DATA
+#define _GP_INOUTSEL(bank, pin, name, sfx, cfg)        name##_IN, name##_OUT
+#define _GP_INDT(bank, pin, name, sfx, cfg)    name##_DATA
 #define GP_INOUTSEL(bank)      PORT_GP_32_REV(bank, _GP_INOUTSEL, unused)
 #define GP_INDT(bank)          PORT_GP_32_REV(bank, _GP_INDT, unused)
 
index 8b3130f22b42b334ff0d22718e46484012fb2c2b..9e03d158f4119133f09b0979d8b5b70e1ae63c06 100644 (file)
@@ -1478,6 +1478,8 @@ module_init(remoteproc_init);
 
 static void __exit remoteproc_exit(void)
 {
+       ida_destroy(&rproc_dev_index);
+
        rproc_exit_debugfs();
 }
 module_exit(remoteproc_exit);
index 9d30809bb407174214e1be674b323b0dd88c0a8e..916af5096f57b93b7f7e148d28716329b31c7be6 100644 (file)
@@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
        char buf[10];
        int ret;
 
-       if (count > sizeof(buf))
+       if (count < 1 || count > sizeof(buf))
                return count;
 
        ret = copy_from_user(buf, user_buf, count);
index 5f692ae4074959e019a74eb0c81405cb2869195c..64eed87d34a87840d5da60d267383589541e1d73 100644 (file)
@@ -364,6 +364,7 @@ config SCSI_HPSA
        tristate "HP Smart Array SCSI driver"
        depends on PCI && SCSI
        select CHECK_SIGNATURE
+       select SCSI_SAS_ATTRS
        help
          This driver supports HP Smart Array Controllers (circa 2009).
          It is a SCSI alternative to the cciss driver, which is a block
@@ -499,6 +500,7 @@ config SCSI_ADVANSYS
        tristate "AdvanSys SCSI support"
        depends on SCSI
        depends on ISA || EISA || PCI
+       depends on ISA_DMA_API || !ISA
        help
          This is a driver for all SCSI host adapters manufactured by
          AdvanSys. It is documented in the kernel source in
index 519f9a4b3dadf39c07be7e61f20fc2481cddbd02..febbd83e2ecd9d6a6984500eb8cc02d6690c3984 100644 (file)
@@ -7803,7 +7803,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
                return ASC_BUSY;
        }
        scsiqp->sense_addr = cpu_to_le32(sense_addr);
-       scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+       scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
 
        /* Build ADV_SCSI_REQ_Q */
 
index 323982fd00c3ba985dfb59bf04b6eff04f86e9b7..82ac1cd818ac18e1310ba2abaede9c8b262f9976 100644 (file)
@@ -333,6 +333,17 @@ static void scsi_host_dev_release(struct device *dev)
                kfree(queuedata);
        }
 
+       if (shost->shost_state == SHOST_CREATED) {
+               /*
+                * Free the shost_dev device name here if scsi_host_alloc()
+                * and scsi_host_put() have been called but neither
+                * scsi_host_add() nor scsi_host_remove() has been called.
+                * This avoids that the memory allocated for the shost_dev
+                * name is leaked.
+                */
+                * This avoids leaking the memory allocated for the
+                * shost_dev name.
+
        scsi_destroy_command_freelist(shost);
        if (shost_use_blk_mq(shost)) {
                if (shost->tag_set.tags)
index 6a8f95808ee0f92027796651a6254e6c9382ca5a..a3860367b568aa06adf4290bf84717458c241e77 100644 (file)
@@ -8671,7 +8671,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
        if ((rc != 0)  || (c->err_info->CommandStatus != 0))
                goto errout;
 
-       if (*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
+       if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
                goto out;
 
 errout:
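
`*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING` is a logical AND — true whenever both operands are non-zero — so the branch was taken for any non-zero options word even with the caching bit clear; the bitwise `&` tests the actual bit. A userspace illustration (the flag value below is made up):

#include <assert.h>

#define DISABLE_RLD_CACHING	0x4	/* hypothetical bit value */

int main(void)
{
	unsigned int options = 0x1;	/* unrelated bit set */

	assert(options && DISABLE_RLD_CACHING);		/* logical AND: wrongly true */
	assert(!(options & DISABLE_RLD_CACHING));	/* bitwise AND: bit is clear */
	return 0;
}
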
index 29061467cc174e118848dd44244ff28bb198deb3..b736dbc8048530e6c2c8dae2a8f7e1aac896ce10 100644 (file)
@@ -71,3 +71,12 @@ config SCSI_MPT3SAS_MAX_SGE
        MAX_PHYS_SEGMENTS in most kernels.  However in SuSE kernels this
        can be 256. However, it may be decreased down to 16.  Decreasing this
        parameter will reduce memory requirements per controller instance.
+
+config SCSI_MPT2SAS
+       tristate "Legacy MPT2SAS config option"
+       default n
+       select SCSI_MPT3SAS
+       depends on PCI && SCSI
+       ---help---
+       Dummy config option for backwards compatibility: configure the MPT3SAS
+       driver instead.
index d95206b7e1167861531d2cd29c9ef27e7da82c25..9ab77b06434d19a61a92bb771aef0e70da0f31d8 100644 (file)
@@ -3905,8 +3905,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
         * We do not expose raid functionality to upper layer for warpdrive.
         */
        if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
-           && (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
-           scmd->cmd_len != 32)
+           && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
                mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
 
        smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
index 90fdf0e859e31fd9e72352d2ed19a889061e6ed4..675e7fab0796e4c1ebfc3e7e77aa109592a25c02 100644 (file)
@@ -758,7 +758,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
                        struct device_attribute *attr,
                        const char *buffer, size_t size)
 {
-       int val = 0;
+       unsigned int val = 0;
        struct mvs_info *mvi = NULL;
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -766,7 +766,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
        if (buffer == NULL)
                return size;
 
-       if (sscanf(buffer, "%d", &val) != 1)
+       if (sscanf(buffer, "%u", &val) != 1)
                return -EINVAL;
 
        if (val >= 0x10000) {
index eb0cc5475c451860d56c6f74679ce026f3808710..b6b4cfdd76201d76f26ca4a99c60aa49ec6b4c69 100644 (file)
@@ -433,7 +433,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
        if (off_in < QLA82XX_PCI_CRBSPACE)
                return -1;
 
-       *off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);
+       off_in -= QLA82XX_PCI_CRBSPACE;
 
        /* Try direct map */
        m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
@@ -443,6 +443,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
                return 0;
        }
        /* Not in direct map, use crb window */
+       *off_out = (void __iomem *)off_in;
        return 1;
 }
 
index dfcc45bb03b1f30e808e611a567c2f76cc734d3c..d09d60293c272663b5b9b84f27ce2fab3e61a6aa 100644 (file)
@@ -465,8 +465,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
             0} },
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-       {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
-           {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+       {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+           {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
+            0, 0, 0, 0, 0, 0} },
        {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
            vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
                      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
@@ -477,8 +478,8 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
            {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
             0} },
 /* 20 */
-       {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
-           {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+       {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
+           {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
            {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
index 83245391e956bc6f351a668473debeba6121b71b..054923e3393c6f2dffd0959fea6ee610ecf177ac 100644 (file)
@@ -701,9 +701,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
         * strings.
         */
        if (sdev->inquiry_len < 36) {
-               sdev_printk(KERN_INFO, sdev,
-                           "scsi scan: INQUIRY result too short (%d),"
-                           " using 36\n", sdev->inquiry_len);
+               if (!sdev->host->short_inquiry) {
+                       shost_printk(KERN_INFO, sdev->host,
+                                   "scsi scan: INQUIRY result too short (%d),"
+                                   " using 36\n", sdev->inquiry_len);
+                       sdev->host->short_inquiry = 1;
+               }
                sdev->inquiry_len = 36;
        }
 
index 8d2312239ae0cda01238ae549f0ae8007de233ea..21930c9ac9cd90caf6e5386b4495992fb3f8542d 100644 (file)
@@ -1102,6 +1102,14 @@ void __scsi_remove_device(struct scsi_device *sdev)
 {
        struct device *dev = &sdev->sdev_gendev;
 
+       /*
+        * This cleanup path is not reentrant, and while it is impossible
+        * to get a new reference with scsi_device_get(), someone can
+        * still hold a previously acquired one.
+        */
+       if (sdev->sdev_state == SDEV_DEL)
+               return;
+
        if (sdev->is_visible) {
                if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
                        return;
@@ -1110,7 +1118,9 @@ void __scsi_remove_device(struct scsi_device *sdev)
                device_unregister(&sdev->sdev_dev);
                transport_remove_device(dev);
                scsi_dh_remove_device(sdev);
-       }
+               device_del(dev);
+       } else
+               put_device(&sdev->sdev_dev);
 
        /*
         * Stop accepting new requests and wait until all queuecommand() and
@@ -1121,16 +1131,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
        blk_cleanup_queue(sdev->request_queue);
        cancel_work_sync(&sdev->requeue_work);
 
-       /*
-        * Remove the device after blk_cleanup_queue() has been called such
-        * a possible bdi_register() call with the same name occurs after
-        * blk_cleanup_queue() has called bdi_destroy().
-        */
-       if (sdev->is_visible)
-               device_del(dev);
-       else
-               put_device(&sdev->sdev_dev);
-
        if (sdev->host->hostt->slave_destroy)
                sdev->host->hostt->slave_destroy(sdev);
        transport_destroy_device(dev);
index 54519804c46a57b99ce680ee665b4fe28dd91148..3d22fc3e3c1a7ba6df90665f59cf10e8d38ef715 100644 (file)
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
        unsigned int max_blocks = 0;
 
        q->limits.discard_zeroes_data = 0;
-       q->limits.discard_alignment = sdkp->unmap_alignment *
-               logical_block_size;
-       q->limits.discard_granularity =
-               max(sdkp->physical_block_size,
-                   sdkp->unmap_granularity * logical_block_size);
+
+       /*
+        * When LBPRZ is reported, discard alignment and granularity
+        * must be fixed to the logical block size. Otherwise the block
+        * layer will drop misaligned portions of the request which can
+        * lead to data corruption. If LBPRZ is not set, we honor the
+        * device preference.
+        */
+       if (sdkp->lbprz) {
+               q->limits.discard_alignment = 0;
+               q->limits.discard_granularity = 1;
+       } else {
+               q->limits.discard_alignment = sdkp->unmap_alignment *
+                       logical_block_size;
+               q->limits.discard_granularity =
+                       max(sdkp->physical_block_size,
+                           sdkp->unmap_granularity * logical_block_size);
+       }
 
        sdkp->provisioning_mode = mode;
 
@@ -2321,11 +2334,8 @@ got_data:
                }
        }
 
-       if (sdkp->capacity > 0xffffffff) {
+       if (sdkp->capacity > 0xffffffff)
                sdp->use_16_for_rw = 1;
-               sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
-       } else
-               sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
 
        /* Rescale capacity to 512-byte units */
        if (sector_size == 4096)
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
        unsigned int sector_sz = sdkp->device->sector_size;
        const int vpd_len = 64;
-       u32 max_xfer_length;
        unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
 
        if (!buffer ||
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
            scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
                goto out;
 
-       max_xfer_length = get_unaligned_be32(&buffer[8]);
-       if (max_xfer_length)
-               sdkp->max_xfer_blocks = max_xfer_length;
-
        blk_queue_io_min(sdkp->disk->queue,
                         get_unaligned_be16(&buffer[6]) * sector_sz);
-       blk_queue_io_opt(sdkp->disk->queue,
-                        get_unaligned_be32(&buffer[12]) * sector_sz);
+
+       sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
+       sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
 
        if (buffer[3] == 0x3c) {
                unsigned int lba_count, desc_count;
@@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
        return 0;
 }
 
+static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
+{
+       return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
 /**
  *     sd_revalidate_disk - called the first time a new disk is seen,
  *     performs disk spin up, read_capacity, etc.
@@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
 {
        struct scsi_disk *sdkp = scsi_disk(disk);
        struct scsi_device *sdp = sdkp->device;
+       struct request_queue *q = sdkp->disk->queue;
        unsigned char *buffer;
-       unsigned int max_xfer;
+       unsigned int dev_max, rw_max;
 
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
                                      "sd_revalidate_disk\n"));
@@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
         */
        sd_set_flush_flag(sdkp);
 
-       max_xfer = sdkp->max_xfer_blocks;
-       max_xfer <<= ilog2(sdp->sector_size) - 9;
+       /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
+       dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
+
+       /* Some devices report a maximum block count for READ/WRITE requests. */
+       dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
+       q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+
+       /*
+        * Use the device's preferred I/O size for reads and writes
+        * unless the reported value is unreasonably large (or garbage).
+        */
+       if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
+           sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
+               rw_max = q->limits.io_opt =
+                       logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+       else
+               rw_max = BLK_DEF_MAX_SECTORS;
 
-       sdkp->disk->queue->limits.max_sectors =
-               min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+       /* Combine with controller limits */
+       q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
 
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
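
A quick sanity check of the logical_to_sectors() helper introduced above: it converts a count of logical blocks into 512-byte sectors by shifting with ilog2(sector_size) - 9. A userspace sketch of the same arithmetic (the open-coded ilog2 loop is for illustration only):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the helper: shift by ilog2(sector_size) - 9. */
static uint32_t logical_to_sectors(uint32_t sector_size, uint32_t blocks)
{
	unsigned int log2 = 0;

	while ((1u << (log2 + 1)) <= sector_size)
		log2++;                /* open-coded ilog2() */

	return blocks << (log2 - 9);
}

int main(void)
{
	/* 4096-byte logical blocks: one block is eight 512-byte sectors. */
	printf("%u\n", logical_to_sectors(4096, 1024));        /* 8192 */

	/* 512-byte logical blocks: identity. */
	printf("%u\n", logical_to_sectors(512, 1024));         /* 1024 */

	return 0;
}
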
index 63ba5ca7f9a16b21735c1283c2a17df40a7ee06c..5f2a84aff29fb9cbd003e5bbe6996355a5c07075 100644 (file)
@@ -67,6 +67,7 @@ struct scsi_disk {
        atomic_t        openers;
        sector_t        capacity;       /* size in 512-byte sectors */
        u32             max_xfer_blocks;
+       u32             opt_xfer_blocks;
        u32             max_ws_blocks;
        u32             max_unmap_blocks;
        u32             unmap_granularity;
index e0a1e52a04e736b35dc2b03371a72fc0590b85b1..2e522951b619740b08a712695dfca31962b16de0 100644 (file)
@@ -4083,6 +4083,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
        }
        cdev->owner = THIS_MODULE;
        cdev->ops = &st_fops;
+       STm->cdevs[rew] = cdev;
 
        error = cdev_add(cdev, cdev_devno, 1);
        if (error) {
@@ -4091,7 +4092,6 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
                pr_err("st%d: Device not attached.\n", dev_num);
                goto out_free;
        }
-       STm->cdevs[rew] = cdev;
 
        i = mode << (4 - ST_NBR_MODE_BITS);
        snprintf(name, 10, "%s%s%s", rew ? "n" : "",
@@ -4110,8 +4110,9 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
        return 0;
 out_free:
        cdev_del(STm->cdevs[rew]);
-       STm->cdevs[rew] = NULL;
 out:
+       STm->cdevs[rew] = NULL;
+       STm->devs[rew] = NULL;
        return error;
 }
 
index 06858e04ec59a8f2e290c398f6f06e2d1b15e28a..bf9a610e5b898106af79e49a586d7ad4061ef4e0 100644 (file)
@@ -562,8 +562,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
                goto out_clk_disable;
        }
 
-       dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n",
-                r->start, irq, bs->fifo_size);
+       dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n",
+                r, irq, bs->fifo_size);
 
        return 0;
 
index 563954a614242718bd125f16f9a3549f783346b8..7840067062a8daf950f4de9a07e232f1482c9485 100644 (file)
@@ -410,7 +410,7 @@ static int mtk_spi_setup(struct spi_device *spi)
        if (!spi->controller_data)
                spi->controller_data = (void *)&mtk_default_chip_info;
 
-       if (mdata->dev_comp->need_pad_sel)
+       if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
                gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
 
        return 0;
@@ -632,13 +632,23 @@ static int mtk_spi_probe(struct platform_device *pdev)
                        goto err_put_master;
                }
 
-               for (i = 0; i < master->num_chipselect; i++) {
-                       ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
-                                               dev_name(&pdev->dev));
-                       if (ret) {
-                               dev_err(&pdev->dev,
-                                       "can't get CS GPIO %i\n", i);
-                               goto err_put_master;
+               if (!master->cs_gpios && master->num_chipselect > 1) {
+                       dev_err(&pdev->dev,
+                               "cs_gpios not specified and num_chipselect > 1\n");
+                       ret = -EINVAL;
+                       goto err_put_master;
+               }
+
+               if (master->cs_gpios) {
+                       for (i = 0; i < master->num_chipselect; i++) {
+                               ret = devm_gpio_request(&pdev->dev,
+                                                       master->cs_gpios[i],
+                                                       dev_name(&pdev->dev));
+                               if (ret) {
+                                       dev_err(&pdev->dev,
+                                               "can't get CS GPIO %i\n", i);
+                                       goto err_put_master;
+                               }
                        }
                }
        }
index 94af80676684e4708d373e1551b8a6c46ea118fa..5e5fd77e27119d6ceb8d64f657884ab4ef9f3759 100644 (file)
@@ -1171,19 +1171,31 @@ err_no_rxchan:
 static int pl022_dma_autoprobe(struct pl022 *pl022)
 {
        struct device *dev = &pl022->adev->dev;
+       struct dma_chan *chan;
+       int err;
 
        /* automatically configure DMA channels from platform, normally using DT */
-       pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx");
-       if (!pl022->dma_rx_channel)
+       chan = dma_request_slave_channel_reason(dev, "rx");
+       if (IS_ERR(chan)) {
+               err = PTR_ERR(chan);
                goto err_no_rxchan;
+       }
+
+       pl022->dma_rx_channel = chan;
 
-       pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx");
-       if (!pl022->dma_tx_channel)
+       chan = dma_request_slave_channel_reason(dev, "tx");
+       if (IS_ERR(chan)) {
+               err = PTR_ERR(chan);
                goto err_no_txchan;
+       }
+
+       pl022->dma_tx_channel = chan;
 
        pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!pl022->dummypage)
+       if (!pl022->dummypage) {
+               err = -ENOMEM;
                goto err_no_dummypage;
+       }
 
        return 0;
 
@@ -1194,7 +1206,7 @@ err_no_txchan:
        dma_release_channel(pl022->dma_rx_channel);
        pl022->dma_rx_channel = NULL;
 err_no_rxchan:
-       return -ENODEV;
+       return err;
 }
                
 static void terminate_dma(struct pl022 *pl022)
@@ -2236,6 +2248,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
 
        /* Get DMA channels, try autoconfiguration first */
        status = pl022_dma_autoprobe(pl022);
+       if (status == -EPROBE_DEFER) {
+               dev_dbg(dev, "deferring probe to get DMA channel\n");
+               goto err_no_irq;
+       }
 
        /* If that failed, use channels from platform_info */
        if (status == 0)
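
The pl022 rework above switches from a NULL-returning channel request to one that returns an ERR_PTR, so that -EPROBE_DEFER can propagate out of pl022_dma_autoprobe() instead of being flattened to -ENODEV. A toy version of the kernel's pointer-error encoding (simplified; the real macros live in linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

#define EPROBE_DEFER 517               /* kernel-internal errno value */

/* Stand-in for a channel request that can defer instead of failing. */
static void *request_channel(int defer)
{
	return defer ? ERR_PTR(-EPROBE_DEFER) : (void *)"chan";
}

int main(void)
{
	void *chan = request_channel(1);

	if (IS_ERR(chan))
		printf("propagate %ld instead of -ENODEV\n", PTR_ERR(chan));
	return 0;
}
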
index e2415be209d5a77e9224add30db37d65244fabda..2b0a8ec3affb87333f8bdb2e9a2d9ad95855d4c1 100644 (file)
@@ -376,6 +376,7 @@ static void spi_drv_shutdown(struct device *dev)
 
 /**
  * __spi_register_driver - register a SPI driver
+ * @owner: owner module of the driver to register
  * @sdrv: the driver to register
  * Context: can sleep
  *
@@ -2130,6 +2131,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
         * Set transfer tx_nbits and rx_nbits as single transfer default
         * (SPI_NBITS_SINGLE) if it is not set for this transfer.
         */
+       message->frame_length = 0;
        list_for_each_entry(xfer, &message->transfers, transfer_list) {
                message->frame_length += xfer->len;
                if (!xfer->bits_per_word)
index f5d741f25ffdeb189a6b56f7ce871630f7e303be..485ab267091859214576da349e2bf89a5874667d 100644 (file)
@@ -110,7 +110,6 @@ struct libcfs_ioctl_handler {
 #define IOC_LIBCFS_CLEAR_DEBUG      _IOWR('e', 31, long)
 #define IOC_LIBCFS_MARK_DEBUG        _IOWR('e', 32, long)
 #define IOC_LIBCFS_MEMHOG                _IOWR('e', 36, long)
-#define IOC_LIBCFS_PING_TEST          _IOWR('e', 37, long)
 /* lnet ioctls */
 #define IOC_LIBCFS_GET_NI                _IOWR('e', 50, long)
 #define IOC_LIBCFS_FAIL_NID            _IOWR('e', 51, long)
index 07a68594c2791df5c75cbe138564b78008e8aefb..e7c2b26156b9d36026a4a64a4e0565804bcc1312 100644 (file)
@@ -274,23 +274,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
                }
                break;
 
-       case IOC_LIBCFS_PING_TEST: {
-               extern void (kping_client)(struct libcfs_ioctl_data *);
-               void (*ping)(struct libcfs_ioctl_data *);
-
-               CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n",
-                      data->ioc_count, libcfs_nid2str(data->ioc_nid),
-                      libcfs_nid2str(data->ioc_nid));
-               ping = symbol_get(kping_client);
-               if (!ping)
-                       CERROR("symbol_get failed\n");
-               else {
-                       ping(data);
-                       symbol_put(kping_client);
-               }
-               return 0;
-       }
-
        default: {
                struct libcfs_ioctl_handler *hand;
 
index da6e2ce77495b21ec127209664567b7e2f8f467e..850d86ca685b273344271eda7c95ad4f82b28244 100644 (file)
@@ -31,21 +31,6 @@ menuconfig VFIO
 
          If you don't know what to do here, say N.
 
-menuconfig VFIO_NOIOMMU
-       bool "VFIO No-IOMMU support"
-       depends on VFIO
-       help
-         VFIO is built on the ability to isolate devices using the IOMMU.
-         Only with an IOMMU can userspace access to DMA capable devices be
-         considered secure.  VFIO No-IOMMU mode enables IOMMU groups for
-         devices without IOMMU backing for the purpose of re-using the VFIO
-         infrastructure in a non-secure mode.  Use of this mode will result
-         in an unsupportable kernel and will therefore taint the kernel.
-         Device assignment to virtual machines is also not possible with
-         this mode since there is no IOMMU to provide DMA translation.
-
-         If you don't know what to do here, say N.
-
 source "drivers/vfio/pci/Kconfig"
 source "drivers/vfio/platform/Kconfig"
 source "virt/lib/Kconfig"
index 32b88bd2c82c7e3a3f4cd8cae11a253a171c76fa..56bf6dbb93db7ac281a0af4fa01a8d212fd87670 100644 (file)
@@ -940,13 +940,13 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return -EINVAL;
 
-       group = vfio_iommu_group_get(&pdev->dev);
+       group = iommu_group_get(&pdev->dev);
        if (!group)
                return -EINVAL;
 
        vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
        if (!vdev) {
-               vfio_iommu_group_put(group, &pdev->dev);
+               iommu_group_put(group);
                return -ENOMEM;
        }
 
@@ -957,7 +957,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
        if (ret) {
-               vfio_iommu_group_put(group, &pdev->dev);
+               iommu_group_put(group);
                kfree(vdev);
                return ret;
        }
@@ -993,7 +993,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
        if (!vdev)
                return;
 
-       vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
+       iommu_group_put(pdev->dev.iommu_group);
        kfree(vdev);
 
        if (vfio_pci_is_vga(pdev)) {
@@ -1035,7 +1035,7 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
        return PCI_ERS_RESULT_CAN_RECOVER;
 }
 
-static struct pci_error_handlers vfio_err_handlers = {
+static const struct pci_error_handlers vfio_err_handlers = {
        .error_detected = vfio_pci_aer_err_detected,
 };
 
index f1625dcfbb23d49d6b0586977a203462df0e75a5..b1cc3a768784426fbf7616316aa9731f21b77ddb 100644 (file)
@@ -92,7 +92,6 @@ static struct platform_driver vfio_platform_driver = {
        .remove         = vfio_platform_remove,
        .driver = {
                .name   = "vfio-platform",
-               .owner  = THIS_MODULE,
        },
 };
 
index a1c50d63079200ce90741ccd55e60bc703c73eb2..418cdd9ba3f4841353c5cbdddb345e9363578736 100644 (file)
@@ -51,13 +51,10 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
 
 static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
 {
-       char modname[256];
-
        vdev->reset = vfio_platform_lookup_reset(vdev->compat,
                                                &vdev->reset_module);
        if (!vdev->reset) {
-               snprintf(modname, 256, "vfio-reset:%s", vdev->compat);
-               request_module(modname);
+               request_module("vfio-reset:%s", vdev->compat);
                vdev->reset = vfio_platform_lookup_reset(vdev->compat,
                                                         &vdev->reset_module);
        }
index de632da2e22f60d4eee042631821c1a4a9b7e558..6070b793cbcb244d98a784bf6c41a5f0fabb662b 100644 (file)
@@ -62,7 +62,6 @@ struct vfio_container {
        struct rw_semaphore             group_lock;
        struct vfio_iommu_driver        *iommu_driver;
        void                            *iommu_data;
-       bool                            noiommu;
 };
 
 struct vfio_unbound_dev {
@@ -85,7 +84,6 @@ struct vfio_group {
        struct list_head                unbound_list;
        struct mutex                    unbound_lock;
        atomic_t                        opened;
-       bool                            noiommu;
 };
 
 struct vfio_device {
@@ -97,147 +95,6 @@ struct vfio_device {
        void                            *device_data;
 };
 
-#ifdef CONFIG_VFIO_NOIOMMU
-static bool noiommu __read_mostly;
-module_param_named(enable_unsafe_noiommu_support,
-                  noiommu, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
-#endif
-
-/*
- * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
- * and remove functions, any use cases other than acquiring the first
- * reference for the purpose of calling vfio_add_group_dev() or removing
- * that symmetric reference after vfio_del_group_dev() should use the raw
- * iommu_group_{get,put} functions.  In particular, vfio_iommu_group_put()
- * removes the device from the dummy group and cannot be nested.
- */
-struct iommu_group *vfio_iommu_group_get(struct device *dev)
-{
-       struct iommu_group *group;
-       int __maybe_unused ret;
-
-       group = iommu_group_get(dev);
-
-#ifdef CONFIG_VFIO_NOIOMMU
-       /*
-        * With noiommu enabled, an IOMMU group will be created for a device
-        * that doesn't already have one and doesn't have an iommu_ops on their
-        * bus.  We use iommu_present() again in the main code to detect these
-        * fake groups.
-        */
-       if (group || !noiommu || iommu_present(dev->bus))
-               return group;
-
-       group = iommu_group_alloc();
-       if (IS_ERR(group))
-               return NULL;
-
-       iommu_group_set_name(group, "vfio-noiommu");
-       ret = iommu_group_add_device(group, dev);
-       iommu_group_put(group);
-       if (ret)
-               return NULL;
-
-       /*
-        * Where to taint?  At this point we've added an IOMMU group for a
-        * device that is not backed by iommu_ops, therefore any iommu_
-        * callback using iommu_ops can legitimately Oops.  So, while we may
-        * be about to give a DMA capable device to a user without IOMMU
-        * protection, which is clearly taint-worthy, let's go ahead and do
-        * it here.
-        */
-       add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-       dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
-#endif
-
-       return group;
-}
-EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
-
-void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
-{
-#ifdef CONFIG_VFIO_NOIOMMU
-       if (!iommu_present(dev->bus))
-               iommu_group_remove_device(dev);
-#endif
-
-       iommu_group_put(group);
-}
-EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
-
-#ifdef CONFIG_VFIO_NOIOMMU
-static void *vfio_noiommu_open(unsigned long arg)
-{
-       if (arg != VFIO_NOIOMMU_IOMMU)
-               return ERR_PTR(-EINVAL);
-       if (!capable(CAP_SYS_RAWIO))
-               return ERR_PTR(-EPERM);
-
-       return NULL;
-}
-
-static void vfio_noiommu_release(void *iommu_data)
-{
-}
-
-static long vfio_noiommu_ioctl(void *iommu_data,
-                              unsigned int cmd, unsigned long arg)
-{
-       if (cmd == VFIO_CHECK_EXTENSION)
-               return arg == VFIO_NOIOMMU_IOMMU ? 1 : 0;
-
-       return -ENOTTY;
-}
-
-static int vfio_iommu_present(struct device *dev, void *unused)
-{
-       return iommu_present(dev->bus) ? 1 : 0;
-}
-
-static int vfio_noiommu_attach_group(void *iommu_data,
-                                    struct iommu_group *iommu_group)
-{
-       return iommu_group_for_each_dev(iommu_group, NULL,
-                                       vfio_iommu_present) ? -EINVAL : 0;
-}
-
-static void vfio_noiommu_detach_group(void *iommu_data,
-                                     struct iommu_group *iommu_group)
-{
-}
-
-static struct vfio_iommu_driver_ops vfio_noiommu_ops = {
-       .name = "vfio-noiommu",
-       .owner = THIS_MODULE,
-       .open = vfio_noiommu_open,
-       .release = vfio_noiommu_release,
-       .ioctl = vfio_noiommu_ioctl,
-       .attach_group = vfio_noiommu_attach_group,
-       .detach_group = vfio_noiommu_detach_group,
-};
-
-static struct vfio_iommu_driver vfio_noiommu_driver = {
-       .ops = &vfio_noiommu_ops,
-};
-
-/*
- * Wrap IOMMU drivers, the noiommu driver is the one and only driver for
- * noiommu groups (and thus containers) and not available for normal groups.
- */
-#define vfio_for_each_iommu_driver(con, pos)                           \
-       for (pos = con->noiommu ? &vfio_noiommu_driver :                \
-            list_first_entry(&vfio.iommu_drivers_list,                 \
-                             struct vfio_iommu_driver, vfio_next);     \
-            (con->noiommu ? pos != NULL :                              \
-                       &pos->vfio_next != &vfio.iommu_drivers_list);   \
-             pos = con->noiommu ? NULL : list_next_entry(pos, vfio_next))
-#else
-#define vfio_for_each_iommu_driver(con, pos)                           \
-       list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next)
-#endif
-
-
 /**
  * IOMMU driver registration
  */
@@ -342,8 +199,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
 /**
  * Group objects - create, release, get, put, search
  */
-static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
-                                           bool noiommu)
+static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 {
        struct vfio_group *group, *tmp;
        struct device *dev;
@@ -361,7 +217,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
        atomic_set(&group->container_users, 0);
        atomic_set(&group->opened, 0);
        group->iommu_group = iommu_group;
-       group->noiommu = noiommu;
 
        group->nb.notifier_call = vfio_iommu_group_notifier;
 
@@ -397,8 +252,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
 
        dev = device_create(vfio.class, NULL,
                            MKDEV(MAJOR(vfio.group_devt), minor),
-                           group, "%s%d", noiommu ? "noiommu-" : "",
-                           iommu_group_id(iommu_group));
+                           group, "%d", iommu_group_id(iommu_group));
        if (IS_ERR(dev)) {
                vfio_free_group_minor(minor);
                vfio_group_unlock_and_free(group);
@@ -682,7 +536,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
                return 0;
 
        /* TODO Prevent device auto probing */
-       WARN("Device %s added to live group %d!\n", dev_name(dev),
+       WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
             iommu_group_id(group->iommu_group));
 
        return 0;
@@ -786,8 +640,7 @@ int vfio_add_group_dev(struct device *dev,
 
        group = vfio_group_get_from_iommu(iommu_group);
        if (!group) {
-               group = vfio_create_group(iommu_group,
-                                         !iommu_present(dev->bus));
+               group = vfio_create_group(iommu_group);
                if (IS_ERR(group)) {
                        iommu_group_put(iommu_group);
                        return PTR_ERR(group);
@@ -999,7 +852,8 @@ static long vfio_ioctl_check_extension(struct vfio_container *container,
                 */
                if (!driver) {
                        mutex_lock(&vfio.iommu_drivers_lock);
-                       vfio_for_each_iommu_driver(container, driver) {
+                       list_for_each_entry(driver, &vfio.iommu_drivers_list,
+                                           vfio_next) {
                                if (!try_module_get(driver->ops->owner))
                                        continue;
 
@@ -1068,7 +922,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
        }
 
        mutex_lock(&vfio.iommu_drivers_lock);
-       vfio_for_each_iommu_driver(container, driver) {
+       list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
                void *data;
 
                if (!try_module_get(driver->ops->owner))
@@ -1333,9 +1187,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
        if (atomic_read(&group->container_users))
                return -EINVAL;
 
-       if (group->noiommu && !capable(CAP_SYS_RAWIO))
-               return -EPERM;
-
        f = fdget(container_fd);
        if (!f.file)
                return -EBADF;
@@ -1351,13 +1202,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
 
        down_write(&container->group_lock);
 
-       /* Real groups and fake groups cannot mix */
-       if (!list_empty(&container->group_list) &&
-           container->noiommu != group->noiommu) {
-               ret = -EPERM;
-               goto unlock_out;
-       }
-
        driver = container->iommu_driver;
        if (driver) {
                ret = driver->ops->attach_group(container->iommu_data,
@@ -1367,7 +1211,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
        }
 
        group->container = container;
-       container->noiommu = group->noiommu;
        list_add(&group->container_next, &container->group_list);
 
        /* Get a reference on the container and mark a user within the group */
@@ -1398,9 +1241,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
            !group->container->iommu_driver || !vfio_group_viable(group))
                return -EINVAL;
 
-       if (group->noiommu && !capable(CAP_SYS_RAWIO))
-               return -EPERM;
-
        device = vfio_device_get_from_name(group, buf);
        if (!device)
                return -ENODEV;
@@ -1443,10 +1283,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 
        fd_install(ret, filep);
 
-       if (group->noiommu)
-               dev_warn(device->dev, "vfio-noiommu device opened by user "
-                        "(%s:%d)\n", current->comm, task_pid_nr(current));
-
        return ret;
 }
 
@@ -1535,11 +1371,6 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
        if (!group)
                return -ENODEV;
 
-       if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
-               vfio_group_put(group);
-               return -EPERM;
-       }
-
        /* Do we need multiple instances of the group open?  Seems not. */
        opened = atomic_cmpxchg(&group->opened, 0, 1);
        if (opened) {
@@ -1702,11 +1533,6 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
        if (!atomic_inc_not_zero(&group->container_users))
                return ERR_PTR(-EINVAL);
 
-       if (group->noiommu) {
-               atomic_dec(&group->container_users);
-               return ERR_PTR(-EPERM);
-       }
-
        if (!group->container->iommu_driver ||
                        !vfio_group_viable(group)) {
                atomic_dec(&group->container_users);
index eec2f11809ff2463d2a714224925af9c679fead1..ad2146a9ab2d4b19cca5469402c203ade61e8545 100644 (file)
@@ -819,7 +819,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
                if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
                    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
-                   (a.log_guest_addr & (sizeof(u64) - 1))) {
+                   (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
                        r = -EINVAL;
                        break;
                }
@@ -1369,7 +1369,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
        /* Grab the next descriptor number they're advertising, and increment
         * the index we've seen. */
        if (unlikely(__get_user(ring_head,
-                               &vq->avail->ring[last_avail_idx % vq->num]))) {
+                               &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
                vq_err(vq, "Failed to read head: idx %d address %p\n",
                       last_avail_idx,
                       &vq->avail->ring[last_avail_idx % vq->num]);
@@ -1489,7 +1489,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
        u16 old, new;
        int start;
 
-       start = vq->last_used_idx % vq->num;
+       start = vq->last_used_idx & (vq->num - 1);
        used = vq->used->ring + start;
        if (count == 1) {
                if (__put_user(heads[0].id, &used->id)) {
@@ -1531,7 +1531,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 {
        int start, n, r;
 
-       start = vq->last_used_idx % vq->num;
+       start = vq->last_used_idx & (vq->num - 1);
        n = vq->num - start;
        if (n < count) {
                r = __vhost_add_used_n(vq, heads, n);
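
The vhost hunks above replace "% vq->num" with "& (vq->num - 1)". The two forms agree only because vhost validates that the ring size is a power of two, and the mask form avoids a division on a hot path while remaining correct as the free-running 16-bit indices wrap. A quick equivalence check over every 16-bit index (ring size assumed power of two):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint16_t num = 256;      /* ring size, assumed power of two */
	uint16_t idx = 0;

	/* For power-of-two num, idx % num == idx & (num - 1) for every
	 * 16-bit index, including after the index wraps around. */
	do {
		if (idx % num != (idx & (num - 1))) {
			printf("mismatch at %u\n", idx);
			return 1;
		}
	} while (idx++ != UINT16_MAX);

	printf("modulo and mask agree for all 16-bit indices\n");
	return 0;
}
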
index b1877d73fa563d6d48f2d55b7958f02daa8d9ad7..7062bb0975a521f1a28125c092c2f0d0bbd35787 100644 (file)
@@ -412,6 +412,7 @@ static int virtio_init(void)
 static void __exit virtio_exit(void)
 {
        bus_unregister(&virtio_bus);
+       ida_destroy(&virtio_index_ida);
 }
 core_initcall(virtio_init);
 module_exit(virtio_exit);
index 096b857e7b75abad526f487c84392d863c3c6b06..ee663c458b20a449c353c5ea0df4632933087e66 100644 (file)
@@ -80,6 +80,12 @@ struct vring_virtqueue {
        /* Last used index we've seen. */
        u16 last_used_idx;
 
+       /* Last written value to avail->flags */
+       u16 avail_flags_shadow;
+
+       /* Last written value to avail->idx in guest byte order */
+       u16 avail_idx_shadow;
+
        /* How to notify other side. FIXME: commonalize hcalls! */
        bool (*notify)(struct virtqueue *vq);
 
@@ -109,7 +115,7 @@ static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
         * otherwise virt_to_phys will give us bogus addresses in the
         * virtqueue.
         */
-       gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+       gfp &= ~__GFP_HIGHMEM;
 
        desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
        if (!desc)
@@ -235,13 +241,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 
        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
-       avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
+       avail = vq->avail_idx_shadow & (vq->vring.num - 1);
        vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
 
        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb(vq->weak_barriers);
-       vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
+       vq->avail_idx_shadow++;
+       vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
        vq->num_added++;
 
        pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -354,8 +361,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
         * event. */
        virtio_mb(vq->weak_barriers);
 
-       old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
-       new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
+       old = vq->avail_idx_shadow - vq->num_added;
+       new = vq->avail_idx_shadow;
        vq->num_added = 0;
 
 #ifdef DEBUG
@@ -510,7 +517,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
        /* If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call. */
-       if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
+       if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
                vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
                virtio_mb(vq->weak_barriers);
        }
@@ -537,7 +544,11 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
-       vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
+       if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
+               vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+               vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+       }
+
 }
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
@@ -565,7 +576,10 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
-       vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+       if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+               vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+               vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+       }
        vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
        END_USE(vq);
        return last_used_idx;
@@ -633,9 +647,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
        /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
-       vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+       if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+               vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+               vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+       }
        /* TODO: tune this threshold */
-       bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
+       bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
        vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
        virtio_mb(vq->weak_barriers);
        if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
@@ -670,7 +687,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
-               vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
+               vq->avail_idx_shadow--;
+               vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
                END_USE(vq);
                return buf;
        }
@@ -735,6 +753,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
+       vq->avail_flags_shadow = 0;
+       vq->avail_idx_shadow = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
 #ifdef DEBUG
@@ -746,8 +766,10 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
        /* No callback?  Tell other side not to bother us. */
-       if (!callback)
-               vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
+       if (!callback) {
+               vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+               vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+       }
 
        /* Put everything in free lists. */
        vq->free_head = 0;
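
The virtio_ring rework above keeps guest-private shadow copies (avail_idx_shadow, avail_flags_shadow) so the driver never reads avail->idx or avail->flags back out of memory the device is watching, and only writes the shared ring when a value actually changes. A stripped-down sketch of the shadow-flag pattern (plain C structs, not the kernel's vring types):

#include <stdio.h>
#include <stdint.h>

#define NO_INTERRUPT 0x1

struct ring {
	volatile uint16_t flags;       /* lives in memory the device watches */
};

struct vq {
	struct ring *ring;
	uint16_t flags_shadow;         /* guest-private copy of the last write */
};

/* Touch shared memory only when the cached value says it would change. */
static void disable_cb(struct vq *vq)
{
	if (!(vq->flags_shadow & NO_INTERRUPT)) {
		vq->flags_shadow |= NO_INTERRUPT;
		vq->ring->flags = vq->flags_shadow;
	}
}

static void enable_cb(struct vq *vq)
{
	if (vq->flags_shadow & NO_INTERRUPT) {
		vq->flags_shadow &= ~NO_INTERRUPT;
		vq->ring->flags = vq->flags_shadow;
	}
}

int main(void)
{
	struct ring r = { 0 };
	struct vq vq = { &r, 0 };

	disable_cb(&vq);               /* writes the ring once */
	disable_cb(&vq);               /* no-op: shadow already has the bit */
	enable_cb(&vq);                /* clears it again */
	printf("flags=%u\n", r.flags); /* 0 */
	return 0;
}
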
index 699941e906672b87eb1b5199f7966e29528e0116..511078586fa13543c30ac887dbb4e825888efee0 100644 (file)
@@ -451,9 +451,9 @@ void v9fs_evict_inode(struct inode *inode)
 {
        struct v9fs_inode *v9inode = V9FS_I(inode);
 
-       truncate_inode_pages_final(inode->i_mapping);
+       truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
-       filemap_fdatawrite(inode->i_mapping);
+       filemap_fdatawrite(&inode->i_data);
 
        v9fs_cache_inode_put_cookie(inode);
        /* clunk the fid stashed in writeback_fid */
index cb5337d8c273a5dd58e97d9be587fb56d643e52a..602e8441bc0fb6b094ec96dfb3273ff73b8b1506 100644 (file)
@@ -1169,6 +1169,16 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                }
        }
 
+       /* Once we sampled i_size check for reads beyond EOF */
+       dio->i_size = i_size_read(inode);
+       if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
+               if (dio->flags & DIO_LOCKING)
+                       mutex_unlock(&inode->i_mutex);
+               kmem_cache_free(dio_cache, dio);
+               retval = 0;
+               goto out;
+       }
+
        /*
         * For file extending writes updating i_size before data writeouts
         * complete can expose uninitialized blocks in dumb filesystems.
@@ -1222,7 +1232,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
        sdio.next_block_for_io = -1;
 
        dio->iocb = iocb;
-       dio->i_size = i_size_read(inode);
 
        spin_lock_init(&dio->bio_lock);
        dio->refcount = 1;
index 87e9d796cf7dd9ae2e4e0f221867dc69e8ea28df..3a37bd3f9637811c3b86e5c05be5aa47f32819c3 100644 (file)
@@ -421,7 +421,7 @@ static void lowcomms_write_space(struct sock *sk)
 
        if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
                con->sock->sk->sk_write_pending--;
-               clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags);
+               clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
        }
 
        if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
@@ -1448,7 +1448,7 @@ static void send_to_sock(struct connection *con)
                                              msg_flags);
                        if (ret == -EAGAIN || ret == 0) {
                                if (ret == -EAGAIN &&
-                                   test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) &&
+                                   test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
                                    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
                                        /* Notify TCP that we're limited by the
                                         * application window size.
index af06830bfc00c743369737551e7bceea3b61eede..1a0835073663ff3f1dad1f376328de5e2b1332ce 100644 (file)
@@ -389,7 +389,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
        struct ext4_crypto_ctx  *ctx;
        struct page             *ciphertext_page = NULL;
        struct bio              *bio;
-       ext4_lblk_t             lblk = ex->ee_block;
+       ext4_lblk_t             lblk = le32_to_cpu(ex->ee_block);
        ext4_fsblk_t            pblk = ext4_ext_pblock(ex);
        unsigned int            len = ext4_ext_get_actual_len(ex);
        int                     ret, err = 0;
index 750063f7a50c6cc2b9808318538762be67ce098d..cc7ca4e87144a540332213ce03b63e80e601f40e 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/seqlock.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
+#include <linux/version.h>
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
@@ -727,19 +728,55 @@ struct move_extent {
        <= (EXT4_GOOD_OLD_INODE_SIZE +                  \
            (einode)->i_extra_isize))                   \
 
+/*
+ * We use an encoding that preserves the times for extra epoch "00":
+ *
+ * extra  msb of                         adjust for signed
+ * epoch  32-bit                         32-bit tv_sec to
+ * bits   time    decoded 64-bit tv_sec  64-bit tv_sec      valid time range
+ * 0 0    1    -0x80000000..-0x00000001  0x000000000 1901-12-13..1969-12-31
+ * 0 0    0    0x000000000..0x07fffffff  0x000000000 1970-01-01..2038-01-19
+ * 0 1    1    0x080000000..0x0ffffffff  0x100000000 2038-01-19..2106-02-07
+ * 0 1    0    0x100000000..0x17fffffff  0x100000000 2106-02-07..2174-02-25
+ * 1 0    1    0x180000000..0x1ffffffff  0x200000000 2174-02-25..2242-03-16
+ * 1 0    0    0x200000000..0x27fffffff  0x200000000 2242-03-16..2310-04-04
+ * 1 1    1    0x280000000..0x2ffffffff  0x300000000 2310-04-04..2378-04-22
+ * 1 1    0    0x300000000..0x37fffffff  0x300000000 2378-04-22..2446-05-10
+ *
+ * Note that previous versions of the kernel on 64-bit systems would
+ * incorrectly use extra epoch bits 1,1 for dates between 1901 and
+ * 1970.  e2fsck will correct this, assuming that it is run on the
+ * affected filesystem before 2242.
+ */
+
 static inline __le32 ext4_encode_extra_time(struct timespec *time)
 {
-       return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
-                          (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
-                          ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
+       u32 extra = sizeof(time->tv_sec) > 4 ?
+               ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
+       return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
 }
 
 static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
 {
-       if (sizeof(time->tv_sec) > 4)
-              time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
-                              << 32;
-       time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+       if (unlikely(sizeof(time->tv_sec) > 4 &&
+                       (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
+               /* Handle legacy encoding of pre-1970 dates with epoch
+                * bits 1,1.  We assume that by kernel version 4.20,
+                * everyone will have run fsck over the affected
+                * filesystems to correct the problem.  (This
+                * backwards compatibility may be removed before this
+                * time, at the discretion of the ext4 developers.)
+                */
+               u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
+               if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
+                       extra_bits = 0;
+               time->tv_sec += extra_bits << 32;
+#else
+               time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
+#endif
+       }
+       time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
 }
 
 #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode)                         \
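
To make the epoch table above concrete: the two extra bits carry bits 32-33 of the 64-bit tv_sec, computed so that adding them back to the sign-extended low 32 bits reconstructs the original value. A hypothetical userspace round-trip of the corrected encoding (EPOCH_* constants restated from the table, nanosecond bits omitted):

#include <stdio.h>
#include <stdint.h>

#define EPOCH_BITS 2
#define EPOCH_MASK ((1 << EPOCH_BITS) - 1)    /* restated from the table */

/* Corrected encode: bits 32-33 of the difference between the 64-bit
 * tv_sec and its sign-extended low 32 bits. */
static uint32_t encode_extra(int64_t sec)
{
	return (uint32_t)(((sec - (int32_t)sec) >> 32) & EPOCH_MASK);
}

/* Decode: sign-extend the low word, then add the epoch offset back. */
static int64_t decode(int32_t low, uint32_t extra)
{
	return (int64_t)low + ((int64_t)(extra & EPOCH_MASK) << 32);
}

int main(void)
{
	/* A post-2106 timestamp: table row with epoch bits 0,1. */
	int64_t t = 0x123456789LL;
	uint32_t extra = encode_extra(t);

	printf("extra=%u round-trip=%s\n", extra,
	       decode((int32_t)t, extra) == t ? "ok" : "broken");
	return 0;
}
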
index abe2401ce405669f0d315319f5275a9fc321c697..e8e7af62ac95fc4e5268c32df1bc496a337202a7 100644 (file)
@@ -52,7 +52,7 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook
        /* Symlink is encrypted */
        sd = (struct ext4_encrypted_symlink_data *)caddr;
        cstr.name = sd->encrypted_path;
-       cstr.len  = le32_to_cpu(sd->len);
+       cstr.len  = le16_to_cpu(sd->len);
        if ((cstr.len +
             sizeof(struct ext4_encrypted_symlink_data) - 1) >
            max_size) {
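
The one-liner above is a real bug fix, not style: sd->len is a 16-bit little-endian field, and decoding it with the 32-bit accessor pulls two bytes of the adjacent encrypted path into the length. A little-endian-only userspace illustration (byte layout invented for the demo):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* On-disk layout: __le16 len, then the encrypted path bytes. */
	unsigned char disk[6] = { 0x04, 0x00, 'a', 'b', 'c', 'd' };
	uint16_t len16;
	uint32_t len32;

	memcpy(&len16, disk, 2);    /* right width: 4 */
	memcpy(&len32, disk, 4);    /* wrong width: swallows 'a' and 'b' */

	printf("le16 read: %u\n", len16);      /* 4 */
	printf("le32 read: %u\n", len32);      /* 0x62610004 */
	return 0;
}
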
index 1b57c72f4a009aafc8c3510eac81dd9d8a5f9482..1420a3c614afb1a4c06e87471163acf01b3b98d8 100644 (file)
@@ -358,7 +358,7 @@ static int name##_open(struct inode *inode, struct file *file) \
        return single_open(file, ext4_seq_##name##_show, PDE_DATA(inode)); \
 } \
 \
-const struct file_operations ext4_seq_##name##_fops = { \
+static const struct file_operations ext4_seq_##name##_fops = { \
        .owner          = THIS_MODULE, \
        .open           = name##_open, \
        .read           = seq_read, \
index 89463eee67914643a02ce711463e62fac0b83c8d..ca181e81c765518d4a599025c914adbac626e739 100644 (file)
@@ -1009,7 +1009,8 @@ out:
 }
 
 /* Fast check whether buffer is already attached to the required transaction */
-static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
+static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
+                                                       bool undo)
 {
        struct journal_head *jh;
        bool ret = false;
@@ -1036,6 +1037,9 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
        jh = READ_ONCE(bh->b_private);
        if (!jh)
                goto out;
+       /* For undo access buffer must have data copied */
+       if (undo && !jh->b_committed_data)
+               goto out;
        if (jh->b_transaction != handle->h_transaction &&
            jh->b_next_transaction != handle->h_transaction)
                goto out;
@@ -1073,7 +1077,7 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
        struct journal_head *jh;
        int rc;
 
-       if (jbd2_write_access_granted(handle, bh))
+       if (jbd2_write_access_granted(handle, bh, false))
                return 0;
 
        jh = jbd2_journal_add_journal_head(bh);
@@ -1210,7 +1214,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
        char *committed_data = NULL;
 
        JBUFFER_TRACE(jh, "entry");
-       if (jbd2_write_access_granted(handle, bh))
+       if (jbd2_write_access_granted(handle, bh, true))
                return 0;
 
        jh = jbd2_journal_add_journal_head(bh);
@@ -2152,6 +2156,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
 
                if (!buffer_dirty(bh)) {
                        /* bdflush has written it.  We can drop it now */
+                       __jbd2_journal_remove_checkpoint(jh);
                        goto zap_buffer;
                }
 
@@ -2181,6 +2186,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
                                /* The orphan record's transaction has
                                 * committed.  We can cleanse this buffer */
                                clear_buffer_jbddirty(bh);
+                               __jbd2_journal_remove_checkpoint(jh);
                                goto zap_buffer;
                        }
                }
index d84d7c7515fc44415f11488f192e52ef3c9c9990..0c3974cd3ecd55670ccab51c596b0b141cbdd721 100644 (file)
@@ -1996,7 +1996,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
        nd->depth = 0;
-       nd->total_link_count = 0;
        if (flags & LOOKUP_ROOT) {
                struct dentry *root = nd->root.dentry;
                struct inode *inode = root->d_inode;
index 871fcb67be9741f2aab81f3d6552306dedf4c967..0a8983492d917bbc61888559e8417f738eb607be 100644 (file)
@@ -195,8 +195,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
 
 static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
                              struct dentry *dentry, struct path *lowerpath,
-                             struct kstat *stat, struct iattr *attr,
-                             const char *link)
+                             struct kstat *stat, const char *link)
 {
        struct inode *wdir = workdir->d_inode;
        struct inode *udir = upperdir->d_inode;
@@ -240,8 +239,6 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 
        mutex_lock(&newdentry->d_inode->i_mutex);
        err = ovl_set_attr(newdentry, stat);
-       if (!err && attr)
-               err = notify_change(newdentry, attr, NULL);
        mutex_unlock(&newdentry->d_inode->i_mutex);
        if (err)
                goto out_cleanup;
@@ -286,8 +283,7 @@ out_cleanup:
  * that point the file will have already been copied up anyway.
  */
 int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
-                   struct path *lowerpath, struct kstat *stat,
-                   struct iattr *attr)
+                   struct path *lowerpath, struct kstat *stat)
 {
        struct dentry *workdir = ovl_workdir(dentry);
        int err;
@@ -345,26 +341,19 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        }
        upperdentry = ovl_dentry_upper(dentry);
        if (upperdentry) {
-               unlock_rename(workdir, upperdir);
+               /* Raced with another copy-up?  Nothing to do, then... */
                err = 0;
-               /* Raced with another copy-up?  Do the setattr here */
-               if (attr) {
-                       mutex_lock(&upperdentry->d_inode->i_mutex);
-                       err = notify_change(upperdentry, attr, NULL);
-                       mutex_unlock(&upperdentry->d_inode->i_mutex);
-               }
-               goto out_put_cred;
+               goto out_unlock;
        }
 
        err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath,
-                                stat, attr, link);
+                                stat, link);
        if (!err) {
                /* Restore timestamps on parent (best effort) */
                ovl_set_timestamps(upperdir, &pstat);
        }
 out_unlock:
        unlock_rename(workdir, upperdir);
-out_put_cred:
        revert_creds(old_cred);
        put_cred(override_cred);
 
@@ -406,7 +395,7 @@ int ovl_copy_up(struct dentry *dentry)
                ovl_path_lower(next, &lowerpath);
                err = vfs_getattr(&lowerpath, &stat);
                if (!err)
-                       err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL);
+                       err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
 
                dput(parent);
                dput(next);
index ec0c2a050043afbb3eff4c7451930dc6d86006ec..4060ffde87225c81114d086c000773d4019255b5 100644 (file)
@@ -12,8 +12,7 @@
 #include <linux/xattr.h>
 #include "overlayfs.h"
 
-static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr,
-                           bool no_data)
+static int ovl_copy_up_truncate(struct dentry *dentry)
 {
        int err;
        struct dentry *parent;
@@ -30,10 +29,8 @@ static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr,
        if (err)
                goto out_dput_parent;
 
-       if (no_data)
-               stat.size = 0;
-
-       err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr);
+       stat.size = 0;
+       err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
 
 out_dput_parent:
        dput(parent);
@@ -49,13 +46,13 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
        if (err)
                goto out;
 
-       upperdentry = ovl_dentry_upper(dentry);
-       if (upperdentry) {
+       err = ovl_copy_up(dentry);
+       if (!err) {
+               upperdentry = ovl_dentry_upper(dentry);
+
                mutex_lock(&upperdentry->d_inode->i_mutex);
                err = notify_change(upperdentry, attr, NULL);
                mutex_unlock(&upperdentry->d_inode->i_mutex);
-       } else {
-               err = ovl_copy_up_last(dentry, attr, false);
        }
        ovl_drop_write(dentry);
 out:
@@ -353,7 +350,7 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
                        return ERR_PTR(err);
 
                if (file_flags & O_TRUNC)
-                       err = ovl_copy_up_last(dentry, NULL, true);
+                       err = ovl_copy_up_truncate(dentry);
                else
                        err = ovl_copy_up(dentry);
                ovl_drop_write(dentry);
index ea5a40b06e3ad3f9e114bd7d3aab8b931b567051..e17154aeaae4761ca8ec466b96afe913cd40c322 100644 (file)
@@ -194,7 +194,6 @@ void ovl_cleanup(struct inode *dir, struct dentry *dentry);
 /* copy_up.c */
 int ovl_copy_up(struct dentry *dentry);
 int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
-                   struct path *lowerpath, struct kstat *stat,
-                   struct iattr *attr);
+                   struct path *lowerpath, struct kstat *stat);
 int ovl_copy_xattr(struct dentry *old, struct dentry *new);
 int ovl_set_attr(struct dentry *upper, struct kstat *stat);
index db284bff29dcceb39360d458cec3a194745955f8..9dbb739cafa0c16dda9d011d30ce8cd9b4091fd4 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2001 Red Hat, Inc.
  * Based on code from mm/memory.c Copyright Linus Torvalds and others.
  *
- * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 0b921ae06cd83585e1d1cf2adb6baf665f203013..0a271ca1f7c7ec12199b4c9781bb5aa5ded75dae 100644 (file)
@@ -309,6 +309,11 @@ struct drm_file {
        unsigned universal_planes:1;
        /* true if client understands atomic properties */
        unsigned atomic:1;
+       /*
+        * This client is allowed to gain master privileges for @master.
+        * Protected by struct drm_device::master_mutex.
+        */
+       unsigned allowed_master:1;
 
        struct pid *pid;
        kuid_t uid;
@@ -910,6 +915,7 @@ extern int drm_open(struct inode *inode, struct file *filp);
 extern ssize_t drm_read(struct file *filp, char __user *buffer,
                        size_t count, loff_t *offset);
 extern int drm_release(struct inode *inode, struct file *filp);
+extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
 
                                /* Mapping support (drm_vm.h) */
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -947,6 +953,10 @@ extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
                                  struct drm_pending_vblank_event *e);
 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
                                       struct drm_pending_vblank_event *e);
+extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+                                struct drm_pending_vblank_event *e);
+extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+                                     struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
 extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
 extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
index 05483393999534d91425b01c4573a5a4aa494fbc..1991aea2ec4cff401b84db2841ac60fe28f00080 100644 (file)
@@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
 }
 
 static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
-                               const char *name, const char *cells_name,
-                               size_t index, struct acpi_reference_args *args)
+                               const char *name, size_t index,
+                               struct acpi_reference_args *args)
 {
        return -ENXIO;
 }
index 2b8ed123ad36b26bc26956293452937d3ac1b90e..defeaac0745f1b26340d7a13f5b73810de743193 100644 (file)
@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
  */
 static inline __u32 rol32(__u32 word, unsigned int shift)
 {
-       return (word << shift) | (word >> (32 - shift));
+       return (word << shift) | (word >> ((-shift) & 31));
 }
 
 /**
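Editor's note: the old rol32() computed word >> (32 - shift), which is
undefined C when shift == 0 (a 32-bit value shifted by 32). A minimal
userspace sketch of the fixed helper, runnable as-is; main() and the
test values are illustrative:

#include <stdint.h>
#include <stdio.h>

/* (-shift) & 31 maps shift == 0 to a harmless right shift by 0,
 * instead of the undefined shift by 32 that (32 - shift) produced. */
static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

int main(void)
{
	printf("%08x\n", rol32(0x80000001u, 0));	/* 80000001 */
	printf("%08x\n", rol32(0x80000001u, 4));	/* 00000018 */
	return 0;
}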
index c0d2b7927c1f5a73afc2a56552116d86c04ca947..0169ba2e2e64b9c4bba07bf72b9c1194fe2db08a 100644 (file)
@@ -254,6 +254,7 @@ struct queue_limits {
        unsigned long           virt_boundary_mask;
 
        unsigned int            max_hw_sectors;
+       unsigned int            max_dev_sectors;
        unsigned int            chunk_sectors;
        unsigned int            max_sectors;
        unsigned int            max_segment_size;
@@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
                unsigned int len);
-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                             struct bio_set *bs, gfp_t gfp_mask,
@@ -960,7 +960,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
index de464e6683b68f247492d69a02b463f6bfb4b3df..83d1926c61e4567b881bfbc26b75b802c428cbc3 100644 (file)
@@ -40,6 +40,7 @@ struct bpf_map {
        struct user_struct *user;
        const struct bpf_map_ops *ops;
        struct work_struct work;
+       atomic_t usercnt;
 };
 
 struct bpf_map_type_list {
@@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
-struct bpf_map *bpf_map_get(u32 ufd);
+struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
+void bpf_map_inc(struct bpf_map *map, bool uref);
+void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 
 extern int sysctl_unprivileged_bpf_disabled;
index 60d44b26276d84a77d7f7e3379ae12821a8b3eeb..06b77f9dd3f2052ac87f970b8aa8c195b78f7ada 100644 (file)
@@ -90,7 +90,6 @@ enum {
  */
 struct cgroup_file {
        /* do not access any fields from outside cgroup core */
-       struct list_head node;                  /* anchored at css->files */
        struct kernfs_node *kn;
 };
 
@@ -134,9 +133,6 @@ struct cgroup_subsys_state {
         */
        u64 serial_nr;
 
-       /* all cgroup_files associated with this css */
-       struct list_head files;
-
        /* percpu_ref killing and RCU release */
        struct rcu_head rcu_head;
        struct work_struct destroy_work;
@@ -426,12 +422,9 @@ struct cgroup_subsys {
        void (*css_reset)(struct cgroup_subsys_state *css);
        void (*css_e_css_changed)(struct cgroup_subsys_state *css);
 
-       int (*can_attach)(struct cgroup_subsys_state *css,
-                         struct cgroup_taskset *tset);
-       void (*cancel_attach)(struct cgroup_subsys_state *css,
-                             struct cgroup_taskset *tset);
-       void (*attach)(struct cgroup_subsys_state *css,
-                      struct cgroup_taskset *tset);
+       int (*can_attach)(struct cgroup_taskset *tset);
+       void (*cancel_attach)(struct cgroup_taskset *tset);
+       void (*attach)(struct cgroup_taskset *tset);
        int (*can_fork)(struct task_struct *task, void **priv_p);
        void (*cancel_fork)(struct task_struct *task, void *priv);
        void (*fork)(struct task_struct *task, void *priv);
index 22e3754f89c511374af4ca8ac5a518786dcd6d88..cb91b44f5f7877d5899403b26ca1d9231c1ac9b6 100644 (file)
@@ -88,6 +88,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
+void cgroup_file_notify(struct cgroup_file *cfile);
 
 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -119,8 +120,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state
 struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
                                                     struct cgroup_subsys_state *css);
 
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+                                        struct cgroup_subsys_state **dst_cssp);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+                                       struct cgroup_subsys_state **dst_cssp);
 
 void css_task_iter_start(struct cgroup_subsys_state *css,
                         struct css_task_iter *it);
@@ -235,30 +238,39 @@ void css_task_iter_end(struct css_task_iter *it);
 /**
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
+ * @dst_css: the destination css
  * @tset: taskset to iterate
  *
  * @tset may contain multiple tasks and they may belong to multiple
- * processes.  When there are multiple tasks in @tset, if a task of a
- * process is in @tset, all tasks of the process are in @tset.  Also, all
- * are guaranteed to share the same source and destination csses.
+ * processes.
+ *
+ * On the v2 hierarchy, there may be tasks from multiple processes and they
+ * may not share the source or destination csses.
+ *
+ * On traditional hierarchies, when there are multiple tasks in @tset, if a
+ * task of a process is in @tset, all tasks of the process are in @tset.
+ * Also, all are guaranteed to share the same source and destination csses.
  *
  * Iteration is not in any specific order.
  */
-#define cgroup_taskset_for_each(task, tset)                            \
-       for ((task) = cgroup_taskset_first((tset)); (task);             \
-            (task) = cgroup_taskset_next((tset)))
+#define cgroup_taskset_for_each(task, dst_css, tset)                   \
+       for ((task) = cgroup_taskset_first((tset), &(dst_css));         \
+            (task);                                                    \
+            (task) = cgroup_taskset_next((tset), &(dst_css)))
 
 /**
  * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
  * @leader: the loop cursor
+ * @dst_css: the destination css
  * @tset: taskset to iterate
  *
  * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
  * may not contain any.
  */
-#define cgroup_taskset_for_each_leader(leader, tset)                   \
-       for ((leader) = cgroup_taskset_first((tset)); (leader);         \
-            (leader) = cgroup_taskset_next((tset)))                    \
+#define cgroup_taskset_for_each_leader(leader, dst_css, tset)          \
+       for ((leader) = cgroup_taskset_first((tset), &(dst_css));       \
+            (leader);                                                  \
+            (leader) = cgroup_taskset_next((tset), &(dst_css)))        \
                if ((leader) != (leader)->group_leader)                 \
                        ;                                               \
                else
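Editor's note: a sketch of how a controller callback iterates with the
reworked macro; the callback name and body are hypothetical, but the
three-argument form matches the freezer and pids conversions later in
this diff:

static void example_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	/* the destination css now rides along with each task, since on
	 * the v2 hierarchy it may differ from task to task */
	cgroup_taskset_for_each(task, css, tset) {
		/* operate on task against css, its destination */
	}
}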
@@ -516,19 +528,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
        pr_cont_kernfs_path(cgrp->kn);
 }
 
-/**
- * cgroup_file_notify - generate a file modified event for a cgroup_file
- * @cfile: target cgroup_file
- *
- * @cfile must have been obtained by setting cftype->file_offset.
- */
-static inline void cgroup_file_notify(struct cgroup_file *cfile)
-{
-       /* might not have been created due to one of the CFTYPE selector flags */
-       if (cfile->kn)
-               kernfs_notify(cfile->kn);
-}
-
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
index ef4c5b1a860f5c610c0ee4646aa2b729aa81f71a..177c7680c1a8a81bcc942497ee228c148fbf5a0b 100644 (file)
@@ -77,6 +77,7 @@ struct cpufreq_policy {
        unsigned int            suspend_freq; /* freq to set during suspend */
 
        unsigned int            policy; /* see above */
+       unsigned int            last_policy; /* policy before unplug */
        struct cpufreq_governor *governor; /* see below */
        void                    *governor_data;
        bool                    governor_enabled; /* governor start/stop flag */
index cc92268af89ae02e8a26fe08f671485b2ca67bc3..6ac3cad9aef109171a5882ee2c5afb30e230c8d4 100644 (file)
@@ -27,7 +27,7 @@
 #ifdef __KERNEL__
 
 extern int dns_query(const char *type, const char *name, size_t namelen,
-                    const char *options, char **_result, time_t *_expiry);
+                    const char *options, char **_result, time64_t *_expiry);
 
 #endif /* KERNEL */
 
index 0ef2a97ccdb50bc83baaf727f37d2fa034eea036..402753bccafa37b4ec1e597902f608548fbdbd22 100644 (file)
@@ -227,7 +227,7 @@ struct ipv6_pinfo {
        struct ipv6_ac_socklist *ipv6_ac_list;
        struct ipv6_fl_socklist __rcu *ipv6_fl_list;
 
-       struct ipv6_txoptions   *opt;
+       struct ipv6_txoptions __rcu     *opt;
        struct sk_buff          *pktoptions;
        struct sk_buff          *rxpmtu;
        struct inet6_cork       cork;
index 8dde55974f186bca7c1488866aaacdd805c347b3..0536524bb9eb6467013a51a70ed28b576494e649 100644 (file)
@@ -5,7 +5,7 @@
  * Jump label support
  *
  * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  *
  * DEPRECATED API:
  *
index 83577f8fd15bcd0f8693ef6a1a2a9fb216b32b2b..600c1e0626a5ff6c91df3a0b2f2fcf0ef485fedf 100644 (file)
@@ -210,6 +210,7 @@ enum {
        ATA_FLAG_SLAVE_POSS     = (1 << 0), /* host supports slave dev */
                                            /* (doesn't imply presence) */
        ATA_FLAG_SATA           = (1 << 1),
+       ATA_FLAG_NO_LOG_PAGE    = (1 << 5), /* do not issue log page read */
        ATA_FLAG_NO_ATAPI       = (1 << 6), /* No ATAPI support */
        ATA_FLAG_PIO_DMA        = (1 << 7), /* PIO cmds via DMA */
        ATA_FLAG_PIO_LBA48      = (1 << 8), /* Host DMA engine is LBA28 only */
index 3db5552b17d5b5e489eb3d5bb3c6a12fcb779b29..c6916aec43b66200033b5a2a2a155d8b72a947bf 100644 (file)
@@ -179,7 +179,7 @@ typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
 typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
 typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
                                nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int,
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
                                nvm_bb_update_fn *, void *);
 typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
 typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
index 70400dc7660f72028cd6ce84cf6ec90b69537137..c57e424d914b70fc5032f9b6d1ec918e8e195c64 100644 (file)
@@ -2,7 +2,7 @@
  * Runtime locking correctness validator
  *
  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * see Documentation/locking/lockdep-design.txt for more details.
  */
index 70ac5e28e6b737f7aac7b0b27d929adacf4aad12..0b4ac7da583a8b2dff7983b4602cb9a81887ea86 100644 (file)
@@ -34,8 +34,12 @@ struct inode;
 struct file;
 struct net;
 
-#define SOCK_ASYNC_NOSPACE     0
-#define SOCK_ASYNC_WAITDATA    1
+/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
+ * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
+ * Eventually all flags will be in sk->sk_wq_flags.
+ */
+#define SOCKWQ_ASYNC_NOSPACE   0
+#define SOCKWQ_ASYNC_WAITDATA  1
 #define SOCK_NOSPACE           2
 #define SOCK_PASSCRED          3
 #define SOCK_PASSSEC           4
@@ -89,6 +93,7 @@ struct socket_wq {
        /* Note: wait MUST be first field of socket_wq */
        wait_queue_head_t       wait;
        struct fasync_struct    *fasync_list;
+       unsigned long           flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */
        struct rcu_head         rcu;
 } ____cacheline_aligned_in_smp;
 
@@ -96,7 +101,7 @@ struct socket_wq {
  *  struct socket - general BSD socket
  *  @state: socket state (%SS_CONNECTED, etc)
  *  @type: socket type (%SOCK_STREAM, etc)
- *  @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
+ *  @flags: socket flags (%SOCK_NOSPACE, etc)
  *  @ops: protocol specific socket operations
  *  @file: File back pointer for gc
  *  @sk: internal networking protocol agnostic socket representation
@@ -202,7 +207,7 @@ enum {
        SOCK_WAKE_URG,
 };
 
-int sock_wake_async(struct socket *sk, int how, int band);
+int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
 int sock_register(const struct net_proto_family *fam);
 void sock_unregister(int family);
 int __sock_create(struct net *net, int family, int type, int proto,
index 67bfac1abfc1ac8bd95bf3ddcf35aac07145166b..3b5d134e945a9b1a5dda9b636c04909318fe8a67 100644 (file)
@@ -1398,7 +1398,8 @@ enum netdev_priv_flags {
  *     @dma:           DMA channel
  *     @mtu:           Interface MTU value
  *     @type:          Interface hardware type
- *     @hard_header_len: Hardware header length
+ *     @hard_header_len: Hardware header length, which is also the
+ *                       minimum size of a packet.
  *
  *     @needed_headroom: Extra headroom the hardware may need, but not in all
  *                       cases can this be guaranteed
index 039f2eec49ced0ae1c638354d1a34c924cebde51..1e0deb8e849458ca179c1df4734bb24f0e4cd748 100644 (file)
@@ -46,12 +46,14 @@ extern int of_irq_get(struct device_node *dev, int index);
 extern int of_irq_get_byname(struct device_node *dev, const char *name);
 extern int of_irq_to_resource_table(struct device_node *dev,
                struct resource *res, int nr_irqs);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
 extern struct irq_domain *of_msi_get_domain(struct device *dev,
                                            struct device_node *np,
                                            enum irq_domain_bus_token token);
 extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
                                                       u32 rid);
 extern void of_msi_configure(struct device *dev, struct device_node *np);
+u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
 #else
 static inline int of_irq_count(struct device_node *dev)
 {
@@ -70,6 +72,11 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
 {
        return 0;
 }
+static inline void *of_irq_find_parent(struct device_node *child)
+{
+       return NULL;
+}
+
 static inline struct irq_domain *of_msi_get_domain(struct device *dev,
                                                   struct device_node *np,
                                                   enum irq_domain_bus_token token)
@@ -84,6 +91,11 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
 static inline void of_msi_configure(struct device *dev, struct device_node *np)
 {
 }
+static inline u32 of_msi_map_rid(struct device *dev,
+                                struct device_node *msi_np, u32 rid_in)
+{
+       return rid_in;
+}
 #endif
 
 #if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
@@ -93,7 +105,6 @@ static inline void of_msi_configure(struct device *dev, struct device_node *np)
  * so declare it here regardless of the CONFIG_OF_IRQ setting.
  */
 extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
 
 #else /* !CONFIG_OF && !CONFIG_SPARC */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
@@ -101,12 +112,6 @@ static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
 {
        return 0;
 }
-
-static inline u32 of_msi_map_rid(struct device *dev,
-                                struct device_node *msi_np, u32 rid_in)
-{
-       return rid_in;
-}
 #endif /* !CONFIG_OF */
 
 #endif /* __OF_IRQ_H */
index d841d33bcdc9c370742db6408e1f979594d37099..f9828a48f16addab4737683f3c8345ab87e52a0a 100644 (file)
@@ -697,9 +697,11 @@ struct perf_cgroup {
  * if there is no cgroup event for the current CPU context.
  */
 static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
 {
-       return container_of(task_css(task, perf_event_cgrp_id),
+       return container_of(task_css_check(task, perf_event_cgrp_id,
+                                          ctx ? lockdep_is_held(&ctx->lock)
+                                              : true),
                            struct perf_cgroup, css);
 }
 #endif /* CONFIG_CGROUP_PERF */
index 5440f64d2942a7957e0831a15918fc070ac63193..21221338ad18018b9b8c479efe1f938b67e6f685 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Floating proportions
  *
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * This file contains the public data structure and API definitions.
  */
index 0bdc72f3690545dd3ac34d1f4b204f2e1db02eb6..4a29c75b146e1c9afc550c6c6bab58f17ed8e2da 100644 (file)
@@ -21,7 +21,7 @@
  * Authors:
  *     Srikar Dronamraju
  *     Jim Keniston
- * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/errno.h>
index 610a86a892b8896363ece505cdbe686096505cfc..ddb44097538245f17e882efff635143ada49e089 100644 (file)
@@ -44,9 +44,6 @@ struct vfio_device_ops {
        void    (*request)(void *device_data, unsigned int count);
 };
 
-extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
-extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
-
 extern int vfio_add_group_dev(struct device *dev,
                              const struct vfio_device_ops *ops,
                              void *device_data);
index b36d837c701ec9fe94280a91df3cf1e359ad50af..2a91a0561a478393ca9e9d2f1993467cc4c5c9cb 100644 (file)
@@ -62,6 +62,7 @@ struct unix_sock {
 #define UNIX_GC_CANDIDATE      0
 #define UNIX_GC_MAYBE_CYCLE    1
        struct socket_wq        peer_wq;
+       wait_queue_t            peer_wake;
 };
 
 static inline struct unix_sock *unix_sk(const struct sock *sk)
index 2bfb2ad2fab1981696e52a914cd89a0dad495e76..877f682989b892fb1d70256bda6b33f03e34a0f5 100644 (file)
@@ -133,27 +133,18 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
 /*
  *     Store a destination cache entry in a socket
  */
-static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
-                                  const struct in6_addr *daddr,
-                                  const struct in6_addr *saddr)
+static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+                                const struct in6_addr *daddr,
+                                const struct in6_addr *saddr)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct rt6_info *rt = (struct rt6_info *) dst;
 
+       np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
        sk_setup_caps(sk, dst);
        np->daddr_cache = daddr;
 #ifdef CONFIG_IPV6_SUBTREES
        np->saddr_cache = saddr;
 #endif
-       np->dst_cookie = rt6_get_cookie(rt);
-}
-
-static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
-                                struct in6_addr *daddr, struct in6_addr *saddr)
-{
-       spin_lock(&sk->sk_dst_lock);
-       __ip6_dst_store(sk, dst, daddr, saddr);
-       spin_unlock(&sk->sk_dst_lock);
 }
 
 static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
index e1a10b0ac0b027189732372e2c0040e5ea8350f2..9a5c9f01378455c4c8b9eb860b170504c878f1a5 100644 (file)
@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
  */
 
 struct ipv6_txoptions {
+       atomic_t                refcnt;
        /* Length of this structure */
        int                     tot_len;
 
@@ -217,7 +218,7 @@ struct ipv6_txoptions {
        struct ipv6_opt_hdr     *dst0opt;
        struct ipv6_rt_hdr      *srcrt; /* Routing Header */
        struct ipv6_opt_hdr     *dst1opt;
-
+       struct rcu_head         rcu;
        /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
 };
 
@@ -252,6 +253,24 @@ struct ipv6_fl_socklist {
        struct rcu_head                 rcu;
 };
 
+static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
+{
+       struct ipv6_txoptions *opt;
+
+       rcu_read_lock();
+       opt = rcu_dereference(np->opt);
+       if (opt && !atomic_inc_not_zero(&opt->refcnt))
+               opt = NULL;
+       rcu_read_unlock();
+       return opt;
+}
+
+static inline void txopt_put(struct ipv6_txoptions *opt)
+{
+       if (opt && atomic_dec_and_test(&opt->refcnt))
+               kfree_rcu(opt, rcu);
+}
+
 struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
 struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
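Editor's note: a sketch of the reader-side pattern the new helpers
enable: take a counted reference under RCU, use the options, then drop
the reference, which frees via kfree_rcu() on the last put. The
function name is hypothetical:

static void example_read_txopt(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;

	opt = txopt_get(np);	/* NULL if unset or already being freed */
	if (opt) {
		/* safely read opt->hopopt, opt->srcrt, ... */
		txopt_put(opt);	/* kfree_rcu() on the final reference */
	}
}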
@@ -490,6 +509,7 @@ struct ip6_create_arg {
        u32 user;
        const struct in6_addr *src;
        const struct in6_addr *dst;
+       int iif;
        u8 ecn;
 };
 
index 82045fca388b20a9cf28ad9112236668c31f46cb..760bc4d5a2cfe87aadd2e96a98292855cf2eb050 100644 (file)
@@ -2003,8 +2003,10 @@ enum ieee80211_hw_flags {
  *     it shouldn't be set.
  *
  * @max_tx_aggregation_subframes: maximum number of subframes in an
- *     aggregate an HT driver will transmit, used by the peer as a
- *     hint to size its reorder buffer.
+ *     aggregate an HT driver will transmit. ADDBA will nevertheless
+ *     advertise a constant value of 64, since some older APs can
+ *     crash if the window size is smaller (an example is LinkSys
+ *     WRT120N with FW v1.0.07 build 002 Jun 18 2012).
  *
  * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
  *     (if %IEEE80211_HW_QUEUE_CONTROL is set)
index bf39374310305d61e2948cbd1801eee056efc323..2d8edaad29cb1cf0fe1e9a6dc9ae8f2653a3334e 100644 (file)
@@ -181,8 +181,7 @@ void ndisc_cleanup(void);
 int ndisc_rcv(struct sk_buff *skb);
 
 void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
-                  const struct in6_addr *daddr, const struct in6_addr *saddr,
-                  struct sk_buff *oskb);
+                  const struct in6_addr *daddr, const struct in6_addr *saddr);
 
 void ndisc_send_rs(struct net_device *dev,
                   const struct in6_addr *saddr, const struct in6_addr *daddr);
index 4c79ce8c1f92f2d47eb87ffacd55fa01292e7378..b2a8e6338576d3e91f0906297e6b34e7db0eade3 100644 (file)
@@ -61,6 +61,9 @@ struct Qdisc {
                                      */
 #define TCQ_F_WARN_NONWC       (1 << 16)
 #define TCQ_F_CPUSTATS         0x20 /* run using percpu statistics */
+#define TCQ_F_NOPARENT         0x40 /* root of its hierarchy :
+                                     * qdisc_tree_decrease_qlen() should stop.
+                                     */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
index 495c87e367b3f2e8941807f56a77d2e14469bfed..7bbb71081aeb6cfdc9cb049cf7a094dbcd4603bb 100644 (file)
@@ -775,10 +775,10 @@ struct sctp_transport {
                hb_sent:1,
 
                /* Is the Path MTU update pending on this transport */
-               pmtu_pending:1;
+               pmtu_pending:1,
 
-       /* Has this transport moved the ctsn since we last sacked */
-       __u32 sack_generation;
+               /* Has this transport moved the ctsn since we last sacked */
+               sack_generation:1;
        u32 dst_cookie;
 
        struct flowi fl;
@@ -1482,19 +1482,19 @@ struct sctp_association {
                        prsctp_capable:1,   /* Can peer do PR-SCTP? */
                        auth_capable:1;     /* Is peer doing SCTP-AUTH? */
 
-               /* Ack State   : This flag indicates if the next received
+               /* sack_needed : This flag indicates if the next received
                 *             : packet is to be responded to with a
-                *             : SACK. This is initializedto 0.  When a packet
-                *             : is received it is incremented. If this value
+                *             : SACK. This is initialized to 0.  When a packet
+                *             : is received, sack_cnt is incremented. If this value
                 *             : reaches 2 or more, a SACK is sent and the
                 *             : value is reset to 0. Note: This is used only
                 *             : when no DATA chunks are received out of
                 *             : order.  When DATA chunks are out of order,
                 *             : SACK's are not delayed (see Section 6).
                 */
-               __u8    sack_needed;     /* Do we need to sack the peer? */
+               __u8    sack_needed:1,     /* Do we need to sack the peer? */
+                       sack_generation:1;
                __u32   sack_cnt;
-               __u32   sack_generation;
 
                __u32   adaptation_ind;  /* Adaptation Code point. */
 
index 7f89e4ba18d11ee6a9261edf85cac743d9f8d5ea..52d27ee924f47867026d8f65c65551a9137219d3 100644 (file)
@@ -254,7 +254,6 @@ struct cg_proto;
   *    @sk_wq: sock wait queue and async head
   *    @sk_rx_dst: receive input route used by early demux
   *    @sk_dst_cache: destination cache
-  *    @sk_dst_lock: destination cache lock
   *    @sk_policy: flow policy
   *    @sk_receive_queue: incoming packets
   *    @sk_wmem_alloc: transmit queue bytes committed
@@ -384,14 +383,16 @@ struct sock {
        int                     sk_rcvbuf;
 
        struct sk_filter __rcu  *sk_filter;
-       struct socket_wq __rcu  *sk_wq;
-
+       union {
+               struct socket_wq __rcu  *sk_wq;
+               struct socket_wq        *sk_wq_raw;
+       };
 #ifdef CONFIG_XFRM
        struct xfrm_policy      *sk_policy[2];
 #endif
        struct dst_entry        *sk_rx_dst;
        struct dst_entry __rcu  *sk_dst_cache;
-       spinlock_t              sk_dst_lock;
+       /* Note: 32bit hole on 64bit arches */
        atomic_t                sk_wmem_alloc;
        atomic_t                sk_omem_alloc;
        int                     sk_sndbuf;
@@ -2005,10 +2006,27 @@ static inline unsigned long sock_wspace(struct sock *sk)
        return amt;
 }
 
-static inline void sk_wake_async(struct sock *sk, int how, int band)
+/* Note:
+ *  We use sk->sk_wq_raw only from contexts that know this
+ *  pointer is not NULL and cannot disappear/change.
+ */
+static inline void sk_set_bit(int nr, struct sock *sk)
 {
-       if (sock_flag(sk, SOCK_FASYNC))
-               sock_wake_async(sk->sk_socket, how, band);
+       set_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_clear_bit(int nr, struct sock *sk)
+{
+       clear_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_wake_async(const struct sock *sk, int how, int band)
+{
+       if (sock_flag(sk, SOCK_FASYNC)) {
+               rcu_read_lock();
+               sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
+               rcu_read_unlock();
+       }
 }
 
 /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
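Editor's note: a sketch of a protocol call site converted to the new
scheme; the async flags live in the RCU-protected sk->sk_wq->flags, so
writers use sk_set_bit()/sk_clear_bit() with the renamed SOCKWQ_*
constants. The fragment is illustrative:

	if (!sock_writeable(sk)) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	} else {
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}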
index ed527121031dd31b424c1ff596f9139cfcdefd02..fcfa3d7f5e7e38f1bffcf50495730f24bb9e6b2b 100644 (file)
@@ -668,6 +668,9 @@ struct Scsi_Host {
        unsigned use_blk_mq:1;
        unsigned use_cmd_list:1;
 
+       /* Host responded with short (<36 bytes) INQUIRY result */
+       unsigned short_inquiry:1;
+
        /*
         * Optional work queue to be utilized by the transport
         */
index 7855cfe46b69a044040f9ac5be92591e33af1799..95a937eafb79419271acd193ad678e2e4b207a92 100644 (file)
@@ -398,6 +398,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
 int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
                             const struct snd_soc_dapm_route *route, int num);
 void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
+void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm);
 
 /* dapm events */
 void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
index 751b69f858c80411c910f21db7fb50c8902e94cf..9fd7b5d8df2fa357f434a899cccfa7810f826107 100644 (file)
 
 #define VFIO_SPAPR_TCE_v2_IOMMU                7
 
-/*
- * The No-IOMMU IOMMU offers no translation or isolation for devices and
- * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
- * code will taint the host kernel and should be used with extreme caution.
- */
-#define VFIO_NOIOMMU_IOMMU             8
-
 /*
  * The IOCTL interface is designed for extensibility by embedding the
  * structure length (argsz) and flags into structures passed between
index 85dedca3dcfb04e738764023b673a7ba3d47926d..eeba75395f7d10b61fabf995d2b0157aec6d9ee5 100644 (file)
@@ -343,7 +343,6 @@ struct ipu_client_platformdata {
        int di;
        int dc;
        int dp;
-       int dmfc;
        int dma[2];
 };
 
index 3f4c99e06c6bbf8a5e28f882633e8161f3974929..b0799bced518691bd847a16973be74184bcdcbaf 100644 (file)
@@ -28,11 +28,17 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
            attr->value_size == 0)
                return ERR_PTR(-EINVAL);
 
+       if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
+               /* if value_size is bigger, the user space won't be able to
+                * access the elements.
+                */
+               return ERR_PTR(-E2BIG);
+
        elem_size = round_up(attr->value_size, 8);
 
        /* check round_up into zero and u32 overflow */
        if (elem_size == 0 ||
-           attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size)
+           attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
                return ERR_PTR(-ENOMEM);
 
        array_size = sizeof(*array) + attr->max_entries * elem_size;
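Editor's note: the tightened bound can be checked in isolation; a
userspace sketch of the arithmetic, with stand-ins for PAGE_SIZE and
sizeof(struct bpf_array). The hash-table hunk below applies the same
idea using elem_size and n_buckets:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define ARRAY_HDR	64u	/* stand-in for sizeof(struct bpf_array) */

int main(void)
{
	uint32_t elem_size = 8;
	/* largest max_entries the new check admits */
	uint32_t max_entries = (UINT32_MAX - PAGE_SIZE - ARRAY_HDR) / elem_size;
	uint64_t array_size = ARRAY_HDR + (uint64_t)max_entries * elem_size;

	/* rounding array_size up to a page boundary stays below U32_MAX */
	printf("array_size=%llu headroom=%llu\n",
	       (unsigned long long)array_size,
	       (unsigned long long)(UINT32_MAX - array_size));
	return 0;
}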
@@ -105,7 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                /* all elements already exist */
                return -EEXIST;
 
-       memcpy(array->value + array->elem_size * index, value, array->elem_size);
+       memcpy(array->value + array->elem_size * index, value, map->value_size);
        return 0;
 }
 
index 19909b22b4f8398d7373cc3838b835728265ed23..34777b3746fadf6407ca9805f33df608be619f21 100644 (file)
@@ -64,12 +64,35 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 */
                goto free_htab;
 
-       err = -ENOMEM;
+       if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+           MAX_BPF_STACK - sizeof(struct htab_elem))
+               /* if value_size is bigger, the user space won't be able to
+                * access the elements via bpf syscall. This check also makes
+                * sure that the elem_size doesn't overflow and it's
+                * kmalloc-able later in htab_map_update_elem()
+                */
+               goto free_htab;
+
+       htab->elem_size = sizeof(struct htab_elem) +
+                         round_up(htab->map.key_size, 8) +
+                         htab->map.value_size;
+
        /* prevent zero size kmalloc and check for u32 overflow */
        if (htab->n_buckets == 0 ||
            htab->n_buckets > U32_MAX / sizeof(struct hlist_head))
                goto free_htab;
 
+       if ((u64) htab->n_buckets * sizeof(struct hlist_head) +
+           (u64) htab->elem_size * htab->map.max_entries >=
+           U32_MAX - PAGE_SIZE)
+               /* make sure page count doesn't overflow */
+               goto free_htab;
+
+       htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
+                                  htab->elem_size * htab->map.max_entries,
+                                  PAGE_SIZE) >> PAGE_SHIFT;
+
+       err = -ENOMEM;
        htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head),
                                      GFP_USER | __GFP_NOWARN);
 
@@ -85,13 +108,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
        raw_spin_lock_init(&htab->lock);
        htab->count = 0;
 
-       htab->elem_size = sizeof(struct htab_elem) +
-                         round_up(htab->map.key_size, 8) +
-                         htab->map.value_size;
-
-       htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
-                                  htab->elem_size * htab->map.max_entries,
-                                  PAGE_SIZE) >> PAGE_SHIFT;
        return &htab->map;
 
 free_htab:
@@ -222,7 +238,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
        WARN_ON_ONCE(!rcu_read_lock_held());
 
        /* allocate new element outside of lock */
-       l_new = kmalloc(htab->elem_size, GFP_ATOMIC);
+       l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
        if (!l_new)
                return -ENOMEM;
 
index be6d726e31c9429860141925130d0e5c71011e7b..5a8a797d50b74a8bb698760856c1849ff9bcc084 100644 (file)
@@ -34,7 +34,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
                atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
                break;
        case BPF_TYPE_MAP:
-               atomic_inc(&((struct bpf_map *)raw)->refcnt);
+               bpf_map_inc(raw, true);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -51,7 +51,7 @@ static void bpf_any_put(void *raw, enum bpf_type type)
                bpf_prog_put(raw);
                break;
        case BPF_TYPE_MAP:
-               bpf_map_put(raw);
+               bpf_map_put_with_uref(raw);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -64,7 +64,7 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
        void *raw;
 
        *type = BPF_TYPE_MAP;
-       raw = bpf_map_get(ufd);
+       raw = bpf_map_get_with_uref(ufd);
        if (IS_ERR(raw)) {
                *type = BPF_TYPE_PROG;
                raw = bpf_prog_get(ufd);
index 0d3313d02a7e512e1ca7f58fb52aa3e39bae60a3..3b39550d84856494d43aefe926457add41d2a435 100644 (file)
@@ -82,6 +82,14 @@ static void bpf_map_free_deferred(struct work_struct *work)
        map->ops->map_free(map);
 }
 
+static void bpf_map_put_uref(struct bpf_map *map)
+{
+       if (atomic_dec_and_test(&map->usercnt)) {
+               if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+                       bpf_fd_array_map_clear(map);
+       }
+}
+
 /* decrement map refcnt and schedule it for freeing via workqueue
  * (unrelying map implementation ops->map_free() might sleep)
  */
@@ -93,17 +101,15 @@ void bpf_map_put(struct bpf_map *map)
        }
 }
 
-static int bpf_map_release(struct inode *inode, struct file *filp)
+void bpf_map_put_with_uref(struct bpf_map *map)
 {
-       struct bpf_map *map = filp->private_data;
-
-       if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
-               /* prog_array stores refcnt-ed bpf_prog pointers
-                * release them all when user space closes prog_array_fd
-                */
-               bpf_fd_array_map_clear(map);
-
+       bpf_map_put_uref(map);
        bpf_map_put(map);
+}
+
+static int bpf_map_release(struct inode *inode, struct file *filp)
+{
+       bpf_map_put_with_uref(filp->private_data);
        return 0;
 }
 
@@ -142,6 +148,7 @@ static int map_create(union bpf_attr *attr)
                return PTR_ERR(map);
 
        atomic_set(&map->refcnt, 1);
+       atomic_set(&map->usercnt, 1);
 
        err = bpf_map_charge_memlock(map);
        if (err)
@@ -174,7 +181,14 @@ struct bpf_map *__bpf_map_get(struct fd f)
        return f.file->private_data;
 }
 
-struct bpf_map *bpf_map_get(u32 ufd)
+void bpf_map_inc(struct bpf_map *map, bool uref)
+{
+       atomic_inc(&map->refcnt);
+       if (uref)
+               atomic_inc(&map->usercnt);
+}
+
+struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 {
        struct fd f = fdget(ufd);
        struct bpf_map *map;
@@ -183,7 +197,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
        if (IS_ERR(map))
                return map;
 
-       atomic_inc(&map->refcnt);
+       bpf_map_inc(map, true);
        fdput(f);
 
        return map;
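Editor's note: a sketch of the resulting reference discipline: file
descriptors and pinned inodes hold both counters, while kernel-internal
users (such as the verifier hunk below) bump only refcnt. The call
sequence is illustrative:

static void example_map_refs(u32 ufd)
{
	struct bpf_map *map = bpf_map_get_with_uref(ufd);

	if (IS_ERR(map))
		return;
	bpf_map_inc(map, false);	/* kernel-internal ref: refcnt only */
	/* ... use the map ... */
	bpf_map_put(map);		/* drop the internal ref */
	bpf_map_put_with_uref(map);	/* drop the fd ref; a prog_array is
					 * cleared once usercnt hits zero */
}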
@@ -226,7 +240,7 @@ static int map_lookup_elem(union bpf_attr *attr)
                goto free_key;
 
        err = -ENOMEM;
-       value = kmalloc(map->value_size, GFP_USER);
+       value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;
 
@@ -285,7 +299,7 @@ static int map_update_elem(union bpf_attr *attr)
                goto free_key;
 
        err = -ENOMEM;
-       value = kmalloc(map->value_size, GFP_USER);
+       value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;
 
index c6073056badf02293c7e9d1e35dce9bb88536c67..a7945d10b378bed8eb7243bd52923d232b1f49de 100644 (file)
@@ -2021,8 +2021,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                         * will be used by the valid program until it's unloaded
                         * and all maps are released in free_bpf_prog_info()
                         */
-                       atomic_inc(&map->refcnt);
-
+                       bpf_map_inc(map, false);
                        fdput(f);
 next_insn:
                        insn++;
index f1603c153890d2b9dbd37a5c687fd297c6137f24..470f6536b9e8cfb029eedb5ecdb3c3e8c3c341be 100644 (file)
@@ -97,6 +97,12 @@ static DEFINE_SPINLOCK(css_set_lock);
  */
 static DEFINE_SPINLOCK(cgroup_idr_lock);
 
+/*
+ * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
+ * against file removal/re-creation across css hiding.
+ */
+static DEFINE_SPINLOCK(cgroup_file_kn_lock);
+
 /*
  * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
  * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
@@ -754,9 +760,11 @@ static void put_css_set_locked(struct css_set *cset)
        if (!atomic_dec_and_test(&cset->refcount))
                return;
 
-       /* This css_set is dead. unlink it and release cgroup refcounts */
-       for_each_subsys(ss, ssid)
+       /* This css_set is dead. unlink it and release cgroup and css refs */
+       for_each_subsys(ss, ssid) {
                list_del(&cset->e_cset_node[ssid]);
+               css_put(cset->subsys[ssid]);
+       }
        hash_del(&cset->hlist);
        css_set_count--;
 
@@ -1056,9 +1064,13 @@ static struct css_set *find_css_set(struct css_set *old_cset,
        key = css_set_hash(cset->subsys);
        hash_add(css_set_table, &cset->hlist, key);
 
-       for_each_subsys(ss, ssid)
+       for_each_subsys(ss, ssid) {
+               struct cgroup_subsys_state *css = cset->subsys[ssid];
+
                list_add_tail(&cset->e_cset_node[ssid],
-                             &cset->subsys[ssid]->cgroup->e_csets[ssid]);
+                             &css->cgroup->e_csets[ssid]);
+               css_get(css);
+       }
 
        spin_unlock_bh(&css_set_lock);
 
@@ -1393,6 +1405,16 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
        char name[CGROUP_FILE_NAME_MAX];
 
        lockdep_assert_held(&cgroup_mutex);
+
+       if (cft->file_offset) {
+               struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
+               struct cgroup_file *cfile = (void *)css + cft->file_offset;
+
+               spin_lock_irq(&cgroup_file_kn_lock);
+               cfile->kn = NULL;
+               spin_unlock_irq(&cgroup_file_kn_lock);
+       }
+
        kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
 }
 
@@ -1856,7 +1878,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
 
        INIT_LIST_HEAD(&cgrp->self.sibling);
        INIT_LIST_HEAD(&cgrp->self.children);
-       INIT_LIST_HEAD(&cgrp->self.files);
        INIT_LIST_HEAD(&cgrp->cset_links);
        INIT_LIST_HEAD(&cgrp->pidlists);
        mutex_init(&cgrp->pidlist_mutex);
@@ -2216,6 +2237,9 @@ struct cgroup_taskset {
        struct list_head        src_csets;
        struct list_head        dst_csets;
 
+       /* the subsys currently being processed */
+       int                     ssid;
+
        /*
         * Fields for cgroup_taskset_*() iteration.
         *
@@ -2278,25 +2302,29 @@ static void cgroup_taskset_add(struct task_struct *task,
 /**
  * cgroup_taskset_first - reset taskset and return the first task
  * @tset: taskset of interest
+ * @dst_cssp: output variable for the destination css
  *
  * @tset iteration is initialized and the first task is returned.
  */
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+                                        struct cgroup_subsys_state **dst_cssp)
 {
        tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
        tset->cur_task = NULL;
 
-       return cgroup_taskset_next(tset);
+       return cgroup_taskset_next(tset, dst_cssp);
 }
 
 /**
  * cgroup_taskset_next - iterate to the next task in taskset
  * @tset: taskset of interest
+ * @dst_cssp: output variable for the destination css
  *
  * Return the next task in @tset.  Iteration must have been initialized
  * with cgroup_taskset_first().
  */
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+                                       struct cgroup_subsys_state **dst_cssp)
 {
        struct css_set *cset = tset->cur_cset;
        struct task_struct *task = tset->cur_task;
@@ -2311,6 +2339,18 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
                if (&task->cg_list != &cset->mg_tasks) {
                        tset->cur_cset = cset;
                        tset->cur_task = task;
+
+                       /*
+                        * This function may be called both before and
+                        * after cgroup_taskset_migrate().  The two cases
+                        * can be distinguished by looking at whether @cset
+                        * has its ->mg_dst_cset set.
+                        */
+                       if (cset->mg_dst_cset)
+                               *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
+                       else
+                               *dst_cssp = cset->subsys[tset->ssid];
+
                        return task;
                }
 
@@ -2346,7 +2386,8 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
        /* check that we can legitimately attach to the cgroup */
        for_each_e_css(css, i, dst_cgrp) {
                if (css->ss->can_attach) {
-                       ret = css->ss->can_attach(css, tset);
+                       tset->ssid = i;
+                       ret = css->ss->can_attach(tset);
                        if (ret) {
                                failed_css = css;
                                goto out_cancel_attach;
@@ -2379,9 +2420,12 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
         */
        tset->csets = &tset->dst_csets;
 
-       for_each_e_css(css, i, dst_cgrp)
-               if (css->ss->attach)
-                       css->ss->attach(css, tset);
+       for_each_e_css(css, i, dst_cgrp) {
+               if (css->ss->attach) {
+                       tset->ssid = i;
+                       css->ss->attach(tset);
+               }
+       }
 
        ret = 0;
        goto out_release_tset;
@@ -2390,8 +2434,10 @@ out_cancel_attach:
        for_each_e_css(css, i, dst_cgrp) {
                if (css == failed_css)
                        break;
-               if (css->ss->cancel_attach)
-                       css->ss->cancel_attach(css, tset);
+               if (css->ss->cancel_attach) {
+                       tset->ssid = i;
+                       css->ss->cancel_attach(tset);
+               }
        }
 out_release_tset:
        spin_lock_bh(&css_set_lock);
@@ -3313,9 +3359,9 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
        if (cft->file_offset) {
                struct cgroup_file *cfile = (void *)css + cft->file_offset;
 
-               kernfs_get(kn);
+               spin_lock_irq(&cgroup_file_kn_lock);
                cfile->kn = kn;
-               list_add(&cfile->node, &css->files);
+               spin_unlock_irq(&cgroup_file_kn_lock);
        }
 
        return 0;
@@ -3552,6 +3598,22 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
        return cgroup_add_cftypes(ss, cfts);
 }
 
+/**
+ * cgroup_file_notify - generate a file modified event for a cgroup_file
+ * @cfile: target cgroup_file
+ *
+ * @cfile must have been obtained by setting cftype->file_offset.
+ */
+void cgroup_file_notify(struct cgroup_file *cfile)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cgroup_file_kn_lock, flags);
+       if (cfile->kn)
+               kernfs_notify(cfile->kn);
+       spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
+}
+
 /**
  * cgroup_task_count - count the number of tasks in a cgroup.
  * @cgrp: the cgroup in question
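Editor's note: a sketch of a controller event path using the now
out-of-line helper; cgroup_file_kn_lock inside it makes the
notification safe against concurrent file removal. The surrounding
structure is hypothetical:

struct example_cgroup {
	struct cgroup_subsys_state	css;
	struct cgroup_file		events_file;	/* via cftype->file_offset */
};

static void example_emit_event(struct example_cgroup *ex)
{
	/* safe even if the file was removed or never created */
	cgroup_file_notify(&ex->events_file);
}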
@@ -4613,13 +4675,9 @@ static void css_free_work_fn(struct work_struct *work)
                container_of(work, struct cgroup_subsys_state, destroy_work);
        struct cgroup_subsys *ss = css->ss;
        struct cgroup *cgrp = css->cgroup;
-       struct cgroup_file *cfile;
 
        percpu_ref_exit(&css->refcnt);
 
-       list_for_each_entry(cfile, &css->files, node)
-               kernfs_put(cfile->kn);
-
        if (ss) {
                /* css free path */
                int id = css->id;
@@ -4724,7 +4782,6 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
        css->ss = ss;
        INIT_LIST_HEAD(&css->sibling);
        INIT_LIST_HEAD(&css->children);
-       INIT_LIST_HEAD(&css->files);
        css->serial_nr = css_serial_nr_next++;
 
        if (cgroup_parent(cgrp)) {
index f1b30ad5dc6d925685f92ecbc0918b7c78018f30..2d3df82c54f2dd84484e0b4d739551cb69861e4e 100644 (file)
@@ -155,12 +155,10 @@ static void freezer_css_free(struct cgroup_subsys_state *css)
  * @freezer->lock.  freezer_attach() makes the new tasks conform to the
  * current state and all following state changes can see the new tasks.
  */
-static void freezer_attach(struct cgroup_subsys_state *new_css,
-                          struct cgroup_taskset *tset)
+static void freezer_attach(struct cgroup_taskset *tset)
 {
-       struct freezer *freezer = css_freezer(new_css);
        struct task_struct *task;
-       bool clear_frozen = false;
+       struct cgroup_subsys_state *new_css;
 
        mutex_lock(&freezer_mutex);
 
@@ -174,22 +172,21 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
         * current state before executing the following - !frozen tasks may
         * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
         */
-       cgroup_taskset_for_each(task, tset) {
+       cgroup_taskset_for_each(task, new_css, tset) {
+               struct freezer *freezer = css_freezer(new_css);
+
                if (!(freezer->state & CGROUP_FREEZING)) {
                        __thaw_task(task);
                } else {
                        freeze_task(task);
-                       freezer->state &= ~CGROUP_FROZEN;
-                       clear_frozen = true;
+                       /* clear FROZEN and propagate upwards */
+                       while (freezer && (freezer->state & CGROUP_FROZEN)) {
+                               freezer->state &= ~CGROUP_FROZEN;
+                               freezer = parent_freezer(freezer);
+                       }
                }
        }
 
-       /* propagate FROZEN clearing upwards */
-       while (clear_frozen && (freezer = parent_freezer(freezer))) {
-               freezer->state &= ~CGROUP_FROZEN;
-               clear_frozen = freezer->state & CGROUP_FREEZING;
-       }
-
        mutex_unlock(&freezer_mutex);
 }
 
index cdd8df4e991c7781ac5996677d724bcd23314623..b50d5a167fda7d2ae6902efd55e551efed780007 100644 (file)
@@ -106,7 +106,7 @@ static void pids_uncharge(struct pids_cgroup *pids, int num)
 {
        struct pids_cgroup *p;
 
-       for (p = pids; p; p = parent_pids(p))
+       for (p = pids; parent_pids(p); p = parent_pids(p))
                pids_cancel(p, num);
 }
 
@@ -123,7 +123,7 @@ static void pids_charge(struct pids_cgroup *pids, int num)
 {
        struct pids_cgroup *p;
 
-       for (p = pids; p; p = parent_pids(p))
+       for (p = pids; parent_pids(p); p = parent_pids(p))
                atomic64_add(num, &p->counter);
 }
 
@@ -140,7 +140,7 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
 {
        struct pids_cgroup *p, *q;
 
-       for (p = pids; p; p = parent_pids(p)) {
+       for (p = pids; parent_pids(p); p = parent_pids(p)) {
                int64_t new = atomic64_add_return(num, &p->counter);
 
                /*
@@ -162,13 +162,13 @@ revert:
        return -EAGAIN;
 }
 
-static int pids_can_attach(struct cgroup_subsys_state *css,
-                          struct cgroup_taskset *tset)
+static int pids_can_attach(struct cgroup_taskset *tset)
 {
-       struct pids_cgroup *pids = css_pids(css);
        struct task_struct *task;
+       struct cgroup_subsys_state *dst_css;
 
-       cgroup_taskset_for_each(task, tset) {
+       cgroup_taskset_for_each(task, dst_css, tset) {
+               struct pids_cgroup *pids = css_pids(dst_css);
                struct cgroup_subsys_state *old_css;
                struct pids_cgroup *old_pids;
 
@@ -187,13 +187,13 @@ static int pids_can_attach(struct cgroup_subsys_state *css,
        return 0;
 }
 
-static void pids_cancel_attach(struct cgroup_subsys_state *css,
-                              struct cgroup_taskset *tset)
+static void pids_cancel_attach(struct cgroup_taskset *tset)
 {
-       struct pids_cgroup *pids = css_pids(css);
        struct task_struct *task;
+       struct cgroup_subsys_state *dst_css;
 
-       cgroup_taskset_for_each(task, tset) {
+       cgroup_taskset_for_each(task, dst_css, tset) {
+               struct pids_cgroup *pids = css_pids(dst_css);
                struct cgroup_subsys_state *old_css;
                struct pids_cgroup *old_pids;
 
@@ -205,65 +205,28 @@ static void pids_cancel_attach(struct cgroup_subsys_state *css,
        }
 }
 
+/*
+ * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
+ * on threadgroup_change_begin() held by the copy_process().
+ */
 static int pids_can_fork(struct task_struct *task, void **priv_p)
 {
        struct cgroup_subsys_state *css;
        struct pids_cgroup *pids;
-       int err;
 
-       /*
-        * Use the "current" task_css for the pids subsystem as the tentative
-        * css. It is possible we will charge the wrong hierarchy, in which
-        * case we will forcefully revert/reapply the charge on the right
-        * hierarchy after it is committed to the task proper.
-        */
-       css = task_get_css(current, pids_cgrp_id);
+       css = task_css_check(current, pids_cgrp_id, true);
        pids = css_pids(css);
-
-       err = pids_try_charge(pids, 1);
-       if (err)
-               goto err_css_put;
-
-       *priv_p = css;
-       return 0;
-
-err_css_put:
-       css_put(css);
-       return err;
+       return pids_try_charge(pids, 1);
 }
 
 static void pids_cancel_fork(struct task_struct *task, void *priv)
-{
-       struct cgroup_subsys_state *css = priv;
-       struct pids_cgroup *pids = css_pids(css);
-
-       pids_uncharge(pids, 1);
-       css_put(css);
-}
-
-static void pids_fork(struct task_struct *task, void *priv)
 {
        struct cgroup_subsys_state *css;
-       struct cgroup_subsys_state *old_css = priv;
        struct pids_cgroup *pids;
-       struct pids_cgroup *old_pids = css_pids(old_css);
 
-       css = task_get_css(task, pids_cgrp_id);
+       css = task_css_check(current, pids_cgrp_id, true);
        pids = css_pids(css);
-
-       /*
-        * If the association has changed, we have to revert and reapply the
-        * charge/uncharge on the wrong hierarchy to the current one. Since
-        * the association can only change due to an organisation event, its
-        * okay for us to ignore the limit in this case.
-        */
-       if (pids != old_pids) {
-               pids_uncharge(old_pids, 1);
-               pids_charge(pids, 1);
-       }
-
-       css_put(css);
-       css_put(old_css);
+       pids_uncharge(pids, 1);
 }
 
 static void pids_free(struct task_struct *task)
@@ -335,6 +298,7 @@ static struct cftype pids_files[] = {
        {
                .name = "current",
                .read_s64 = pids_current_read,
+               .flags = CFTYPE_NOT_ON_ROOT,
        },
        { }     /* terminate */
 };
@@ -346,7 +310,6 @@ struct cgroup_subsys pids_cgrp_subsys = {
        .cancel_attach  = pids_cancel_attach,
        .can_fork       = pids_can_fork,
        .cancel_fork    = pids_cancel_fork,
-       .fork           = pids_fork,
        .free           = pids_free,
        .legacy_cftypes = pids_files,
        .dfl_cftypes    = pids_files,
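
Taken together, these hunks move the pids controller to the taskset-only attach API: the callbacks lose their cgroup_subsys_state argument and instead recover the destination css per task from the taskset, and the now-redundant pids_fork() reapply step is dropped. A minimal sketch of the resulting callback shape, assuming the two-argument cgroup_taskset_for_each() iterator used above:

	/* Hedged sketch, not part of the patch: an attach callback under
	 * the reworked API; dst_css is the destination css of each task. */
	static int example_can_attach(struct cgroup_taskset *tset)
	{
		struct task_struct *task;
		struct cgroup_subsys_state *dst_css;

		cgroup_taskset_for_each(task, dst_css, tset) {
			/* per-task checks against css_pids(dst_css), etc. */
		}
		return 0;
	}
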
index 10ae73611d80a560977aa554d6cbedee78bec19a..02a8ea5c99632907b2ae887015c59bd4ad35da9d 100644 (file)
@@ -1429,15 +1429,16 @@ static int fmeter_getrate(struct fmeter *fmp)
 static struct cpuset *cpuset_attach_old_cs;
 
 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys_state *css,
-                            struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup_taskset *tset)
 {
-       struct cpuset *cs = css_cs(css);
+       struct cgroup_subsys_state *css;
+       struct cpuset *cs;
        struct task_struct *task;
        int ret;
 
        /* used later by cpuset_attach() */
-       cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));
+       cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
+       cs = css_cs(css);
 
        mutex_lock(&cpuset_mutex);
 
@@ -1447,7 +1448,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
            (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
                goto out_unlock;
 
-       cgroup_taskset_for_each(task, tset) {
+       cgroup_taskset_for_each(task, css, tset) {
                ret = task_can_attach(task, cs->cpus_allowed);
                if (ret)
                        goto out_unlock;
@@ -1467,9 +1468,14 @@ out_unlock:
        return ret;
 }
 
-static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
-                                struct cgroup_taskset *tset)
+static void cpuset_cancel_attach(struct cgroup_taskset *tset)
 {
+       struct cgroup_subsys_state *css;
+       struct cpuset *cs;
+
+       cgroup_taskset_first(tset, &css);
+       cs = css_cs(css);
+
        mutex_lock(&cpuset_mutex);
        css_cs(css)->attach_in_progress--;
        mutex_unlock(&cpuset_mutex);
@@ -1482,16 +1488,19 @@ static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
  */
 static cpumask_var_t cpus_attach;
 
-static void cpuset_attach(struct cgroup_subsys_state *css,
-                         struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup_taskset *tset)
 {
        /* static buf protected by cpuset_mutex */
        static nodemask_t cpuset_attach_nodemask_to;
        struct task_struct *task;
        struct task_struct *leader;
-       struct cpuset *cs = css_cs(css);
+       struct cgroup_subsys_state *css;
+       struct cpuset *cs;
        struct cpuset *oldcs = cpuset_attach_old_cs;
 
+       cgroup_taskset_first(tset, &css);
+       cs = css_cs(css);
+
        mutex_lock(&cpuset_mutex);
 
        /* prepare for attach */
@@ -1502,7 +1511,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 
        guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
-       cgroup_taskset_for_each(task, tset) {
+       cgroup_taskset_for_each(task, css, tset) {
                /*
                 * can_attach beforehand should guarantee that this doesn't
                 * fail.  TODO: have a better way to handle failure here
@@ -1518,7 +1527,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
         * sleep and should be moved outside migration path proper.
         */
        cpuset_attach_nodemask_to = cs->effective_mems;
-       cgroup_taskset_for_each_leader(leader, tset) {
+       cgroup_taskset_for_each_leader(leader, css, tset) {
                struct mm_struct *mm = get_task_mm(leader);
 
                if (mm) {
index d659487254d5d77de07f279ceb0ba0bebe93ed03..9c418002b8c1fa15217b3390250453b492849344 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
index 36babfd2064842c28d4951a656ea1e84500bbf8f..ef2d6ea10736e4805e758cae83524dc13c2f06e9 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
        if (!is_cgroup_event(event))
                return;
 
-       cgrp = perf_cgroup_from_task(current);
+       cgrp = perf_cgroup_from_task(current, event->ctx);
        /*
         * Do not update time when cgroup is not active
         */
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
        if (!task || !ctx->nr_cgroups)
                return;
 
-       cgrp = perf_cgroup_from_task(task);
+       cgrp = perf_cgroup_from_task(task, ctx);
        info = this_cpu_ptr(cgrp->info);
        info->timestamp = ctx->timestamp;
 }
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
         * we reschedule only in the presence of cgroup
         * constrained events.
         */
-       rcu_read_lock();
 
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -522,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
                                 * set cgrp before ctxsw in to allow
                                 * event_filter_match() to not have to pass
                                 * task around
+                                * we pass cpuctx->ctx to perf_cgroup_from_task()
+                                * because cgroup events are only per-cpu
                                 */
-                               cpuctx->cgrp = perf_cgroup_from_task(task);
+                               cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
                                cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
                        }
                        perf_pmu_enable(cpuctx->ctx.pmu);
@@ -531,8 +532,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
                }
        }
 
-       rcu_read_unlock();
-
        local_irq_restore(flags);
 }
 
@@ -542,17 +541,20 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;
 
+       rcu_read_lock();
        /*
         * we come here when we know perf_cgroup_events > 0
+        * we do not need to pass the ctx here because we know
+        * we are holding the rcu lock
         */
-       cgrp1 = perf_cgroup_from_task(task);
+       cgrp1 = perf_cgroup_from_task(task, NULL);
 
        /*
         * next is NULL when called from perf_event_enable_on_exec()
         * that will systematically cause a cgroup_switch()
         */
        if (next)
-               cgrp2 = perf_cgroup_from_task(next);
+               cgrp2 = perf_cgroup_from_task(next, NULL);
 
        /*
         * only schedule out current cgroup events if we know
@@ -561,6 +563,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
         */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+       rcu_read_unlock();
 }
 
 static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,13 +573,16 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;
 
+       rcu_read_lock();
        /*
         * we come here when we know perf_cgroup_events > 0
+        * we do not need to pass the ctx here because we know
+        * we are holding the rcu lock
         */
-       cgrp1 = perf_cgroup_from_task(task);
+       cgrp1 = perf_cgroup_from_task(task, NULL);
 
        /* prev can never be NULL */
-       cgrp2 = perf_cgroup_from_task(prev);
+       cgrp2 = perf_cgroup_from_task(prev, NULL);
 
        /*
         * only need to schedule in cgroup events if we are changing
@@ -584,6 +591,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
         */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+       rcu_read_unlock();
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
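
The ctx argument threaded through these hunks exists for lockdep: passing a context asserts that its lock is held, while passing NULL trusts the caller, which must then hold rcu_read_lock() itself (as perf_cgroup_sched_out()/sched_in() now do). A hedged reconstruction of the two-argument helper this implies:

	static inline struct perf_cgroup *
	perf_cgroup_from_task(struct task_struct *task,
			      struct perf_event_context *ctx)
	{
		/* non-NULL ctx: ctx->lock satisfies the lockdep check;
		 * NULL ctx: caller is responsible for RCU protection. */
		return container_of(task_css_check(task, perf_event_cgrp_id,
						   ctx ? lockdep_is_held(&ctx->lock)
						       : true),
				    struct perf_cgroup, css);
	}
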
@@ -4216,7 +4225,14 @@ retry:
                goto retry;
        }
 
-       __perf_event_period(&pe);
+       if (event->attr.freq) {
+               event->attr.sample_freq = value;
+       } else {
+               event->attr.sample_period = value;
+               event->hw.sample_period = value;
+       }
+
+       local64_set(&event->hw.period_left, 0);
        raw_spin_unlock_irq(&ctx->lock);
 
        return 0;
@@ -5666,6 +5682,17 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
        }
 }
 
+static void
+perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
+                       struct perf_event_context *task_ctx)
+{
+       rcu_read_lock();
+       preempt_disable();
+       perf_event_aux_ctx(task_ctx, output, data);
+       preempt_enable();
+       rcu_read_unlock();
+}
+
 static void
 perf_event_aux(perf_event_aux_output_cb output, void *data,
               struct perf_event_context *task_ctx)
@@ -5675,14 +5702,23 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
        struct pmu *pmu;
        int ctxn;
 
+       /*
+        * If we have task_ctx != NULL we only notify
+        * the task context itself. The task_ctx is set
+        * only for EXIT events before releasing task
+        * context.
+        */
+       if (task_ctx) {
+               perf_event_aux_task_ctx(output, data, task_ctx);
+               return;
+       }
+
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
                if (cpuctx->unique_pmu != pmu)
                        goto next;
                perf_event_aux_ctx(&cpuctx->ctx, output, data);
-               if (task_ctx)
-                       goto next;
                ctxn = pmu->task_ctx_nr;
                if (ctxn < 0)
                        goto next;
@@ -5692,12 +5728,6 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
 next:
                put_cpu_ptr(pmu->pmu_cpu_context);
        }
-
-       if (task_ctx) {
-               preempt_disable();
-               perf_event_aux_ctx(task_ctx, output, data);
-               preempt_enable();
-       }
        rcu_read_unlock();
 }
 
@@ -8787,10 +8817,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        struct perf_event_context *child_ctx, *clone_ctx = NULL;
        unsigned long flags;
 
-       if (likely(!child->perf_event_ctxp[ctxn])) {
-               perf_event_task(child, NULL, 0);
+       if (likely(!child->perf_event_ctxp[ctxn]))
                return;
-       }
 
        local_irq_save(flags);
        /*
@@ -8874,6 +8902,14 @@ void perf_event_exit_task(struct task_struct *child)
 
        for_each_task_context_nr(ctxn)
                perf_event_exit_task_context(child, ctxn);
+
+       /*
+        * perf_event_exit_task_context() calls perf_event_task()
+        * with child's task_ctx, which generates EXIT events for
+        * child contexts and sets child->perf_event_ctxp[] to NULL.
+        * At this point we need to send EXIT events to cpu contexts.
+        */
+       perf_event_task(child, NULL, 0);
 }
 
 static void perf_free_event(struct perf_event *event,
@@ -9452,16 +9488,18 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
 static int __perf_cgroup_move(void *info)
 {
        struct task_struct *task = info;
+       rcu_read_lock();
        perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+       rcu_read_unlock();
        return 0;
 }
 
-static void perf_cgroup_attach(struct cgroup_subsys_state *css,
-                              struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup_taskset *tset)
 {
        struct task_struct *task;
+       struct cgroup_subsys_state *css;
 
-       cgroup_taskset_for_each(task, tset)
+       cgroup_taskset_for_each(task, css, tset)
                task_function_call(task, __perf_cgroup_move, task);
 }
 
index b5d1ea79c5953e2a2c7a5ee843de5f76ad47077a..adfdc0536117c1f10bf8f0dd8014798011e1f855 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
- *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
index 4e5e9798aa0c0d426962642b69985b9eb09021d6..7dad84913abfb06df2495fae555e7f8bcac2b104 100644 (file)
@@ -19,7 +19,7 @@
  * Authors:
  *     Srikar Dronamraju
  *     Jim Keniston
- * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/kernel.h>
index f97f2c449f5cf556ea6c54cb4aec6e894dd8bab5..fce002ee3ddffbab7613d0f9ec390683901f3c67 100644 (file)
@@ -1368,8 +1368,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->real_start_time = ktime_get_boot_ns();
        p->io_context = NULL;
        p->audit_context = NULL;
-       if (clone_flags & CLONE_THREAD)
-               threadgroup_change_begin(current);
+       threadgroup_change_begin(current);
        cgroup_fork(p);
 #ifdef CONFIG_NUMA
        p->mempolicy = mpol_dup(p->mempolicy);
@@ -1610,8 +1609,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        proc_fork_connector(p);
        cgroup_post_fork(p, cgrp_ss_priv);
-       if (clone_flags & CLONE_THREAD)
-               threadgroup_change_end(current);
+       threadgroup_change_end(current);
        perf_event_fork(p);
 
        trace_task_newtask(p, clone_flags);
@@ -1652,8 +1650,7 @@ bad_fork_cleanup_policy:
        mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
 #endif
-       if (clone_flags & CLONE_THREAD)
-               threadgroup_change_end(current);
+       threadgroup_change_end(current);
        delayacct_tsk_free(p);
 bad_fork_cleanup_count:
        atomic_dec(&p->cred->user->processes);
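
Taking threadgroup_change_begin()/end() unconditionally here is what the pids comment earlier in this diff ("relies on threadgroup_change_begin() held by copy_process()") depends on: every fork, not only CLONE_THREAD, now holds the threadgroup lock across cgroup_fork() through cgroup_post_fork(). A hedged sketch of what the helper amounts to in this era:

	/* Assumed shape: the threadgroup lock is the percpu rwsem that
	 * cgroup migration takes for writing, so css associations stay
	 * stable for the duration of the fork path. */
	static inline void threadgroup_change_begin(struct task_struct *tsk)
	{
		might_sleep();
		cgroup_threadgroup_change_begin(tsk);
	}
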
index cbf9fb899d929bfc52a2d803b64643732b0b7bbd..bcf107ce085450552c17d6b045816cd4656e97c0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  *
  * Provides a framework for enqueueing and running callbacks from hardirq
  * context. The enqueueing is NMI-safe.
index f7dd15d537f9b1135eb01036bf2fa3a5594ed9b6..05254eeb4b4e485be75bacff667c8ed3aab4a200 100644 (file)
@@ -2,7 +2,7 @@
  * jump label support
  *
  * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
- * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2011 Peter Zijlstra
  *
  */
 #include <linux/memory.h>
index deae3907ac1eec585bbe71a44f6dc57ad024784a..60ace56618f6c222de90e569bf1702b6dba05e26 100644 (file)
@@ -6,7 +6,7 @@
  * Started by Ingo Molnar:
  *
  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * this code maps all the lock dependencies as they occur in a live kernel
  * and will warn about the following classes of locking bugs:
index d83d798bef95a042e1060a35bf4b79e7c7a6c05c..dbb61a3025484b4d38bd090bd5923c21a3960ced 100644 (file)
@@ -6,7 +6,7 @@
  * Started by Ingo Molnar:
  *
  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * Code for /proc/lockdep and /proc/lockdep_stats:
  *
index c0a205101c231a5b6e29e786228fb69c46772f5e..caf4041f5b0ae6769bc562fccc189852eae77fcf 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * sched_clock for unstable cpu clocks
  *
- *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
  *
  *  Updates and enhancements:
  *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
index 4d568ac9319eaf04c9d00673483678bc5e14f22e..732e993b564b8179e5723bb88839329eec6df8f5 100644 (file)
@@ -1946,6 +1946,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                goto stat;
 
 #ifdef CONFIG_SMP
+       /*
+        * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+        * possible to, falsely, observe p->on_cpu == 0.
+        *
+        * One must be running (->on_cpu == 1) in order to remove oneself
+        * from the runqueue.
+        *
+        *  [S] ->on_cpu = 1;   [L] ->on_rq
+        *      UNLOCK rq->lock
+        *                      RMB
+        *      LOCK   rq->lock
+        *  [S] ->on_rq = 0;    [L] ->on_cpu
+        *
+        * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
+        * from the consecutive calls to schedule(); the first switching to our
+        * task, the second putting it to sleep.
+        */
+       smp_rmb();
+
        /*
         * If the owning (remote) cpu is still in the middle of schedule() with
         * this task as prev, wait until it's done referencing the task.
@@ -1953,7 +1972,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        while (p->on_cpu)
                cpu_relax();
        /*
-        * Pairs with the smp_wmb() in finish_lock_switch().
+        * Combined with the control dependency above, we have an effective
+        * smp_load_acquire() without the need for full barriers.
+        *
+        * Pairs with the smp_store_release() in finish_lock_switch().
+        *
+        * This ensures that tasks getting woken will be fully ordered against
+        * their previous state and preserve Program Order.
         */
        smp_rmb();
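
The comment block above builds an acquire out of a control dependency plus smp_rmb() instead of a full barrier. A hedged side-by-side sketch of the pairing it describes (simplified, not literal patch code):

	/* switch-out side, finish_lock_switch(): release on_cpu so that
	 * all prior stores (including prev->state) are visible first. */
	smp_store_release(&prev->on_cpu, 0);

	/* wakeup side, try_to_wake_up(): the loop's control dependency
	 * plus the rmb gives an effective smp_load_acquire() on on_cpu. */
	while (p->on_cpu)
		cpu_relax();
	smp_rmb();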
 
@@ -2039,7 +2064,6 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-       WARN_ON(task_is_stopped_or_traced(p));
        return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
 {
        memset(rd, 0, sizeof(*rd));
 
-       if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
                goto out;
-       if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
                goto free_span;
-       if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
                goto free_online;
-       if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
                goto free_dlo_mask;
 
        init_dl_bw(&rd->dl_bw);
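
The switch to the zeroing allocator matters because alloc_cpumask_var() does not clear the mask in either configuration: with CONFIG_CPUMASK_OFFSTACK=y it is a plain kmalloc(), and with =n it leaves the embedded array untouched. A minimal usage sketch:

	/* Hedged example: zalloc_cpumask_var() yields an allocated and
	 * zeroed mask regardless of CONFIG_CPUMASK_OFFSTACK. */
	cpumask_var_t span;

	if (!zalloc_cpumask_var(&span, GFP_KERNEL))
		return -ENOMEM;
	cpumask_set_cpu(raw_smp_processor_id(), span);
	/* ... */
	free_cpumask_var(span);
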
@@ -8217,12 +8241,12 @@ static void cpu_cgroup_fork(struct task_struct *task, void *private)
        sched_move_task(task);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
-                                struct cgroup_taskset *tset)
+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
        struct task_struct *task;
+       struct cgroup_subsys_state *css;
 
-       cgroup_taskset_for_each(task, tset) {
+       cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
                if (!sched_rt_can_attach(css_tg(css), task))
                        return -EINVAL;
@@ -8235,12 +8259,12 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
        return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
-                             struct cgroup_taskset *tset)
+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 {
        struct task_struct *task;
+       struct cgroup_subsys_state *css;
 
-       cgroup_taskset_for_each(task, tset)
+       cgroup_taskset_for_each(task, css, tset)
                sched_move_task(task);
 }
 
index 26a54461bf59ca46ce7f68d7a926b03840ca8711..05de80b48586e9fa3241c708c7e6fd7c9b6fb24a 100644 (file)
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
        unsigned int seq;
        cputime_t gtime;
 
+       if (!context_tracking_is_enabled())
+               return t->gtime;
+
        do {
                seq = read_seqbegin(&t->vtime_seqlock);
 
index f04fda8f669c8eff57e936fcb1334b13a4140ce5..90e26b11deaa1ab4b78302605850523a7852720b 100644 (file)
@@ -17,7 +17,7 @@
  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  *
  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/latencytop.h>
index e3cc16312046689fd04db8748304210c0d9fc8df..8ec86abe0ea188369ee4e7efd21789d8cb127c14 100644 (file)
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
        raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
 static void push_irq_work_func(struct irq_work *work);
 #endif
 
index efd3bfc7e34722883e2f08ca82f91cffde812963..b242775bf670e116233862c590915e06132485ca 100644 (file)
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         * We must ensure this doesn't happen until the switch is completely
         * finished.
         *
+        * In particular, the load of prev->state in finish_task_switch() must
+        * happen before this.
+        *
         * Pairs with the control dependency and rmb in try_to_wake_up().
         */
        smp_store_release(&prev->on_cpu, 0);
index 052e02672d12428ce1e9e1f7266c7cd754ace5af..f10bd873e6840441ed4750a5eb1c2d462ad0b436 100644 (file)
@@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t);
 
 __sched int bit_wait(struct wait_bit_key *word)
 {
-       if (signal_pending_state(current->state, current))
-               return 1;
        schedule();
+       if (signal_pending(current))
+               return -EINTR;
        return 0;
 }
 EXPORT_SYMBOL(bit_wait);
 
 __sched int bit_wait_io(struct wait_bit_key *word)
 {
-       if (signal_pending_state(current->state, current))
-               return 1;
        io_schedule();
+       if (signal_pending(current))
+               return -EINTR;
        return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);
@@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io);
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
        unsigned long now = READ_ONCE(jiffies);
-       if (signal_pending_state(current->state, current))
-               return 1;
        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        schedule_timeout(word->timeout - now);
+       if (signal_pending(current))
+               return -EINTR;
        return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);
@@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
        unsigned long now = READ_ONCE(jiffies);
-       if (signal_pending_state(current->state, current))
-               return 1;
        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        io_schedule_timeout(word->timeout - now);
+       if (signal_pending(current))
+               return -EINTR;
        return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
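
All four helpers now sleep first and report a pending signal as -EINTR afterwards, instead of inspecting the task state up front and returning a bare 1. The distinction matters to the loop that drives them; a hedged sketch, loosely modeled on __wait_on_bit() (names simplified):

	/* The action's return value -- e.g. -EINTR from bit_wait() -- is
	 * what terminates the wait loop and propagates to the caller. */
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = action(&q->key);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
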
index 75f1d05ea82dcce04e9f6a581f26969e503dc773..9c6045a27ba356252546971fb26dbae30149c89c 100644 (file)
@@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event)
        return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-{
-       cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
-       cpu_buffer->reader_page->read = 0;
-}
-
 static void rb_inc_iter(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
        event = __rb_reserve_next(cpu_buffer, &info);
 
-       if (unlikely(PTR_ERR(event) == -EAGAIN))
+       if (unlikely(PTR_ERR(event) == -EAGAIN)) {
+               if (info.add_timestamp)
+                       info.length -= RB_LEN_TIME_EXTEND;
                goto again;
+       }
 
        if (!event)
                goto out_fail;
@@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 
        /* Finally update the reader page to the new head */
        cpu_buffer->reader_page = reader;
-       rb_reset_reader_page(cpu_buffer);
+       cpu_buffer->reader_page->read = 0;
 
        if (overwrite != cpu_buffer->last_overrun) {
                cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        goto again;
 
  out:
+       /* Update the read_stamp on the first event */
+       if (reader && reader->read == 0)
+               cpu_buffer->read_stamp = reader->page->time_stamp;
+
        arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
index abfc903e741e8550d67bd638ead8a338a0c95fec..cc9f7a9319bea63104ef05c97634c43286a5268c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * trace event based perf event profiling/tracing
  *
- * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
  * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
index 6bbc5f652355745d24f6252a93d7b437a0efea15..4f6ef6912e00173040867f6a68c52f010bbfd21d 100644 (file)
@@ -582,6 +582,12 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
 
+       unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
+       unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
+
+       unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
+       unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
+
        list_for_each_entry(file, &tr->events, list) {
                clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        }
@@ -1729,6 +1735,16 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
                                                 tr, INT_MAX);
                register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
                                                 tr, 0);
+
+               register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
+                                                    tr, INT_MAX);
+               register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
+                                                    tr, 0);
+
+               register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
+                                                tr, INT_MAX);
+               register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
+                                                tr, 0);
        }
 
        /*
index 4264871ea1a00194750116a82c48e18d18edbd44..f93a945274af12575f8fbbceb821552b2a13e61e 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
- * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
  *
  * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
index 6f724298f67a11199407870e2dbb1541ee55db6c..efa54f259ea9d316176c03c66badd9c23cfc8e20 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Floating proportions
  *
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * Description:
  *
index 9acfb165eb52eeff2271888ec61fe093c9015641..c92a65b2b4ab4103454aa1f8bca828384b22e07b 100644 (file)
@@ -4779,23 +4779,18 @@ static void mem_cgroup_clear_mc(void)
        spin_unlock(&mc.lock);
 }
 
-static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
-                                struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+       struct cgroup_subsys_state *css;
+       struct mem_cgroup *memcg;
        struct mem_cgroup *from;
        struct task_struct *leader, *p;
        struct mm_struct *mm;
        unsigned long move_flags;
        int ret = 0;
 
-       /*
-        * We are now commited to this value whatever it is. Changes in this
-        * tunable will only affect upcoming migrations, not the current one.
-        * So we need to save it, and keep it going.
-        */
-       move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
-       if (!move_flags)
+       /* charge immigration isn't supported on the default hierarchy */
+       if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return 0;
 
        /*
@@ -4805,13 +4800,23 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
         * multiple.
         */
        p = NULL;
-       cgroup_taskset_for_each_leader(leader, tset) {
+       cgroup_taskset_for_each_leader(leader, css, tset) {
                WARN_ON_ONCE(p);
                p = leader;
+               memcg = mem_cgroup_from_css(css);
        }
        if (!p)
                return 0;
 
+       /*
+        * We are now committed to this value whatever it is. Changes in this
+        * tunable will only affect upcoming migrations, not the current one.
+        * So we need to save it, and keep it going.
+        */
+       move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
+       if (!move_flags)
+               return 0;
+
        from = mem_cgroup_from_task(p);
 
        VM_BUG_ON(from == memcg);
@@ -4842,8 +4847,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
        return ret;
 }
 
-static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
-                                    struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
        if (mc.to)
                mem_cgroup_clear_mc();
@@ -4985,10 +4989,10 @@ retry:
        atomic_dec(&mc.from->moving_account);
 }
 
-static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
-                                struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
 {
-       struct task_struct *p = cgroup_taskset_first(tset);
+       struct cgroup_subsys_state *css;
+       struct task_struct *p = cgroup_taskset_first(tset, &css);
        struct mm_struct *mm = get_task_mm(p);
 
        if (mm) {
@@ -5000,17 +5004,14 @@ static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
                mem_cgroup_clear_mc();
 }
 #else  /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
-                                struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 {
        return 0;
 }
-static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
-                                    struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
-                                struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
 {
 }
 #endif
index 3e4d65445fa71d7d629868c1db70af2bfb1fab28..d15d88c8efa1e25bf76fe3bf07f6eeb08fd14fd5 100644 (file)
@@ -2,7 +2,7 @@
  * mm/page-writeback.c
  *
  * Copyright (C) 2002, Linus Torvalds.
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * Contains functions related to writing back dirty pages at the
  * address_space level.
index a3bffd1ec2b46adbc7a2130573ae991950789a77..70306cc9d8140f696e440de10f9fff864b0abd94 100644 (file)
@@ -271,11 +271,11 @@ static long bt_sock_data_wait(struct sock *sk, long timeo)
                if (signal_pending(current) || !timeo)
                        break;
 
-               set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
-               clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+               sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        }
 
        __set_current_state(TASK_RUNNING);
@@ -441,7 +441,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
        if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        return mask;
 }
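
The sk_set_bit()/sk_clear_bit() conversions repeated throughout these networking hunks stop dereferencing sk->sk_socket, which is NULL once a socket is orphaned. A hedged sketch of the helpers' assumed shape, with the SOCKWQ_ASYNC_* flags relocated onto the socket wait queue, which outlives the struct socket:

	static inline void sk_set_bit(int nr, struct sock *sk)
	{
		set_bit(nr, &sk->sk_wq_raw->flags);
	}

	static inline void sk_clear_bit(int nr, struct sock *sk)
	{
		clear_bit(nr, &sk->sk_wq_raw->flags);
	}
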
index c91353841e40500790c13d2ce894460e1cbbe9e3..ffed8a1d4f27634866c93d22b4ceb059b956cc91 100644 (file)
@@ -3027,8 +3027,13 @@ static void smp_ready_cb(struct l2cap_chan *chan)
 
        BT_DBG("chan %p", chan);
 
+       /* No need to call l2cap_chan_hold() here since we already own
+        * the reference taken in smp_new_conn_cb(). This is just the
+        * first time that we tie it to a specific pointer. The code in
+        * l2cap_core.c ensures that there's no risk this function won't
+        * get called if smp_new_conn_cb was previously called.
+        */
        conn->smp = chan;
-       l2cap_chan_hold(chan);
 
        if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
                bredr_pairing(chan);
index cc858919108ee1f9645bce1046be8650a640d821..aa209b1066c9699a12510055e70ae79b33ee05b7 100644 (file)
@@ -323,7 +323,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
                        !timeo)
                        break;
 
-               set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
@@ -331,7 +331,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
                if (sock_flag(sk, SOCK_DEAD))
                        break;
 
-               clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+               sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        }
 
        finish_wait(sk_sleep(sk), &wait);
index 617088aee21d41ba98d4ef5ebee5d6c002efe029..d62af69ad844de0f940cdc2f38c5e2720053895c 100644 (file)
@@ -785,7 +785,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        return mask;
 }
index e6af42da28d9552643751a1af7dfcd96cad05c3d..f18ae91b652e971ccba5c03177301f1a46a8da57 100644 (file)
@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = pn->flags | NTF_PROXY;
        ndm->ndm_type    = RTN_UNICAST;
-       ndm->ndm_ifindex = pn->dev->ifindex;
+       ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
        ndm->ndm_state   = NUD_NONE;
 
        if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
@@ -2333,7 +2333,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                if (h > s_h)
                        s_idx = 0;
                for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
-                       if (dev_net(n->dev) != net)
+                       if (pneigh_net(n) != net)
                                continue;
                        if (idx < s_idx)
                                goto next;
index 6441f47b1a8ffc78731896fd4ab1b12db43f0992..d9ee8d08a3a6944ebb6e5d8dafe6fb5032aa7a52 100644 (file)
@@ -56,7 +56,7 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
        kfree(css_cls_state(css));
 }
 
-static int update_classid(const void *v, struct file *file, unsigned n)
+static int update_classid_sock(const void *v, struct file *file, unsigned n)
 {
        int err;
        struct socket *sock = sock_from_file(file, &err);
@@ -67,18 +67,27 @@ static int update_classid(const void *v, struct file *file, unsigned n)
        return 0;
 }
 
-static void cgrp_attach(struct cgroup_subsys_state *css,
-                       struct cgroup_taskset *tset)
+static void update_classid(struct cgroup_subsys_state *css, void *v)
 {
-       struct cgroup_cls_state *cs = css_cls_state(css);
-       void *v = (void *)(unsigned long)cs->classid;
+       struct css_task_iter it;
        struct task_struct *p;
 
-       cgroup_taskset_for_each(p, tset) {
+       css_task_iter_start(css, &it);
+       while ((p = css_task_iter_next(&it))) {
                task_lock(p);
-               iterate_fd(p->files, 0, update_classid, v);
+               iterate_fd(p->files, 0, update_classid_sock, v);
                task_unlock(p);
        }
+       css_task_iter_end(&it);
+}
+
+static void cgrp_attach(struct cgroup_taskset *tset)
+{
+       struct cgroup_subsys_state *css;
+
+       cgroup_taskset_first(tset, &css);
+       update_classid(css,
+                      (void *)(unsigned long)css_cls_state(css)->classid);
 }
 
 static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -89,8 +98,11 @@ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
 static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
                         u64 value)
 {
-       css_cls_state(css)->classid = (u32) value;
+       struct cgroup_cls_state *cs = css_cls_state(css);
+
+       cs->classid = (u32)value;
 
+       update_classid(css, (void *)(unsigned long)cs->classid);
        return 0;
 }
 
index cbd0a199bf52c79780e7935f1ed123f3f86758ec..40fd09fe06ae91e133bbf1fe499e38870903c8ca 100644 (file)
@@ -218,13 +218,14 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
        return 0;
 }
 
-static void net_prio_attach(struct cgroup_subsys_state *css,
-                           struct cgroup_taskset *tset)
+static void net_prio_attach(struct cgroup_taskset *tset)
 {
        struct task_struct *p;
-       void *v = (void *)(unsigned long)css->cgroup->id;
+       struct cgroup_subsys_state *css;
+
+       cgroup_taskset_for_each(p, css, tset) {
+               void *v = (void *)(unsigned long)css->cgroup->id;
 
-       cgroup_taskset_for_each(p, tset) {
                task_lock(p);
                iterate_fd(p->files, 0, update_netprio, v);
                task_unlock(p);
index 3b6899b7d810d569057b051162b51fde7c51cba8..8a1741b14302bd0cecdc265848feba8222400d17 100644 (file)
@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
                        err = put_user(cmlen, &cm->cmsg_len);
                if (!err) {
                        cmlen = CMSG_SPACE(i*sizeof(int));
+                       if (msg->msg_controllen < cmlen)
+                               cmlen = msg->msg_controllen;
                        msg->msg_control += cmlen;
                        msg->msg_controllen -= cmlen;
                }
index 1e4dd54bfb5a525ef6070905f07472f60e9f137f..e31dfcee1729aa23bdd2ed692fda1b90bd75afb8 100644 (file)
@@ -1530,7 +1530,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                skb_queue_head_init(&newsk->sk_receive_queue);
                skb_queue_head_init(&newsk->sk_write_queue);
 
-               spin_lock_init(&newsk->sk_dst_lock);
                rwlock_init(&newsk->sk_callback_lock);
                lockdep_set_class_and_name(&newsk->sk_callback_lock,
                                af_callback_keys + newsk->sk_family,
@@ -1607,7 +1606,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
        u32 max_segs = 1;
 
-       __sk_dst_set(sk, dst);
+       sk_dst_set(sk, dst);
        sk->sk_route_caps = dst->dev->features;
        if (sk->sk_route_caps & NETIF_F_GSO)
                sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
@@ -1815,7 +1814,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 {
        DEFINE_WAIT(wait);
 
-       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
        for (;;) {
                if (!timeo)
                        break;
@@ -1861,7 +1860,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
                        break;
 
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                err = -EAGAIN;
                if (!timeo)
@@ -2048,9 +2047,9 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
        DEFINE_WAIT(wait);
 
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-       set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+       sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
-       clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        finish_wait(sk_sleep(sk), &wait);
        return rc;
 }
@@ -2388,7 +2387,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        } else
                sk->sk_wq       =       NULL;
 
-       spin_lock_init(&sk->sk_dst_lock);
        rwlock_init(&sk->sk_callback_lock);
        lockdep_set_class_and_name(&sk->sk_callback_lock,
                        af_callback_keys + sk->sk_family,
index d70f77a0c8898582e0adabd24c6165675d12dce7..b96f7a79e54458bfa1d0d1a51f77be08a45e8207 100644 (file)
@@ -39,7 +39,7 @@ void sk_stream_write_space(struct sock *sk)
                        wake_up_interruptible_poll(&wq->wait, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);
                if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
-                       sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+                       sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
                rcu_read_unlock();
        }
 }
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
                current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
 
        while (1) {
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
@@ -139,7 +139,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
                }
                if (signal_pending(current))
                        goto do_interrupted;
-               clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                if (sk_stream_memory_free(sk) && !vm_wait)
                        break;
 
index db5fc2440a232c856bccfd8e0222a24bcf3d6d11..9c6d0508e63a2ab7f13105cd91ea8c027c4a7557 100644 (file)
@@ -202,7 +202,9 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
        security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
 
-       final_p = fl6_update_dst(&fl6, np->opt, &final);
+       rcu_read_lock();
+       final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+       rcu_read_unlock();
 
        dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
        if (IS_ERR(dst)) {
@@ -219,7 +221,10 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
                                                         &ireq->ir_v6_loc_addr,
                                                         &ireq->ir_v6_rmt_addr);
                fl6.daddr = ireq->ir_v6_rmt_addr;
-               err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+               rcu_read_lock();
+               err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+                              np->tclass);
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
@@ -387,6 +392,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *newnp;
        const struct ipv6_pinfo *np = inet6_sk(sk);
+       struct ipv6_txoptions *opt;
        struct inet_sock *newinet;
        struct dccp6_sock *newdp6;
        struct sock *newsk;
@@ -453,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
         * comment in that function for the gory details. -acme
         */
 
-       __ip6_dst_store(newsk, dst, NULL, NULL);
+       ip6_dst_store(newsk, dst, NULL, NULL);
        newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
                                                      NETIF_F_TSO);
        newdp6 = (struct dccp6_sock *)newsk;
@@ -488,13 +494,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
         * Yes, keeping reference count would be much more clever, but we do
         * one more thing there: reattach optmem to newsk.
         */
-       if (np->opt != NULL)
-               newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+       opt = rcu_dereference(np->opt);
+       if (opt) {
+               opt = ipv6_dup_options(newsk, opt);
+               RCU_INIT_POINTER(newnp->opt, opt);
+       }
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
-       if (newnp->opt != NULL)
-               inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
-                                                    newnp->opt->opt_flen);
+       if (opt)
+               inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+                                                   opt->opt_flen;
 
        dccp_sync_mss(newsk, dst_mtu(dst));
 
@@ -757,6 +765,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
+       struct ipv6_txoptions *opt;
        struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
@@ -856,7 +865,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        fl6.fl6_sport = inet->inet_sport;
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       final_p = fl6_update_dst(&fl6, np->opt, &final);
+       opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+       final_p = fl6_update_dst(&fl6, opt, &final);
 
        dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
        if (IS_ERR(dst)) {
@@ -873,12 +883,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
-       __ip6_dst_store(sk, dst, NULL, NULL);
+       ip6_dst_store(sk, dst, NULL, NULL);
 
        icsk->icsk_ext_hdr_len = 0;
-       if (np->opt != NULL)
-               icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
-                                         np->opt->opt_nflen);
+       if (opt)
+               icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
 
        inet->inet_dport = usin->sin6_port;
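
The np->opt accesses above follow its conversion to an RCU-protected pointer: lockless readers dereference under rcu_read_lock(), while paths that own the socket lock use rcu_dereference_protected() with sock_owned_by_user(sk) as the protection condition. A minimal reader-side sketch:

	struct in6_addr *final_p, final;
	struct ipv6_txoptions *opt;

	rcu_read_lock();
	opt = rcu_dereference(np->opt);		/* may be NULL */
	final_p = fl6_update_dst(&fl6, opt, &final);
	rcu_read_unlock();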
 
index b5cf13a2800923486ad597c296a66145bc248596..41e65804ddf59651c78ae58b697e7e5e603c9167 100644 (file)
@@ -339,8 +339,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
                        if (sk_stream_is_writeable(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
-                               set_bit(SOCK_ASYNC_NOSPACE,
-                                       &sk->sk_socket->flags);
+                               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 
                                /* Race breaker. If space is freed after
index 675cf94e04f862b77644f86628af6e8a46933055..eebf5ac8ce18abdb7ac094a1e6de081fcbfa79aa 100644 (file)
@@ -1747,9 +1747,9 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                }
 
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-               set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
-               clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+               sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                finish_wait(sk_sleep(sk), &wait);
        }
 
@@ -2004,10 +2004,10 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                        }
 
                        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-                       set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+                       sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                        sk_wait_event(sk, &timeo,
                                      !dn_queue_too_long(scp, queue, flags));
-                       clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+                       sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                        finish_wait(sk_sleep(sk), &wait);
                        continue;
                }
index 4677b6fa6dda2c71ace9d97536a450684d18d3ef..ecc28cff08ab8110a9d96d6f7abe690ca77e9962 100644 (file)
@@ -67,7 +67,7 @@
  * Returns the size of the result on success, -ve error code otherwise.
  */
 int dns_query(const char *type, const char *name, size_t namelen,
-             const char *options, char **_result, time_t *_expiry)
+             const char *options, char **_result, time64_t *_expiry)
 {
        struct key *rkey;
        const struct user_key_payload *upayload;
index 35a9788bb3ae734d8e5b2f5199901a6c47f7a587..c7d1adca30d891b183b0832712e0d57aa1f33201 100644 (file)
@@ -312,7 +312,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
        return;
 
 out:
-       WARN_ON_ONCE("HSR: Could not send supervision frame\n");
+       WARN_ONCE(1, "HSR: Could not send supervision frame\n");
        kfree_skb(skb);
 }
 
index 6baf36e11808e5c93c2e092139bed60cdacc4c8a..05e4cba14162f3583ec588657af7e8b68546b111 100644 (file)
@@ -2126,7 +2126,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
        ASSERT_RTNL();
 
        in_dev = ip_mc_find_dev(net, imr);
-       if (!in_dev) {
+       if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
                ret = -ENODEV;
                goto out;
        }
@@ -2147,7 +2147,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
                *imlp = iml->next_rcu;
 
-               ip_mc_dec_group(in_dev, group);
+               if (in_dev)
+                       ip_mc_dec_group(in_dev, group);
 
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
index 92dd4b74d513ab40ba4f6f3f2f5ce7a73085f379..c3a38353f5dc8094de5c1dcec06ae54ab0b29a9e 100644 (file)
@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
-static void mroute_clean_tables(struct mr_table *mrt);
+static void mroute_clean_tables(struct mr_table *mrt, bool all);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 static void ipmr_free_table(struct mr_table *mrt)
 {
        del_timer_sync(&mrt->ipmr_expire_timer);
-       mroute_clean_tables(mrt);
+       mroute_clean_tables(mrt, true);
        kfree(mrt);
 }
 
@@ -441,10 +441,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
        return dev;
 
 failure:
-       /* allow the register to be completed before unregistering. */
-       rtnl_unlock();
-       rtnl_lock();
-
        unregister_netdevice(dev);
        return NULL;
 }
@@ -540,10 +536,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
        return dev;
 
 failure:
-       /* allow the register to be completed before unregistering. */
-       rtnl_unlock();
-       rtnl_lock();
-
        unregister_netdevice(dev);
        return NULL;
 }
@@ -1208,7 +1200,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
  *     Close the multicast socket, and clear the vif tables etc
  */
 
-static void mroute_clean_tables(struct mr_table *mrt)
+static void mroute_clean_tables(struct mr_table *mrt, bool all)
 {
        int i;
        LIST_HEAD(list);
@@ -1217,8 +1209,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
        /* Shut down all active vif entries */
 
        for (i = 0; i < mrt->maxvif; i++) {
-               if (!(mrt->vif_table[i].flags & VIFF_STATIC))
-                       vif_delete(mrt, i, 0, &list);
+               if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
+                       continue;
+               vif_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);
 
@@ -1226,7 +1219,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
 
        for (i = 0; i < MFC_LINES; i++) {
                list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
-                       if (c->mfc_flags & MFC_STATIC)
+                       if (!all && (c->mfc_flags & MFC_STATIC))
                                continue;
                        list_del_rcu(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
@@ -1261,7 +1254,7 @@ static void mrtsock_destruct(struct sock *sk)
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                        RCU_INIT_POINTER(mrt->mroute_sk, NULL);
-                       mroute_clean_tables(mrt);
+                       mroute_clean_tables(mrt, false);
                }
        }
        rtnl_unlock();
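The ipmr changes above thread a single `all` flag through mroute_clean_tables(): full table teardown (ipmr_free_table) flushes everything, while socket close (mrtsock_destruct) keeps entries marked static. A minimal sketch of the idiom, with illustrative names rather than code from this patch:

	/* Sketch: flush a table, sparing STATIC entries unless 'all' is set. */
	static void clean_entries(struct entry *tbl, int n, bool all)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (!all && (tbl[i].flags & ENTRY_STATIC))
				continue;	/* administrative clean-up spares these */
			delete_entry(&tbl[i]);
		}
	}

Inverting the test and using continue keeps the delete call unconditional on the teardown path, which is what previously leaked static VIFs and cache entries when a whole table was destroyed.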
index c1728771cf89c46a82af0187a02029450adb854b..c82cca18c90fbd67c2daf71c6769ee5fef21d2a9 100644 (file)
@@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                        if (sk_stream_is_writeable(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
-                               set_bit(SOCK_ASYNC_NOSPACE,
-                                       &sk->sk_socket->flags);
+                               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 
                                /* Race breaker. If space is freed after
@@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                        goto out_err;
        }
 
-       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        mss_now = tcp_send_mss(sk, &size_goal, flags);
        copied = 0;
@@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        }
 
        /* This should be in poll */
-       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        mss_now = tcp_send_mss(sk, &size_goal, flags);
 
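These tcp.c hunks swap direct manipulation of sk->sk_socket->flags for sk_set_bit()/sk_clear_bit() helpers keyed by the renamed SOCKWQ_ASYNC_* constants. A sketch of the assumed helper shape (the real definitions live in include/net/sock.h and may differ in detail):

	/* Sketch: the async flags now live with the wait queue, so code
	 * running from softirq context no longer touches sk->sk_socket,
	 * which can be cleared while the socket is being torn down.
	 */
	static inline void sk_set_bit(int nr, struct sock *sk)
	{
		set_bit(nr, &sk->sk_wq_raw->flags);
	}

	static inline void sk_clear_bit(int nr, struct sock *sk)
	{
		clear_bit(nr, &sk->sk_wq_raw->flags);
	}

The SOCK_ASYNC_* to SOCKWQ_ASYNC_* rename tracks the move: the bits describe wait-queue state, not struct socket state.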
index fdd88c3803a673881053039cdc8ff44bc1b8aa4a..2d656eef7f8e16458127ba591e0a4154365f6a83 100644 (file)
@@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
        struct sk_buff *skb;
+       int err = -ENOMEM;
+       int data_len = 0;
        bool fragstolen;
 
        if (size == 0)
                return 0;
 
-       skb = alloc_skb(size, sk->sk_allocation);
+       if (size > PAGE_SIZE) {
+               int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
+
+               data_len = npages << PAGE_SHIFT;
+               size = data_len + (size & ~PAGE_MASK);
+       }
+       skb = alloc_skb_with_frags(size - data_len, data_len,
+                                  PAGE_ALLOC_COSTLY_ORDER,
+                                  &err, sk->sk_allocation);
        if (!skb)
                goto err;
 
+       skb_put(skb, size - data_len);
+       skb->data_len = data_len;
+       skb->len = size;
+
        if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
                goto err_free;
 
-       if (memcpy_from_msg(skb_put(skb, size), msg, size))
+       err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+       if (err)
                goto err_free;
 
        TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
@@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 err_free:
        kfree_skb(skb);
 err:
-       return -ENOMEM;
+       return err;
 }
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
@@ -5667,6 +5683,7 @@ discard:
                }
 
                tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+               tp->copied_seq = tp->rcv_nxt;
                tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
 
                /* RFC1323: The window in SYN & SYN/ACK segments is
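tcp_send_rcvq() used to do a single alloc_skb(size); for repair-mode injection of large buffers that is a high-order allocation that can fail or stress the page allocator. The hunk above caps the linear part and pushes the remainder into page fragments via alloc_skb_with_frags(). The allocation split, in isolation:

	int err = -ENOMEM;
	int data_len = 0;

	if (size > PAGE_SIZE) {
		int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);

		data_len = npages << PAGE_SHIFT;	/* paged portion */
		size = data_len + (size & ~PAGE_MASK);	/* clamp the total */
	}
	skb = alloc_skb_with_frags(size - data_len,	/* linear bytes */
				   data_len,		/* fragment bytes */
				   PAGE_ALLOC_COSTLY_ORDER,
				   &err, sk->sk_allocation);

Since the destination is no longer one contiguous buffer, the copy switches from memcpy_from_msg() to skb_copy_datagram_from_iter(), which handles paged skbs, and the error path propagates err instead of hard-coding -ENOMEM.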
index ba09016d1bfd2a778d57cdc37f0a7db539f9cb3a..db003438aaf5f6a2b27319c4669be1b1f62c7ae3 100644 (file)
@@ -921,7 +921,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        }
 
        md5sig = rcu_dereference_protected(tp->md5sig_info,
-                                          sock_owned_by_user(sk));
+                                          sock_owned_by_user(sk) ||
+                                          lockdep_is_held(&sk->sk_lock.slock));
        if (!md5sig) {
                md5sig = kmalloc(sizeof(*md5sig), gfp);
                if (!md5sig)
index c9c716a483e457e8b4fe063776ae3d7a855889cc..193ba1fa8a9abbc190823a86398722d5c5a605fe 100644 (file)
@@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk)
                        dst_negative_advice(sk);
                        if (tp->syn_fastopen || tp->syn_data)
                                tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-                       if (tp->syn_data)
+                       if (tp->syn_data && icsk->icsk_retransmits == 1)
                                NET_INC_STATS_BH(sock_net(sk),
                                                 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                }
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
                syn_set = true;
        } else {
                if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+                       /* Some middle-boxes may black-hole Fast Open _after_
+                        * the handshake. Therefore we conservatively disable
+                        * Fast Open on this path on recurring timeouts with
+                        * few or zero bytes acked after Fast Open.
+                        */
+                       if (tp->syn_data_acked &&
+                           tp->bytes_acked <= tp->rx_opt.mss_clamp) {
+                               tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
+                               if (icsk->icsk_retransmits == sysctl_tcp_retries1)
+                                       NET_INC_STATS_BH(sock_net(sk),
+                                                        LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+                       }
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
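The new check reads: if this connection sent data in its SYN (syn_data_acked) and, by the time the sysctl_tcp_retries1 threshold fires, the cumulative bytes_acked still fits in one MSS (mss_clamp), then at most the SYN payload was ever acknowledged — the signature of a middlebox discarding Fast Open data rather than ordinary loss. For example, with mss_clamp = 1460 and bytes_acked = 1460 after repeated RTOs, nothing beyond the initial segment got through, so TFO is disabled for this destination via tcp_fastopen_cache_set(), and the failure is counted only once, at the retries1 crossing, to avoid inflating LINUX_MIB_TCPFASTOPENACTIVEFAIL.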
index 24ec14f9825c20834eca20b820ec953ba108da52..0c7b0e61b917158af7e431f9e7781f7bce313c83 100644 (file)
 #include <linux/slab.h>
 #include <net/tcp_states.h>
 #include <linux/skbuff.h>
-#include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <net/net_namespace.h>
index d84742f003a9fca65a3545abdb7a4d517989ed4c..61f26851655ccd23200278d34ceda44351da1c17 100644 (file)
@@ -3642,7 +3642,7 @@ static void addrconf_dad_work(struct work_struct *w)
 
        /* send a neighbour solicitation for our addr */
        addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
-       ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL);
+       ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any);
 out:
        in6_ifa_put(ifp);
        rtnl_unlock();
index 44bb66bde0e2d97308c3c68a8d6b225ce04d08a8..8ec0df75f1c4f81bea71cb466c4da802c39099d8 100644 (file)
@@ -428,9 +428,11 @@ void inet6_destroy_sock(struct sock *sk)
 
        /* Free tx options */
 
-       opt = xchg(&np->opt, NULL);
-       if (opt)
-               sock_kfree_s(sk, opt, opt->tot_len);
+       opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
+       if (opt) {
+               atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+               txopt_put(opt);
+       }
 }
 EXPORT_SYMBOL_GPL(inet6_destroy_sock);
 
@@ -659,7 +661,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
                fl6.fl6_sport = inet->inet_sport;
                security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-               final_p = fl6_update_dst(&fl6, np->opt, &final);
+               rcu_read_lock();
+               final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
+                                        &final);
+               rcu_read_unlock();
 
                dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
                if (IS_ERR(dst)) {
@@ -668,7 +673,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
                        return PTR_ERR(dst);
                }
 
-               __ip6_dst_store(sk, dst, NULL, NULL);
+               ip6_dst_store(sk, dst, NULL, NULL);
        }
 
        return 0;
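The recurring pattern in the IPv6 hunks that follow: lockless readers of np->opt take rcu_read_lock() and rcu_dereference() the pointer for the duration of the access, reduced here to its core:

	struct ipv6_txoptions *opt;

	rcu_read_lock();
	opt = rcu_dereference(np->opt);		/* may be NULL */
	final_p = fl6_update_dst(&fl6, opt, &final);
	rcu_read_unlock();

Paths that hold the socket lock use rcu_dereference_protected(np->opt, sock_owned_by_user(sk)) instead, which documents the write-side exclusion without a read-side critical section, and writers publish replacements with xchg() on the annotated pointer.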
index d70b0238f468f4e5602d09469eddc98a07a3e61c..517c55b01ba84b55a0004ce9505d14c4a3951cfc 100644 (file)
@@ -167,8 +167,10 @@ ipv4_connected:
 
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       opt = flowlabel ? flowlabel->opt : np->opt;
+       rcu_read_lock();
+       opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
        final_p = fl6_update_dst(&fl6, opt, &final);
+       rcu_read_unlock();
 
        dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
        err = 0;
index ce203b0402bea3b16deb34b4835cd2e89e94f899..ea7c4d64a00adad60a634afb2c6efca4ab029799 100644 (file)
@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
                        *((char **)&opt2->dst1opt) += dif;
                if (opt2->srcrt)
                        *((char **)&opt2->srcrt) += dif;
+               atomic_set(&opt2->refcnt, 1);
        }
        return opt2;
 }
@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
                return ERR_PTR(-ENOBUFS);
 
        memset(opt2, 0, tot_len);
-
+       atomic_set(&opt2->refcnt, 1);
        opt2->tot_len = tot_len;
        p = (char *)(opt2 + 1);
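Each ipv6_txoptions constructor now initializes opt->refcnt to 1 because the object can still be referenced by RCU readers after its owner drops it. The companion helpers presumably added in the ipv6 headers would look roughly like this (a sketch of the expected shape, not quoted from the patch):

	/* Take a reference on the socket's tx options, or return NULL. */
	static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
	{
		struct ipv6_txoptions *opt;

		rcu_read_lock();
		opt = rcu_dereference(np->opt);
		if (opt && !atomic_inc_not_zero(&opt->refcnt))
			opt = NULL;	/* raced with the final put */
		rcu_read_unlock();
		return opt;
	}

	static inline void txopt_put(struct ipv6_txoptions *opt)
	{
		if (opt && atomic_dec_and_test(&opt->refcnt))
			kfree_rcu(opt, rcu);
	}

This is why the setsockopt paths above pair atomic_sub(opt->tot_len, &sk->sk_omem_alloc) with txopt_put() instead of sock_kfree_s(): the memory accounting is released immediately, the object itself only after the last reference and an RCU grace period.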
 
index 36c5a98b04727b220e9ea77e00d96410d4bb3f74..0a37ddc7af51579f56b644ba0e4c3c3a7a2e2bc7 100644 (file)
@@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
        security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 }
 
-/*
- * Special lock-class for __icmpv6_sk:
- */
-static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
-
 static int __net_init icmpv6_sk_init(struct net *net)
 {
        struct sock *sk;
@@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net)
 
                net->ipv6.icmp_sk[i] = sk;
 
-               /*
-                * Split off their lock-class, because sk->sk_dst_lock
-                * gets used from softirqs, which is safe for
-                * __icmpv6_sk (because those never get directly used
-                * via userspace syscalls), but unsafe for normal sockets.
-                */
-               lockdep_set_class(&sk->sk_dst_lock,
-                                 &icmpv6_socket_sk_dst_lock_key);
-
                /* Enough space for 2 64K ICMP packets, including
                 * sk_buff struct overhead.
                 */
index 5d1c7cee2cb2bdc45b0889b4fe197c7af4db01e7..a7ca2cde2ecbcff85c9a6151b4770e3897d16314 100644 (file)
@@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
        memset(fl6, 0, sizeof(*fl6));
        fl6->flowi6_proto = proto;
        fl6->daddr = ireq->ir_v6_rmt_addr;
-       final_p = fl6_update_dst(fl6, np->opt, &final);
+       rcu_read_lock();
+       final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+       rcu_read_unlock();
        fl6->saddr = ireq->ir_v6_loc_addr;
        fl6->flowi6_oif = ireq->ir_iif;
        fl6->flowi6_mark = ireq->ir_mark;
@@ -108,14 +110,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
 }
 EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
 
-static inline
-void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
-                          const struct in6_addr *daddr,
-                          const struct in6_addr *saddr)
-{
-       __ip6_dst_store(sk, dst, daddr, saddr);
-}
-
 static inline
 struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
 {
@@ -142,14 +136,16 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
        fl6->fl6_dport = inet->inet_dport;
        security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
-       final_p = fl6_update_dst(fl6, np->opt, &final);
+       rcu_read_lock();
+       final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+       rcu_read_unlock();
 
        dst = __inet6_csk_dst_check(sk, np->dst_cookie);
        if (!dst) {
                dst = ip6_dst_lookup_flow(sk, fl6, final_p);
 
                if (!IS_ERR(dst))
-                       __inet6_csk_dst_store(sk, dst, NULL, NULL);
+                       ip6_dst_store(sk, dst, NULL, NULL);
        }
        return dst;
 }
@@ -175,7 +171,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
        /* Restore final destination back after routing done */
        fl6.daddr = sk->sk_v6_daddr;
 
-       res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+       res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+                      np->tclass);
        rcu_read_unlock();
        return res;
 }
index eabffbb89795d921b0977989345b25d81f553ee0..137fca42aaa6bb809d46e7809b240d8810d89a04 100644 (file)
@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t)
        int i;
 
        for_each_possible_cpu(i)
-               ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
+               ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
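The one-line fix above is a classic percpu pitfall: raw_cpu_ptr() always resolves to the running CPU's instance, so the old loop reset the local dst cache once per CPU and never touched the others. Walking all instances requires per_cpu_ptr() with the explicit index (illustrative percpu variable):

	struct foo_stats __percpu *stats;
	int cpu;

	/* Correct: visit every CPU's copy. */
	for_each_possible_cpu(cpu)
		memset(per_cpu_ptr(stats, cpu), 0, sizeof(struct foo_stats));

	/* Buggy in a loop: always the local CPU's copy. */
	for_each_possible_cpu(cpu)
		memset(raw_cpu_ptr(stats), 0, sizeof(struct foo_stats));

raw_cpu_ptr()/this_cpu_ptr() are only appropriate when "the current CPU" is really what is meant and preemption is under control.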
 
index ad19136086dd5e1ae4e362e3da0f0267fc755b02..a10e77103c88dfc952f80c645a7b87c57b8f6dbf 100644 (file)
@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
                              int cmd);
 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
                               struct netlink_callback *cb);
-static void mroute_clean_tables(struct mr6_table *mrt);
+static void mroute_clean_tables(struct mr6_table *mrt, bool all);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 static void ip6mr_free_table(struct mr6_table *mrt)
 {
        del_timer_sync(&mrt->ipmr_expire_timer);
-       mroute_clean_tables(mrt);
+       mroute_clean_tables(mrt, true);
        kfree(mrt);
 }
 
@@ -765,10 +765,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
        return dev;
 
 failure:
-       /* allow the register to be completed before unregistering. */
-       rtnl_unlock();
-       rtnl_lock();
-
        unregister_netdevice(dev);
        return NULL;
 }
@@ -1542,7 +1538,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
  *     Close the multicast socket, and clear the vif tables etc
  */
 
-static void mroute_clean_tables(struct mr6_table *mrt)
+static void mroute_clean_tables(struct mr6_table *mrt, bool all)
 {
        int i;
        LIST_HEAD(list);
@@ -1552,8 +1548,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
         *      Shut down all active vif entries
         */
        for (i = 0; i < mrt->maxvif; i++) {
-               if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
-                       mif6_delete(mrt, i, &list);
+               if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+                       continue;
+               mif6_delete(mrt, i, &list);
        }
        unregister_netdevice_many(&list);
 
@@ -1562,7 +1559,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
         */
        for (i = 0; i < MFC6_LINES; i++) {
                list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
-                       if (c->mfc_flags & MFC_STATIC)
+                       if (!all && (c->mfc_flags & MFC_STATIC))
                                continue;
                        write_lock_bh(&mrt_lock);
                        list_del(&c->list);
@@ -1625,7 +1622,7 @@ int ip6mr_sk_done(struct sock *sk)
                                                     net->ipv6.devconf_all);
                        write_unlock_bh(&mrt_lock);
 
-                       mroute_clean_tables(mrt);
+                       mroute_clean_tables(mrt, false);
                        err = 0;
                        break;
                }
index 63e6956917c9cf20ee74968de3a7f03c1b48c849..4449ad1f81147cab79c44209f3f2a73dc1f6a934 100644 (file)
@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
                        icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
                }
        }
-       opt = xchg(&inet6_sk(sk)->opt, opt);
+       opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
+                  opt);
        sk_dst_reset(sk);
 
        return opt;
@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                                sk->sk_socket->ops = &inet_dgram_ops;
                                sk->sk_family = PF_INET;
                        }
-                       opt = xchg(&np->opt, NULL);
-                       if (opt)
-                               sock_kfree_s(sk, opt, opt->tot_len);
+                       opt = xchg((__force struct ipv6_txoptions **)&np->opt,
+                                  NULL);
+                       if (opt) {
+                               atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+                               txopt_put(opt);
+                       }
                        pktopt = xchg(&np->pktoptions, NULL);
                        kfree_skb(pktopt);
 
@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
                        break;
 
-               opt = ipv6_renew_options(sk, np->opt, optname,
+               opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+               opt = ipv6_renew_options(sk, opt, optname,
                                         (struct ipv6_opt_hdr __user *)optval,
                                         optlen);
                if (IS_ERR(opt)) {
@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                retv = 0;
                opt = ipv6_update_options(sk, opt);
 sticky_done:
-               if (opt)
-                       sock_kfree_s(sk, opt, opt->tot_len);
+               if (opt) {
+                       atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+                       txopt_put(opt);
+               }
                break;
        }
 
@@ -486,6 +493,7 @@ sticky_done:
                        break;
 
                memset(opt, 0, sizeof(*opt));
+               atomic_set(&opt->refcnt, 1);
                opt->tot_len = sizeof(*opt) + optlen;
                retv = -EFAULT;
                if (copy_from_user(opt+1, optval, optlen))
@@ -502,8 +510,10 @@ update:
                retv = 0;
                opt = ipv6_update_options(sk, opt);
 done:
-               if (opt)
-                       sock_kfree_s(sk, opt, opt->tot_len);
+               if (opt) {
+                       atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+                       txopt_put(opt);
+               }
                break;
        }
        case IPV6_UNICAST_HOPS:
@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
        case IPV6_RTHDR:
        case IPV6_DSTOPTS:
        {
+               struct ipv6_txoptions *opt;
 
                lock_sock(sk);
-               len = ipv6_getsockopt_sticky(sk, np->opt,
-                                            optname, optval, len);
+               opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+               len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
                release_sock(sk);
                /* check if ipv6_getsockopt_sticky() returns err code */
                if (len < 0)
index 3e0f855e1bead049064a284494eda378f85ae47e..d6161e1c48c86f4a5dfa5cedc89d4b93f8e3d24a 100644 (file)
@@ -556,8 +556,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 }
 
 void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
-                  const struct in6_addr *daddr, const struct in6_addr *saddr,
-                  struct sk_buff *oskb)
+                  const struct in6_addr *daddr, const struct in6_addr *saddr)
 {
        struct sk_buff *skb;
        struct in6_addr addr_buf;
@@ -593,9 +592,6 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
                ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
                                       dev->dev_addr);
 
-       if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
-               skb_dst_copy(skb, oskb);
-
        ndisc_send_skb(skb, daddr, saddr);
 }
 
@@ -682,12 +678,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
                                  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
                                  __func__, target);
                }
-               ndisc_send_ns(dev, target, target, saddr, skb);
+               ndisc_send_ns(dev, target, target, saddr);
        } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
                neigh_app_ns(neigh);
        } else {
                addrconf_addr_solict_mult(target, &mcaddr);
-               ndisc_send_ns(dev, target, &mcaddr, saddr, skb);
+               ndisc_send_ns(dev, target, &mcaddr, saddr);
        }
 }
 
index d5efeb87350e7fc8643962036c0b26cfe471cdc4..bab4441ed4e43906ce0b20d7618668eef81f1162 100644 (file)
@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
 /* Creation primitives. */
 static inline struct frag_queue *fq_find(struct net *net, __be32 id,
                                         u32 user, struct in6_addr *src,
-                                        struct in6_addr *dst, u8 ecn)
+                                        struct in6_addr *dst, int iif, u8 ecn)
 {
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
        arg.user = user;
        arg.src = src;
        arg.dst = dst;
+       arg.iif = iif;
        arg.ecn = ecn;
 
        local_bh_disable();
@@ -601,7 +602,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use
        fhdr = (struct frag_hdr *)skb_transport_header(clone);
 
        fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
-                    ip6_frag_ecn(hdr));
+                    skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
        if (fq == NULL) {
                pr_debug("Can't find and can't create new queue\n");
                goto ret_orig;
index dc65ec198f7c3f7ae19176fdaa8752c7fb4e13a5..99140986e88716529b90f082bf2b7011a2db8c15 100644 (file)
@@ -733,6 +733,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
 
 static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
+       struct ipv6_txoptions *opt_to_free = NULL;
        struct ipv6_txoptions opt_space;
        DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
        struct in6_addr *daddr, *final_p, final;
@@ -839,8 +840,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                if (!(opt->opt_nflen|opt->opt_flen))
                        opt = NULL;
        }
-       if (!opt)
-               opt = np->opt;
+       if (!opt) {
+               opt = txopt_get(np);
+               opt_to_free = opt;
+       }
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
@@ -906,6 +909,7 @@ done:
        dst_release(dst);
 out:
        fl6_sock_release(flowlabel);
+       txopt_put(opt_to_free);
        return err < 0 ? err : len;
 do_confirm:
        dst_confirm(dst);
index 44e21a03cfc3fff395903d36e8e01a7a3e395afa..45f5ae51de65c0fb4d700bad79a05c1e750309e0 100644 (file)
@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
        return  fq->id == arg->id &&
                fq->user == arg->user &&
                ipv6_addr_equal(&fq->saddr, arg->src) &&
-               ipv6_addr_equal(&fq->daddr, arg->dst);
+               ipv6_addr_equal(&fq->daddr, arg->dst) &&
+               (arg->iif == fq->iif ||
+                !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
+                                              IPV6_ADDR_LINKLOCAL)));
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
 
 static struct frag_queue *
 fq_find(struct net *net, __be32 id, const struct in6_addr *src,
-       const struct in6_addr *dst, u8 ecn)
+       const struct in6_addr *dst, int iif, u8 ecn)
 {
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
        arg.user = IP6_DEFRAG_LOCAL_DELIVER;
        arg.src = src;
        arg.dst = dst;
+       arg.iif = iif;
        arg.ecn = ecn;
 
        hash = inet6_hash_frag(id, src, dst);
@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        }
 
        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
-                    ip6_frag_ecn(hdr));
+                    skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
        if (fq) {
                int ret;
 
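Keying reassembly queues on the arrival interface matters because link-local and multicast IPv6 addresses are only unique per link: two neighbors on different interfaces can legitimately use the same fe80:: source and collide on the 32-bit fragment ID, letting their fragments be spliced into one bogus datagram. The updated predicate only enforces the iif match for those scopes; condensed from the ip6_frag_match() hunk above:

	match = fq->id == arg->id && fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst) &&
		(arg->iif == fq->iif ||
		 !(ipv6_addr_type(arg->dst) &
		   (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)));

Callers pass skb->dev ? skb->dev->ifindex : 0, so locally generated fragments without a device still coalesce into a single queue.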
index 6f01fe122abd7c0348bfcd67d1a55ecb40f1c0b0..826e6aa44f8d42c9c2e815ee134a4f58f1f29c80 100644 (file)
@@ -523,7 +523,7 @@ static void rt6_probe_deferred(struct work_struct *w)
                container_of(w, struct __rt6_probe_work, work);
 
        addrconf_addr_solict_mult(&work->target, &mcaddr);
-       ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL);
+       ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL);
        dev_put(work->dev);
        kfree(work);
 }
index bb8f2fa1c7fbbe1ac0936688ad684ae30679d917..eaf7ac496d506937cd52944bbdc17da720110b59 100644 (file)
@@ -222,7 +222,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_TCP;
                fl6.daddr = ireq->ir_v6_rmt_addr;
-               final_p = fl6_update_dst(&fl6, np->opt, &final);
+               final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
                fl6.saddr = ireq->ir_v6_loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.flowi6_mark = ireq->ir_mark;
index c5429a636f1aef539d0ea6bb27f585a3297b7769..e7aab561b7b463d4bfbdd7e7a89302403e252560 100644 (file)
@@ -120,6 +120,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
+       struct ipv6_txoptions *opt;
        struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
@@ -235,7 +236,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
 
-       final_p = fl6_update_dst(&fl6, np->opt, &final);
+       opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+       final_p = fl6_update_dst(&fl6, opt, &final);
 
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
@@ -255,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
        sk->sk_gso_type = SKB_GSO_TCPV6;
-       __ip6_dst_store(sk, dst, NULL, NULL);
+       ip6_dst_store(sk, dst, NULL, NULL);
 
        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp &&
@@ -263,9 +265,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                tcp_fetch_timewait_stamp(sk, dst);
 
        icsk->icsk_ext_hdr_len = 0;
-       if (np->opt)
-               icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
-                                         np->opt->opt_nflen);
+       if (opt)
+               icsk->icsk_ext_hdr_len = opt->opt_flen +
+                                        opt->opt_nflen;
 
        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 
@@ -461,7 +463,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-               err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+               err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
+                              np->tclass);
                err = net_xmit_eval(err);
        }
 
@@ -972,6 +975,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
        struct inet_request_sock *ireq;
        struct ipv6_pinfo *newnp;
        const struct ipv6_pinfo *np = inet6_sk(sk);
+       struct ipv6_txoptions *opt;
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
@@ -1056,7 +1060,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
         */
 
        newsk->sk_gso_type = SKB_GSO_TCPV6;
-       __ip6_dst_store(newsk, dst, NULL, NULL);
+       ip6_dst_store(newsk, dst, NULL, NULL);
        inet6_sk_rx_dst_set(newsk, skb);
 
        newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1098,13 +1102,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
           but we do one more thing there: reattach optmem
           to newsk.
         */
-       if (np->opt)
-               newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+       opt = rcu_dereference(np->opt);
+       if (opt) {
+               opt = ipv6_dup_options(newsk, opt);
+               RCU_INIT_POINTER(newnp->opt, opt);
+       }
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
-       if (newnp->opt)
-               inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
-                                                    newnp->opt->opt_flen);
+       if (opt)
+               inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+                                                   opt->opt_flen;
 
        tcp_ca_openreq_child(newsk, dst);
 
index 01bcb49619ee6659aefbd198d40f25bab8319749..9da3287a392370a5c05c538a30613f8ce74f8de1 100644 (file)
@@ -1110,6 +1110,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
        struct in6_addr *daddr, *final_p, final;
        struct ipv6_txoptions *opt = NULL;
+       struct ipv6_txoptions *opt_to_free = NULL;
        struct ip6_flowlabel *flowlabel = NULL;
        struct flowi6 fl6;
        struct dst_entry *dst;
@@ -1263,8 +1264,10 @@ do_udp_sendmsg:
                        opt = NULL;
                connected = 0;
        }
-       if (!opt)
-               opt = np->opt;
+       if (!opt) {
+               opt = txopt_get(np);
+               opt_to_free = opt;
+       }
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
@@ -1373,6 +1376,7 @@ release_dst:
 out:
        dst_release(dst);
        fl6_sock_release(flowlabel);
+       txopt_put(opt_to_free);
        if (!err)
                return len;
        /*
index fcb2752419c6635b06706d4cec542cb30ae3c85c..435608c4306d4afccf690eda945d4cb7eb962c6b 100644 (file)
@@ -1483,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
        if (sock_writeable(sk) && iucv_below_msglim(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        return mask;
 }
index aca38d8aed8e80b47ded2c81bbf346a39a678ce2..a2c8747d2936c305753224e7a786d67087b2cb1a 100644 (file)
@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
        struct in6_addr *daddr, *final_p, final;
        struct ipv6_pinfo *np = inet6_sk(sk);
+       struct ipv6_txoptions *opt_to_free = NULL;
        struct ipv6_txoptions *opt = NULL;
        struct ip6_flowlabel *flowlabel = NULL;
        struct dst_entry *dst = NULL;
@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                        opt = NULL;
        }
 
-       if (opt == NULL)
-               opt = np->opt;
+       if (!opt) {
+               opt = txopt_get(np);
+               opt_to_free = opt;
+       }
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
@@ -631,6 +634,7 @@ done:
        dst_release(dst);
 out:
        fl6_sock_release(flowlabel);
+       txopt_put(opt_to_free);
 
        return err < 0 ? err : len;
 
index a758eb84e8f057ac6518eef6c73bc8785dc7e746..ff757181b0a85c820e1acc53a088e95c78b87bff 100644 (file)
@@ -500,7 +500,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
                                     tid_tx->dialog_token, start_seq_num,
-                                    local->hw.max_tx_aggregation_subframes,
+                                    IEEE80211_MAX_AMPDU_BUF,
                                     tid_tx->timeout);
 }
 
@@ -926,6 +926,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
        amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
        tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
        buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+       buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
 
        mutex_lock(&sta->ampdu_mlme.mtx);
 
index c2bd1b6a69224e4e07b6247c71077e2a79939dac..da471eef07bb1a6ba6e42d3e5b570dbdff6c21e3 100644 (file)
@@ -3454,8 +3454,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                        goto out_unlock;
                }
        } else {
-               /* for cookie below */
-               ack_skb = skb;
+               /* Assign a dummy non-zero cookie, it's not sent to
+                * userspace in this case but we rely on its value
+                * internally in the need_offchan case to distinguish
+                * mgmt-tx from remain-on-channel.
+                */
+               *cookie = 0xffffffff;
        }
 
        if (!need_offchan) {
index d0dc1bfaeec2d5440573d114b98e16099616f43d..c9e325d2e120c0f9c230dbace71c3c405b9216a3 100644 (file)
@@ -76,7 +76,8 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
 void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
                              bool update_bss)
 {
-       if (__ieee80211_recalc_txpower(sdata) || update_bss)
+       if (__ieee80211_recalc_txpower(sdata) ||
+           (update_bss && ieee80211_sdata_running(sdata)))
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
 }
 
@@ -1861,6 +1862,7 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
                unregister_netdevice(sdata->dev);
        } else {
                cfg80211_unregister_wdev(&sdata->wdev);
+               ieee80211_teardown_sdata(sdata);
                kfree(sdata);
        }
 }
@@ -1870,7 +1872,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
        if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
                return;
        ieee80211_do_stop(sdata, true);
-       ieee80211_teardown_sdata(sdata);
 }
 
 void ieee80211_remove_interfaces(struct ieee80211_local *local)
index 858f6b1cb1494702bbc6e0d8d25f1b4d4b4d1470..175ffcf7fb06bfb5c9c1f95e2c17f42d666ee26f 100644 (file)
@@ -541,8 +541,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
                           NL80211_FEATURE_HT_IBSS |
                           NL80211_FEATURE_VIF_TXPOWER |
                           NL80211_FEATURE_MAC_ON_CREATE |
-                          NL80211_FEATURE_USERSPACE_MPM |
-                          NL80211_FEATURE_FULL_AP_CLIENT_STATE;
+                          NL80211_FEATURE_USERSPACE_MPM;
 
        if (!ops->hw_scan)
                wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
index b890e225a8f1b98681c53c764fc5252e5f2d5c8d..b3b44a5dd375c3a4bc391e29de21ae702a62418d 100644 (file)
@@ -779,10 +779,8 @@ void mesh_plink_broken(struct sta_info *sta)
 static void mesh_path_node_reclaim(struct rcu_head *rp)
 {
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-       struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
 
        del_timer_sync(&node->mpath->timer);
-       atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
 }
@@ -790,8 +788,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
 /* needs to be called with the corresponding hashwlock taken */
 static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
 {
-       struct mesh_path *mpath;
-       mpath = node->mpath;
+       struct mesh_path *mpath = node->mpath;
+       struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
@@ -799,6 +798,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
+       atomic_dec(&sdata->u.mesh.mpaths);
        atomic_dec(&tbl->entries);
 }
 
index 4aeca4b0c3cb426ba65b23d4a8ebc756e7a826a4..a413e52f7691418116d928f684a0fc27b308c2d6 100644 (file)
@@ -597,8 +597,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
                /* We need to ensure power level is at max for scanning. */
                ieee80211_hw_config(local, 0);
 
-               if ((req->channels[0]->flags &
-                    IEEE80211_CHAN_NO_IR) ||
+               if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
+                                               IEEE80211_CHAN_RADAR)) ||
                    !req->n_ssids) {
                        next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
                } else {
@@ -645,7 +645,7 @@ ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
         * TODO: channel switching also consumes quite some time,
         * add that delay as well to get a better estimation
         */
-       if (chan->flags & IEEE80211_CHAN_NO_IR)
+       if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))
                return IEEE80211_PASSIVE_CHANNEL_TIME;
        return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
 }
@@ -777,7 +777,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
         *
         * In any case, it is not necessary for a passive scan.
         */
-       if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) {
+       if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
+           !scan_req->n_ssids) {
                *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
                local->next_scan_state = SCAN_DECISION;
                return;
index b7de0da46acddc1eaaf041dedfbc9e2cf3697559..ecf0a0196f18040fb3fb2dc21b79273da92a470e 100644 (file)
@@ -572,7 +572,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
        if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        pr_debug("mask 0x%x\n", mask);
 
index a7a80a6b77b0ab15ecdd5859ee732417ee4a4705..653d073bae4533acf80d63fec54ed604249d83f0 100644 (file)
@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
                        struct hlist_node *n;
 
                        hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
-                               if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
+                               if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)
                                        continue;
 
                                if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
index efb736bb685545a0cb6a323d3eca87fc54eeb9f4..e41cd12d9b2d1aacc7a143138511ddf86f6597df 100644 (file)
@@ -117,7 +117,6 @@ static struct vport_ops ovs_geneve_vport_ops = {
        .destroy        = ovs_netdev_tunnel_destroy,
        .get_options    = geneve_get_options,
        .send           = dev_queue_xmit,
-       .owner          = THIS_MODULE,
 };
 
 static int __init ovs_geneve_tnl_init(void)
index c3257d78d3d28e6ed06e1be9c2e4a4f452c52886..7f8897f33a67fe6512436aff86c43098c15e3445 100644 (file)
@@ -89,7 +89,6 @@ static struct vport_ops ovs_gre_vport_ops = {
        .create         = gre_create,
        .send           = dev_queue_xmit,
        .destroy        = ovs_netdev_tunnel_destroy,
-       .owner          = THIS_MODULE,
 };
 
 static int __init ovs_gre_tnl_init(void)
index b327368a3848238013cf0f6f62445569d7e29251..6b0190b987ec62c043e04fea80b4122a81bd67b9 100644 (file)
@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
        if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
                ovs_netdev_detach_dev(vport);
 
-       /* Early release so we can unregister the device */
+       /* We can be invoked by both explicit vport deletion and
+        * underlying netdev deregistration; delete the link only
+        * if it's not already shutting down.
+        */
+       if (vport->dev->reg_state == NETREG_REGISTERED)
+               rtnl_delete_link(vport->dev);
        dev_put(vport->dev);
-       rtnl_delete_link(vport->dev);
        vport->dev = NULL;
        rtnl_unlock();
 
index 0ac0fd004d7ed885c009560d966da5b29b47f242..31cbc8c5c7db821e69945957f346fc361a07f9a0 100644 (file)
@@ -71,7 +71,7 @@ static struct hlist_head *hash_bucket(const struct net *net, const char *name)
        return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
 }
 
-int ovs_vport_ops_register(struct vport_ops *ops)
+int __ovs_vport_ops_register(struct vport_ops *ops)
 {
        int err = -EEXIST;
        struct vport_ops *o;
@@ -87,7 +87,7 @@ errout:
        ovs_unlock();
        return err;
 }
-EXPORT_SYMBOL_GPL(ovs_vport_ops_register);
+EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
 
 void ovs_vport_ops_unregister(struct vport_ops *ops)
 {
@@ -256,8 +256,8 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
  *
  * @vport: vport to delete.
  *
- * Detaches @vport from its datapath and destroys it.  It is possible to fail
- * for reasons such as lack of memory.  ovs_mutex must be held.
+ * Detaches @vport from its datapath and destroys it.  ovs_mutex must
+ * be held.
  */
 void ovs_vport_del(struct vport *vport)
 {
index bdfd82a7c064948dc1dc83acbc85b6534c1bcf9b..8ea3a96980acff90b1237387be70967c25f1236e 100644 (file)
@@ -196,7 +196,13 @@ static inline const char *ovs_vport_name(struct vport *vport)
        return vport->dev->name;
 }
 
-int ovs_vport_ops_register(struct vport_ops *ops);
+int __ovs_vport_ops_register(struct vport_ops *ops);
+#define ovs_vport_ops_register(ops)            \
+       ({                                      \
+               (ops)->owner = THIS_MODULE;     \
+               __ovs_vport_ops_register(ops);  \
+       })
+
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
 static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
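Wrapping __ovs_vport_ops_register() in a macro is the standard trick for capturing the caller's module: THIS_MODULE expands per translation unit, so evaluating it inside vport.c would pin every vport_ops to the openvswitch core instead of the tunnel module that owns the ops. That is why the geneve and gre hunks above can drop their manual .owner initializers. Usage mirrors the existing gre registration:

	/* In a tunnel module such as vport-gre.c: the macro fills in
	 * .owner = THIS_MODULE at this call site, i.e. the gre module.
	 */
	static struct vport_ops ovs_gre_vport_ops = {
		.type    = OVS_VPORT_TYPE_GRE,
		.create  = gre_create,
		.send    = dev_queue_xmit,
		.destroy = ovs_netdev_tunnel_destroy,
	};

	static int __init ovs_gre_tnl_init(void)
	{
		return ovs_vport_ops_register(&ovs_gre_vport_ops);
	}

With the owner recorded correctly, the core can hold a module reference for as long as a vport of that type exists.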
index 1cf928fb573e6769a4070327baa74adb300cde7e..992396aa635ce1174f6e62f19ae3f19a550668c6 100644 (file)
@@ -2329,8 +2329,8 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
 static bool ll_header_truncated(const struct net_device *dev, int len)
 {
        /* net device doesn't like empty head */
-       if (unlikely(len <= dev->hard_header_len)) {
-               net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
+       if (unlikely(len < dev->hard_header_len)) {
+               net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
                                     current->comm, len, dev->hard_header_len);
                return true;
        }
index d4564036a339a04d31c65ec6c90cdc9e6a051efc..e3b118cae81d5e859e0244df1bf323aaa1798b8e 100644 (file)
@@ -186,12 +186,6 @@ static struct rds_connection *__rds_conn_create(struct net *net,
                }
        }
 
-       if (trans == NULL) {
-               kmem_cache_free(rds_conn_slab, conn);
-               conn = ERR_PTR(-ENODEV);
-               goto out;
-       }
-
        conn->c_trans = trans;
 
        ret = trans->conn_alloc(conn, gfp);
index 827155c2ead10376cb633c45c2f43917f5f5cd12..c9cdb358ea885e3e356cc675b579f1313ed94ff9 100644 (file)
@@ -1013,11 +1013,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                release_sock(sk);
        }
 
-       /* racing with another thread binding seems ok here */
+       lock_sock(sk);
        if (daddr == 0 || rs->rs_bound_addr == 0) {
+               release_sock(sk);
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }
+       release_sock(sk);
 
        if (payload_len > rds_sk_sndbuf(rs)) {
                ret = -EMSGSIZE;
index e0547f521f20d79c688c773286d609066c990a1d..adc555e0323d70b90d786c630f5a04dfddd60b32 100644 (file)
@@ -723,8 +723,10 @@ process_further:
 
                        if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
                             call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
-                           hard > tx)
+                           hard > tx) {
+                               call->acks_hard = tx;
                                goto all_acked;
+                       }
 
                        smp_rmb();
                        rxrpc_rotate_tx_window(call, hard - 1);
index a40d3afe93b7f3ee657985753c320210815f62a9..14c4e12c47b0f87f1c560d752bbd897d3ae2f952 100644 (file)
@@ -531,7 +531,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
        /* this should be in poll */
-       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+       sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                return -EPIPE;
index f43c8f33f09ef60e0f2a49ffd62f03624798c61b..7ec667dd4ce1d01deef3698af8c291da0722a657 100644 (file)
@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
 }
 
 /* We know handle. Find qdisc among all qdisc's attached to device
-   (root qdisc, all its children, children of children etc.)
+ * (root qdisc, all its children, children of children etc.)
+ * Note: caller either uses rtnl or rcu_read_lock()
  */
 
 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
            root->handle == handle)
                return root;
 
-       list_for_each_entry(q, &root->list, list) {
+       list_for_each_entry_rcu(q, &root->list, list) {
                if (q->handle == handle)
                        return q;
        }
@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
                struct Qdisc *root = qdisc_dev(q)->qdisc;
 
                WARN_ON_ONCE(root == &noop_qdisc);
-               list_add_tail(&q->list, &root->list);
+               ASSERT_RTNL();
+               list_add_tail_rcu(&q->list, &root->list);
        }
 }
 EXPORT_SYMBOL(qdisc_list_add);
 
 void qdisc_list_del(struct Qdisc *q)
 {
-       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
-               list_del(&q->list);
+       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+               ASSERT_RTNL();
+               list_del_rcu(&q->list);
+       }
 }
 EXPORT_SYMBOL(qdisc_list_del);
 
@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
        if (n == 0)
                return;
        drops = max_t(int, n, 0);
+       rcu_read_lock();
        while ((parentid = sch->parent)) {
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
-                       return;
+                       break;
 
+               if (sch->flags & TCQ_F_NOPARENT)
+                       break;
+               /* TODO: perform the search on a per txq basis */
                sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                if (sch == NULL) {
-                       WARN_ON(parentid != TC_H_ROOT);
-                       return;
+                       WARN_ON_ONCE(parentid != TC_H_ROOT);
+                       break;
                }
                cops = sch->ops->cl_ops;
                if (cops->qlen_notify) {
@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
                sch->q.qlen -= n;
                __qdisc_qstats_drop(sch, drops);
        }
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 
@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                }
                lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
                if (!netif_is_multiqueue(dev))
-                       sch->flags |= TCQ_F_ONETXQUEUE;
+                       sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }
 
        sch->handle = handle;
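Two coupled fixes land in sch_api.c: the per-device qdisc list becomes RCU-protected (list_add_tail_rcu/list_del_rcu under ASSERT_RTNL for writers, list_for_each_entry_rcu for lookup), and qdisc_tree_decrease_qlen() gains a TCQ_F_NOPARENT stop condition. Per-tx-queue qdiscs carry a synthetic parent handle, so walking "up" from one could resolve to an unrelated qdisc or trip the WARN. The resulting traversal, schematically:

	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;
		if (sch->flags & TCQ_F_NOPARENT)
			break;		/* per-txq root: nothing real above */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (!sch)
			break;
		/* notify the parent class and adjust its qlen */
	}
	rcu_read_unlock();

Every attach path in the following hunks sets TCQ_F_NOPARENT alongside TCQ_F_ONETXQUEUE so the flag is consistent for mq, mqprio and the default single-queue case.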
index cb5d4ad32946cf0c6f3391fe9742bceb7dd702e6..e82a1ad80aa521291fba4e704bb3828e88a009db 100644 (file)
@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
                return;
        }
        if (!netif_is_multiqueue(dev))
-               qdisc->flags |= TCQ_F_ONETXQUEUE;
+               qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        dev_queue->qdisc_sleeping = qdisc;
 }
 
index f3cbaecd283af4ce5a78ad90d135d008d2b542d7..3e82f047caaf40461c9f408bd0572a64147e1a8f 100644 (file)
@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                if (qdisc == NULL)
                        goto err;
                priv->qdiscs[ntx] = qdisc;
-               qdisc->flags |= TCQ_F_ONETXQUEUE;
+               qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }
 
        sch->flags |= TCQ_F_MQROOT;
@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
 
        *old = dev_graft_qdisc(dev_queue, new);
        if (new)
-               new->flags |= TCQ_F_ONETXQUEUE;
+               new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        if (dev->flags & IFF_UP)
                dev_activate(dev);
        return 0;
index 3811a745452cf402cd498ed47058fdf655d18cbd..ad70ecf57ce793d7b50b50e9220c24fa4ab30ab5 100644 (file)
@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                        goto err;
                }
                priv->qdiscs[i] = qdisc;
-               qdisc->flags |= TCQ_F_ONETXQUEUE;
+               qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }
 
        /* If the mqprio options indicate that hardware should own
@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
        *old = dev_graft_qdisc(dev_queue, new);
 
        if (new)
-               new->flags |= TCQ_F_ONETXQUEUE;
+               new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 
        if (dev->flags & IFF_UP)
                dev_activate(dev);
index e917d27328ea835419ba3e4c26eae1d7b7fade77..acb45b8c2a9d6ce902af7a2ffb9cbd53309a1bdc 100644 (file)
@@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
        struct sock *sk = skb->sk;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct flowi6 *fl6 = &transport->fl.u.ip6;
+       int res;
 
        pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
                 skb->len, &fl6->saddr, &fl6->daddr);
@@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 
        SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
-       return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+       rcu_read_lock();
+       res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+       rcu_read_unlock();
+       return res;
 }
 
 /* Returns the dst cache entry for the given source and destination ip
@@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                pr_debug("src=%pI6 - ", &fl6->saddr);
        }
 
-       final_p = fl6_update_dst(fl6, np->opt, &final);
+       rcu_read_lock();
+       final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+       rcu_read_unlock();
+
        dst = ip6_dst_lookup_flow(sk, fl6, final_p);
        if (!asoc || saddr)
                goto out;
@@ -321,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        if (baddr) {
                fl6->saddr = baddr->v6.sin6_addr;
                fl6->fl6_sport = baddr->v6.sin6_port;
-               final_p = fl6_update_dst(fl6, np->opt, &final);
+               final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
                dst = ip6_dst_lookup_flow(sk, fl6, final_p);
        }
 
index 897c01c029cab3d5805cc56b0964c70e06f4143a..03c8256063ec6355fcce034366aa5d005d75b5f7 100644 (file)
@@ -972,7 +972,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
                return -EFAULT;
 
        /* Alloc space for the address array in kernel memory.  */
-       kaddrs = kmalloc(addrs_size, GFP_KERNEL);
+       kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
        if (unlikely(!kaddrs))
                return -ENOMEM;
 
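addrs_size is taken directly from the setsockopt caller, so the allocation switches from GFP_KERNEL to GFP_USER | __GFP_NOWARN: an oversized request from userspace should fail quietly with -ENOMEM instead of dumping allocation-failure warnings into the log. The general idiom for a user-controlled kmalloc size:

	/* len comes straight from a {set,get}sockopt argument. */
	buf = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (unlikely(!buf))
		return -ENOMEM;

GFP_USER additionally applies the hardwall cpuset constraints appropriate for memory allocated on behalf of userspace; the getsockopt path below gets the same change.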
@@ -4928,7 +4928,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
        to = optval + offsetof(struct sctp_getaddrs, addrs);
        space_left = len - offsetof(struct sctp_getaddrs, addrs);
 
-       addrs = kmalloc(space_left, GFP_KERNEL);
+       addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
        if (!addrs)
                return -ENOMEM;
 
@@ -6458,7 +6458,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
        if (sctp_writeable(sk)) {
                mask |= POLLOUT | POLLWRNORM;
        } else {
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                /*
                 * Since the socket is not locked, the buffer
                 * might be made available after the writeable check and
@@ -6801,26 +6801,30 @@ no_packet:
 static void __sctp_write_space(struct sctp_association *asoc)
 {
        struct sock *sk = asoc->base.sk;
-       struct socket *sock = sk->sk_socket;
 
-       if ((sctp_wspace(asoc) > 0) && sock) {
-               if (waitqueue_active(&asoc->wait))
-                       wake_up_interruptible(&asoc->wait);
+       if (sctp_wspace(asoc) <= 0)
+               return;
+
+       if (waitqueue_active(&asoc->wait))
+               wake_up_interruptible(&asoc->wait);
 
-               if (sctp_writeable(sk)) {
-                       wait_queue_head_t *wq = sk_sleep(sk);
+       if (sctp_writeable(sk)) {
+               struct socket_wq *wq;
 
-                       if (wq && waitqueue_active(wq))
-                               wake_up_interruptible(wq);
+               rcu_read_lock();
+               wq = rcu_dereference(sk->sk_wq);
+               if (wq) {
+                       if (waitqueue_active(&wq->wait))
+                               wake_up_interruptible(&wq->wait);
 
                        /* Note that we try to include the Async I/O support
                         * here by modeling from the current TCP/UDP code.
                         * We have not tested with it yet.
                         */
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN))
-                               sock_wake_async(sock,
-                                               SOCK_WAKE_SPACE, POLL_OUT);
+                               sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
                }
+               rcu_read_unlock();
        }
 }
 
@@ -7375,6 +7379,13 @@ struct proto sctp_prot = {
 
 #if IS_ENABLED(CONFIG_IPV6)
 
+#include <net/transp_v6.h>
+static void sctp_v6_destroy_sock(struct sock *sk)
+{
+       sctp_destroy_sock(sk);
+       inet6_destroy_sock(sk);
+}
+
 struct proto sctpv6_prot = {
        .name           = "SCTPv6",
        .owner          = THIS_MODULE,
@@ -7384,7 +7395,7 @@ struct proto sctpv6_prot = {
        .accept         = sctp_accept,
        .ioctl          = sctp_ioctl,
        .init           = sctp_init_sock,
-       .destroy        = sctp_destroy_sock,
+       .destroy        = sctp_v6_destroy_sock,
        .shutdown       = sctp_shutdown,
        .setsockopt     = sctp_setsockopt,
        .getsockopt     = sctp_getsockopt,
index dd2c247c99e30a7950323c85e1ea9cb604eac218..456fadb3d8193a2238bf30881506cd66dbe11957 100644 (file)
@@ -1056,27 +1056,20 @@ static int sock_fasync(int fd, struct file *filp, int on)
        return 0;
 }
 
-/* This function may be called only under socket lock or callback_lock or rcu_lock */
+/* This function may be called only under rcu_lock */
 
-int sock_wake_async(struct socket *sock, int how, int band)
+int sock_wake_async(struct socket_wq *wq, int how, int band)
 {
-       struct socket_wq *wq;
-
-       if (!sock)
-               return -1;
-       rcu_read_lock();
-       wq = rcu_dereference(sock->wq);
-       if (!wq || !wq->fasync_list) {
-               rcu_read_unlock();
+       if (!wq || !wq->fasync_list)
                return -1;
-       }
+
        switch (how) {
        case SOCK_WAKE_WAITD:
-               if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
+               if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags))
                        break;
                goto call_kill;
        case SOCK_WAKE_SPACE:
-               if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
+               if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
                        break;
                /* fall through */
        case SOCK_WAKE_IO:
@@ -1086,7 +1079,7 @@ call_kill:
        case SOCK_WAKE_URG:
                kill_fasync(&wq->fasync_list, SIGURG, band);
        }
-       rcu_read_unlock();
+
        return 0;
 }
 EXPORT_SYMBOL(sock_wake_async);
index 1d1a7049891020264f5602d6237f12bac405c704..2ffaf6a794994ae20247fda57a6dd4c4bee64b0d 100644 (file)
@@ -398,7 +398,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
        if (unlikely(!sock))
                return -ENOTSOCK;
 
-       clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+       clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
        if (base != 0) {
                addr = NULL;
                addrlen = 0;
@@ -442,7 +442,7 @@ static void xs_nospace_callback(struct rpc_task *task)
        struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
 
        transport->inet->sk_write_pending--;
-       clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+       clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
 }
 
 /**
@@ -467,7 +467,7 @@ static int xs_nospace(struct rpc_task *task)
 
        /* Don't race with disconnect */
        if (xprt_connected(xprt)) {
-               if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
+               if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
                        /*
                         * Notify TCP that we're limited by the application
                         * window size
@@ -478,7 +478,7 @@ static int xs_nospace(struct rpc_task *task)
                        xprt_wait_for_buffer_space(task, xs_nospace_callback);
                }
        } else {
-               clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+               clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
                ret = -ENOTCONN;
        }
 
@@ -626,7 +626,7 @@ process_status:
        case -EPERM:
                /* When the server has died, an ICMP port unreachable message
                 * prompts ECONNREFUSED. */
-               clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+               clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
        }
 
        return status;
@@ -715,7 +715,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
        case -EADDRINUSE:
        case -ENOBUFS:
        case -EPIPE:
-               clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+               clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
        }
 
        return status;
@@ -1618,7 +1618,7 @@ static void xs_write_space(struct sock *sk)
 
        if (unlikely(!(xprt = xprt_from_sock(sk))))
                return;
-       if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
+       if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
                return;
 
        xprt_write_space(xprt);
index 9efbdbde2b0863542a08c91c136fadc9aa0cc6d8..91aea071ab27fee550e3c88ecc4097adac399d83 100644 (file)
@@ -191,6 +191,7 @@ void tipc_link_add_bc_peer(struct tipc_link *snd_l,
 
        snd_l->ackers++;
        rcv_l->acked = snd_l->snd_nxt - 1;
+       snd_l->state = LINK_ESTABLISHED;
        tipc_link_build_bc_init_msg(uc_l, xmitq);
 }
 
@@ -206,6 +207,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
        rcv_l->state = LINK_RESET;
        if (!snd_l->ackers) {
                tipc_link_reset(snd_l);
+               snd_l->state = LINK_RESET;
                __skb_queue_purge(xmitq);
        }
 }
index 552dbaba9cf386a07e6c4f499fda27ca1f8a8f4a..b53246fb04128345304f2de5d3c895683ab0133b 100644 (file)
@@ -105,6 +105,7 @@ struct tipc_sock {
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
+static void tipc_sock_destruct(struct sock *sk);
 static int tipc_release(struct socket *sock);
 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
@@ -381,6 +382,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
+       sk->sk_destruct = tipc_sock_destruct;
        tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
        tsk->sent_unacked = 0;
        atomic_set(&tsk->dupl_rcvcnt, 0);
@@ -470,9 +472,6 @@ static int tipc_release(struct socket *sock)
                tipc_node_remove_conn(net, dnode, tsk->portid);
        }
 
-       /* Discard any remaining (connection-based) messages in receive queue */
-       __skb_queue_purge(&sk->sk_receive_queue);
-
        /* Reject any messages that accumulated in backlog queue */
        sock->state = SS_DISCONNECTING;
        release_sock(sk);
@@ -1515,6 +1514,11 @@ static void tipc_data_ready(struct sock *sk)
        rcu_read_unlock();
 }
 
+static void tipc_sock_destruct(struct sock *sk)
+{
+       __skb_queue_purge(&sk->sk_receive_queue);
+}
+
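Moving the receive-queue purge from tipc_release() into a sk_destruct callback means it runs when the last reference to the sock is dropped, so skbs that are still in flight (e.g., sitting in the backlog) when release() runs are not leaked. A minimal sketch of wiring such a destructor, with hypothetical names:

#include <net/sock.h>

static void example_sock_destruct(struct sock *sk)
{
        /* called from sk_free() once the refcount hits zero */
        __skb_queue_purge(&sk->sk_receive_queue);
}

static void example_sock_init(struct sock *sk)
{
        sk->sk_destruct = example_sock_destruct;
}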
 /**
  * filter_connect - Handle all incoming messages for a connection-based socket
  * @tsk: TIPC socket
index ad2719ad4c1baf38cd8e31b244fe703382da002a..70c03271b798f429d8d6faa5142fb2654753d342 100644 (file)
@@ -158,8 +158,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
        struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
        struct rtable *rt;
 
-       if (skb_headroom(skb) < UDP_MIN_HEADROOM)
-               pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+       if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
+               err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+               if (err)
+                       goto tx_error;
+       }
 
        skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
        ub = rcu_dereference_rtnl(b->media_ptr);
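The fix above stops ignoring pskb_expand_head()'s return value: on failure the skb head was never enlarged, so pushing the encapsulation headers would scribble past the buffer. The general headroom idiom, sketched with a made-up minimum (the caller's error path is expected to free the skb):

#include <linux/skbuff.h>

#define EXAMPLE_MIN_HEADROOM 28 /* hypothetical, analogous to UDP_MIN_HEADROOM */

static int example_make_headroom(struct sk_buff *skb)
{
        int err;

        if (skb_headroom(skb) < EXAMPLE_MIN_HEADROOM) {
                /* may reallocate the head; returns 0 or a -errno */
                err = pskb_expand_head(skb, EXAMPLE_MIN_HEADROOM, 0,
                                       GFP_ATOMIC);
                if (err)
                        return err;     /* caller frees the skb */
        }
        /* now safe to skb_push() the protocol headers */
        return 0;
}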
index 955ec152cb71eac8c91e06f1c922d1df20f6e1f0..45aebd966978bdc6abb886487d029957036bdda4 100644 (file)
@@ -326,6 +326,118 @@ found:
        return s;
 }
 
+/* Support code for asymmetrically connected dgram sockets
+ *
+ * If a datagram socket is connected to a socket not itself connected
+ * to the first socket (eg, /dev/log), clients may only enqueue more
+ * messages if the present receive queue of the server socket is not
+ * "too large". This means there's a second writeability condition
+ * poll and sendmsg need to test. The dgram recv code will do a wake
+ * up on the peer_wait wait queue of a socket upon reception of a
+ * datagram which needs to be propagated to sleeping would-be writers
+ * since these might not have sent anything so far. This can't be
+ * accomplished via poll_wait because the lifetime of the server
+ * socket might be less than that of its clients if these break their
+ * association with it or if the server socket is closed while clients
+ * are still connected to it and there's no way to inform "a polling
+ * implementation" that it should let go of a certain wait queue
+ *
+ * In order to propagate a wake up, a wait_queue_t of the client
+ * socket is enqueued on the peer_wait queue of the server socket
+ * whose wake function does a wake_up on the ordinary client socket
+ * wait queue. This connection is established whenever a write (or
+ * poll for write) hit the flow control condition and broken when the
+ * association to the server socket is dissolved or after a wake up
+ * was relayed.
+ */
+
+static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+                                     void *key)
+{
+       struct unix_sock *u;
+       wait_queue_head_t *u_sleep;
+
+       u = container_of(q, struct unix_sock, peer_wake);
+
+       __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
+                           q);
+       u->peer_wake.private = NULL;
+
+       /* relaying can only happen while the wq still exists */
+       u_sleep = sk_sleep(&u->sk);
+       if (u_sleep)
+               wake_up_interruptible_poll(u_sleep, key);
+
+       return 0;
+}
+
+static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
+{
+       struct unix_sock *u, *u_other;
+       int rc;
+
+       u = unix_sk(sk);
+       u_other = unix_sk(other);
+       rc = 0;
+       spin_lock(&u_other->peer_wait.lock);
+
+       if (!u->peer_wake.private) {
+               u->peer_wake.private = other;
+               __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
+
+               rc = 1;
+       }
+
+       spin_unlock(&u_other->peer_wait.lock);
+       return rc;
+}
+
+static void unix_dgram_peer_wake_disconnect(struct sock *sk,
+                                           struct sock *other)
+{
+       struct unix_sock *u, *u_other;
+
+       u = unix_sk(sk);
+       u_other = unix_sk(other);
+       spin_lock(&u_other->peer_wait.lock);
+
+       if (u->peer_wake.private == other) {
+               __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
+               u->peer_wake.private = NULL;
+       }
+
+       spin_unlock(&u_other->peer_wait.lock);
+}
+
+static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
+                                                  struct sock *other)
+{
+       unix_dgram_peer_wake_disconnect(sk, other);
+       wake_up_interruptible_poll(sk_sleep(sk),
+                                  POLLOUT |
+                                  POLLWRNORM |
+                                  POLLWRBAND);
+}
+
+/* preconditions:
+ *     - unix_peer(sk) == other
+ *     - association is stable
+ */
+static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+{
+       int connected;
+
+       connected = unix_dgram_peer_wake_connect(sk, other);
+
+       if (unix_recvq_full(other))
+               return 1;
+
+       if (connected)
+               unix_dgram_peer_wake_disconnect(sk, other);
+
+       return 0;
+}
+
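The machinery added above reduces to one trick: a wait-queue entry whose wake function, installed with init_waitqueue_func_entry(), forwards the wake-up to a different wait queue instead of waking a task. The entry lives on the server's peer_wait queue; when the server's recv path wakes that queue, the relay pokes the client's own sleep queue. A stripped-down sketch using the 4.4-era wait_queue_t type (all names hypothetical):

#include <linux/wait.h>
#include <linux/kernel.h>

struct relay {
        wait_queue_t entry;             /* enqueued on the watched queue */
        wait_queue_head_t *target;      /* queue wake-ups are forwarded to */
};

static int relay_wake(wait_queue_t *q, unsigned mode, int flags, void *key)
{
        struct relay *r = container_of(q, struct relay, entry);

        /* forward the poll mask carried in @key to the target queue */
        wake_up_interruptible_poll(r->target, (unsigned long)key);
        return 0;
}

static void relay_init(struct relay *r, wait_queue_head_t *target)
{
        r->target = target;
        init_waitqueue_func_entry(&r->entry, relay_wake);
}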
 static int unix_writable(const struct sock *sk)
 {
        return sk->sk_state != TCP_LISTEN &&
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
                        skpair->sk_state_change(skpair);
                        sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                }
+
+               unix_dgram_peer_wake_disconnect(sk, skpair);
                sock_put(skpair); /* It may now die */
                unix_peer(sk) = NULL;
        }
@@ -666,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->readlock); /* single task reading lock */
        init_waitqueue_head(&u->peer_wait);
+       init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
 out:
        if (sk == NULL)
@@ -1033,6 +1148,8 @@ restart:
        if (unix_peer(sk)) {
                struct sock *old_peer = unix_peer(sk);
                unix_peer(sk) = other;
+               unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+
                unix_state_double_unlock(sk, other);
 
                if (other != old_peer)
@@ -1434,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
        return err;
 }
 
+static bool unix_passcred_enabled(const struct socket *sock,
+                                 const struct sock *other)
+{
+       return test_bit(SOCK_PASSCRED, &sock->flags) ||
+              !other->sk_socket ||
+              test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
+}
+
 /*
  * Some apps rely on write() giving SCM_CREDENTIALS
  * We include credentials if source or destination socket
@@ -1444,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
 {
        if (UNIXCB(skb).pid)
                return;
-       if (test_bit(SOCK_PASSCRED, &sock->flags) ||
-           !other->sk_socket ||
-           test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+       if (unix_passcred_enabled(sock, other)) {
                UNIXCB(skb).pid  = get_pid(task_tgid(current));
                current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
        }
 }
 
+static int maybe_init_creds(struct scm_cookie *scm,
+                           struct socket *socket,
+                           const struct sock *other)
+{
+       int err;
+       struct msghdr msg = { .msg_controllen = 0 };
+
+       err = scm_send(socket, &msg, scm, false);
+       if (err)
+               return err;
+
+       if (unix_passcred_enabled(socket, other)) {
+               scm->pid = get_pid(task_tgid(current));
+               current_uid_gid(&scm->creds.uid, &scm->creds.gid);
+       }
+       return err;
+}
+
+static bool unix_skb_scm_eq(struct sk_buff *skb,
+                           struct scm_cookie *scm)
+{
+       const struct unix_skb_parms *u = &UNIXCB(skb);
+
+       return u->pid == scm->pid &&
+              uid_eq(u->uid, scm->creds.uid) &&
+              gid_eq(u->gid, scm->creds.gid) &&
+              unix_secdata_eq(scm, skb);
+}
+
 /*
  *     Send AF_UNIX data.
  */
@@ -1472,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
        struct scm_cookie scm;
        int max_level;
        int data_len = 0;
+       int sk_locked;
 
        wait_for_unix_gc();
        err = scm_send(sock, msg, &scm, false);
@@ -1550,12 +1703,14 @@ restart:
                goto out_free;
        }
 
+       sk_locked = 0;
        unix_state_lock(other);
+restart_locked:
        err = -EPERM;
        if (!unix_may_send(sk, other))
                goto out_unlock;
 
-       if (sock_flag(other, SOCK_DEAD)) {
+       if (unlikely(sock_flag(other, SOCK_DEAD))) {
                /*
                 *      Check with 1003.1g - what should
                 *      datagram error
@@ -1563,10 +1718,14 @@ restart:
                unix_state_unlock(other);
                sock_put(other);
 
+               if (!sk_locked)
+                       unix_state_lock(sk);
+
                err = 0;
-               unix_state_lock(sk);
                if (unix_peer(sk) == other) {
                        unix_peer(sk) = NULL;
+                       unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
                        unix_state_unlock(sk);
 
                        unix_dgram_disconnected(sk, other);
@@ -1592,21 +1751,38 @@ restart:
                        goto out_unlock;
        }
 
-       if (unix_peer(other) != sk && unix_recvq_full(other)) {
-               if (!timeo) {
-                       err = -EAGAIN;
-                       goto out_unlock;
+       if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+               if (timeo) {
+                       timeo = unix_wait_for_peer(other, timeo);
+
+                       err = sock_intr_errno(timeo);
+                       if (signal_pending(current))
+                               goto out_free;
+
+                       goto restart;
                }
 
-               timeo = unix_wait_for_peer(other, timeo);
+               if (!sk_locked) {
+                       unix_state_unlock(other);
+                       unix_state_double_lock(sk, other);
+               }
 
-               err = sock_intr_errno(timeo);
-               if (signal_pending(current))
-                       goto out_free;
+               if (unix_peer(sk) != other ||
+                   unix_dgram_peer_wake_me(sk, other)) {
+                       err = -EAGAIN;
+                       sk_locked = 1;
+                       goto out_unlock;
+               }
 
-               goto restart;
+               if (!sk_locked) {
+                       sk_locked = 1;
+                       goto restart_locked;
+               }
        }
 
+       if (unlikely(sk_locked))
+               unix_state_unlock(sk);
+
        if (sock_flag(other, SOCK_RCVTSTAMP))
                __net_timestamp(skb);
        maybe_add_creds(skb, sock, other);
@@ -1620,6 +1796,8 @@ restart:
        return len;
 
 out_unlock:
+       if (sk_locked)
+               unix_state_unlock(sk);
        unix_state_unlock(other);
 out_free:
        kfree_skb(skb);
@@ -1741,8 +1919,10 @@ out_err:
 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
                                    int offset, size_t size, int flags)
 {
-       int err = 0;
-       bool send_sigpipe = true;
+       int err;
+       bool send_sigpipe = false;
+       bool init_scm = true;
+       struct scm_cookie scm;
        struct sock *other, *sk = socket->sk;
        struct sk_buff *skb, *newskb = NULL, *tail = NULL;
 
@@ -1760,7 +1940,7 @@ alloc_skb:
                newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
                                              &err, 0);
                if (!newskb)
-                       return err;
+                       goto err;
        }
 
        /* we must acquire readlock as we modify already present
@@ -1769,12 +1949,12 @@ alloc_skb:
        err = mutex_lock_interruptible(&unix_sk(other)->readlock);
        if (err) {
                err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
-               send_sigpipe = false;
                goto err;
        }
 
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
+               send_sigpipe = true;
                goto err_unlock;
        }
 
@@ -1783,17 +1963,27 @@ alloc_skb:
        if (sock_flag(other, SOCK_DEAD) ||
            other->sk_shutdown & RCV_SHUTDOWN) {
                err = -EPIPE;
+               send_sigpipe = true;
                goto err_state_unlock;
        }
 
+       if (init_scm) {
+               err = maybe_init_creds(&scm, socket, other);
+               if (err)
+                       goto err_state_unlock;
+               init_scm = false;
+       }
+
        skb = skb_peek_tail(&other->sk_receive_queue);
        if (tail && tail == skb) {
                skb = newskb;
-       } else if (!skb) {
-               if (newskb)
+       } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
+               if (newskb) {
                        skb = newskb;
-               else
+               } else {
+                       tail = skb;
                        goto alloc_skb;
+               }
        } else if (newskb) {
                /* this is fast path, we don't necessarily need to
                 * call to kfree_skb even though with newskb == NULL
@@ -1814,6 +2004,9 @@ alloc_skb:
        atomic_add(size, &sk->sk_wmem_alloc);
 
        if (newskb) {
+               err = unix_scm_to_skb(&scm, skb, false);
+               if (err)
+                       goto err_state_unlock;
                spin_lock(&other->sk_receive_queue.lock);
                __skb_queue_tail(&other->sk_receive_queue, newskb);
                spin_unlock(&other->sk_receive_queue.lock);
@@ -1823,7 +2016,7 @@ alloc_skb:
        mutex_unlock(&unix_sk(other)->readlock);
 
        other->sk_data_ready(other);
-
+       scm_destroy(&scm);
        return size;
 
 err_state_unlock:
@@ -1834,6 +2027,8 @@ err:
        kfree_skb(newskb);
        if (send_sigpipe && !(flags & MSG_NOSIGNAL))
                send_sig(SIGPIPE, current, 0);
+       if (!init_scm)
+               scm_destroy(&scm);
        return err;
 }
 
@@ -1996,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
                    !timeo)
                        break;
 
-               set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                unix_state_unlock(sk);
                timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
@@ -2004,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
                if (sock_flag(sk, SOCK_DEAD))
                        break;
 
-               clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+               sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        }
 
        finish_wait(sk_sleep(sk), &wait);
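sk_set_bit() and sk_clear_bit() are the helpers this kernel generation introduces alongside the SOCK_ASYNC_* to SOCKWQ_ASYNC_* rename; they set the flag on the sock's wait-queue structure rather than on struct socket. The surrounding code is the standard wait-for-data loop; a compressed sketch of its shape (locking and the condition re-check are elided):

#include <net/sock.h>
#include <linux/freezer.h>

static long example_data_wait(struct sock *sk, long timeo)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);  /* mark a reader as waiting */
        timeo = freezable_schedule_timeout(timeo);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        finish_wait(sk_sleep(sk), &wait);
        return timeo;
}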
@@ -2137,10 +2332,7 @@ unlock:
 
                if (check_creds) {
                        /* Never glue messages from different writers */
-                       if ((UNIXCB(skb).pid  != scm.pid) ||
-                           !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
-                           !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
-                           !unix_secdata_eq(&scm, skb))
+                       if (!unix_skb_scm_eq(skb, &scm))
                                break;
                } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
                        /* Copy credentials */
@@ -2476,20 +2668,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
                return mask;
 
        writable = unix_writable(sk);
-       other = unix_peer_get(sk);
-       if (other) {
-               if (unix_peer(other) != sk) {
-                       sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
-                       if (unix_recvq_full(other))
-                               writable = 0;
-               }
-               sock_put(other);
+       if (writable) {
+               unix_state_lock(sk);
+
+               other = unix_peer(sk);
+               if (other && unix_peer(other) != sk &&
+                   unix_recvq_full(other) &&
+                   unix_dgram_peer_wake_me(sk, other))
+                       writable = 0;
+
+               unix_state_unlock(sk);
        }
 
        if (writable)
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
        return mask;
 }
index 1a10d8ac81620faad519d4f95ca8552ac4a958c1..dacf71a43ad41f678e3a0574c84321bacd0ff30e 100755 (executable)
@@ -62,7 +62,7 @@ vmlinux_link()
                        -Wl,--start-group                                    \
                                 ${KBUILD_VMLINUX_MAIN}                      \
                        -Wl,--end-group                                      \
-                       -lutil ${1}
+                       -lutil -lrt ${1}
                rm -f linux
        fi
 }
index c8b8ef5246a6f94c99b281c837e220e118dbf8a9..ef198903c0c3e6bca1685c3cc6a53420e77e1333 100644 (file)
@@ -955,6 +955,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
  */
 
 static const struct hda_device_id snd_hda_id_conexant[] = {
+       HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
@@ -972,9 +973,9 @@ static const struct hda_device_id snd_hda_id_conexant[] = {
        HDA_CODEC_ENTRY(0x14f150ac, "CX20652", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f150b8, "CX20664", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f150b9, "CX20665", patch_conexant_auto),
-       HDA_CODEC_ENTRY(0x14f150f1, "CX20721", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f150f1, "CX21722", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f150f2, "CX20722", patch_conexant_auto),
-       HDA_CODEC_ENTRY(0x14f150f3, "CX20723", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f150f3, "CX21724", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f150f4, "CX20724", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f1510f, "CX20751/2", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15110, "CX20751/2", patch_conexant_auto),
index bdb6f226d00686f67e29cbcf29f6259df0431145..4b6fb668c91cd1ba2e4964d49c0d610cab43c458 100644 (file)
@@ -2352,6 +2352,12 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
        struct hda_codec *codec = audio_ptr;
        int pin_nid = port + 0x04;
 
+       /* skip notification during system suspend (but not in runtime PM);
+        * the state will be updated at resume
+        */
+       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
+               return;
+
        check_presence_and_report(codec, pin_nid);
 }
 
index 9929efc6b9aaa4257a155e844b7feb2f91ea80f9..b3ea24d64c5009cf4cc77e1877c894e3f21f4ee7 100644 (file)
@@ -1023,24 +1023,18 @@ void arizona_init_dvfs(struct arizona_priv *priv)
 }
 EXPORT_SYMBOL_GPL(arizona_init_dvfs);
 
-static unsigned int arizona_sysclk_48k_rates[] = {
+static unsigned int arizona_opclk_ref_48k_rates[] = {
        6144000,
        12288000,
        24576000,
        49152000,
-       73728000,
-       98304000,
-       147456000,
 };
 
-static unsigned int arizona_sysclk_44k1_rates[] = {
+static unsigned int arizona_opclk_ref_44k1_rates[] = {
        5644800,
        11289600,
        22579200,
        45158400,
-       67737600,
-       90316800,
-       135475200,
 };
 
 static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
@@ -1065,11 +1059,11 @@ static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
        }
 
        if (refclk % 8000)
-               rates = arizona_sysclk_44k1_rates;
+               rates = arizona_opclk_ref_44k1_rates;
        else
-               rates = arizona_sysclk_48k_rates;
+               rates = arizona_opclk_ref_48k_rates;
 
-       for (ref = 0; ref < ARRAY_SIZE(arizona_sysclk_48k_rates) &&
+       for (ref = 0; ref < ARRAY_SIZE(arizona_opclk_ref_48k_rates) &&
                     rates[ref] <= refclk; ref++) {
                div = 1;
                while (rates[ref] / div >= freq && div < 32) {
index 969e337dc17c131413e43f666edd5b299fc11544..84f5eb07a91b1d27274f595cf595d97f36663484 100644 (file)
@@ -205,18 +205,18 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
 
 /* Left Mixer */
 static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
-       SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 8, 1, 0),
-       SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 7, 1, 0),
-       SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 8, 1, 0),
-       SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 7, 1, 0),
+       SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
+       SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
+       SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
+       SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
 };
 
 /* Right Mixer */
 static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
-       SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 8, 1, 0),
-       SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 7, 1, 0),
-       SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 8, 1, 0),
-       SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 7, 1, 0),
+       SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
+       SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
+       SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
+       SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
 };
 
 static const char * const es8328_pga_sel[] = {
index 7fc7b4e3f4442a52727e97d96a847c76ada6a58f..c1b87c5800b1d0980cf541e96a4d22ae76e9b32c 100644 (file)
@@ -1271,6 +1271,36 @@ static int nau8825_i2c_remove(struct i2c_client *client)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int nau8825_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct nau8825 *nau8825 = dev_get_drvdata(dev);
+
+       disable_irq(client->irq);
+       regcache_cache_only(nau8825->regmap, true);
+       regcache_mark_dirty(nau8825->regmap);
+
+       return 0;
+}
+
+static int nau8825_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct nau8825 *nau8825 = dev_get_drvdata(dev);
+
+       regcache_cache_only(nau8825->regmap, false);
+       regcache_sync(nau8825->regmap);
+       enable_irq(client->irq);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops nau8825_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
+};
+
 static const struct i2c_device_id nau8825_i2c_ids[] = {
        { "nau8825", 0 },
        { }
@@ -1297,6 +1327,7 @@ static struct i2c_driver nau8825_driver = {
                .name = "nau8825",
                .of_match_table = of_match_ptr(nau8825_of_ids),
                .acpi_match_table = ACPI_PTR(nau8825_acpi_match),
+               .pm = &nau8825_pm,
        },
        .probe = nau8825_i2c_probe,
        .remove = nau8825_i2c_remove,
index aca479fa767027174be48390fd0dc93c3802d022..1dc68ab08a1799e71ec17677c5b0da959d3cd40f 100644 (file)
@@ -80,8 +80,10 @@ int rl6231_calc_dmic_clk(int rate)
        }
 
        for (i = 0; i < ARRAY_SIZE(div); i++) {
-               /* find divider that gives DMIC frequency below 3MHz */
-               if (3000000 * div[i] >= rate)
+               if ((div[i] % 3) == 0)
+                       continue;
+               /* find divider that gives DMIC frequency below 3.072MHz */
+               if (3072000 * div[i] >= rate)
                        return i;
        }
 
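The corrected search skips dividers divisible by 3 and targets 3.072 MHz instead of 3 MHz. A standalone worked example, assuming this driver generation's divider table of {2, 3, 4, 6, 8, 12}: for rate = 24576000 it returns index 4 (divider 8), i.e. 24.576 MHz / 8 = 3.072 MHz.

#include <stdio.h>

static const int div_tbl[] = {2, 3, 4, 6, 8, 12};

static int calc_dmic_clk(int rate)
{
        unsigned int i;

        for (i = 0; i < sizeof(div_tbl) / sizeof(div_tbl[0]); i++) {
                if ((div_tbl[i] % 3) == 0)
                        continue;       /* dividers divisible by 3 are skipped */
                /* find the divider that keeps the DMIC clock at or below 3.072 MHz */
                if (3072000 * div_tbl[i] >= rate)
                        return i;
        }
        return -1;      /* no usable divider */
}

int main(void)
{
        printf("index = %d\n", calc_dmic_clk(24576000));        /* prints 4 */
        return 0;
}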
index 28132375e4274a6e94e830c2fe1e8fcddd255747..ef76940f9dcb63196b10798f80d54528adc6109d 100644 (file)
@@ -245,7 +245,7 @@ struct rt5645_priv {
        struct snd_soc_jack *hp_jack;
        struct snd_soc_jack *mic_jack;
        struct snd_soc_jack *btn_jack;
-       struct delayed_work jack_detect_work;
+       struct delayed_work jack_detect_work, rcclock_work;
        struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
        struct rt5645_eq_param_s *eq_param;
 
@@ -565,12 +565,33 @@ static int rt5645_hweq_put(struct snd_kcontrol *kcontrol,
        .put = rt5645_hweq_put \
 }
 
+static int rt5645_spk_put_volsw(struct snd_kcontrol *kcontrol,
+               struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
+       int ret;
+
+       cancel_delayed_work_sync(&rt5645->rcclock_work);
+
+       regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+               RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PU);
+
+       ret = snd_soc_put_volsw(kcontrol, ucontrol);
+
+       queue_delayed_work(system_power_efficient_wq, &rt5645->rcclock_work,
+               msecs_to_jiffies(200));
+
+       return ret;
+}
+
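rt5645_spk_put_volsw() keeps the CLK25M RC clock powered across the register write and re-arms a delayed work item that powers it back down 200 ms later; as long as volume writes keep arriving, cancel_delayed_work_sync() keeps pushing the power-down out. A generic sketch of this debounce pattern, with hypothetical power callbacks standing in for the regmap_update_bits() calls:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct gated_dev {
        struct delayed_work gate_work;
        void (*power_up)(struct gated_dev *d);          /* assumed callbacks */
        void (*power_down)(struct gated_dev *d);
};

static void gate_work_fn(struct work_struct *work)
{
        struct gated_dev *d = container_of(work, struct gated_dev,
                                           gate_work.work);

        d->power_down(d);       /* device was idle for the full 200 ms */
}

static void gated_dev_init(struct gated_dev *d)
{
        INIT_DELAYED_WORK(&d->gate_work, gate_work_fn);
}

static void gated_access(struct gated_dev *d)
{
        cancel_delayed_work_sync(&d->gate_work);        /* defer power-down */
        d->power_up(d);
        /* ... perform the register access here ... */
        queue_delayed_work(system_power_efficient_wq, &d->gate_work,
                           msecs_to_jiffies(200));
}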
 static const struct snd_kcontrol_new rt5645_snd_controls[] = {
        /* Speaker Output Volume */
        SOC_DOUBLE("Speaker Channel Switch", RT5645_SPK_VOL,
                RT5645_VOL_L_SFT, RT5645_VOL_R_SFT, 1, 1),
-       SOC_DOUBLE_TLV("Speaker Playback Volume", RT5645_SPK_VOL,
-               RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, out_vol_tlv),
+       SOC_DOUBLE_EXT_TLV("Speaker Playback Volume", RT5645_SPK_VOL,
+               RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, snd_soc_get_volsw,
+               rt5645_spk_put_volsw, out_vol_tlv),
 
        /* ClassD modulator Speaker Gain Ratio */
        SOC_SINGLE_TLV("Speaker ClassD Playback Volume", RT5645_SPO_CLSD_RATIO,
@@ -1498,7 +1519,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
                                regmap_write(rt5645->regmap, RT5645_PR_BASE +
                                        RT5645_MAMP_INT_REG2, 0xfc00);
                                snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
-                               msleep(40);
+                               msleep(70);
                                rt5645->hp_on = true;
                        } else {
                                /* depop parameters */
@@ -3122,6 +3143,15 @@ static void rt5645_jack_detect_work(struct work_struct *work)
                                SND_JACK_BTN_2 | SND_JACK_BTN_3);
 }
 
+static void rt5645_rcclock_work(struct work_struct *work)
+{
+       struct rt5645_priv *rt5645 =
+               container_of(work, struct rt5645_priv, rcclock_work.work);
+
+       regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+               RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PD);
+}
+
 static irqreturn_t rt5645_irq(int irq, void *data)
 {
        struct rt5645_priv *rt5645 = data;
@@ -3348,6 +3378,27 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Reks"),
                },
        },
+       {
+               .ident = "Google Edgar",
+               .callback = strago_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+               },
+       },
+       {
+               .ident = "Google Wizpig",
+               .callback = strago_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Wizpig"),
+               },
+       },
+       {
+               .ident = "Google Terra",
+               .callback = strago_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Terra"),
+               },
+       },
        { }
 };
 
@@ -3587,6 +3638,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
        }
 
        INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+       INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
 
        if (rt5645->i2c->irq) {
                ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
@@ -3621,6 +3673,7 @@ static int rt5645_i2c_remove(struct i2c_client *i2c)
                free_irq(i2c->irq, rt5645);
 
        cancel_delayed_work_sync(&rt5645->jack_detect_work);
+       cancel_delayed_work_sync(&rt5645->rcclock_work);
 
        snd_soc_unregister_codec(&i2c->dev);
        regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
index dc2b46236c5cb5cd7ba12ec5fd9f311868978983..3f1b0f1df8097ba9d47644b3dd74548642d3cf82 100644 (file)
 #define RT5670_SCLK_SRC_MCLK                   (0x0 << 14)
 #define RT5670_SCLK_SRC_PLL1                   (0x1 << 14)
 #define RT5670_SCLK_SRC_RCCLK                  (0x2 << 14) /* 15MHz */
-#define RT5670_PLL1_SRC_MASK                   (0x3 << 12)
-#define RT5670_PLL1_SRC_SFT                    12
-#define RT5670_PLL1_SRC_MCLK                   (0x0 << 12)
-#define RT5670_PLL1_SRC_BCLK1                  (0x1 << 12)
-#define RT5670_PLL1_SRC_BCLK2                  (0x2 << 12)
-#define RT5670_PLL1_SRC_BCLK3                  (0x3 << 12)
+#define RT5670_PLL1_SRC_MASK                   (0x7 << 11)
+#define RT5670_PLL1_SRC_SFT                    11
+#define RT5670_PLL1_SRC_MCLK                   (0x0 << 11)
+#define RT5670_PLL1_SRC_BCLK1                  (0x1 << 11)
+#define RT5670_PLL1_SRC_BCLK2                  (0x2 << 11)
+#define RT5670_PLL1_SRC_BCLK3                  (0x3 << 11)
 #define RT5670_PLL1_PD_MASK                    (0x1 << 3)
 #define RT5670_PLL1_PD_SFT                     3
 #define RT5670_PLL1_PD_1                       (0x0 << 3)
index b4cd7e3bf5f89f9ea940528f233f017151788a4b..69d987a9935c9f91311db09b1a2cfb7cd5a2c609 100644 (file)
@@ -1386,90 +1386,90 @@ static const struct snd_kcontrol_new rt5677_dac_r_mix[] = {
 };
 
 static const struct snd_kcontrol_new rt5677_sto1_dac_l_mix[] = {
-       SOC_DAPM_SINGLE("ST L Switch", RT5677_STO1_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_STO1_DAC_MIXER,
                        RT5677_M_ST_DAC1_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
                        RT5677_M_DAC1_L_STO_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER,
                        RT5677_M_DAC2_L_STO_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
                        RT5677_M_DAC1_R_STO_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_sto1_dac_r_mix[] = {
-       SOC_DAPM_SINGLE("ST R Switch", RT5677_STO1_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_STO1_DAC_MIXER,
                        RT5677_M_ST_DAC1_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
                        RT5677_M_DAC1_R_STO_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER,
                        RT5677_M_DAC2_R_STO_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
                        RT5677_M_DAC1_L_STO_R_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_mono_dac_l_mix[] = {
-       SOC_DAPM_SINGLE("ST L Switch", RT5677_MONO_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_MONO_DAC_MIXER,
                        RT5677_M_ST_DAC2_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER,
                        RT5677_M_DAC1_L_MONO_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
                        RT5677_M_DAC2_L_MONO_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
                        RT5677_M_DAC2_R_MONO_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_mono_dac_r_mix[] = {
-       SOC_DAPM_SINGLE("ST R Switch", RT5677_MONO_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_MONO_DAC_MIXER,
                        RT5677_M_ST_DAC2_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER,
                        RT5677_M_DAC1_R_MONO_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
                        RT5677_M_DAC2_R_MONO_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
                        RT5677_M_DAC2_L_MONO_R_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_dd1_l_mix[] = {
-       SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER,
                        RT5677_M_STO_L_DD1_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER,
                        RT5677_M_MONO_L_DD1_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER,
                        RT5677_M_DAC3_L_DD1_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER,
                        RT5677_M_DAC3_R_DD1_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_dd1_r_mix[] = {
-       SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER,
                        RT5677_M_STO_R_DD1_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER,
                        RT5677_M_MONO_R_DD1_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER,
                        RT5677_M_DAC3_R_DD1_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER,
                        RT5677_M_DAC3_L_DD1_R_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_dd2_l_mix[] = {
-       SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER,
                        RT5677_M_STO_L_DD2_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER,
                        RT5677_M_MONO_L_DD2_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER,
                        RT5677_M_DAC4_L_DD2_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER,
                        RT5677_M_DAC4_R_DD2_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_dd2_r_mix[] = {
-       SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER,
                        RT5677_M_STO_R_DD2_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER,
                        RT5677_M_MONO_R_DD2_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER,
                        RT5677_M_DAC4_R_DD2_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER,
+       SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER,
                        RT5677_M_DAC4_L_DD2_R_SFT, 1, 1),
 };
 
@@ -2596,6 +2596,21 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
+static int rt5677_filter_power_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       switch (event) {
+       case SND_SOC_DAPM_POST_PMU:
+               msleep(50);
+               break;
+
+       default:
+               return 0;
+       }
+
+       return 0;
+}
+
 static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
                0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
@@ -3072,19 +3087,26 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
 
        /* DAC Mixer */
        SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5677_PWR_DIG2,
-               RT5677_PWR_DAC_S1F_BIT, 0, NULL, 0),
+               RT5677_PWR_DAC_S1F_BIT, 0, rt5677_filter_power_event,
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("dac mono2 left filter", RT5677_PWR_DIG2,
-               RT5677_PWR_DAC_M2F_L_BIT, 0, NULL, 0),
+               RT5677_PWR_DAC_M2F_L_BIT, 0, rt5677_filter_power_event,
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("dac mono2 right filter", RT5677_PWR_DIG2,
-               RT5677_PWR_DAC_M2F_R_BIT, 0, NULL, 0),
+               RT5677_PWR_DAC_M2F_R_BIT, 0, rt5677_filter_power_event,
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("dac mono3 left filter", RT5677_PWR_DIG2,
-               RT5677_PWR_DAC_M3F_L_BIT, 0, NULL, 0),
+               RT5677_PWR_DAC_M3F_L_BIT, 0, rt5677_filter_power_event,
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("dac mono3 right filter", RT5677_PWR_DIG2,
-               RT5677_PWR_DAC_M3F_R_BIT, 0, NULL, 0),
+               RT5677_PWR_DAC_M3F_R_BIT, 0, rt5677_filter_power_event,
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("dac mono4 left filter", RT5677_PWR_DIG2,
-               RT5677_PWR_DAC_M4F_L_BIT, 0, NULL, 0),
+               RT5677_PWR_DAC_M4F_L_BIT, 0, rt5677_filter_power_event,
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("dac mono4 right filter", RT5677_PWR_DIG2,
-               RT5677_PWR_DAC_M4F_R_BIT, 0, NULL, 0),
+               RT5677_PWR_DAC_M4F_R_BIT, 0, rt5677_filter_power_event,
+               SND_SOC_DAPM_POST_PMU),
 
        SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0,
                rt5677_sto1_dac_l_mix, ARRAY_SIZE(rt5677_sto1_dac_l_mix)),
index 056375339ea32948207723b1affd84ebdd0b1730..5380798883b5d342df4dee0ffdd54e3b474ceac6 100644 (file)
@@ -229,7 +229,7 @@ SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL,
 SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
        6, 1, 0),
 SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
-       7, 1, 0),
+       7, 1, 1),
 
 SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
               WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
index 39ebd7bf4f5306382c86fb49f3417dc3c908dcad..a7e79784fc16ca184b9b56dc73e2b38e2da93b1c 100644 (file)
@@ -365,8 +365,8 @@ static const struct reg_default wm8962_reg[] = {
        { 16924, 0x0059 },   /* R16924 - HDBASS_PG_1 */
        { 16925, 0x999A },   /* R16925 - HDBASS_PG_0 */
 
-       { 17048, 0x0083 },   /* R17408 - HPF_C_1 */
-       { 17049, 0x98AD },   /* R17409 - HPF_C_0 */
+       { 17408, 0x0083 },   /* R17408 - HPF_C_1 */
+       { 17409, 0x98AD },   /* R17409 - HPF_C_0 */
 
        { 17920, 0x007F },   /* R17920 - ADCL_RETUNE_C1_1 */
        { 17921, 0xFFFF },   /* R17921 - ADCL_RETUNE_C1_0 */
index 4495a40a94680600ff324bd035098ff3d73f594b..c1c9c2e3525bf88f8cf6de409fcc9370320dd3b2 100644 (file)
@@ -681,8 +681,8 @@ static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai,
        }
 
        mcasp->tdm_slots = slots;
-       mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = rx_mask;
-       mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = tx_mask;
+       mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
+       mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
        mcasp->slot_width = slot_width;
 
        return davinci_mcasp_set_ch_constraints(mcasp);
@@ -908,6 +908,14 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
                mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
                mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
                               FSRMOD(total_slots), FSRMOD(0x1FF));
+               /*
+                * If McASP is set to be TX/RX synchronous and playback is not
+                * already running, we need to configure the TX slots in order
+                * to get correct FSX on the bus
+                */
+               if (mcasp_is_synchronous(mcasp) && !mcasp->channels)
+                       mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
+                                      FSXMOD(total_slots), FSXMOD(0x1FF));
        }
 
        return 0;
index 19c302b0d763976fba1bc79b927f8b39e03dfc83..14dfdee05fd5ae842bebdd0092e47bd4bc88f605 100644 (file)
@@ -283,6 +283,8 @@ config SND_SOC_IMX_MC13783
 config SND_SOC_FSL_ASOC_CARD
        tristate "Generic ASoC Sound Card with ASRC support"
        depends on OF && I2C
+       # enforce SND_SOC_FSL_ASOC_CARD=m if SND_AC97_CODEC=m:
+       depends on SND_AC97_CODEC || SND_AC97_CODEC=n
        select SND_SOC_IMX_AUDMUX
        select SND_SOC_IMX_PCM_DMA
        select SND_SOC_FSL_ESAI
index a4435f5e3be910447f9168b4708d19140f3c1f4f..ffd5f9acc849ffb3d8152bdfa1e5c13a26342668 100644 (file)
@@ -454,7 +454,8 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
         * Rx sync with Tx clocks: Clear SYNC for Tx, set it for Rx.
         * Tx sync with Rx clocks: Clear SYNC for Rx, set it for Tx.
         */
-       regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC, 0);
+       regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC,
+                          sai->synchronous[TX] ? FSL_SAI_CR2_SYNC : 0);
        regmap_update_bits(sai->regmap, FSL_SAI_RCR2, FSL_SAI_CR2_SYNC,
                           sai->synchronous[RX] ? FSL_SAI_CR2_SYNC : 0);
 
index 7b778ab85f8b41234487d0810f4c350f6dc09775..d430ef5a4f388985f1ca0674f4315d3b38e8e6cf 100644 (file)
@@ -144,7 +144,7 @@ config SND_SOC_INTEL_SKYLAKE
 
 config SND_SOC_INTEL_SKL_RT286_MACH
        tristate "ASoC Audio driver for SKL with RT286 I2S mode"
-       depends on X86 && ACPI
+       depends on X86 && ACPI && I2C
        select SND_SOC_INTEL_SST
        select SND_SOC_INTEL_SKYLAKE
        select SND_SOC_RT286
index a7854c8fc523e832f6137437c6e7737cc5eee46c..ffea427aeca8eab29809f049c53ba27f71b54393 100644 (file)
@@ -1240,6 +1240,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
         */
        ret = snd_soc_tplg_component_load(&platform->component,
                                        &skl_tplg_ops, fw, 0);
+       release_firmware(fw);
        if (ret < 0) {
                dev_err(bus->dev, "tplg component load failed%d\n", ret);
                return -EINVAL;
index a38a3029062c8296581af1209a176b53dde29f7c..ac72ff5055bbce626550c4fd277a96c351321b25 100644 (file)
@@ -280,7 +280,7 @@ static int rk_spdif_probe(struct platform_device *pdev)
        int ret;
 
        match = of_match_node(rk_spdif_match, np);
-       if ((int) match->data == RK_SPDIF_RK3288) {
+       if (match->data == (void *)RK_SPDIF_RK3288) {
                struct regmap *grf;
 
                grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
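The one-liner above fixes a 64-bit build warning: .data in struct of_device_id is a pointer, and casting it to int truncates on LP64 targets. The portable idiom is to keep the enum in pointer form on both sides of the comparison, as in this sketch (table and names hypothetical):

#include <linux/of.h>
#include <linux/of_device.h>

enum example_soc { EXAMPLE_SOC_A, EXAMPLE_SOC_B };

static const struct of_device_id example_match[] = {
        { .compatible = "vendor,soc-a", .data = (void *)EXAMPLE_SOC_A },
        { .compatible = "vendor,soc-b", .data = (void *)EXAMPLE_SOC_B },
        { /* sentinel */ }
};

static bool example_is_soc_b(struct device_node *np)
{
        const struct of_device_id *match = of_match_node(example_match, np);

        /* compare as pointers; (int)match->data would truncate on 64-bit */
        return match && match->data == (void *)EXAMPLE_SOC_B;
}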
index 07f86a21046a524b32687f7252b54affa5fce1f7..921b4095fb92c98d7b09fb8d7ec8bbea7f9139bb 100644 (file)
@@ -28,9 +28,9 @@
 #define SPDIF_CFGR_VDW(x)      (x << SPDIF_CFGR_VDW_SHIFT)
 #define SDPIF_CFGR_VDW_MASK    (0xf << SPDIF_CFGR_VDW_SHIFT)
 
-#define SPDIF_CFGR_VDW_16      SPDIF_CFGR_VDW(0x00)
-#define SPDIF_CFGR_VDW_20      SPDIF_CFGR_VDW(0x01)
-#define SPDIF_CFGR_VDW_24      SPDIF_CFGR_VDW(0x10)
+#define SPDIF_CFGR_VDW_16      SPDIF_CFGR_VDW(0x0)
+#define SPDIF_CFGR_VDW_20      SPDIF_CFGR_VDW(0x1)
+#define SPDIF_CFGR_VDW_24      SPDIF_CFGR_VDW(0x2)
 
 /*
  * DMACR
index 76da7620904c982ee1946f7db9369c7065028133..edcf4cc2e84fa51549ac4f2e16573260e2f48b82 100644 (file)
@@ -235,7 +235,7 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
                RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8),
                RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc),
                RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0),
-               RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1c4),
+               RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1d4),
                RSND_GEN_M_REG(SRC_SWRSR,       0x200,  0x40),
                RSND_GEN_M_REG(SRC_SRCIR,       0x204,  0x40),
                RSND_GEN_M_REG(SRC_ADINR,       0x214,  0x40),
index 261b50217c48d94f0a66f44c513f685755ddae07..68b439ed22d7f4bb065bae68db731c4a844d40ac 100644 (file)
@@ -923,6 +923,7 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod,
                            struct snd_soc_pcm_runtime *rtd)
 {
        struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+       struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
        struct rsnd_src *src = rsnd_mod_to_src(mod);
        int ret;
 
@@ -936,6 +937,12 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod,
        if (!rsnd_rdai_is_clk_master(rdai))
                return 0;
 
+       /*
+        * SRC In doesn't work if DVC is enabled
+        */
+       if (dvc && !rsnd_io_is_play(io))
+               return 0;
+
        /*
         * enable sync convert
         */
index 24b096066a07205c88e377e28f94eacff1261f3e..a1305f827a98f077ac3cdeffed96b2ce73d45c63 100644 (file)
@@ -795,12 +795,12 @@ static void soc_resume_deferred(struct work_struct *work)
 
        dev_dbg(card->dev, "ASoC: resume work completed\n");
 
-       /* userspace can access us now we are back as we were before */
-       snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
-
        /* Recheck all endpoints too, their state is affected by suspend */
        dapm_mark_endpoints_dirty(card);
        snd_soc_dapm_sync(&card->dapm);
+
+       /* userspace can access us now we are back as we were before */
+       snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
 }
 
 /* powers up audio subsystem after a suspend */
index 016eba10b1ec2744a8b9b1b3492d6c0fa1c7a561..7d009428934acab676ca34a1d17dfa4e1af83f25 100644 (file)
@@ -2293,6 +2293,12 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
        kfree(w);
 }
 
+void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm)
+{
+       dapm->path_sink_cache.widget = NULL;
+       dapm->path_source_cache.widget = NULL;
+}
+
 /* free all dapm widgets and resources */
 static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 {
@@ -2303,6 +2309,7 @@ static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
                        continue;
                snd_soc_dapm_free_widget(w);
        }
+       snd_soc_dapm_reset_cache(dapm);
 }
 
 static struct snd_soc_dapm_widget *dapm_find_widget(
index ecd38e52285a964d53fce4c5c0d4ae71fd4bc15d..2f67ba6d7a8fd5848ce2c8612bef62625903e9cd 100644 (file)
@@ -404,7 +404,7 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);
 /**
  * snd_soc_put_volsw_sx - double mixer set callback
  * @kcontrol: mixer control
- * @uinfo: control element information
+ * @ucontrol: control element information
  *
  * Callback to set the value of a double mixer control that spans 2 registers.
  *
index 8d7ec80af51b499738dc2032356896a08144078a..6963ba20991c10066fdad2ad65f3102fc5752424 100644 (file)
@@ -531,7 +531,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
                /* TLV bytes controls need standard kcontrol info handler,
                 * TLV callback and extended put/get handlers.
                 */
-               k->info = snd_soc_bytes_info;
+               k->info = snd_soc_bytes_info_ext;
                k->tlv.c = snd_soc_bytes_tlv_callback;
 
                ext_ops = tplg->bytes_ext_ops;
@@ -1805,6 +1805,7 @@ void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
                snd_soc_tplg_widget_remove(w);
                snd_soc_dapm_free_widget(w);
        }
+       snd_soc_dapm_reset_cache(dapm);
 }
 EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
 
index 843f037a317da31aecc0b1761ddde2dfa24a1334..5c2bc53f0a9b721fd6a2f0b285c4d782d76d5bd6 100644 (file)
@@ -669,6 +669,7 @@ static int uni_player_startup(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *player = priv->dai_data.uni;
+       player->substream = substream;
 
        player->clk_adj = 0;
 
@@ -950,6 +951,8 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream,
        if (player->state != UNIPERIF_STATE_STOPPED)
                /* Stop the player */
                uni_player_stop(player);
+
+       player->substream = NULL;
 }
 
 static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
@@ -989,7 +992,7 @@ static int uni_player_parse_dt(struct platform_device *pdev,
        if (!info)
                return -ENOMEM;
 
-       if (of_property_read_u32(pnode, "version", &player->ver) ||
+       if (of_property_read_u32(pnode, "st,version", &player->ver) ||
            player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
                dev_err(dev, "Unknown uniperipheral version ");
                return -EINVAL;
@@ -998,13 +1001,13 @@ static int uni_player_parse_dt(struct platform_device *pdev,
        if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
                info->underflow_enabled = 1;
 
-       if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) {
+       if (of_property_read_u32(pnode, "st,uniperiph-id", &info->id)) {
                dev_err(dev, "uniperipheral id not defined");
                return -EINVAL;
        }
 
        /* Read the device mode property */
-       if (of_property_read_string(pnode, "mode", &mode)) {
+       if (of_property_read_string(pnode, "st,mode", &mode)) {
                dev_err(dev, "uniperipheral mode not defined");
                return -EINVAL;
        }
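
Note: the property names gain the "st," vendor prefix that devicetree convention requires, so a DTB still using the bare names no longer matches (the patch adds no fallback). A standalone sketch of the lookup change; the toy table stands in for the OF property API:

#include <stdio.h>
#include <string.h>

struct prop { const char *name; unsigned int value; };

static int read_u32(const struct prop *props, int n, const char *name,
                    unsigned int *out)
{
        int i;

        for (i = 0; i < n; i++)
                if (!strcmp(props[i].name, name)) {
                        *out = props[i].value;
                        return 0;
                }
        return -1; /* mimics of_property_read_u32() failure */
}

int main(void)
{
        const struct prop node[] = { { "st,version", 5 }, { "st,uniperiph-id", 3 } };
        unsigned int ver;

        if (read_u32(node, 2, "version", &ver))      /* old, unprefixed name */
                puts("'version' not found: old DTBs must be updated");
        if (!read_u32(node, 2, "st,version", &ver))  /* new vendor-prefixed name */
                printf("st,version = %u\n", ver);
        return 0;
}
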
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index f791239a30872927b4c117f43ec457da6532b037..8a0eb20501694b16bf90991c3bbd71bce1461296 100644 (file)
@@ -316,7 +316,7 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
        if (!info)
                return -ENOMEM;
 
-       if (of_property_read_u32(node, "version", &reader->ver) ||
+       if (of_property_read_u32(node, "st,version", &reader->ver) ||
            reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
                dev_err(&pdev->dev, "Unknown uniperipheral version ");
                return -EINVAL;
@@ -346,7 +346,6 @@ int uni_reader_init(struct platform_device *pdev,
        reader->hw = &uni_reader_pcm_hw;
        reader->dai_ops = &uni_reader_dai_ops;
 
-       dev_err(reader->dev, "%s: enter\n", __func__);
        ret = uni_reader_parse_dt(pdev, reader);
        if (ret < 0) {
                dev_err(reader->dev, "Failed to parse DeviceTree");
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index bcbf4da168b637341cdd7f3a35b084ab576ede36..1bb896d78d09817eff6fc38af7e2d1d1b3622ef3 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright 2014 Emilio López <emilio@elopez.com.ar>
  * Copyright 2014 Jon Smirl <jonsmirl@gmail.com>
  * Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright 2015 Adam Sampson <ats@offog.org>
  *
  * Based on the Allwinner SDK driver, released under the GPL.
  *
@@ -404,7 +405,7 @@ static const struct snd_kcontrol_new sun4i_codec_pa_mute =
 static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1);
 
 static const struct snd_kcontrol_new sun4i_codec_widgets[] = {
-       SOC_SINGLE_TLV("PA Volume", SUN4I_CODEC_DAC_ACTL,
+       SOC_SINGLE_TLV("Power Amplifier Volume", SUN4I_CODEC_DAC_ACTL,
                       SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0,
                       sun4i_codec_pa_volume_scale),
 };
@@ -452,12 +453,12 @@ static const struct snd_soc_dapm_widget sun4i_codec_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL,
                            SUN4I_CODEC_DAC_ACTL_MIXEN, 0, NULL, 0),
 
-       /* Pre-Amplifier */
-       SND_SOC_DAPM_MIXER("Pre-Amplifier", SUN4I_CODEC_ADC_ACTL,
+       /* Power Amplifier */
+       SND_SOC_DAPM_MIXER("Power Amplifier", SUN4I_CODEC_ADC_ACTL,
                           SUN4I_CODEC_ADC_ACTL_PA_EN, 0,
                           sun4i_codec_pa_mixer_controls,
                           ARRAY_SIZE(sun4i_codec_pa_mixer_controls)),
-       SND_SOC_DAPM_SWITCH("Pre-Amplifier Mute", SND_SOC_NOPM, 0, 0,
+       SND_SOC_DAPM_SWITCH("Power Amplifier Mute", SND_SOC_NOPM, 0, 0,
                            &sun4i_codec_pa_mute),
 
        SND_SOC_DAPM_OUTPUT("HP Right"),
@@ -480,16 +481,16 @@ static const struct snd_soc_dapm_route sun4i_codec_dapm_routes[] = {
        { "Left Mixer", NULL, "Mixer Enable" },
        { "Left Mixer", "Left DAC Playback Switch", "Left DAC" },
 
-       /* Pre-Amplifier Mixer Routes */
-       { "Pre-Amplifier", "Mixer Playback Switch", "Left Mixer" },
-       { "Pre-Amplifier", "Mixer Playback Switch", "Right Mixer" },
-       { "Pre-Amplifier", "DAC Playback Switch", "Left DAC" },
-       { "Pre-Amplifier", "DAC Playback Switch", "Right DAC" },
+       /* Power Amplifier Routes */
+       { "Power Amplifier", "Mixer Playback Switch", "Left Mixer" },
+       { "Power Amplifier", "Mixer Playback Switch", "Right Mixer" },
+       { "Power Amplifier", "DAC Playback Switch", "Left DAC" },
+       { "Power Amplifier", "DAC Playback Switch", "Right DAC" },
 
-       /* PA -> HP path */
-       { "Pre-Amplifier Mute", "Switch", "Pre-Amplifier" },
-       { "HP Right", NULL, "Pre-Amplifier Mute" },
-       { "HP Left", NULL, "Pre-Amplifier Mute" },
+       /* Headphone Output Routes */
+       { "Power Amplifier Mute", "Switch", "Power Amplifier" },
+       { "HP Right", NULL, "Power Amplifier Mute" },
+       { "HP Left", NULL, "Power Amplifier Mute" },
 };
 
 static struct snd_soc_codec_driver sun4i_codec_codec = {
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 40ab4476c80a2039ab543cc94b09725789b2fe57..51cf8256c6cda1b8c28cf4393496a77b63e0c171 100644 (file)
@@ -420,8 +420,7 @@ static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
 
 static int nfit_test0_alloc(struct nfit_test *t)
 {
-       size_t nfit_size = sizeof(struct acpi_table_nfit)
-                       + sizeof(struct acpi_nfit_system_address) * NUM_SPA
+       size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
                        + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
                        + sizeof(struct acpi_nfit_control_region) * NUM_DCR
                        + sizeof(struct acpi_nfit_data_region) * NUM_BDW
@@ -471,8 +470,7 @@ static int nfit_test0_alloc(struct nfit_test *t)
 
 static int nfit_test1_alloc(struct nfit_test *t)
 {
-       size_t nfit_size = sizeof(struct acpi_table_nfit)
-               + sizeof(struct acpi_nfit_system_address)
+       size_t nfit_size = sizeof(struct acpi_nfit_system_address)
                + sizeof(struct acpi_nfit_memory_map)
                + sizeof(struct acpi_nfit_control_region);
 
@@ -488,39 +486,24 @@ static int nfit_test1_alloc(struct nfit_test *t)
        return 0;
 }
 
-static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size)
-{
-       memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4);
-       nfit->header.length = size;
-       nfit->header.revision = 1;
-       memcpy(nfit->header.oem_id, "LIBND", 6);
-       memcpy(nfit->header.oem_table_id, "TEST", 5);
-       nfit->header.oem_revision = 1;
-       memcpy(nfit->header.asl_compiler_id, "TST", 4);
-       nfit->header.asl_compiler_revision = 1;
-}
-
 static void nfit_test0_setup(struct nfit_test *t)
 {
        struct nvdimm_bus_descriptor *nd_desc;
        struct acpi_nfit_desc *acpi_desc;
        struct acpi_nfit_memory_map *memdev;
        void *nfit_buf = t->nfit_buf;
-       size_t size = t->nfit_size;
        struct acpi_nfit_system_address *spa;
        struct acpi_nfit_control_region *dcr;
        struct acpi_nfit_data_region *bdw;
        struct acpi_nfit_flush_address *flush;
        unsigned int offset;
 
-       nfit_test_init_header(nfit_buf, size);
-
        /*
         * spa0 (interleave first half of dimm0 and dimm1, note storage
         * does not actually alias the related block-data-window
         * regions)
         */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit);
+       spa = nfit_buf;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
@@ -533,7 +516,7 @@ static void nfit_test0_setup(struct nfit_test *t)
         * does not actually alias the related block-data-window
         * regions)
         */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa);
+       spa = nfit_buf + sizeof(*spa);
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
@@ -542,7 +525,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->length = SPA1_SIZE;
 
        /* spa2 (dcr0) dimm0 */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2;
+       spa = nfit_buf + sizeof(*spa) * 2;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -551,7 +534,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->length = DCR_SIZE;
 
        /* spa3 (dcr1) dimm1 */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3;
+       spa = nfit_buf + sizeof(*spa) * 3;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -560,7 +543,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->length = DCR_SIZE;
 
        /* spa4 (dcr2) dimm2 */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4;
+       spa = nfit_buf + sizeof(*spa) * 4;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -569,7 +552,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->length = DCR_SIZE;
 
        /* spa5 (dcr3) dimm3 */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5;
+       spa = nfit_buf + sizeof(*spa) * 5;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -578,7 +561,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->length = DCR_SIZE;
 
        /* spa6 (bdw for dcr0) dimm0 */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6;
+       spa = nfit_buf + sizeof(*spa) * 6;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -587,7 +570,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->length = DIMM_SIZE;
 
        /* spa7 (bdw for dcr1) dimm1 */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7;
+       spa = nfit_buf + sizeof(*spa) * 7;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -596,7 +579,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->length = DIMM_SIZE;
 
        /* spa8 (bdw for dcr2) dimm2 */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8;
+       spa = nfit_buf + sizeof(*spa) * 8;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -605,7 +588,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->length = DIMM_SIZE;
 
        /* spa9 (bdw for dcr3) dimm3 */
-       spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9;
+       spa = nfit_buf + sizeof(*spa) * 9;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
        spa->header.length = sizeof(*spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -613,7 +596,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        spa->address = t->dimm_dma[3];
        spa->length = DIMM_SIZE;
 
-       offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10;
+       offset = sizeof(*spa) * 10;
        /* mem-region0 (spa0, dimm0) */
        memdev = nfit_buf + offset;
        memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -1100,15 +1083,13 @@ static void nfit_test0_setup(struct nfit_test *t)
 
 static void nfit_test1_setup(struct nfit_test *t)
 {
-       size_t size = t->nfit_size, offset;
+       size_t offset;
        void *nfit_buf = t->nfit_buf;
        struct acpi_nfit_memory_map *memdev;
        struct acpi_nfit_control_region *dcr;
        struct acpi_nfit_system_address *spa;
 
-       nfit_test_init_header(nfit_buf, size);
-
-       offset = sizeof(struct acpi_table_nfit);
+       offset = 0;
        /* spa0 (flat range with no bdw aliasing) */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
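
Note: with nfit_test_init_header() removed, the test buffer holds only NFIT sub-tables and no ACPI table header, so every offset in both setups drops the sizeof(struct acpi_table_nfit) term and packing starts at 0. A standalone sketch of the new offset math, using a stand-in struct rather than the real ACPI definitions:

#include <stdio.h>
#include <stddef.h>

struct spa { char bytes[56]; }; /* stand-in for struct acpi_nfit_system_address */

int main(void)
{
        char nfit_buf[10 * sizeof(struct spa)];
        struct spa *spa;
        size_t offset;
        int i;

        for (i = 0; i < 10; i++) {
                spa = (struct spa *)(nfit_buf + sizeof(*spa) * i);
                printf("spa%d at offset %zu\n", i,
                       (size_t)((char *)spa - nfit_buf));
        }

        /* memdevs then start right after the ten SPA entries */
        offset = sizeof(*spa) * 10;
        printf("first memdev at offset %zu\n", offset);
        return 0;
}
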
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 0a3da64638ceda0e94df98f9651fc241ae1f34c1..4db7d5691ba71b33d2bda8c85d634fe3781ae6d6 100644 (file)
@@ -110,4 +110,10 @@ static inline void free_page(unsigned long addr)
        (void) (&_min1 == &_min2);              \
        _min1 < _min2 ? _min1 : _min2; })
 
+/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
+#define list_add_tail(a, b) do {} while (0)
+#define list_del(a) do {} while (0)
+#define list_for_each_entry(a, b, c) while (0)
+/* end of stubs */
+
 #endif /* KERNEL_H */
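
Note: the no-op list stubs move into kernel.h so every source in the userspace test harness sees them; as the comment says, they are just enough for virtio_ring.c, compiling away entirely. A standalone sketch of what they expand to:

#define list_add_tail(a, b) do {} while (0)
#define list_del(a) do {} while (0)
#define list_for_each_entry(a, b, c) while (0)

int main(void)
{
        int node = 0, head = 0;

        list_add_tail(&node, &head);   /* expands to a no-op */
        list_del(&node);               /* expands to a no-op */
        list_for_each_entry(node, &head, link)
                ;                      /* header is `while (0)`: body never runs */
        return (int)(node + head);     /* keep the variables "used" */
}
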
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index a3e07016a44017c8c9f25c5dd51e77a09152e127..ee125e714053a91a76658d417a46232349927450 100644 (file)
@@ -3,12 +3,6 @@
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
 
-/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
-#define list_add_tail(a, b) do {} while (0)
-#define list_del(a) do {} while (0)
-#define list_for_each_entry(a, b, c) while (0)
-/* end of stubs */
-
 struct virtio_device {
        void *dev;
        u64 features;
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 806d683ab10789a659edfe1c96d15bb08e107356..57a6964a1e355b8daa154adff9bd007c93f8374f 100644 (file)
@@ -40,33 +40,39 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev,
 #define virtio_has_feature(dev, feature) \
        (__virtio_test_bit((dev), feature))
 
+static inline bool virtio_is_little_endian(struct virtio_device *vdev)
+{
+       return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+               virtio_legacy_is_little_endian();
+}
+
+/* Memory accessors */
 static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
 {
-       return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
 {
-       return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
 }
 
 static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
 {
-       return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
 {
-       return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
 }
 
 static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
 {
-       return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
 {
-       return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
 }
-
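
Note: virtio_is_little_endian() centralizes the byte-order rule the accessors previously open-coded: a VIRTIO_F_VERSION_1 device is always little-endian, while a legacy device follows the platform's legacy interface. A standalone sketch of that rule and one accessor; the feature test and swap are toy stand-ins for the kernel macros, the host is assumed little-endian, and the legacy platform is modeled as big-endian so the two branches differ:

#include <stdio.h>
#include <stdint.h>

#define VIRTIO_F_VERSION_1 32

struct vdev { uint64_t features; };

static int has_feature(const struct vdev *d, unsigned int bit)
{
        return (int)((d->features >> bit) & 1);
}

static int legacy_is_little_endian(void)
{
        return 0; /* model a big-endian platform; x86 would return 1 */
}

static int is_little_endian(const struct vdev *d) /* mirrors the new helper */
{
        return has_feature(d, VIRTIO_F_VERSION_1) || legacy_is_little_endian();
}

static uint16_t virtio16_to_cpu_sketch(const struct vdev *d, uint16_t val)
{
        if (is_little_endian(d))
                return val;                         /* host assumed LE */
        return (uint16_t)((val >> 8) | (val << 8)); /* swap for BE devices */
}

int main(void)
{
        struct vdev modern = { 1ULL << VIRTIO_F_VERSION_1 };
        struct vdev legacy = { 0 };

        printf("modern: %#x\n", virtio16_to_cpu_sketch(&modern, 0x1234)); /* 0x1234 */
        printf("legacy: %#x\n", virtio16_to_cpu_sketch(&legacy, 0x1234)); /* 0x3412 */
        return 0;
}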